seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
928620310 | from logging import exception
import os
import platform
import random
from selenium import webdriver
import csv
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import subprocess
#
# get current directory per OS
#
# NOTE(review): os.getcwd() would be portable and avoid shelling out;
# also, pwd stays undefined on any other platform, which would raise
# NameError further down — confirm supported platforms.
OSname=platform.system()
if OSname=="Darwin" or OSname=="Linux": # MacOS/Unix
    pwd=subprocess.getoutput('pwd')
elif OSname=="Windows":
    pwd=subprocess.getoutput('cd')  # 'cd' with no argument prints the current directory on Windows
#
# validate existence of chromedriver, numbers and message files
#
def _prompt_existing_file(prompt, default_path):
    """Prompt the user for a file path (empty answer selects default_path).

    Exits the program with status 1 when the chosen file does not exist,
    mirroring the original fail-fast behaviour.
    """
    path = input(prompt)
    if path == "":
        path = default_path
    # os.path.exists returns False rather than raising, so a plain check
    # replaces the original try / 'raise exception' / bare-except pattern
    # (which only worked by accident: 'exception' is logging.exception, a
    # function, and raising it triggers a TypeError that the bare except
    # then swallowed).
    if not os.path.exists(path):
        print("File does not exist " + path)
        exit(1)
    return path

# Defaults kept as module-level names for backward compatibility.
default_file_chromedriver = pwd + '/chromedriver'
default_file_numbers = pwd + '/numbers.csv'
default_file_msg = pwd + '/message.txt'

# chromedriver binary used by selenium to control Chrome
file_chromedriver = _prompt_existing_file(
    "Enter the full path of the chromedriver file [./chromedriver]: ",
    default_file_chromedriver)
# CSV file: one row per recipient (number, var1, var2, var3)
file_numbers = _prompt_existing_file(
    "Enter the name of the files with numbers [./numbers.csv]: ",
    default_file_numbers)
# message template; tokens x1/x2/x3 get substituted per recipient
file_msg = _prompt_existing_file(
    "Enter the name of the message file [./message.txt]: ",
    default_file_msg)
#
# read numbers file into arrays
#
whatsappnumber_from_csv = []
var1_from_csv = []
var2_from_csv = []
var3_from_csv = []
try:
    # 'with' guarantees the handle is closed even on error; the original
    # leaked the handle because exit(1) inside 'except' skipped file.close().
    with open(file_numbers, 'r') as file:
        csv_reader = csv.reader(file)
        print('opened numbers file')
        for row in csv_reader:
            print(row)
            whatsappnumber_from_csv.append(row[0])  # recipient phone number
            var1_from_csv.append(row[1])            # substituted for token x1
            var2_from_csv.append(row[2])            # substituted for token x2
            var3_from_csv.append(row[3])            # substituted for token x3
except (OSError, csv.Error, IndexError):
    # Narrow except: a bare except would also hide unrelated bugs and
    # KeyboardInterrupt.
    print("The numbers csv file input is invalid")
    exit(1)
#
# prepare whatsapp window
#
# Use a persistent Chrome profile so the WhatsApp Web login (QR scan)
# survives between runs.
options = Options();
if OSname=="Darwin" or OSname=="Linux": # MacOS/Unix
    options.add_argument("user-data-dir=/tmp/whatsapp")
elif OSname=="Windows": #Windows
    options.add_argument("user-data-dir=" + os.environ['USERPROFILE'] + "\\AppData\\Local\\Google\\Chrome\\User Data")
# NOTE(review): executable_path / chrome_options are deprecated in
# Selenium 4 (replaced by Service(...) and the 'options' keyword) —
# confirm the pinned selenium version.
driver = webdriver.Chrome(executable_path= file_chromedriver, chrome_options=options)
driver.get('https://web.whatsapp.com/')
input('Press enter after scanning QR code')
#
# read the message into a variable
#
with open(file_msg, 'r') as file:
    message = file.read()
#
# Loop through each whatsapp number and substitute the variable values into the message tokens
#
for i in range(len(whatsappnumber_from_csv)):
    # substitute the per-recipient tokens x1/x2/x3 into the template
    x = message.replace("x1", var1_from_csv[i])
    y = x.replace("x2", var2_from_csv[i])
    z = y.replace("x3", var3_from_csv[i])
    try:
        print ('Sending message ' + z + ' to ' + whatsappnumber_from_csv[i])
        #
        # Enter the Whatsapp number into the chrome window
        #
        # NOTE(review): absolute XPaths are brittle — they break whenever
        # WhatsApp Web changes its DOM layout.
        user = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/div/div[3]/div/div[1]/div/div/div[2]/div/div[2]')
        #print ('after find element whatsapp #')
        user.send_keys(whatsappnumber_from_csv[i])
        #print ('after send_keys whatsapp #')
        user.send_keys(Keys.RETURN)
        #print ('after send_keys return')
        #
        # Enter the message with substituted tokens into the chrome window
        #
        msg_box = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/div/div[4]/div/footer/div[1]/div/span[2]/div/div[2]/div[1]/div/div[2]')
        #print ('after find element message')
        msg_box.send_keys(z)
        #print ('after send keys message')
        msg_box.send_keys(Keys.RETURN)
        #print ('after send keys return')
        #
        # wait for a bit before the next message
        #
        # random 1-4 s delay so the burst of messages looks less automated
        time.sleep(random.randint(1,4))
        print ('Message sent successfully for ' + whatsappnumber_from_csv[i])
    except:
        # NOTE(review): bare except hides the real failure reason; consider
        # catching selenium exceptions explicitly and logging them.
        print ('Error raised while sending message for ' + whatsappnumber_from_csv[i])
driver.quit() | pranv11/send-bulk-personalized-whatsapp | send.py | send.py | py | 4,452 | python | en | code | 1 | github-code | 13 |
5262923352 | # -*- coding: utf-8 -*-
# Problem Set 3: Simulating robots
# Name: Tuo Sun
# Collaborators (discussion): None
# Time: 4:30
import math
import random
import matplotlib
matplotlib.use("TkAgg")
import ps3_visualize
import pylab
from ps3_verify_movement3 import test_robot_movement
# === Provided class Position
class Position(object):
    """
    A Position represents a location in a two-dimensional room, where
    coordinates are given by floats (x, y).
    """

    def __init__(self, x, y):
        """Store the (x, y) coordinates of this position."""
        self.x = x
        self.y = y

    def get_x(self):
        """Return the x coordinate."""
        return self.x

    def get_y(self):
        """Return the y coordinate."""
        return self.y

    def get_new_position(self, angle, speed):
        """
        Compute the Position reached after a single clock-tick starting at
        this position and travelling at `speed` in direction `angle`.

        No check is made that the result lies inside the room.

        angle: float, angle in degrees, 0 <= angle < 360
        speed: positive float
        Returns: a new Position object.
        """
        rad = math.radians(angle)
        # Angles are measured from the positive y axis: cos -> y, sin -> x.
        return Position(self.get_x() + speed * math.sin(rad),
                        self.get_y() + speed * math.cos(rad))

    def __str__(self):
        return "Position: " + str(math.floor(self.x)) + ", " + str(math.floor(self.y))
# === Problem 1
class RectangularRoom(object):
    """
    A RectangularRoom represents a rectangular region containing clean or
    dirty tiles.

    A room has a width and a height and contains (width * height) tiles.
    Each tile carries some amount of dirt; a tile counts as clean only when
    its dirt amount is 0.
    """

    def __init__(self, width, height, dirt_amount):
        """
        Create a width x height room with dirt_amount dirt on every tile.

        width: an integer > 0
        height: an integer > 0
        dirt_amount: an integer >= 0
        """
        self.width = width
        self.height = height
        # every (x, y) tile coordinate in the room
        self.tiles = [(x, y) for x in range(width) for y in range(height)]
        # tile -> current dirt amount
        self.tiles_dirt = dict.fromkeys(self.tiles, dirt_amount)

    def clean_tile_at_position(self, pos, capacity):
        """
        Remove up to `capacity` dirt from the tile under `pos`, never going
        below 0.  A negative capacity adds dirt.  `pos` must be a valid
        position inside this room.
        """
        tile = (math.floor(pos.get_x()), math.floor(pos.get_y()))
        remaining = self.tiles_dirt[tile] - capacity
        self.tiles_dirt[tile] = remaining if remaining > 0 else 0

    def is_tile_cleaned(self, m, n):
        """Return True iff tile (m, n) carries no dirt."""
        return self.tiles_dirt[(m, n)] == 0

    def get_num_cleaned_tiles(self):
        """Return the total number of clean tiles in the room."""
        return sum(1 for m, n in self.tiles_dirt if self.is_tile_cleaned(m, n))

    def is_position_in_room(self, pos):
        """Return True iff `pos` (a Position) lies inside the room."""
        x, y = pos.get_x(), pos.get_y()
        return 0 <= x < self.width and 0 <= y < self.height

    def get_dirt_amount(self, m, n):
        """Return the integer amount of dirt on tile (m, n)."""
        return int(self.tiles_dirt[(m, n)])

    def get_num_tiles(self):
        """Return the total number of tiles in the room."""
        return len(self.tiles)

    def get_random_position(self):
        """Return a random Position inside the room."""
        # randrange yields integer coordinates; round(.., 1) is kept only
        # for parity with the original implementation.
        return Position(round(random.randrange(self.width), 1),
                        round(random.randrange(self.height), 1))
class Robot(object):
    """
    Represents a robot cleaning a particular room.

    A robot always has a position and a direction in the room, plus a fixed
    speed and a fixed cleaning capacity.  Subclasses supply the movement
    strategy by implementing update_position_and_clean (one time-step).
    """

    def __init__(self, room, speed, capacity):
        """
        Place a robot with the given speed and cleaning capacity at a random
        position and direction inside `room`.

        room: a RectangularRoom object.
        speed: a float (speed > 0)
        capacity: a positive integer; dirt cleaned in a single time-step
        """
        self.room = room
        self.speed = speed
        self.capacity = capacity
        # start somewhere random, facing a random direction d, 0 <= d < 360
        self.pos = room.get_random_position()
        self.direction = round(random.randrange(360), 1)

    def get_robot_position(self):
        """Return the robot's Position in the room."""
        return self.pos

    def get_robot_direction(self):
        """Return the robot's direction in degrees d, 0.0 <= d < 360.0."""
        return self.direction

    def set_robot_position(self, position):
        """Move the robot to `position` (a Position object)."""
        self.pos = position

    def set_robot_direction(self, direction):
        """Point the robot at `direction` (degrees)."""
        self.direction = direction

    def update_position_and_clean(self):
        """
        Simulate one time-step: move (or rotate and stay put when the move
        would leave the room) and clean the destination tile by `capacity`.
        Implemented by subclasses.
        """
        raise NotImplementedError
# === Problem 2
class StandardRobot(Robot):
    """
    A Robot with the standard movement strategy: each time-step it tries to
    advance in its current direction; when that would hit a wall it picks a
    new random direction instead.
    """

    def update_position_and_clean(self):
        """
        Simulate one time-step: advance one step and clean the destination
        tile by `capacity`, or — when the step would leave the room — rotate
        to a random direction and stay put (no cleaning).
        """
        candidate = self.get_robot_position().get_new_position(self.direction, self.speed)
        if not self.room.is_position_in_room(candidate):
            # blocked by a wall: just turn, do not move or clean
            self.set_robot_direction(round(random.randrange(360), 1))
            return
        self.room.clean_tile_at_position(candidate, self.capacity)
        self.set_robot_position(candidate)
# Uncomment this line to see your implementation of StandardRobot in action!
# test_robot_movement(StandardRobot, RectangularRoom)
# === Problem 3
class RobotWithACat(Robot):
    """
    A Robot with a cat mounted on it.  With probability p the cat messes
    with the controls: the robot then skips moving/cleaning and picks a new
    random direction instead.
    """

    p = 0.1337

    @staticmethod
    def set_cat_probability(prob):
        """
        Sets the probability of the cat messing with the controls equal to PROB.
        prob: a float (0 <= prob <= 1)
        """
        RobotWithACat.p = prob

    def gets_cat_interference(self):
        """Return True (with probability p) when the cat interferes this step."""
        return random.random() < RobotWithACat.p

    def update_position_and_clean(self):
        """
        Simulate one time-step.  On cat interference the robot only turns to
        a random direction; otherwise it behaves like StandardRobot (move
        and clean when possible, else turn and stay put).
        """
        candidate = self.get_robot_position().get_new_position(self.direction, self.speed)
        interfered = self.gets_cat_interference()
        if not interfered and self.room.is_position_in_room(candidate):
            self.room.clean_tile_at_position(candidate, self.capacity)
            self.set_robot_position(candidate)
        else:
            self.set_robot_direction(round(random.randrange(360), 1))
# test_robot_movement(RobotWithACat, RectangularRoom)
# === Problem 4
class SuperRobot(Robot):
    """
    A SuperRobot moves extra fast: in one time-step it moves in its current
    direction and cleans the tile it lands on, then moves and cleans a
    second tile the same way.  If it would hit a wall it may knock dust off
    of it, dirtying the current tile by one unit with probability p.

    Cases for a single time-step:
    1. The first move would hit the wall: the robot stays put and turns to a
       random direction (no cleaning, no dirtying).
    2. The first move succeeds: the robot moves and cleans that tile, then
       attempts a second move.
       a. The second move would hit the wall: the tile it is on may get
          dirtied by one unit (probability p) and the robot turns to a
          random direction.
       b. Otherwise it moves and cleans the second tile as well.
    """
    p = 0.15
    @staticmethod
    def set_dirty_probability(prob):
        """
        Sets the probability of getting the tile dirty equal to PROB.
        prob: a float (0 <= prob <= 1)
        """
        SuperRobot.p = prob
    def dirties_tile(self):
        """
        Answers the question: Does this SuperRobot dirty the tile if it hits the wall at full speed?
        A SuperRobot dirties a tile with probability p.
        returns: True if the SuperRobot dirties the tile, False otherwise.
        """
        return random.random() < SuperRobot.p
    def update_position_and_clean(self):
        """
        Simulate the passage of a single time-step: perform up to two
        move-and-clean steps as described in the class docstring.
        """
        # pre-caculate the next 1st position
        new_pos = self.get_robot_position() # get a original position
        i = 0
        while i < 2: # create i represent (i+1)th move
            ori_pos = new_pos # save the original position
            new_pos = new_pos.get_new_position(self.direction, self.speed)
            # only execute cleaning without out of boundary violation
            if self.room.is_position_in_room(new_pos):
                self.room.clean_tile_at_position(new_pos, self.capacity)
                self.set_robot_position(new_pos) # update the position for the next/second half of time-step
            else: # if it hits the wall
                # NOTE(review): when the wall is hit on the second move but
                # dirties_tile() is False, the current tile is neither cleaned
                # nor dirtied — confirm that matches the intended spec.
                if i == 1 and self.dirties_tile(): # dirty the tile when it is the second time and with probability p
                    self.room.clean_tile_at_position(ori_pos, self.capacity-1) # clean but dirty the tile by unit 1
                self.set_robot_direction(round(random.randrange(360),1)) # set a random direction and stop
                i = 2
                # NOTE(review): 'pass' is dead code; i = 2 followed by i += 1
                # below already terminates the while loop.
                pass
            i += 1
# test_robot_movement(SuperRobot, RectangularRoom)
# === Problem 5
def run_simulation(num_robots, speed, capacity, width, height, dirt_amount, min_coverage, num_trials,
                   robot_type):
    """
    Runs num_trials trials of the simulation and returns the mean number of
    time-steps needed to clean the fraction min_coverage of the room. For example,
    if we want to test the amount of time it takes to clean 75% of the room, min_coverage
    would be 0.75.

    The simulation is run with num_robots robots of type robot_type, each
    with the input speed and capacity in a room of dimensions width x height
    with the dirt dirt_amount on each tile.

    num_robots: an int (num_robots > 0)
    speed: a float (speed > 0)
    capacity: an int (capacity > 0)
    width: an int (width > 0)
    height: an int (height > 0)
    dirt_amount: an int
    min_coverage: a float (0 <= min_coverage <= 1.0)
    num_trials: an int (num_trials > 0)
    robot_type: class of robot to be instantiated (e.g. StandardRobot or
                RobotWithACat)
    """
    trial_steps = []  # time-steps needed by each trial
    for _ in range(num_trials):
        room = RectangularRoom(width, height, dirt_amount)
        # BUGFIX: the original used [robot_type(...)] * num_robots, which
        # replicates ONE robot object num_robots times (every list entry is
        # the same robot), so additional robots added no cleaning power.
        robots = [robot_type(room, speed, capacity) for _ in range(num_robots)]
        step = 0
        # keep stepping until the required fraction of tiles is clean
        while room.get_num_cleaned_tiles() / room.get_num_tiles() < min_coverage:
            step += 1
            for robot in robots:
                robot.update_position_and_clean()
        trial_steps.append(step)
    return sum(trial_steps) / len(trial_steps)  # mean over all trials
#print ('avg time steps: ' + str(run_simulation(1, 1.0, 1, 5, 5, 3, 1.0, 50, StandardRobot)))
#print ('avg time steps: ' + str(run_simulation(1, 1.0, 1, 10, 10, 3, 0.8, 50, StandardRobot)))
#print ('avg time steps: ' + str(run_simulation(1, 1.0, 1, 10, 10, 3, 0.9, 50, StandardRobot)))
#print ('avg time steps: ' + str(run_simulation(1, 1.0, 1, 20, 20, 3, 0.5, 50, StandardRobot)))
#print ('avg time steps: ' + str(run_simulation(3, 1.0, 1, 20, 20, 3, 0.5, 50, StandardRobot)))
# === Problem 6
#
# ANSWER THE FOLLOWING QUESTIONS:
#
# 1)How does the performance of the three robot types compare when cleaning 80%
# of a 20x20 room?
#   RobotWithACat always takes longer than StandardRobot,
#   but SuperRobot always performs much better
#
# 2) How does the performance of the three robot types compare when two of each
# robot cleans 80% of rooms with dimensions
# 10x30, 20x15, 25x12, and 50x6?
# time_steps: RobotWithACat > StandardRobot > SuperRobot
#   a room with a width-to-height ratio close to 1:1 takes a shorter time
def show_plot_compare_strategies(title, x_label, y_label):
    """
    Produces a plot comparing the three robot strategies in a 20x20 room with 80%
    minimum coverage.
    """
    robot_counts = range(1, 11)
    standard, with_cat, super_ = [], [], []
    for n in robot_counts:
        print("Plotting", n, "robots...")
        # keep the simulation order Standard -> Cat -> Super for each count
        standard.append(run_simulation(n, 1.0, 1, 20, 20, 3, 0.8, 20, StandardRobot))
        with_cat.append(run_simulation(n, 1.0, 1, 20, 20, 3, 0.8, 20, RobotWithACat))
        super_.append(run_simulation(n, 1.0, 1, 20, 20, 3, 0.8, 20, SuperRobot))
    for series in (standard, with_cat, super_):
        pylab.plot(robot_counts, series)
    pylab.title(title)
    pylab.legend(('StandardRobot', 'RobotWithACat', 'SuperRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def show_plot_room_shape(title, x_label, y_label):
    """
    Produces a plot showing dependence of cleaning time on room shape.
    """
    ratios, standard, with_cat, super_ = [], [], [], []
    # all rooms contain 300 tiles; only the aspect ratio varies
    for width in [10, 20, 25, 50]:
        height = int(300 / width)
        print("Plotting cleaning time for a room of width:", width, "by height:", height)
        ratios.append(float(width) / height)
        standard.append(run_simulation(2, 1.0, 1, width, height, 3, 0.8, 200, StandardRobot))
        with_cat.append(run_simulation(2, 1.0, 1, width, height, 3, 0.8, 200, RobotWithACat))
        super_.append(run_simulation(2, 1.0, 1, width, height, 3, 0.8, 200, SuperRobot))
    for series in (standard, with_cat, super_):
        pylab.plot(ratios, series, 'o-')
    pylab.title(title)
    pylab.legend(('StandardRobot', 'RobotWithACat', 'SuperRobot'), fancybox=True, framealpha=0.5)
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
show_plot_compare_strategies('Time to clean 80% of a 20x20 room, for various numbers of robots','Number of robots','Time / steps')
show_plot_room_shape('Time to clean 80% of a 300-tile room for various room shapes','Aspect Ratio', 'Time / steps')
| sunt727/Data-Analysis-by-Python | ps3/ps3.py | ps3.py | py | 19,620 | python | en | code | 0 | github-code | 13 |
35997243786 | from azure.core.exceptions import ResourceNotFoundError
from azure.mgmt.network import NetworkManagementClient
import utils
@utils.Decorator()
def create(cli_args, nsg):
    """Create (or update) the virtual network and wait until it is ready.

    cli_args: parsed CLI arguments; must expose credential, subscription_id,
              resource_group, vnet_name and region attributes.
    nsg: an existing network security group; its id is attached to the
         'default' subnet.
    Returns: the resulting VirtualNetwork resource.
    """
    resource_client = NetworkManagementClient(cli_args.credential, cli_args.subscription_id)
    # begin_create_or_update starts an Azure long-running operation;
    # wait() blocks until provisioning completes.
    async_create = resource_client.virtual_networks.begin_create_or_update(
        cli_args.resource_group,
        cli_args.vnet_name,
        {
            "addressSpace": {
                "addressPrefixes": [
                    "10.0.0.0/16"
                ],
            },
            "location": cli_args.region,
            "subnets": [
                {
                    "name": "default",
                    "address_prefix": "10.0.0.0/24",
                    "network_security_group": {
                        "id": nsg.id,
                    }
                }
            ]
        },
    )
    async_create.wait()
    return async_create.result()
@utils.Decorator()
def get(cli_args):
    """Fetch the existing virtual network.

    Raises azure ResourceNotFoundError when the vnet does not exist.
    """
    resource_client = NetworkManagementClient(cli_args.credential, cli_args.subscription_id)
    return resource_client.virtual_networks.get(
        cli_args.resource_group,
        cli_args.vnet_name
    )
@utils.Decorator()
def subnets_list(cli_args):
    """Return an iterable of the subnets of the configured virtual network."""
    resource_client = NetworkManagementClient(cli_args.credential, cli_args.subscription_id)
    return resource_client.subnets.list(
        cli_args.resource_group,
        cli_args.vnet_name,
    )
def get_or_create(args, nsg):
    """Return the virtual network, creating it (with the given NSG attached
    to its default subnet) when it does not exist yet."""
    try:
        return get(args)
    except ResourceNotFoundError:
        return create(args, nsg)
| crodriguezde/reimage | vnet.py | vnet.py | py | 1,591 | python | en | code | 0 | github-code | 13 |
37985659498 | from Digitization.DigitizationFlags import jobproperties
from AthenaCommon.BeamFlags import jobproperties
from AthenaCommon import CfgMgr
from AthenaCommon.AppMgr import ToolSvc, ServiceMgr
# The earliest bunch crossing time for which interactions will be sent
# to the sTGCDigitizationTool.
def sTGC_FirstXing():
    """Earliest bunch-crossing time (ns) for which interactions are sent to
    the sTGCDigitizationTool."""
    return -375
# The latest bunch crossing time for which interactions will be sent
# to the sTGCDigitizationTool.
def sTGC_LastXing():
    """Latest bunch-crossing time (ns) for which interactions are sent to
    the sTGCDigitizationTool."""
    return 175
def sTgcDigitizationTool(name="sTgcDigitizationTool",**kwargs):
    """Configure and return the sTGC digitization tool.

    Sets up the random-number service/stream, the pile-up bunch-crossing
    window (only in xing-by-xing pile-up mode) and the input/output
    collection names.  Caller-supplied kwargs take precedence over the
    defaults applied here.
    """
    kwargs.setdefault("RndmSvc", jobproperties.Digitization.rndmSvc() )
    # set rndm seeds
    sTgcRndm = kwargs.setdefault("RndmEngine","sTGC_Digitization")
    jobproperties.Digitization.rndmSeedList.addSeed(sTgcRndm, 49261510, 105132394 )
    if jobproperties.Digitization.doXingByXingPileUp():
        kwargs.setdefault("FirstXing", sTGC_FirstXing() ) # this should match the range for the sTGC in Digitization/share/MuonDigitization.py
        kwargs.setdefault("LastXing", sTGC_LastXing() ) # this should match the range for the sTGC in Digitization/share/MuonDigitization.py
    kwargs.setdefault("InputObjectName", "sTGCSensitiveDetector")
    kwargs.setdefault("OutputObjectName", "sTGC_DIGITS")
    kwargs.setdefault("OutputSDOName", "sTGC_SDO")
    kwargs.setdefault("doToFCorrection", False)
    return CfgMgr.sTgcDigitizationTool(name,**kwargs)
def getSTGCRange(name="sTgcRange", **kwargs):
    """Return a PileUpXingFolder configured with the sTGC bunch-crossing
    window and the sim-hit collection it should cache."""
    # bunch crossing range in ns
    kwargs.setdefault('FirstXing', sTGC_FirstXing() )
    kwargs.setdefault('LastXing', sTGC_LastXing() )
    kwargs.setdefault('CacheRefreshFrequency', 1.0 ) #default 0 no dataproxy reset
    kwargs.setdefault('ItemList', ["GenericMuonSimHitCollection#sTGCSensitiveDetector"] )
    return CfgMgr.PileUpXingFolder(name, **kwargs)
| rushioda/PIXELVALID_athena | athena/MuonSpectrometer/MuonDigitization/sTGC_Digitization/python/sTGC_DigitizationConfig.py | sTGC_DigitizationConfig.py | py | 1,834 | python | en | code | 1 | github-code | 13 |
31412235728 | from fpdf import FPDF
import qrcode
from PIL import Image
import os
import subprocess
from src.PDF_creator.BaseTicket import BaseTicket
class NationalTicket(BaseTicket):
    """ Class for ticket from vote to National council """
    def __init__(self,data: dict) -> None:
        """
        Constructor for saving vote data

        Keyword arguments:
        data -- Dictionary of data, which contains whole vote
        """
        super().__init__(data)
    def create_pdf(self):
        """ Method for creating PDF file from vote """
        # NOTE(review): this method chdir()s into src/PDF_creator and back,
        # so it is not safe to call concurrently and depends on the process
        # starting from the project root.
        if 'src' in os.listdir():
            os.chdir("src/PDF_creator")
        # 80 x 100 mm portrait page (thermal-printer ticket size)
        pdf = FPDF('P', 'mm', (80, 100))
        pdf.add_page()
        # register unicode fonts so Slovak diacritics render correctly
        pdf.add_font('slovak', '', "Calibri Regular.ttf", uni=True)
        pdf.add_font('slovakBold', '', "Calibri Bold.TTF", uni=True)
        pdf.set_font('slovak', '', 9)
        # centered vote title at the top of the ticket
        pdf.multi_cell(0,0,self.voting_data['title'],align='C')
        pdf.set_xy(0,10)
        pdf.set_font('slovakBold','',9)
        pdf.write(5,'Strana:')
        pdf.set_xy(0,15)
        pdf.set_font('slovak',"", 9)
        # wrap the party name at 45 characters per line
        party_str = BaseTicket.preprocessText(
            self,
            self.voting_data['party'],
            45
        )
        pdf.multi_cell(0,3,party_str)
        pdf.set_xy(0,22)
        pdf.set_font('slovakBold','',9)
        pdf.write(5,'Kandidáti:')
        pdf.set_xy(0,27)
        pdf.set_font('slovak','', 9)
        # wrap the candidate list at 30 characters per line
        candidates = BaseTicket.preprocessText(
            self,
            self.voting_data['candidates'],
            30
        )
        pdf.multi_cell(40, 5,candidates)
        pdf.set_xy(43,22)
        # render the vote token as a QR code next to the candidate list
        data_str = str(self.voting_data_token)
        img = qrcode.make(data_str)
        img.save("Temp/sample.png")
        pdf.image("Temp/sample.png",w=30,h=30)
        pdf.output('NewTicket.pdf', 'F')
        os.chdir('../..')
| tp17-2021/vt | backend/src/PDF_creator/NationalTicket.py | NationalTicket.py | py | 1,865 | python | en | code | 0 | github-code | 13 |
20905421301 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: creates the MegabetMatchOdds table holding 1X2
    (home win / draw / away win) odds per fixture, unique on
    (home_team, away_team, match_date)."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MegabetMatchOdds',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('home_team', models.CharField(max_length=255)),
                ('away_team', models.CharField(max_length=255)),
                ('match_date', models.DateTimeField()),
                ('home_team_win_odd', models.FloatField()),
                ('draw_odd', models.FloatField()),
                ('away_team_win_odd', models.FloatField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # one odds row per unique fixture
        migrations.AlterUniqueTogether(
            name='megabetmatchodds',
            unique_together=set([('home_team', 'away_team', 'match_date')]),
        ),
    ]
| almeynman/arbitragedjangoscrapy | arbitrage_web/matches/migrations/0001_initial.py | 0001_initial.py | py | 1,026 | python | en | code | 0 | github-code | 13 |
23442093802 | import torch
from torch import nn
from torch_scatter import scatter_mean, scatter_max, scatter_add
from torch_scatter.composite import scatter_softmax
from mot_neural_solver.models.mlp import MLP
from mot_neural_solver.models.cnn import CNN, MaskRCNNPredictor
class MetaLayer(torch.nn.Module):
    """
    Core Message Passing Network Class. Extracted from torch_geometric, with minor modifications.
    (https://rusty1s.github.io/pytorch_geometric/build/html/modules/nn.html)
    """
    def __init__(self, edge_model=None, node_model=None):
        """
        Args:
            edge_model: Callable Edge Update Model (None skips edge updates)
            node_model: Callable Node Update Model (None skips node updates)
        """
        super(MetaLayer, self).__init__()
        self.edge_model = edge_model
        self.node_model = node_model
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize those submodels that expose reset_parameters()."""
        for submodel in (self.node_model, self.edge_model):
            if hasattr(submodel, 'reset_parameters'):
                submodel.reset_parameters()

    def forward(self, x, edge_index, edge_attr):
        """
        Perform a single update of node and edge feature matrices.

        Args:
            x: node features matrix
            edge_index: [2, M] tensor with M the number of edges, indicating
                nonzero entries of the graph adjacency (i.e. edges)
            edge_attr: edge features matrix (ordered by edge_index)
        Returns:
            The updated (node features, edge features) pair.
        """
        # edges are updated first, so the node update sees fresh edge features
        if self.edge_model is not None:
            edge_attr = self.edge_model(x, edge_index, edge_attr)
        if self.node_model is not None:
            x = self.node_model(x, edge_index, edge_attr)
        return x, edge_attr

    def __repr__(self):
        return '{}(edge_model={}, node_model={})'.format(self.__class__.__name__, self.edge_model, self.node_model)
class EdgeModel(nn.Module):
    """
    Performs the edge-update step of neural message passing: every edge's
    new features are computed by the wrapped model from the concatenation
    of its two endpoint node features and its current edge features.
    """
    def __init__(self, edge_model):
        super(EdgeModel, self).__init__()
        self.edge_model = edge_model

    def forward(self, node_feats, edge_index, edge_attr):
        src, dst = edge_index
        # per-edge input: [h_src || h_dst || e]
        model_input = torch.cat([node_feats[src], node_feats[dst], edge_attr], dim=1)
        return self.edge_model(model_input)
class TimeAwareNodeModel(nn.Module):
    """
    Performs the node-update step of neural message passing, aggregating
    messages from the two temporal directions separately.

    Edges with row < col are treated as 'flow out', edges with row > col as
    'flow in'; each direction has its own message model, the per-node
    aggregates are concatenated and fed through node_model.
    """
    def __init__(self, flow_in_model, flow_out_model, node_model, node_agg_fn):
        super(TimeAwareNodeModel, self).__init__()
        self.flow_in_model = flow_in_model
        self.flow_out_model = flow_out_model
        self.node_model = node_model
        self.node_agg_fn = node_agg_fn

    def _directional_flow(self, x, row, col, edge_attr, mask, model):
        """Compute and aggregate per-node messages over the edges in mask."""
        masked_row, masked_col = row[mask], col[mask]
        messages = model(torch.cat([x[masked_col], edge_attr[mask]], dim=1))
        return self.node_agg_fn(messages, masked_row, x.size(0))

    def forward(self, x, edge_index, edge_attr):
        row, col = edge_index
        # same evaluation order as before: future ('out') first, then past ('in')
        flow_out = self._directional_flow(x, row, col, edge_attr, row < col, self.flow_out_model)
        flow_in = self._directional_flow(x, row, col, edge_attr, row > col, self.flow_in_model)
        return self.node_model(torch.cat((flow_in, flow_out), dim=1))
class TimeAwareAttentionModel(nn.Module):
    """
    Performs the node update during neural message passing using attention
    weights derived from edge classification scores.

    Messages from future-frame (row < col) and past-frame (row > col)
    neighbors are aggregated separately with a per-node softmax over each
    node's incident edges, then concatenated with the node's own features
    and passed through node_model.
    """
    def __init__(self, node_model, flow_in_attention_model, flow_out_attention_model):
        # NOTE(review): flow_in_attention_model / flow_out_attention_model
        # are accepted but never stored or used — confirm whether they are
        # intentionally unused.
        super(TimeAwareAttentionModel, self).__init__()
        self.node_model = node_model
    def forward(self, x, edge_index, edge_attr, cls_net):
        row, col = edge_index
        # cls_net scores each edge; the scores double as attention logits
        dec_edge_feats, _ = cls_net(edge_attr)
        flow_out_mask = row < col
        flow_out_row, flow_out_col = row[flow_out_mask], col[flow_out_mask]
        flow_out_weights = dec_edge_feats[flow_out_mask]
        # softmax-normalize over each source node's outgoing edges
        flow_out_weights = scatter_softmax(flow_out_weights, flow_out_row, dim=0)
        # NOTE(review): the [:, :, None, None] broadcast implies x is 4-D
        # (nodes, channels, H, W) — confirm against the caller.
        flow_out = x[flow_out_col]*flow_out_weights[:, :, None, None] # Element-wise multiplication with neighbors
        flow_out = scatter_add(flow_out, flow_out_row, dim=0, dim_size=x.size(0))
        flow_in_mask = row > col
        flow_in_row, flow_in_col = row[flow_in_mask], col[flow_in_mask]
        flow_in_weights = dec_edge_feats[flow_in_mask]
        flow_in_weights = scatter_softmax(flow_in_weights, flow_in_row, dim=0)
        flow_in = x[flow_in_col]*flow_in_weights[:, :, None, None] # Element-wise multiplication with neighbors
        flow_in = scatter_add(flow_in, flow_in_row, dim=0, dim_size=x.size(0))
        # concatenate the node's own features with both directional aggregates
        flow = torch.cat((x, flow_in, flow_out), dim=1)
        # returns both the updated node features and the edge scores
        return self.node_model(flow), dec_edge_feats
class MLPGraphIndependent(nn.Module):
    """
    Encodes (resp. classifies) node and edge features before (resp. after)
    neural message passing.

    Wraps two independent MLPs — one for node features, one for edge
    features — applied separately.  Either may be omitted (input dim None),
    in which case the corresponding features pass through unchanged.

    Based on the tensorflow implementation at https://github.com/deepmind/graph_nets.
    """
    def __init__(self, edge_in_dim = None, node_in_dim = None, edge_out_dim = None, node_out_dim = None,
                 node_dims = None, edge_dims = None, dropout_p = None, use_batchnorm = None):
        super(MLPGraphIndependent, self).__init__()

        if node_in_dim is None:
            self.node_model = None
        else:
            self.node_model = MLP(input_dim=node_in_dim, fc_dims=list(node_dims) + [node_out_dim],
                                  dropout_p=dropout_p, use_batchnorm=use_batchnorm)

        if edge_in_dim is None:
            self.edge_model = None
        else:
            self.edge_model = MLP(input_dim=edge_in_dim, fc_dims=list(edge_dims) + [edge_out_dim],
                                  dropout_p=dropout_p, use_batchnorm=use_batchnorm)

    def forward(self, edge_feats = None, nodes_feats = None):
        out_node_feats = nodes_feats if self.node_model is None else self.node_model(nodes_feats)
        out_edge_feats = edge_feats if self.edge_model is None else self.edge_model(edge_feats)
        return out_edge_feats, out_node_feats
class MaskModel(nn.Module):
    """
    Class used to perform mask predictions.

    Encodes backbone feature maps, concatenates them with node embeddings,
    normalizes, and runs a MaskRCNN-style head/predictor pair.
    """
    def __init__(self, mask_model_params):
        super(MaskModel, self).__init__()
        feature_encoder_feats_dict = mask_model_params['feature_encoder_feats_dict']
        mask_head_feats_dict = mask_model_params['mask_head_feats_dict']
        mask_predictor_feats_dict = mask_model_params['mask_predictor_feats_dict']
        # Simple feature encoder network to reduce the number of channels obtained from the backbone
        self.feature_encoder = CNN(**feature_encoder_feats_dict)
        # NOTE(review): normalized shape is hard-coded to [64, 14, 14] -- the
        # concatenated (encoder + node-embedding) channels must total 64.
        self.layer_norm = nn.LayerNorm([64, 14, 14])
        # Mask head and mask predictor inspired from the MaskRCNN
        self.mask_head = CNN(**mask_head_feats_dict)
        self.mask_predictor = MaskRCNNPredictor(**mask_predictor_feats_dict)
    def forward(self, feature_embeds, node_embeds):
        # feature_embeds: backbone feature maps; node_embeds: per-node embeddings
        # with matching spatial dims (concatenated along the channel axis).
        feature_embeds = self.feature_encoder(feature_embeds)
        x = torch.cat((feature_embeds, node_embeds), dim=1)
        x = self.layer_norm(x)
        x = self.mask_head(x)
        x = self.mask_predictor(x)
        return x
class MOTMPNet(nn.Module):
    """
    Main Model Class. Contains all the components of the model. It consists of several networks:
    - 2 encoder networks (1 for nodes, 1 for edges) that provide the initial node and edge embeddings, respectively,
    - 4 update networks (3 for nodes, 1 for edges) used in the 'core' Message Passing Network
    - 1 edge classifier MLP that performs binary classification over the Message Passing Network's output.
    - 1 mask refinement network that performs mask prediction over the Message Passing Network's output.
    This class was initially based on: https://github.com/deepmind/graph_nets tensorflow implementation.
    """
    def __init__(self, model_params, bb_encoder = None):
        """
        Defines all components of the model
        Args:
            bb_encoder: (might be 'None') CNN used to encode bounding box appearance information.
            model_params: dictionary containing all model hyperparameters
        """
        super(MOTMPNet, self).__init__()
        self.node_cnn = bb_encoder
        self.model_params = model_params
        # Define Encoder and Classifier Networks
        encoder_feats_dict = model_params['encoder_feats_dict']
        classifier_feats_dict = model_params['classifier_feats_dict']
        node_ext_encoder_feats_dict = model_params['node_ext_encoder_feats_dict']
        self.encoder = MLPGraphIndependent(**encoder_feats_dict)
        self.classifier = MLPGraphIndependent(**classifier_feats_dict)
        self.node_ext_encoder = CNN(**node_ext_encoder_feats_dict)
        self.mask_predictor = MaskModel(model_params['mask_model_feats_dict'])
        # Define the 'Core' message passing network (i.e. node and edge update models)
        # (also sets self.edge_factor / self.node_factor, used by the attention MPN below)
        self.MPNet = self._build_core_MPNet(model_params=model_params, encoder_feats_dict=encoder_feats_dict)
        # Define 2nd MPN
        self.MPAttentionNet = self._build_attention_MPNet(model_params=model_params)
        self.num_enc_steps = model_params['num_enc_steps']
        self.num_class_steps = model_params['num_class_steps']
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
    def _build_core_MPNet(self, model_params, encoder_feats_dict):
        """
        Builds the core part of the Message Passing Network: Node Update and Edge Update models.
        Args:
            model_params: dictionary containing all model hyperparameters
            encoder_feats_dict: dictionary containing the hyperparameters for the initial node/edge encoder
        """
        # Define an aggregation operator for nodes to 'gather' messages from incident edges
        node_agg_fn = model_params['node_agg_fn']
        assert node_agg_fn.lower() in ('mean', 'max', 'sum'), "node_agg_fn can only be 'max', 'mean' or 'sum'."
        # NOTE(review): the branches below compare case-sensitively while the
        # assert is case-insensitive -- an upper-case value passes the assert
        # but matches no branch, leaving node_agg_fn as a plain string.
        if node_agg_fn == 'mean':
            node_agg_fn = lambda out, row, x_size: scatter_mean(out, row, dim=0, dim_size=x_size)
        elif node_agg_fn == 'max':
            node_agg_fn = lambda out, row, x_size: scatter_max(out, row, dim=0, dim_size=x_size)[0]
        elif node_agg_fn == 'sum':
            node_agg_fn = lambda out, row, x_size: scatter_add(out, row, dim=0, dim_size=x_size)
        # Define all MLPs involved in the graph network
        self.reattach_initial_nodes = model_params['reattach_initial_nodes']
        self.reattach_initial_edges = model_params['reattach_initial_edges']
        # Input dims double when the initial embeddings are re-concatenated each MP step.
        self.edge_factor = 2 if self.reattach_initial_edges else 1
        self.node_factor = 2 if self.reattach_initial_nodes else 1
        edge_model_in_dim = self.node_factor * 2 * encoder_feats_dict['node_out_dim'] + \
                            self.edge_factor * encoder_feats_dict['edge_out_dim']
        node_model_in_dim = self.node_factor * encoder_feats_dict['node_out_dim'] + encoder_feats_dict['edge_out_dim']
        # NOTE(review): attention_model_in_dim is unused in this method.
        attention_model_in_dim = encoder_feats_dict['edge_out_dim']
        # Define all MLPs used within the MPN
        edge_model_feats_dict = model_params['edge_model_feats_dict']
        node_model_feats_dict = model_params['node_model_feats_dict']
        # attention_model_feats_dict = model_params['attention_model_feats_dict']
        edge_model = MLP(input_dim=edge_model_in_dim,
                         fc_dims=edge_model_feats_dict['dims'],
                         dropout_p=edge_model_feats_dict['dropout_p'],
                         use_batchnorm=edge_model_feats_dict['use_batchnorm'])
        flow_in_model = MLP(input_dim=node_model_in_dim,
                            fc_dims=node_model_feats_dict['dims'],
                            dropout_p=node_model_feats_dict['dropout_p'],
                            use_batchnorm=node_model_feats_dict['use_batchnorm'])
        flow_out_model = MLP(input_dim=node_model_in_dim,
                             fc_dims=node_model_feats_dict['dims'],
                             dropout_p=node_model_feats_dict['dropout_p'],
                             use_batchnorm=node_model_feats_dict['use_batchnorm'])
        node_model = nn.Sequential(*[nn.Linear(2 * encoder_feats_dict['node_out_dim'], encoder_feats_dict['node_out_dim']),
                                     nn.ReLU(inplace=True)])
        # Define all MLPs used within the MPN
        return MetaLayer(edge_model=EdgeModel(edge_model = edge_model),
                         node_model=TimeAwareNodeModel(flow_in_model = flow_in_model,
                                                       flow_out_model = flow_out_model,
                                                       node_model = node_model,
                                                       node_agg_fn = node_agg_fn))
    def _build_attention_MPNet(self, model_params):
        """Builds the attention-based (2nd) MPN that updates extended node features
        and produces decoded edge scores via the shared classifier."""
        attention_model_feats_dict = model_params['attention_model_feats_dict']
        node_ext_model_feats_dict = model_params['node_ext_model_feats_dict']
        attention_model_in_dim = model_params['encoder_feats_dict']['edge_out_dim'] * self.edge_factor
        node_ext_model_in_dim = 3 * model_params['node_ext_encoder_feats_dict']['dims'][-1] * self.node_factor
        # NOTE(review): the two attention MLPs below are passed to
        # TimeAwareAttentionModel but are not used inside it (see its __init__).
        flow_in_attention_model = MLP(input_dim=attention_model_in_dim, **attention_model_feats_dict)
        flow_out_attention_model = MLP(input_dim=attention_model_in_dim, **attention_model_feats_dict)
        node_ext_model = CNN(input_dim=node_ext_model_in_dim, **node_ext_model_feats_dict)
        return TimeAwareAttentionModel(node_model=node_ext_model, flow_in_attention_model=flow_in_attention_model,
                                       flow_out_attention_model=flow_out_attention_model)
    def forward(self, data):
        """
        Provides a fractional solution to the data association problem.
        First, node and edge features are independently encoded by the encoder network. Then, they are iteratively
        'combined' for a fixed number of steps via the Message Passing Network (self.MPNet). Finally, they are
        classified independently by the classifier network.
        Args:
            data: object containing attributes
              - x: node features matrix
              - edge_index: tensor with shape [2, M], with M being the number of edges, indicating nonzero entries in the
                graph adjacency (i.e. edges) (i.e. sparse adjacency)
              - edge_attr: edge features matrix (sorted by edge appearance in edge_index)
        Returns:
            dict with 'classified_edges' (unnormalized edge scores) and
            'mask_predictions', one entry per classified MP step
        """
        x, x_ext, edge_index, edge_attr = data.x, data.x_ext, data.edge_index, data.edge_attr
        # Collapse backbone feature maps into one vector per node.
        x = self.global_avgpool(x)
        x = x.view(x.size(0), -1)
        # Encoding features step
        latent_edge_feats, latent_node_feats = self.encoder(edge_attr, x)
        latent_node_ext_feats = self.node_ext_encoder(x_ext)
        initial_edge_feats = latent_edge_feats
        initial_node_feats = latent_node_feats
        initial_node_ext_feats = latent_node_ext_feats
        # During training, the feature vectors that the MPNetwork outputs for the last self.num_class_steps message
        # passing steps are classified in order to compute the loss.
        first_class_step = self.num_enc_steps - self.num_class_steps + 1
        outputs_dict = {'classified_edges': [], 'mask_predictions': []}
        for step in range(1, self.num_enc_steps + 1):
            # Reattach the initially encoded embeddings before the update
            if self.reattach_initial_edges:
                latent_edge_feats = torch.cat((initial_edge_feats, latent_edge_feats), dim=1)
            if self.reattach_initial_nodes:
                latent_node_feats = torch.cat((initial_node_feats, latent_node_feats), dim=1)
                latent_node_ext_feats = torch.cat((initial_node_ext_feats, latent_node_ext_feats), dim=1)
            # Message Passing Step
            latent_node_feats, latent_edge_feats = self.MPNet(latent_node_feats, edge_index, latent_edge_feats)
            latent_node_ext_feats, dec_edge_feats = self.MPAttentionNet(latent_node_ext_feats, edge_index, latent_edge_feats, self.classifier)
            if step >= first_class_step:
                # Classification Step
                outputs_dict['classified_edges'].append(dec_edge_feats)
                # Mask Prediction Step
                mask_preds = self.mask_predictor(x_ext, latent_node_ext_feats)
                outputs_dict['mask_predictions'].append(mask_preds)
        # With zero message-passing steps, classify the encoder output directly.
        if self.num_enc_steps == 0:
            dec_edge_feats, _ = self.classifier(latent_edge_feats)
            outputs_dict['classified_edges'].append(dec_edge_feats)
            mask_preds = self.mask_predictor(x_ext, latent_node_ext_feats)
            outputs_dict['mask_predictions'].append(mask_preds)
        return outputs_dict
#!/usr/bin/env python3
import re
import requests
def google(keywords):
    """Scrape Google Books search results for each keyword.

    Args:
        keywords: iterable of search strings.
    Returns:
        List of (title, url) tuples; titles are title-cased with a trailing
        "..." ellipsis stripped, urls point to books.google.com.br editions.
    """
    url = "https://www.google.com/search"
    headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:106.0) Gecko/20100101 Firefox/106.0"}
    return_data = []
    for keyword in keywords:
        params = {"tbm": "bks", 'q': keyword}
        r = requests.get(url, headers=headers, params=params)
        response = r.text
        # Raw strings: the originals contained invalid escape sequences such
        # as "\<", which emit DeprecationWarning/SyntaxWarning on modern Python.
        response = re.findall(r"\<a href=\"https://books\.google\.com/books\?id=.*?\"\>\<br\>\<h3 .*?\<\/h3\>", response)
        for content in response:
            url_return = re.search(r"\<a href=\"https://books.google.com\/books\?id=(.*?)&", content)
            url_return = url_return.group(1)
            url_return = f"https://www.google.com.br/books/edition/_/{url_return}"
            title = re.search(r"\<h3 class=\".*?\"\>(.*?)\<\/h3\>", content)
            title = title.group(1)
            title = title.title()
            # Drop the "..." that Google appends to truncated titles.
            if title.endswith("..."):
                title = title[:-3]
            return_data.append((title, url_return))
    return return_data
def zlib(keywords):
    """Scrape the b-ok.lat (Z-Library mirror) search page for each keyword.

    Args:
        keywords: iterable of search strings.
    Returns:
        List of (title, url) tuples with title-cased titles and absolute urls.
    """
    url = "https://b-ok.lat/s/"
    return_data = []
    for keyword in keywords:
        r = requests.get(url + keyword)
        response = r.text
        # Raw string avoids the invalid "\<" escape sequences of the original
        # (DeprecationWarning/SyntaxWarning on modern Python).
        response = re.findall(r"\<a href=\"(\/book/.*?)\" style=.*?\"\>(.*?)\<\/a>", response)
        for content in response:
            url_return = "https://b-ok.lat" + content[0]
            title = content[1].title()
            return_data.append((title, url_return))
    return return_data
| bruno-1337/guiagen-spark | trusted_sources/books.py | books.py | py | 1,604 | python | en | code | 0 | github-code | 13 |
from flask_restful import Resource, Api, reqparse
import bcrypt
from flask import Flask, request, Response, jsonify, Blueprint
from bson import json_util
from bson.objectid import ObjectId
import pymongo
import jwt
import datetime
import json
from functools import wraps
from Database.Database import Database as mydb
# try:
# app = Flask(__name__)
# client = pymongo.MongoClient(
# "mongodb+srv://karimhafez:KojGCyxxTJXTYKYV@cluster0.buuqk.mongodb.net/admin")
# app.db = client.Twitter_new
# except:
# print("can't connect")
# api = Api(app)
update_user = Blueprint('update_user', __name__)
def token_required(f):
    """Decorator that authenticates a request via the 'x-access-token' header.

    Decodes the JWT, loads the matching user document from Mongo, and passes
    it to the wrapped view as its first argument. Responds 401 when the token
    is missing or cannot be validated.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            return jsonify({'message': 'Token is missing!'}), 401
        try:
            # NOTE(review): the signing key is hard-coded; consider moving it
            # to configuration/environment.
            data = jwt.decode(token, "SecretKey1911", "HS256")
            user_id = ObjectId(data['_id'])
            current_user = mydb.User.find_one({'_id': user_id})
        except:
            # Any decode/lookup failure is treated as an invalid token.
            return jsonify({'message': 'Token is invalid!'}), 401
        return f(current_user, *args, **kwargs)
    return decorated
############################################
@update_user.route("/update_profile", methods=["PUT"])
@token_required
def updateuser(current_user):
# print(request.form["_id"])
try:
data = request.get_json()
name = data["name"]
date_of_birth = data["date_of_birth"]
bio = data["bio"]
location = data["location"]
website = data["website"]
prof_pic_url = data["prof_pic_url"]
cover_pic_url = data["cover_pic_url"]
user_id = ObjectId(current_user["_id"])
myquery1 = {"_id": user_id}
mydb.User.update_one(
{"_id": myquery1},
{"$set": {
"name": name,
"date_of_birth": date_of_birth,
"bio": bio,
"location": location,
"website": website,
"prof_pic_url": prof_pic_url,
"cover_pic_url": cover_pic_url
}}
)
user = mydb.User.find_one(user_id)
del user['password']
user["creation_date"] = user["creation_date"].date()
user["creation_date"] = user["creation_date"].strftime("%Y-%m-%d")
user["_id"] = str(user["_id"])
return Response(
response=json.dumps(
{"message": "The request was succesful"
}),
status=200,
mimetype="application/json")
except Exception as ex:
print("**********")
print(ex)
print("**********")
#############################################
# if __name__ == "__main__":
# app.run(port=9090, debug=True)
| OmarNashat01/Back-End-Twitter-Clone | Routes/update_user/update_user.py | update_user.py | py | 2,865 | python | en | code | 2 | github-code | 13 |
#!/usr/bin/env python
# coding: utf-8
# ## Description
# This is an acronym generator that produces abbreviations using the first letter of each word in a phrase.
# The first line of the code uses the input function which allows the user to input whatever phrase they want.
# Then an empty string is initialized and stored in a varibale called output. Using the string method split to converts the string into a list then a 'for loop' is used to iterate over the first letter of each word in the list.
# In[3]:
# Read a phrase and print its acronym: the upper-cased first letter of each
# whitespace-separated word, joined in one pass (avoids the redundant str()
# call and quadratic string concatenation of the original loop).
text = input('Enter a phrase: ')
output = ''.join(word[0].upper() for word in text.split())
print(output)
# In[ ]:
# In[ ]:
| Storerun/acronym_generator | acronym_generator.py | acronym_generator.py | py | 669 | python | en | code | 0 | github-code | 13 |
import sys
from color import Color
#
#
# class Color:
# COLORS = {
# "BLACK": 0,
# "RED":1,
# "GREEN":2,
# "YELLOW":3,
# "BLUE":4
# }
#
# MODES = {
# "FOREGROUND": 3
# }
#
# def __init__(self):
# # "\u001b[31mRed Text\u001b[0m"
# self.BASIC = "\u001b[{:1d}{1d}m{text}"
# self.ESCAPE = self.BASIC + "0m"
# # self.color
# pass
#
# def color(self, string, mode, color):
# colored_string = "\u001b[{}{}m{}\u001b[0m".format(self.MODES[mode], self.COLORS[color], string)
# return colored_string
if __name__ == '__main__':
    # Demo: print "Test" with a magenta foreground using ANSI escape codes.
    c = Color()
    mode = "FOREGROUND"
    # NOTE(review): the commented-out Color sketch above only lists
    # BLACK/RED/GREEN/YELLOW/BLUE -- confirm the imported Color supports MAGENTA.
    color = "MAGENTA"
    string1 = "Test"
    print(c.color(string1, mode=mode,color=color))
import os
import numpy as np
def read_file_toList(file, startingLine = 0):
    """Parse a whitespace-separated numeric file into a list of float rows.

    Args:
        file: path of the file to read.
        startingLine: number of leading (non-blank) lines to skip, e.g. a header.
    Returns:
        One list of floats per non-blank line.
    """
    values = []
    with open(file, 'r') as of:
        lines = of.readlines()
    # BUG FIX: the old filter compared against "" but readlines() keeps the
    # trailing "\n", so blank lines slipped through and produced spurious
    # empty [] rows (which break callers indexing row columns).
    data = [line.lstrip() for line in lines if line.strip()]
    for line in data[startingLine:]:
        values.append([float(token) for token in line.split()])
    return values
def write_list_toFile(listValues, outputPath):
    """Write rows of values to *outputPath*, tab-separated, one row per line.

    Every value is followed by a tab (including the last one on a row),
    matching the format read back by read_file_toList.
    """
    with open(outputPath, 'w') as handle:
        handle.writelines(
            "".join(f"{value}\t" for value in row) + "\n"
            for row in listValues
        )
    return
def find_extrema(file):
    """Return (ideal, nadir): the per-objective minima and maxima of the
    objective vectors stored in *file* (one whitespace-separated row each)."""
    rows = read_file_toList(file)
    num_objectives = len(rows[0])
    ideal = [min(row[j] for row in rows) for j in range(num_objectives)]
    nadir = [max(row[j] for row in rows) for j in range(num_objectives)]
    return ideal, nadir
def obtain_ideal_nadir_size(benchmark, instances):
    """Collect the ideal point, nadir point and front size of every instance's
    reference front under resources/reference_front/VRPTW/<benchmark>/..."""
    ideal, nadir, size = [], [], []
    for instance in instances:
        path = os.path.join("resources", "reference_front", "VRPTW", benchmark, benchmark+"_"+instance["size"], instance["name"]+instance["id"], "FUN.tsv")
        ideal_Instance, nadir_Instance = find_extrema(path)
        # Front size = number of rows (solutions) in the reference front file.
        size_Instance = read_file_toList(path)
        ideal.append(ideal_Instance)
        nadir.append(nadir_Instance)
        size.append(len(size_Instance))
    return ideal, nadir, size
def obtain_correlations(benchmark, instances):
    """Read the scalar objective-correlation value stored for each instance."""
    correlations = []
    for instance in instances:
        path = os.path.join("resources", "reference_front","VRPTW", benchmark, benchmark+"_"+instance["size"], instance["name"]+instance["id"], "correlation.tsv")
        value = read_file_toList(path)
        # correlation.tsv holds a single value: first row, first column.
        correlations.append(value[0][0])
    return correlations
def obtain_instances(instances):
    """Return the display name (name + id concatenated) of every instance dict."""
    return [entry["name"] + entry["id"] for entry in instances]
def create_table_instances(benchmark, instances, outputPath, outputFile):
    """Write a LaTeX-style summary table (correlation, front size, ideal and
    nadir points) with one row per reference-front instance."""
    if not os.path.exists(outputPath):
        os.makedirs(outputPath)
    completePath = os.path.join(outputPath, outputFile)
    header = "Instance & Correlation & Front Size & Best Ideal & Best Nadir \\\\\n"
    nbLines = len(instances)
    nameInstances = obtain_instances(instances)
    correlations = obtain_correlations(benchmark, instances)
    ideal, nadir, size = obtain_ideal_nadir_size(benchmark, instances)
    # Column-major layout: row i of the table gathers element i of each list.
    results = [nameInstances, correlations, size, ideal, nadir]
    with open(completePath, 'w') as of:
        of.write(header)
        for i in range(nbLines):
            line = ""
            for j in range(len(results)):
                line += str(results[j][i]) + " & "
            line += "\\\\\n"
            of.write(line)
    return
def obtain_ttr(metrics, threshold):
    """Return the time (column 1) of the first metrics row whose quality
    indicator (column 4) exceeds *threshold*; fall back to the final row's
    time when the threshold is never reached."""
    return next(
        (row[1] for row in metrics if row[4] > threshold),
        metrics[-1][1],
    )
def create_line_algorithm(instance, runs, threshold, startingPath):
    """Build the LaTeX cells (avg front size, avg uHV, avg time-to-threshold)
    over runs[0]..runs[1] (inclusive) for one instance of one algorithm."""
    sizes = []
    uhv = []
    ttr = []
    for i in range(runs[0], runs[1]+1):
        inputPath = os.path.join(startingPath, instance["name"]+instance["id"], "Run"+str(i), "Final", "METRICS.tsv" )
        # Skip the METRICS.tsv header line.
        metrics = read_file_toList(inputPath, 1)
        # Final-row columns: [.., time, front size, .., uHV] -- TODO confirm layout.
        sizes.append(metrics[-1][2])
        uhv.append(metrics[-1][4])
        ttr.append(obtain_ttr(metrics, threshold))
    line = " & " + str(round(np.mean(sizes), 1)) + " & " + str(round(np.mean(uhv), 4)) + " & " + str(round(np.mean(ttr), 1))
    return line
def create_table_algorithm(benchmark, algorithm, instances, runs, threshold, outputPath):
    """Write a per-algorithm LaTeX table: average front size, average uHV and
    mean time to reach `threshold` uHV, one row per instance."""
    header = "Instance & Avg Front Size & Avg uHV & Time ("+str(threshold*100)+"\%) \\\\\n"
    if not os.path.exists(outputPath):
        os.makedirs(outputPath)
    outputFile = os.path.join(outputPath, "table_"+benchmark+'_'+algorithm+".tsv")
    with open(outputFile, 'w') as of:
        of.write(header)
    for instance in instances:
        line = instance["name"] + instance["id"]
        startingPath = os.path.join("performance_results", algorithm, benchmark, benchmark+"_"+instance["size"])
        line += create_line_algorithm(instance, runs, threshold, startingPath)
        # Re-open in append mode so each row is flushed as soon as computed.
        with open(outputFile, "a+") as of:
            of.write(line+"\\\\\n")
    return
def create_specific_table(benchmark, algorithms, instances, runs, threshold, outputPath):
    """Write a LaTeX comparison table with one (size, uHV, time) column group
    per algorithm and one row per instance.

    Fixes over the original: the header was built but never written; the
    output file was only ever appended to, so reruns accumulated duplicate
    rows; and the output directory was not created (sibling table writers do).
    """
    os.makedirs(outputPath, exist_ok=True)
    header = "Instance & " + " & ".join(
        "Size & uHV & Time (" + str(threshold * 100) + "\\%)" for _ in algorithms
    ) + " \\\\\n"
    outputFile = os.path.join(outputPath, "table_" + benchmark + '_' + "spec.tsv")
    # Truncate and write the header once, then append one row per instance.
    with open(outputFile, 'w') as of:
        of.write(header)
    for instance in instances:
        line = instance["name"] + instance["id"]
        for algorithm in algorithms:
            startingPath = os.path.join("performance_results", algorithm, benchmark, benchmark + "_" + instance["size"])
            line += create_line_algorithm(instance, runs, threshold, startingPath)
        with open(outputFile, "a+") as of:
            of.write(line + "\\\\\n")
    return
def create_line_dataFrame(instance, algorithm, run, threshold, inputPath):
    """Build one tab-separated row (category, id, algorithm, run, final uHV,
    time-to-threshold) for the R data frame."""
    # Skip the METRICS.tsv header line.
    metrics = read_file_toList(inputPath, 1)
    # Column 4 of the final row is the last recorded uHV -- TODO confirm layout.
    uhv = metrics[-1][4]
    ttr = obtain_ttr(metrics, threshold)
    line = instance["name"] + "\t" + instance["id"] + "\t" + algorithm + "\t" + str(run) + "\t" + str(uhv) + "\t" + str(ttr) + "\n"
    return line
def create_dataFrame_R(benchmark, instances, algorithms, nbRuns, threshold, outputPath):
    """Dump every (instance, algorithm, run) result as one row of a
    tab-separated data frame suitable for analysis in R."""
    header = "Category \t ID \t Algorithm \t idRun \t HV \t Time\n"
    if not os.path.exists(outputPath):
        os.makedirs(outputPath)
    outputFile = os.path.join(outputPath, "dataFrame_"+benchmark+".tsv")
    with open(outputFile, 'w') as of:
        of.write(header)
    for instance in instances:
        for algorithm in algorithms:
            for run in range(1,nbRuns+1):
                inputPath = os.path.join("performance_results", algorithm, benchmark, benchmark+"_"+instance["size"], instance["name"]+instance["id"], "Run"+str(run), "Final", "METRICS.tsv")
                line = create_line_dataFrame(instance, algorithm, run, threshold, inputPath)
                # Append each row as soon as it is computed.
                with open(outputFile, "a+") as of:
                    of.write(line)
    return
def decimals(precision, max):
    """Return the strings '1'..'max' zero-padded to *precision* digits,
    e.g. decimals(2, 3) -> ['01', '02', '03'].

    Uses str.zfill instead of the original hand-rolled padding; behavior is
    identical (numbers wider than *precision* are returned unpadded).
    """
    return [str(i).zfill(precision) for i in range(1, max + 1)]
DATAFOLDER = "performance_results"
ALGORITHMS = ["fbd2", "bd2", "moeadls"]
BENCHMARK = "Homberger"
NBRUNS = 30
generated_instances = []
if BENCHMARK == "Generated":
for l1 in range(1, 4):
for l2 in ['L', 'S']:
for l3 in ['T', 'W']:
for l4 in ['R']:
instance = {}
instance['size'] = '100'
instance['name'] = l4 + l2 + l3
instance['id'] = str(l1)
generated_instances.append(instance)
elif BENCHMARK == "Solomon":
number_instances = {}
number_instances['C1'] = decimals(2, 9)
number_instances['C2'] = decimals(2, 8)
number_instances['R1'] = decimals(2, 12)
number_instances['R2'] = decimals(2, 11)
number_instances['RC1'] = decimals(2, 8)
number_instances['RC2'] = decimals(2, 8)
for size in ['100']:
for type in ['C1', 'C2', 'R1', 'R2']:
for id in number_instances[type]:
instance = {}
instance['size'] = size
instance['name'] = type
instance['id'] = id
generated_instances.append(instance)
elif BENCHMARK == "Homberger":
idInstance = ["_2_"+str(i) for i in range(1,11)]
typeInstances_Hom = ["C1", "C2", "R1", "R2"]
for size in ['200']:
for type in typeInstances_Hom:
for id in idInstance:
instance = {}
instance['size'] = size
instance["name"] = type
instance["id"] = id
generated_instances.append(instance)
outputPath = os.path.join("Results", BENCHMARK, BENCHMARK+"_"+instance["size"])
"""
outputFile = "table.tsv"
create_table_instances(BENCHMARK, generated_instances, outputPath, outputFile)
for algorithm in ALGORITHMS:
create_table_algorithm(BENCHMARK, algorithm, generated_instances, [1,30], 0.80, outputPath)
create_dataFrame_R(BENCHMARK, generated_instances, ALGORITHMS, 30, 0.80, outputPath)
"""
create_specific_table(BENCHMARK, ALGORITHMS, generated_instances, [1, 30], 0.80, outputPath)
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from .basics import *
from .dqn import DQN
MonteCarloUpdateTuple = namedtuple('MonteCarloUpdateTuple', ('state', 'action', 'return_', 'weight'))
class MonteCarloContext(DiscreteRLContext):
    """
    Discrete RL context implementing Monte-Carlo control with (soft)
    importance sampling and hindsight experience replay (HER).

    Transitions are buffered per time step until `batch_size` of them have
    been collected, then returns are computed backwards over the buffers and
    the DQN is regressed onto them.
    """
    def __init__(self, layer_size, soft_is, soft_is_decay, **kwargs):
        # soft_is blends importance-sampling weights towards 1 (1 = ignore the
        # IS weights entirely); it is decayed multiplicatively after each update.
        super().__init__(**kwargs)
        self.dqn = DQN(self.state_dim, len(self.action_space), layer_size).to(DEVICE)
        self.optimizer = torch.optim.Adam(self.dqn.parameters())
        self.soft_is = soft_is
        self.soft_is_decay = soft_is_decay
        self.real_action_index = None
        self.buffer_size = 0
        self.ts_model = os.path.join(self.output_dir, "mc_ts.pt")
        self.loss_accumulator = RLContext.Accumulator()
        self.log_dict["loss"] = 0
        # Per-time-step episode buffers; each entry is a [robot_batch, ...] tensor.
        self.state_stack = []
        self.actions_stack = []
        self.ee_position_stack = []
        self.weights_stack = []
        self.rewards_stack = []
        self.is_terminal_stack = []
        self.her_terminal_stack = []
        self.ignore_stack = []
        self.abort_stack = []
        self.episode_lengths = torch.zeros([self.robot_batch, 1], device=DEVICE)
    def _get_state_dict_(self):
        """Return the checkpointable state (network, optimizer, IS blend)."""
        return {"model_state_dict": self.dqn.state_dict(),
                "optimizer_state_dict": self.optimizer.state_dict(),
                "soft_is": self.soft_is}
        # torch.jit.script(self.dqn).save(self.ts_model)
    def _load_impl_(self, state_dict):
        """Restore network weights, optimizer state and the soft-IS blend."""
        self.dqn.load_state_dict(state_dict["model_state_dict"])
        self.optimizer.load_state_dict(state_dict["optimizer_state_dict"])
        self.soft_is = state_dict["soft_is"]
    def _update_impl(self, state_dict, reward):
        """Buffer the transition, run a Monte-Carlo update when enough data is
        collected, then pick the next greedy action."""
        if self.train:
            if len(self.last_state_dict):
                # Record this transition for every robot in the batch.
                self.state_stack.append(state_dict["state"])
                self.actions_stack.append(self.action_index)
                was_exploring = self.real_action_index != self.action_index
                # Inverse-probability IS weight; exploring steps get weight 0.
                weights = torch.where(was_exploring, 0, 1 / self.exploration_probs.gather(-1, self.action_index))
                self.weights_stack.append(weights)
                self.rewards_stack.append(reward)
                self.episode_lengths += 1
                if self.soft_is < EPSILON:
                    # Pure IS: steps after exploration carry zero weight anyway.
                    self.episode_lengths = torch.where(was_exploring, 0, self.episode_lengths)
                # hindsight experience replay
                self.ee_position_stack.append(state_dict["robot_position"])
                self.her_terminal_stack.append(state_dict["is_timeout"])
                # NOTE(review): timeouts double the counted length -- presumably
                # because the episode is also replayed as a HER episode; confirm.
                self.episode_lengths *= torch.where(state_dict["is_timeout"], 2, 1)
                # update other buffers
                self.buffer_size += torch.masked_select(self.episode_lengths, state_dict["is_terminal"]).sum()
                self.episode_lengths = torch.where(state_dict["is_terminal"], 0, self.episode_lengths)
                self.is_terminal_stack.append(state_dict["is_terminal"])
                self.ignore_stack.append(torch.zeros_like(was_exploring))
                self.abort_stack.append(state_dict.get("abort", torch.zeros_like(was_exploring)))
                # we're ready for an update
                if self.buffer_size >= self.batch_size:
                    time_steps = len(self.actions_stack)
                    # Stack buffers into [time, robot_batch, ...] tensors.
                    states = torch.stack(self.state_stack)
                    actions = torch.stack(self.actions_stack)
                    her_goals = torch.stack(self.ee_position_stack)
                    weights = torch.stack(self.weights_stack)
                    rewards = torch.stack(self.rewards_stack)
                    is_terminal = torch.stack(self.is_terminal_stack)
                    her_terminal = torch.stack(self.her_terminal_stack)
                    no_ignore = torch.stack(self.ignore_stack).logical_not()
                    no_abort = torch.stack(self.abort_stack).logical_not()
                    accumulated_weights = torch.ones_like(weights)
                    returns = torch.zeros_like(rewards)
                    her_returns = torch.zeros_like(rewards)
                    is_valid = torch.zeros_like(is_terminal)
                    her_is_valid = torch.zeros_like(is_terminal)
                    # Backward pass: accumulate discounted returns, IS weights,
                    # validity masks and HER goals from episode ends.
                    for i in reversed(range(time_steps)):
                        i_plus_one = min(i + 1, time_steps - 1)
                        accumulated_weights[i, :, :] = torch.where(no_ignore[i, :, :],
                                                                   torch.where(is_terminal[i, :, :], weights[i, :, :], accumulated_weights[i_plus_one, :, :] * weights[i, :, :]),
                                                                   accumulated_weights[i, :, :])
                        returns[i, :, :] = torch.where(no_ignore[i, :, :],
                                                       torch.where(is_terminal[i, :, :], rewards[i, :, :], self.discount_factor * returns[i_plus_one, :, :] + rewards[i, :, :]),
                                                       returns[i, :, :])
                        her_returns[i, :, :] = torch.where(no_ignore[i, :, :],
                                                           torch.where(her_terminal[i, :, :], self.her_reward, self.discount_factor * her_returns[i_plus_one, :, :] + rewards[i, :, :]),
                                                           her_returns[i, :, :])
                        is_valid[i, :, :] = is_valid[i_plus_one, :, :].logical_or(is_terminal[i, :, :]).logical_and(no_abort[i, :, :])
                        her_is_valid[i, :, :] = (her_is_valid[i_plus_one, :, :].logical_and(is_terminal[i, :, :].logical_not())).logical_or(her_terminal[i, :, :])
                        her_goals[i, :, :] = torch.where(her_terminal[i, :, :], her_goals[i, :, :], her_goals[i_plus_one, :, :])
                    # Drop NaN rewards and ignored steps.
                    is_valid = is_valid.logical_and(rewards.isnan().logical_not()).logical_and(no_ignore)
                    if self.soft_is < EPSILON:
                        is_valid = is_valid.logical_and(weights > 0)
                    her_is_valid = her_is_valid.logical_and(is_valid)
                    # Build HER states: replace the goal slice with the (noised)
                    # actually-reached end-effector position.
                    her_states = states.clone()
                    noise = self.her_noise_dist.sample([her_goals.size(1)]).to(DEVICE)
                    noise_magnitudes = torch.linalg.vector_norm(noise, dim=-1, keepdims=True)
                    noise /= noise_magnitudes
                    # Clip noise magnitude so the HER goal still counts as reached.
                    noise *= torch.minimum(noise_magnitudes, torch.tensor(self.goal_reached_threshold_distance))
                    her_states[:, :, self.goal_state_index:self.goal_state_index + 3] = her_goals + noise.unsqueeze(0)
                    # stack HER batch on top
                    states = torch.cat([states, her_states])
                    actions = torch.cat([actions, actions])
                    returns = torch.cat([returns, her_returns])
                    is_valid = torch.cat([is_valid, her_is_valid])
                    accumulated_weights = torch.cat([accumulated_weights, accumulated_weights])
                    batch_size = is_valid.sum()
                    # create batch
                    state_batch = torch.masked_select(states, is_valid).reshape([batch_size, states.size(-1)]).contiguous()
                    action_batch = torch.masked_select(actions, is_valid).reshape([batch_size, 1]).contiguous()
                    return_batch = torch.masked_select(returns, is_valid).reshape([batch_size, 1]).contiguous()
                    # Blend IS weights towards 1 according to soft_is.
                    weight_batch = (1 - self.soft_is) * torch.masked_select(accumulated_weights, is_valid).reshape([batch_size, 1]).contiguous() + self.soft_is
                    # update
                    self.optimizer.zero_grad()
                    q = self.dqn(state_batch).gather(1, action_batch)
                    loss = weight_batch * nn.MSELoss(reduction="none")(q, return_batch)
                    loss = loss.mean()
                    if not loss.isnan().any():
                        loss.backward()
                        clip_grad_norm_(self.dqn.parameters(), 100)
                        self.optimizer.step()
                        # logging etc.
                        self.soft_is *= self.soft_is_decay
                        self.loss_accumulator.update_state(loss.detach().mean().cpu())
                        self.log_dict["loss"] += self.loss_accumulator.get_value().item()
                        self.loss_accumulator.reset()
                    else:
                        self.warn(f"NaNs in MC loss. Epoch {self.epoch}. \n"
                                  f"State batch has NaNs: {state_batch.isnan().any()}\n"
                                  f"Action batch has NaNs: {action_batch.isnan().any()}\n"
                                  f"Return batch has NaNs: {return_batch.isnan().any()}\n"
                                  f"Weight batch has NaNs: {weight_batch.isnan().any()}")
                    # Reset all episode buffers after the update.
                    self.buffer_size = 0
                    self.state_stack = []
                    self.actions_stack = []
                    self.ee_position_stack = []
                    self.weights_stack = []
                    self.rewards_stack = []
                    self.is_terminal_stack = []
                    self.her_terminal_stack = []
                    self.ignore_stack = []
                    self.abort_stack = []
                    self.episode_lengths = torch.zeros([self.robot_batch, 1], device=DEVICE)
                if self.epoch % self.log_interval == 0:
                    self.summary_writer.add_scalar("soft_is", self.soft_is, self.epoch)
        else:
            # Evaluation mode: discard any partially collected episode data.
            self.buffer_size = 0
            self.state_stack = []
            self.actions_stack = []
            self.ee_position_stack = []
            self.weights_stack = []
            self.rewards_stack = []
            self.is_terminal_stack = []
            self.her_terminal_stack = []
            self.ignore_stack = []
            self.abort_stack = []
            self.episode_lengths = torch.zeros([self.robot_batch, 1], device=DEVICE)
        # Greedy action selection from the current Q-network.
        self.dqn.eval()
        with torch.no_grad():
            self.action_index = self.dqn(state_dict["state"]).max(-1)[1].unsqueeze(-1)
            self.real_action_index = self.action_index
        self.dqn.train()
| streifenfrei/control-force-provider | control_force_provider/src/control_force_provider/rl/monte_carlo.py | monte_carlo.py | py | 9,890 | python | en | code | 1 | github-code | 13 |
##list##
# NOTE: cheat-sheet scratch lines documenting common APIs; this section is not
# meant to run top-to-bottom (it references undefined names like i, w and age).
a=[]
a.append('a') ##append item at the end
a.insert(5,'a') ##insert at index 5 (appends when the list is shorter)
a.pop() ##remove and return the last item
a.pop(i) ##remove and return the item at index i
a.sort() ##sort the list in place
a.reverse() ##reverse the list in place
del a[i] ##delete the element at index i
a.index("a") ##return the first index at which the item occurs
a.count('a') ##return the number of occurrences of the item
a.remove('a') ##remove the first occurrence of item 'a'
##string##
myname='jinchi'
myname.upper() ##upper-cased copy (strings are immutable; result is discarded here)
myname.lower() ##lower-cased copy
myname.center(w) ##copy of length w with myname centered (w is undefined here)
myname.split("i") ##split on the separator "i"
##set##
myskill={'python','R','SQL','Pytorch'}
required_skill={"python",'R'}
required_skill <= myskill ##subset test
required_skill | myskill ##union
required_skill & myskill ##intersection
myskill.add('Tensorflow')
myskill.remove("cool") ##raises KeyError when absent (discard() would not)
myskill.pop() ##remove and return an arbitrary element
myskill.clear() ##delete all elements
##dictionary##
myskill_set={'python':'expert','R':"expert",'SQL':'expert'}
myskill_set.keys()
myskill_set.values()
##input##
aName = input('Please enter your name: ')
##string formatting##
print(myskill, sep="***") ##sep only matters with multiple positional arguments
print("%s is %d years old." % (aName, age)) ##age is undefined in this file
##converting infix to postfix: operator precedence table used below
a='(A+B)*(C+D)*(E+F)'
c='A*B*C*D+E+F'
prior_set={}
prior_set['*']=3
prior_set['/']=3
prior_set['+']=2
prior_set['-']=2
prior_set['(']=1
def posfix_f(str_e):
    """Convert a single-character-token infix expression to postfix notation
    using the shunting-yard algorithm.

    Operands are upper-case letters or digits; supported operators are
    + - * / with parentheses.
    """
    precedence = {'*': 3, '/': 3, '+': 2, '-': 2, '(': 1}
    operands = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    output = []
    op_stack = []
    for token in str_e:
        if token in operands:
            output.append(token)
        elif token == '(':
            op_stack.append(token)
        elif token == ')':
            # Pop operators down to (and discarding) the matching '('.
            top = op_stack.pop()
            while top != '(':
                output.append(top)
                top = op_stack.pop()
        else:
            # Pop operators of greater-or-equal precedence, then push this one.
            while op_stack and precedence[op_stack[-1]] >= precedence[token]:
                output.append(op_stack.pop())
            op_stack.append(token)
    while op_stack:
        output.append(op_stack.pop())
    return ''.join(output)
a='A*B+C*D'  # sample expression for the prefix converter below
def prefix_f(str_e):
    """Convert an infix expression string to prefix (Polish) notation.

    Operands are single characters (A-Z or 0-9); supported operators are
    + - * / with parentheses. Returns the prefix string.
    """
    precedence = {'*': 3, '/': 3, '+': 2, '-': 2, '(': 1}
    operands = []
    op_stack = []

    def fold(op):
        # Merge the two most recent sub-expressions under ``op``.
        right = operands.pop()
        left = operands.pop()
        operands.append(op + left + right)

    for token in str_e:
        if token in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' or token in '0123456789':
            operands.append(token)
        elif token == '(':
            op_stack.append(token)
        elif token == ')':
            # At least one operator sits inside the parentheses; fold each.
            top = op_stack.pop()
            while top != '(':
                fold(top)
                top = op_stack.pop()
        else:
            while op_stack and precedence[op_stack[-1]] >= precedence[token]:
                fold(op_stack.pop())
            op_stack.append(token)
    while op_stack:
        fold(op_stack.pop())
    return ''.join(operands)
prefix_f(a)
31955346814 | #有 n 个气球,编号为0 到 n-1,每个气球上都标有一个数字,这些数字存在数组?nums?中。
#现在要求你戳破所有的气球。如果你戳破气球 i ,就可以获得 nums[left] * nums[i] * nums[right] 个硬币。
#这里的 left 和 right 代表和 i 相邻的两个气球的序号。注意当你戳破了气球 i 后,气球 left 和气球 right 就变成了相邻的气球。
#求所能获得硬币的最大数量。
#来源:力扣(LeetCode)第 312 题「戳气球 / Burst Balloons」
#链接:https://leetcode-cn.com/problems/burst-balloons
class Solution:
    def maxCoins(self, nums: "List[int]") -> int:
        """Return the maximum coins from bursting all balloons (LeetCode 312).

        Interval DP: ``solve(left, right)`` is the best score obtainable by
        bursting every balloon strictly between the virtual boundary
        balloons ``left`` and ``right`` (both worth 1), memoized.

        Fix: the original referenced ``lru_cache`` (and the ``List``
        annotation) without importing them, raising NameError at runtime.
        """
        from functools import lru_cache  # local import keeps the class self-contained

        n = len(nums)
        # Pad with virtual balloons of value 1 on both ends.
        val = [1] + nums + [1]

        @lru_cache(None)
        def solve(left: int, right: int) -> int:
            if left >= right - 1:
                return 0  # no balloon strictly inside the interval
            best = 0
            for i in range(left + 1, right):
                # Balloon i is the *last* one burst inside (left, right).
                total = val[left] * val[i] * val[right]
                total += solve(left, i) + solve(i, right)
                best = max(best, total)
            return best

        return solve(0, n + 1)
27458296182 | from __future__ import annotations
import os
import numpy as np
from gym.envs.classic_control import rendering
from collections import deque
from typing import Union
from .planet import Planet
from .helpers import angle_to_unit_vector
MAX_SCREEN_SIZE = 600
SHIP_BODY_RADIUS = 15
class Renderer:
    """gym classic_control renderer for the space-ship environment.

    Draws planets, the ship (body, engine, exhaust, torque indicator), an
    optional goal marker, a fading trace of recent ship positions and, in
    debug mode, lidar rays. World coordinates are mapped to screen pixels
    through a uniform translation/scale derived from ``world_size``.
    """

    def __init__(
        self,
        planets: list[Planet],
        world_size: float,
        goal_pos: np.ndarray = None,
        num_prev_pos_vis: int = 30,
        prev_pos_color_decay: float = 0.85,
        debug_mode=False,
    ):
        # World->screen mapping: shift so the world is centred at the
        # origin, then scale so the world fits MAX_SCREEN_SIZE pixels.
        self.world_translation = np.full(2, -world_size / 2)
        self.world_scale = MAX_SCREEN_SIZE / world_size
        screen_size = np.full(2, world_size * self.world_scale, dtype=np.int64)
        self.viewer = rendering.Viewer(*screen_size)
        # Single transform shared by all ship geometries (body/engine/exhaust).
        self.ship_transform = rendering.Transform()
        self.planets = planets
        self._init_planets()
        self._init_engine()
        self.exhaust = None
        self._init_exhaust()
        self._init_ship()
        self.goal_pos = goal_pos
        self.goal_transform = None
        if self.goal_pos is not None:
            self._init_goal()
        # Ring buffer of recent screen positions, rendered as a fading trace.
        self.prev_ship_pos = deque(maxlen=num_prev_pos_vis)
        self.prev_pos_color_decay = prev_pos_color_decay
        self.debug_mode = debug_mode
        self.reset(self.goal_pos)

    def reset(self, goal_pos: np.array = None):
        """Reposition planets/goal for a new episode and clear the trace."""
        self._move_planets()
        self.move_goal(goal_pos)
        self.prev_ship_pos.clear()

    def render(
        self, ship_world_position: np.array, action: np.array, goal_lidar: np.array, planets_lidars: np.array, mode: str
    ):
        """Draw one frame.

        ``ship_world_position`` is (x, y, angle); ``action`` is
        (thrust, torque) or None; the lidar arrays are world-frame offsets
        from the ship (only drawn in debug mode). Returns the viewer's
        render result (an RGB array when ``mode == "rgb_array"``).
        """
        self.viewer.add_onetime(self._torque_img)
        # NOTE(review): rotation hard-coded to 4 radians every frame --
        # confirm this is the intended orientation of the torque arrow.
        self._torque_img_transform.set_rotation(4)
        ship_screen_position = self._world_to_screen(ship_world_position[:2])
        self.prev_ship_pos.append(ship_screen_position)
        self.ship_transform.set_translation(*ship_screen_position)
        self.ship_transform.set_rotation(ship_world_position[2])
        if action is not None:
            thrust_action, torque_action = action
        else:
            thrust_action = torque_action = 0
        # hack to be able to set opacity (exhaust alpha tracks thrust)
        self.exhaust._color.vec4 = (0, 0, 0, thrust_action)
        # Mirror horizontally for negative torque; magnitude sets the size.
        self._torque_img_transform.scale = (-torque_action, np.abs(torque_action))
        self._draw_ship_trace()
        planets_lidar_screen = np.zeros_like(planets_lidars)
        if self.debug_mode:
            # Lidar vectors are offsets from the ship; convert the endpoints.
            goal_lidar_screen = self._world_to_screen(ship_world_position[:2] + goal_lidar)
            for i in range(planets_lidars.shape[0]):
                planets_lidar_screen[i] = self._world_to_screen(ship_world_position[:2] + planets_lidars[i])
            self._draw_lidar(ship_screen_position, goal_lidar_screen, planets_lidar_screen)
        return self.viewer.render(mode == "rgb_array")

    def _init_planets(self):
        """Create one circle outline per planet plus a transform to move it."""
        self._planets_transforms = []
        for planet in self.planets:
            planet_geom = rendering.make_circle(planet.radius * self.world_scale, filled=False)
            transform = rendering.Transform()
            self._planets_transforms.append(transform)
            planet_geom.add_attr(transform)
            self.viewer.add_geom(planet_geom)

    def _init_engine(self):
        """Create the triangular engine nozzle attached to the ship."""
        engine_edge_length = SHIP_BODY_RADIUS * 1.7
        engine_width_angle = np.pi / 4
        # The nozzle is a triangle from the ship centre spanning the width angle.
        engine_left_bottom_angle = -engine_width_angle / 2
        engine_right_bottom_angle = engine_width_angle / 2
        engine_left_bottom_pos = engine_edge_length * angle_to_unit_vector(engine_left_bottom_angle)
        engine_right_bottom_pos = engine_edge_length * angle_to_unit_vector(engine_right_bottom_angle)
        engine = rendering.FilledPolygon([(0.0, 0.0), engine_left_bottom_pos, engine_right_bottom_pos])
        engine.add_attr(self.ship_transform)
        self.viewer.add_geom(engine)

    def _init_exhaust(self):
        """Create three flame lines behind the engine; alpha is set per frame."""
        engine_width_angle = np.pi / 4
        exhaust_begin_radius = SHIP_BODY_RADIUS * 1.9
        exhaust_end_radius = SHIP_BODY_RADIUS * 2.2
        flames = []
        # Three evenly spread flame lines within half the engine width.
        for flame_angle in np.linspace(
            -engine_width_angle / 4,
            engine_width_angle / 4,
            3,
        ):
            vec = angle_to_unit_vector(flame_angle)
            flame = rendering.Line(exhaust_begin_radius * vec, exhaust_end_radius * vec)
            flames.append(flame)
        self.exhaust = rendering.Compound(flames)
        self.exhaust.add_attr(self.ship_transform)
        self.viewer.add_geom(self.exhaust)

    def _init_ship(self):
        """Create the ship body (disc + outline + centre dot) and torque image."""
        ship_body = rendering.make_circle(SHIP_BODY_RADIUS, filled=True)
        ship_body.set_color(1.0, 1.0, 1.0)
        ship_body.add_attr(self.ship_transform)
        self.viewer.add_geom(ship_body)
        ship_body_outline = rendering.make_circle(SHIP_BODY_RADIUS, filled=False)
        ship_body_outline.add_attr(self.ship_transform)
        self.viewer.add_geom(ship_body_outline)
        ship_body_middle = rendering.Point()
        ship_body_middle.add_attr(self.ship_transform)
        ship_body_middle.set_color(0.5, 0.5, 0.5)
        self.viewer.add_geom(ship_body_middle)
        # Torque indicator sprite, drawn per-frame via add_onetime in render().
        torque_img_filename = os.path.join(os.path.dirname(__file__), "assets/torque_img.png")
        self._torque_img = rendering.Image(torque_img_filename, 20.0, 20.0)
        self._torque_img_transform = rendering.Transform()
        self._torque_img.add_attr(self._torque_img_transform)
        self._torque_img.add_attr(self.ship_transform)

    def _init_goal(self):
        """Create the goal marker: an X made of two crossed lines."""
        line1 = rendering.Line((-10, -10), (10, 10))
        line2 = rendering.Line((-10, 10), (10, -10))
        goal = rendering.Compound([line1, line2])
        self.goal_transform = rendering.Transform()
        goal.add_attr(self.goal_transform)
        self.viewer.add_geom(goal)

    def _move_planets(self):
        """Translate each planet geometry to its current world position."""
        for planet, transform in zip(self.planets, self._planets_transforms):
            transform.set_translation(*self._world_to_screen(planet.center_pos))

    def move_goal(self, goal_pos: Union[np.array, None]):
        """Move the goal marker; goal presence must match construction time."""
        assert (goal_pos is None) == (self.goal_pos is None)
        self.goal_pos = goal_pos
        if self.goal_pos is not None:
            self.goal_transform.set_translation(*self._world_to_screen(self.goal_pos))

    def _draw_ship_trace(self):
        """Draw the recent-position trace, fading older segments."""
        opacity = 1.0
        # Walk backwards from the newest position; each older segment fades.
        for i in range(1, len(self.prev_ship_pos)):
            line = rendering.Line(self.prev_ship_pos[-i], self.prev_ship_pos[-i - 1])
            # hack to be able to set opacity
            line._color.vec4 = (0, 0, 0, opacity)
            opacity *= self.prev_pos_color_decay
            self.viewer.add_onetime(line)

    def _world_to_screen(self, world_pos: np.array):
        """Map a world-frame 2D position to screen pixel coordinates."""
        return self.world_scale * (world_pos - self.world_translation)

    def _draw_lidar(self, ship_screen_position, goal_lidar_screen, planets_lidar_screen):
        """Debug overlay: draw a ray to the goal and one per planet lidar."""
        # print(f"goal_l={goal_lidar_screen}")
        # print(f"planets_l={planets_lidar_screen}")
        opacity = 1.0
        vec_to_screen = 50  # NOTE(review): unused -- leftover from debugging?
        line = rendering.Line(ship_screen_position, goal_lidar_screen)
        line._color.vec4 = (0, 0, 0, opacity)
        self.viewer.add_onetime(line)
        for i in range(planets_lidar_screen.shape[0]):
            line = rendering.Line(ship_screen_position, planets_lidar_screen[i, :])
            line._color.vec4 = (0, 0, 0, opacity)
            self.viewer.add_onetime(line)
| MIMUW-RL/space-gym | gym_space/rendering.py | rendering.py | py | 7,418 | python | en | code | 6 | github-code | 13 |
2686753199 | ''' Py_unittest.py
Author: BSS9395
Update: 2022-10-23T17:30:00+08@China-Shanghai+08
Design: Python Standard Library: unittest
'''
from To_Test import *
import unittest
class Test(unittest.TestCase):
    """Unit tests for the equation solvers imported from To_Test."""

    @classmethod
    def setUpClass(cls):
        # Runs once before any test method in this class.
        print("=" * 10 + "setUpClass" + "=" * 10)

    @classmethod
    def tearDownClass(cls):
        # Runs once after all test methods in this class.
        print("=" * 10 + "tearDownClass" + "=" * 10)

    def setUp(self):
        # Runs before every individual test method.
        print("=" * 10 + "setUp" + "=" * 10)

    def tearDown(self):
        # Runs after every individual test method.
        print("=" * 10 + "tearDown" + "=" * 10)

    def test_Linear_Equation(self):
        # _Linear_Equation(a, b) is expected to return the root of a*x + b = 0.
        self.assertEqual(_Linear_Equation(5, 9), -1.8)
        self.assertTrue(_Linear_Equation(4, 10) == -2.5)
        self.assertTrue(_Linear_Equation(4, -27) == 27 / 4)
        # NOTE(review): skipTest aborts the method right here, so the
        # ValueError check below never runs -- confirm this is intentional.
        self.skipTest("skip temporarily")
        with self.assertRaises(ValueError):
            _Linear_Equation(0, 9)

    @unittest.skip("Skip Temporarily")
    def test_Quadratic_Equation(self):
        # _Quadratic_Equation(a, b, c) is expected to return the real roots of
        # a*x**2 + b*x + c = 0 (a pair, or a single value for a double root).
        r1 = _Quadratic_Equation(1, -3, 2)
        self.assertCountEqual(r1, (1.0, 2.0))
        r1, r2 = _Quadratic_Equation(2, -7, 6)
        self.assertCountEqual((r1, r2), (1.5, 2.0))
        r = _Quadratic_Equation(1, -4, 4)
        self.assertEqual(r, 2.0)
        with self.assertRaises(ValueError):
            _Quadratic_Equation(0, 9, 3)
if __name__ == "__main__":
    # Discover and run every TestCase method in this module.
    unittest.main()
| bss9395/bss9395.github.io | _en/Computer/Operating_System/Python_Programming/Py_unittest/Py_unittest.py | Py_unittest.py | py | 1,338 | python | en | code | 0 | github-code | 13 |
20883484264 | import datetime
from typing import List
from common.auth import FirebaseAuthentication
from common.logger import StructuredLogger
from discovery.api.schema import FeedItemSchema
from django.db.models import Q
from django.views.decorators import csrf
from episode.models import Episode
from ninja import Router
from ninja.pagination import LimitOffsetPagination, paginate
from series.models import Series
router = Router()
logger = StructuredLogger(__name__)
@router.get(
    "/new",
    response={200: List[FeedItemSchema]},
)
@paginate(LimitOffsetPagination)
@csrf.csrf_exempt
def new_feed(request):
    """Publicly visible, non-banned episodes ordered newest first."""
    # Visibility gate shared by the feed: episode public/pre-release,
    # series public, and nothing in the chain banned.
    visible = Q(
        status__in=[
            Episode.EpisodeStatus.PUBLIC,
            Episode.EpisodeStatus.PRE_RELEASE,
        ],
        series__status=Series.SeriesStatus.PUBLIC,
        is_banned=False,
        series__is_banned=False,
        series__owner__is_banned=False,
    )
    queryset = Episode.objects.select_related("thumbnail", "series__owner")
    return queryset.filter(visible).order_by("-publish_date")
@router.get(
    "/trending",
    response={200: List[FeedItemSchema]},
)
@paginate(LimitOffsetPagination)
@csrf.csrf_exempt
def trending_feed(request):
    """Publicly visible episodes from the last 200 days, best trend score first."""
    # Fix: the original used naive ``datetime.now().replace(tzinfo=utc)``,
    # which labels *local* wall-clock time as UTC and skews the window by
    # the server's UTC offset. Build an aware UTC "now" directly instead.
    cutoff = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=200)
    return (
        Episode.objects.select_related(
            "thumbnail",
            "series__owner",
        )
        .filter(
            status__in=[
                Episode.EpisodeStatus.PUBLIC,
                Episode.EpisodeStatus.PRE_RELEASE,
            ],
            series__status=Series.SeriesStatus.PUBLIC,
            publish_date__gt=cutoff,
            is_banned=False,
            series__is_banned=False,
            series__owner__is_banned=False,
        )
        .order_by("-trend_score")
    )
@router.get(
    "/subscribed",
    response={200: List[FeedItemSchema]},
    auth=FirebaseAuthentication(),
)
@paginate(LimitOffsetPagination)
@csrf.csrf_exempt
def subscribed_feed(request):
    """Episodes from series the authenticated user follows (stub: empty)."""
    # TODO implement subscription
    return []
@router.get(
    "/suggested",
    response={200: List[FeedItemSchema]},
    auth=FirebaseAuthentication(),
)
@paginate(LimitOffsetPagination)
@csrf.csrf_exempt
def suggested_feed(request):
    """Personalized recommendations for the authenticated user (stub: empty)."""
    # TODO implement recommendation
    return []
@router.get("/search/content", response={200: List[FeedItemSchema]})
@paginate(LimitOffsetPagination)
@csrf.csrf_exempt
def search_content(request, q: str):
    """Search publicly visible episodes by title, series title, owner name or tag.

    Fix: ``&`` binds tighter than ``|`` in Q expressions, so the original
    attached the visibility/ban filters only to the tag-match branch --
    title/series/owner matches leaked banned and non-public episodes.
    Grouping the OR branch explicitly applies the gate to every match.
    """
    matches = (
        Q(title__icontains=q)
        | Q(series__title__icontains=q)
        | Q(series__owner__display_name__icontains=q)
        | Q(series__tags__name__icontains=q)
    )
    visible = (
        Q(
            status__in=[
                Episode.EpisodeStatus.PUBLIC,
                Episode.EpisodeStatus.PRE_RELEASE,
            ]
        )
        & Q(series__status__exact=Series.SeriesStatus.PUBLIC)
        & Q(is_banned=False)
        & Q(series__is_banned=False)
        & Q(series__owner__is_banned=False)
    )
    return (
        Episode.objects.select_related(
            "thumbnail",
            "series__owner",
        )
        .filter(matches & visible)
        # The join through tags can duplicate rows; collapse them.
        .distinct()
    )
| bluejay9676/moka | moka/discovery/api/v1.py | v1.py | py | 3,267 | python | en | code | 3 | github-code | 13 |
73726270098 | import requests
import time
import logging
logger = logging.getLogger(__name__)

# One shared session so the search POST and the result-page GET share
# cookies (the site appears to store the search server-side per session).
s = requests.Session()
# AJAX endpoint that registers a flat search for the current session.
search_url = 'https://inberlinwohnen.de/wp-content/themes/ibw/skript/search-flats.php'
# HTML page that renders the results of the previously registered search.
result_url = 'https://inberlinwohnen.de/suchergebnis/'
# Headers sent only with the AJAX search POST.
search_headers = {
    'accept': '*/*',
    'origin': 'https://inberlinwohnen.de',
    'x-requested-with': 'XMLHttpRequest',
    'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
}
# Headers sent only with the result-page GET.
result_headers = {
    'upgrade-insecure-requests': '1',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
# Browser-mimicking headers applied to every request on the session.
common_headers = {
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'en-US,en;q=0.9',
    'pragma': 'no-cache',
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36',
    'cache-control': 'no-cache',
    'authority': 'inberlinwohnen.de',
    'referer': 'https://inberlinwohnen.de/wohnungsfinder/'
}
s.headers.update(common_headers)
# Base search form payload (captured from the website's advanced search).
# get_search() below overrides rooms/rent/WBS fields per query.
search_data = {
    'q': 'srch',
    'lang': 'de',
    'qtype': 'advanced',
    'order': 'mietpreis_nettokalt',
    'odx': 'ASC',
    'bez': '',
    'qmiete_min': '',
    'qmiete_max': '1000',
    'qqm_min': '50',
    'qqm_max': '',
    'qrooms_min': '2',
    'qrooms_max': '4',
    'qetage_min': '',
    'qetage_max': '',
    'qbaujahr_min': '',
    'qbaujahr_max': '',
    'qheizung_zentral': '0',
    'qheizung_etage': '0',
    'qenergy_fernwaerme': '0',
    'qheizung_nachtstrom': '0',
    'qheizung_ofen': '0',
    'qheizung_gas': '0',
    'qheizung_oel': '0',
    'qheizung_solar': '0',
    'qheizung_erdwaerme': '0',
    'qheizung_fussboden': '0',
    'qbalkon_loggia_terrasse': '0', # was 1 in my capture. verified manually that this will return flats with and without balcony
    'qgarten': '0',
    'qwbs': 'must_not',
    'qbarrierefrei': '0',
    'qmoebliert': '0',
    'qgaeste_wc': '0',
    'qaufzug': '0',
    'qstellplatz': '0',
    'qkeller': '0',
    'qbadewanne': '0',
    'qdusche': '0',
}
def get_search(min_rooms, max_rooms, max_rent, wbs):
    """Build a search form payload from the base ``search_data`` template.

    ``wbs`` is an int flag: 0 = exclude WBS flats, 1 = WBS-only, 2 = all.
    Returns a fresh dict; the module-level template is left untouched.
    """
    wbs_values = {
        0: 'must_not',
        1: 'must',
        2: 'all',
    }
    payload = dict(search_data)
    payload.update(
        qrooms_min=str(min_rooms),
        qrooms_max=str(max_rooms),
        qmiete_max=str(max_rent),
        qwbs=wbs_values[wbs],
    )
    return payload
def scrape(min_rooms, max_rooms, max_rent, wbs):
    """Submit a flat search and return the result page HTML as UTF-8 bytes."""
    payload = get_search(min_rooms, max_rooms, max_rent, wbs)
    response = s.post(search_url, data=payload, headers=search_headers)
    response.raise_for_status()
    logger.debug("Sleeping for 5 seconds before querying for the results")
    # The web UI waits a few seconds before fetching results; mimic that.
    # It seems to work without the delay, but staying close to the UI is safer.
    time.sleep(5.0)
    result_page = s.get(result_url, headers=result_headers)
    return result_page.text.encode("utf-8")
| benediktkr/wohnen | inberlinwohnen/scraper.py | scraper.py | py | 2,902 | python | en | code | 1 | github-code | 13 |
3188669649 | from gensim.models import word2vec
import pandas as pd
import jieba.posseg as psg
import logging
import os
from nltk.tokenize import WordPunctTokenizer
# Loaded once at import time. NOTE(review): requires 'eng_stopwords.txt' in
# the working directory, and the file handle is never closed -- acceptable
# for a one-shot script, but confirm.
stopwords = [line.strip() for line in open('eng_stopwords.txt').readlines()]
def preprocess(text):
    """Tokenize ``text`` and drop stopwords, pure-digit tokens and CRLF tokens.

    Returns the surviving tokens joined by single spaces; the result keeps
    a trailing space whenever any token was kept (original output format).
    """
    tokens = WordPunctTokenizer().tokenize(text)
    kept = [
        tok for tok in tokens
        if tok not in stopwords and not tok.isdigit() and tok != "\r\n"
    ]
    return "".join(tok + " " for tok in kept)
# Tokenize the text and remove stopwords (文本分词, 去停用词).
df = pd.read_csv("quora_completed.csv")
df["cutted_words"] = df.content.apply(preprocess)
# NOTE(review): preprocess() is run twice per row here (once for the column
# above and once per line below) and the "cutted_words" column is never
# written out -- confirm whether both passes are needed.
fo = open("new_quora_cut.txt", "w+")
for row in df.content:
    fo.writelines(preprocess(row) + "\n")
fo.close()
def model_train(train_file_name, save_model_file):
    """Train a skip-gram word2vec model on a whitespace-tokenized corpus.

    Args:
        train_file_name: path to the tokenized training corpus.
        save_model_file: path to save the model; a binary word-vector
            export is additionally written to ``<save_model_file>.bin``.
    """
    print ('model_train begin.')
    try:
        # Train word vectors on the corpus, logging gensim's progress.
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
        sentences = word2vec.Text8Corpus(train_file_name)  # load the corpus
        # Skip-gram model (sg=1); default window size is 5.
        model = word2vec.Word2Vec(sentences, size=2700, min_count=10, sg=1)
        model.save(save_model_file)
        # Also export binary word2vec format so the vectors can be reused.
        model.wv.save_word2vec_format(save_model_file + ".bin", binary=True)
    except Exception as e:
        # Fix: the original caught BaseException (swallowing Ctrl-C and
        # SystemExit) and printed the *class* ``Exception`` instead of the
        # caught error's type. Report the actual error instead.
        print(type(e), ":", e)
    print ('model_train end.')
# Corpus and model file locations used by word2vec_test() below.
train_file_name = "quora_cut.txt"
save_model_file = "quora_cut.model"
# model_train(train_file_name, save_model_file)
model_file = "quora_cut.model"
model_file_bin = "quora_cut.model.bin"
def word2vec_test():
    """Train the word2vec model, reload it from disk, and print the 19 words
    most similar to "quit" as a smoke test."""
    print ('word2vec_test begin.')
    try:
        # Configure log output for gensim's training progress.
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
        # Train the model (unconditionally; the original's existence check
        # was already commented out in favour of always retraining).
        print ('从文件:%s 训练模型存放在: %s' % (train_file_name, model_file))
        model_train(train_file_name, model_file)
        # Load the trained model back from disk.
        print ('从文件:%s 中加载模型' % model_file)
        model_1 = word2vec.Word2Vec.load(model_file)
        # Query the 19 words most similar to "quit".
        y2 = model_1.most_similar(u"quit", topn=19)
        print(u"和quit最相关的词有:\n")
        for item in y2:
            print ("%s: %g" % (item[0], item[1]))
        print("-------------------------------\n")
    except Exception as e:
        # Fix: the original printed the bare word "Exception", discarding
        # all detail about what failed. Surface the actual error.
        print ("Exception:", e)
    print ('word2vec_test end.')
# word2vec_test() | GGGWX/Some-Informatics | TopicModeling/eng_wordTovec.py | eng_wordTovec.py | py | 3,042 | python | en | code | 0 | github-code | 13 |
74888825936 | import math
class Solution(object):
    def myAtoi(self, str):
        """Convert a string to a 32-bit signed integer (LeetCode 8, atoi).

        Skips leading whitespace, honours one optional '+'/'-' sign, reads
        the following run of ASCII digits, and clamps the result to
        [-2**31, 2**31 - 1]. Returns 0 when no digits follow the sign.

        Fixes: removes the ``str = str = str[...]`` double-assignment typo,
        compares against exact integer bounds instead of ``math.pow``'s
        float, and stops shadowing the builtin ``str`` internally
        (the parameter name is kept for interface compatibility).
        """
        s = str.lstrip()
        if not s:
            return 0
        negative = False
        if s[0] == "-":
            negative = True
            s = s[1:]
        elif s[0] == "+":
            s = s[1:]
        # Collect the leading run of ASCII digits only.
        digits = []
        for ch in s:
            if not ('0' <= ch <= '9'):
                break
            digits.append(ch)
        if not digits:
            return 0
        result = int("".join(digits))
        # Clamp to the signed 32-bit range with exact integer bounds.
        if negative:
            return max(-result, -2**31)
        return min(result, 2**31 - 1)
27175305389 | import cv2
import numpy as np
from flask import current_app
def rotate(img, angle=0):
    """Rotate ``img`` about its centre by ``angle`` degrees.

    Args:
        img: Input 3-channel image of shape (H, W, C).
        angle (float): Rotation angle in degrees (counter-clockwise).

    Returns:
        The rotated image, same size as the input.

    Source:
        https://docs.opencv.org/master/
    """
    height, width, _ = img.shape
    centre = (width / 2, height / 2)
    rot_matrix = cv2.getRotationMatrix2D(centre, angle, 1)
    rotated = cv2.warpAffine(img, rot_matrix, (width, height))
    return rotated
def average_blur(img, kdim=8):
    """Apply a box (average) blur with a ``kdim`` x ``kdim`` kernel.

    Args:
        img: Input image.
        kdim (int): Kernel side length. Default: 8.

    Returns:
        The blurred image.

    Source:
        https://docs.opencv.org/master/
    """
    kernel = (kdim, kdim)
    return cv2.blur(img, kernel)
def gaussian_blur(img, kdim=8, var=5):
    """Apply a Gaussian blur with a ``kdim`` x ``kdim`` kernel.

    Args:
        img: Input image.
        kdim (int): Kernel side length; must be odd (OpenCV requirement).
            Default: 8 -- note the historical default is even and is
            rejected; callers should pass an odd value.
        var (float): Gaussian sigma. Default: 5.

    Returns:
        The blurred image, or None when ``kdim`` is even (the diagnostic
        is printed, matching the original's intent).

    Source:
        https://docs.opencv.org/master/
    """
    # Fix: the original wrapped the call in a bare ``except:`` whose handler
    # indexed ``kdim[0]`` on an int, so an even kernel raised TypeError
    # instead of printing the intended message. Validate up front instead.
    if kdim % 2 == 0:
        print("kernel dimension cannot be even for gaussian blur.")
        return None
    return cv2.GaussianBlur(img, (kdim, kdim), var)
def gaussian_noise(img, var=10, mean=0):
    """Add Gaussian noise to ``img``.

    A single 2-D noise field is generated and added to all three channels
    (grey noise, not independent per-channel noise); the result is then
    min-max normalized back into [0, 255] and cast to uint8.

    Args:
        img: Input 3-channel image.
        var (float): Noise variance (sigma = sqrt(var)). Default: 10.
        mean (float): Noise mean. Default: 0.

    Returns:
        timg: uint8 image with additive Gaussian noise.

    Source:
        https://docs.opencv.org/master/
        https://numpy.org/doc/
    """
    row, col, _ = img.shape
    sigma = var ** 0.5
    # One shared noise field for every channel.
    gaussian = np.random.normal(mean,sigma,(row, col))
    timg = np.zeros(img.shape, np.float32)
    timg[:, :, 0] = img[:, :, 0] + gaussian
    timg[:, :, 1] = img[:, :, 1] + gaussian
    timg[:, :, 2] = img[:, :, 2] + gaussian
    # In-place min-max normalization: note this rescales the whole image's
    # dynamic range, not just the added noise.
    cv2.normalize(timg, timg, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    timg = timg.astype(np.uint8)
    return timg
def sharpen(img, kdim=5, sigma=1.0, amount=1.0, threshold=0):
    """Sharpen ``img`` via unsharp masking.

    The image is blurred, then ``(amount+1)*img - amount*blurred`` boosts
    the high-frequency detail; the result is clipped to [0, 255] and
    rounded to uint8. When ``threshold`` > 0, pixels whose contrast against
    the blur is below the threshold are restored from the original
    (protects flat regions from noise amplification).

    Args:
        img: Input image.
        kdim (int): Gaussian kernel side length (odd). Default: 5.
        sigma (float): Gaussian standard deviation. Default: 1.0.
        amount (float): Sharpening strength. Default: 1.0.
        threshold (float): Low-contrast protection threshold. Default: 0.

    Returns:
        timg: The sharpened uint8 image.

    Source:
        https://docs.opencv.org/master/
        https://numpy.org/doc/
    """
    blurred = cv2.GaussianBlur(img, (kdim, kdim), sigma)
    # Unsharp mask: original plus ``amount`` times the detail layer.
    timg = float(amount + 1) * img - float(amount) * blurred
    # Clip to valid pixel range before converting back to uint8.
    timg = np.maximum(timg, np.zeros(timg.shape))
    timg = np.minimum(timg, 255 * np.ones(timg.shape))
    timg = timg.round().astype(np.uint8)
    if threshold > 0:
        # Keep original pixels wherever the local contrast is too low.
        low_contrast_mask = np.absolute(img - blurred) < threshold
        np.copyto(timg, img, where=low_contrast_mask)
    return timg
def horizontal_flip(img):
    """Mirror ``img`` left-to-right.

    Args:
        img: Input image.

    Returns:
        The horizontally flipped image.

    Source:
        https://docs.opencv.org/master/
    """
    # flipCode=1 flips around the vertical axis.
    return cv2.flip(img, 1)
def vertical_flip(img):
    """Mirror ``img`` top-to-bottom.

    Args:
        img: Input image.

    Returns:
        The vertically flipped image.

    Source:
        https://docs.opencv.org/master/
    """
    # flipCode=0 flips around the horizontal axis.
    return cv2.flip(img, 0)
def perspective_transform(img, input_pts=np.float32([[0, 0], [32, 0], [0, 32], [32, 32]])):
    """Warp ``img`` so that ``input_pts`` map onto a fixed 32x32 square.

    Args:
        img: Input 3-channel image.
        input_pts (nparray): 4x2 float32 source corner coordinates.

    Returns:
        The 32x32 perspective-warped image.

    Source:
        https://docs.opencv.org/master/
    """
    # Unpacking implicitly validates a 3-channel input, as before.
    rows, cols, _ = img.shape
    dst_pts = np.float32([[0, 0], [32, 0], [0, 32], [32, 32]])
    warp_matrix = cv2.getPerspectiveTransform(input_pts, dst_pts)
    return cv2.warpPerspective(img, warp_matrix, (32, 32))
def crop(img, input_pts=np.float32([[0, 0], [32, 0], [0, 32], [32, 32]])):
    """Crop ``img`` by warping the ``input_pts`` quadrilateral to 32x32.

    Implementation is identical to perspective_transform(); the separate
    name documents the crop use case.

    Args:
        img: Input 3-channel image.
        input_pts (nparray): 4x2 float32 source corner coordinates.

    Returns:
        The 32x32 cropped (warped) image.

    Source:
        https://docs.opencv.org/master/
    """
    # Unpacking implicitly validates a 3-channel input, as before.
    rows, cols, _ = img.shape
    dst_pts = np.float32([[0, 0], [32, 0], [0, 32], [32, 32]])
    warp_matrix = cv2.getPerspectiveTransform(input_pts, dst_pts)
    return cv2.warpPerspective(img, warp_matrix, (32, 32))
def random_erasing(img, randomize, grayIndex, mean, var, region=np.array([[12, 12], [20, 12], [12, 20], [20, 20]])):
    """Erase a rectangular region of ``img``, filling it with Gaussian noise
    (``randomize`` True) or a flat grey value ``grayIndex``.

    NOTE(review): ``timg = img`` aliases the input, so the erase mutates the
    caller's array in place (the same array is also returned) -- confirm
    this is intended.

    Args:
        img: Input 3-channel image (modified in place).
        randomize (bool): Fill with random noise instead of a flat value.
        grayIndex (float): Grey level used when ``randomize`` is False.
        mean (float): Mean of the noise fill.
        var (float): Variance of the noise fill.
        region (nparray): 4x2 corner coordinates of the erased rectangle.

    Returns:
        timg: The (same) image with the region erased.

    Source:
        https://docs.opencv.org/master/
    """
    row, col, _ = img.shape
    sigma = var ** 0.5
    timg = img
    # Rectangle bounds derived from the corner array: rows a..b, cols c..d.
    a = int(region[0, 0])
    b = int(region[1, 0])
    c = int(region[0, 1])
    d = int(region[2, 1])
    if randomize:
        gaussian = np.random.normal(mean, sigma, (b-a, d-c))
        timg[a:b, c:d, 0] = gaussian
        timg[a:b, c:d, 1] = gaussian
        timg[a:b, c:d, 2] = gaussian
        # In-place normalization rescales the *whole* image, not just the patch.
        cv2.normalize(timg, timg, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    else:
        patch = grayIndex*np.ones((b-a, d-c))
        timg[a:b, c:d, 0] = patch
        timg[a:b, c:d, 1] = patch
        timg[a:b, c:d, 2] = patch
    return timg
41249285961 | from django.core.urlresolvers import resolve
from django.test import TestCase, Client
from django.http import HttpRequest
from django.template.loader import render_to_string
from users.models import ServiceUser, ServiceUserManager
from blog.views import post_list, blog_post, post_edit
from blog.models import Post
import unittest
def create_instance(num_instances, isActive):
    """Create one or two sample Post rows and return the full Post queryset.

    num_instances == 1 creates a single authored post ('Fadi' / 'TestTitle');
    num_instances == 2 creates two unauthored posts with distinct titles and
    texts. Any other value creates nothing and returns None (preserving the
    original's implicit behaviour).
    """
    def _make_post(title, text, author=None):
        # Helper: build and save one Post with the shared date/active fields
        # (removes the field-by-field duplication of the original).
        post = Post()
        if author is not None:
            post.author = author
        post.title = title
        post.text = text
        post.created_date = '2015-09-04'
        post.published_date = '2015-09-04'
        post.active = isActive
        post.save()

    if num_instances == 1:
        _make_post('TestTitle', 'AND NEW', author='Fadi')
        return Post.objects.all()
    elif num_instances == 2:
        _make_post('TestTitle', 'TestText')
        _make_post('SecondTestTitle', 'SecondTestText')
        return Post.objects.all()
def login_to_superuser(self):
    """Attach a fresh test client logged in with the fixture superuser."""
    client = Client()
    client.login(email='blog@test.com', password='password')
    self.client = client
class blog_post_view(TestCase):
    """Tests for the single-post view (/blog/post/<id>)."""
    fixtures = ['users.json']
    def test_specific_post_redirects_to_blog_post_view(self):
        # /blog/post/1 must resolve to the blog_post view function.
        sample = Post()
        sample.save()
        found = resolve('/blog/post/1')
        self.assertEqual(found.func, blog_post)
    def test_invalid_post_redirects_to_404(self):
        # A non-existent post id renders the 404 page.
        request = HttpRequest()
        response = blog_post(request, 2)
        self.assertIn('Page not found', response.content.decode())
    def test_blog_page_renders_blog_template(self):
        response = self.client.get('/blog')
        self.assertTemplateUsed(response, 'base.html')
    def test_inactive_posts_arent_viewable_by_casuals(self):
        # NOTE(review): hard-codes pk=1; relies on an otherwise empty table.
        sample = Post()
        sample.active = False
        sample.save()
        request = HttpRequest()
        response = blog_post(request, 1)
        self.assertIn('Page not found', response.content.decode())
    def test_active_posts_are_viewable_by_casuals(self):
        sample = Post()
        sample.active = True
        sample.save()
        request = HttpRequest()
        response = blog_post(request, 1)
        # NOTE(review): an *active* post should be viewable, yet this asserts
        # the 404 text just like the inactive case -- looks like a copy-paste
        # error; confirm the intended assertion (e.g. assertNotIn).
        self.assertIn('Page not found', response.content.decode())
    def test_inactivate_posts_are_viewable_by_superusers(self):
        # A logged-in superuser can view an inactive post's content.
        login_to_superuser(self)
        post = create_instance(1, False)
        response = self.client.get('/blog/post/%s' % post[0].id)
        self.assertIn('TestTitle', response.content.decode())
    def test_login(self):
        # Sanity check that the fixture superuser credentials authenticate.
        self.client = Client()
        response = self.client.login(email='blog@test.com', password='password')
        self.assertTrue(response)
class post_list_view(TestCase):
    """Tests for the blog index view (/blog)."""
    fixtures = ['users.json']
    def test_blog_redirect_to_blog_view(self):
        # /blog must resolve to the post_list view function.
        found = resolve('/blog')
        self.assertEqual(found.func, post_list)
    def test_uses_blog_template(self):
        response = self.client.get('/blog')
        self.assertTemplateUsed(response,'blog.html')
    def test_inactive_posts_in_list_arent_viewable_by_casuals(self):
        # Inactive posts must not appear for anonymous visitors.
        create_instance(2, False)
        response = self.client.get('/blog')
        expected_html = response.content.decode()
        self.assertNotIn('TestTitle', expected_html)
        self.assertNotIn('SecondTestTitle', expected_html)
    def test_active_posts_show_on_blog_list_page(self):
        create_instance(2, True)
        response = self.client.get('/blog')
        expected_html = response.content.decode()
        self.assertIn('TestTitle', expected_html)
        self.assertIn('SecondTestText', expected_html)
    def test_inactivate_posts_in_list_are_viewable_by_superusers(self):
        # Superusers see inactive posts in the list view.
        login_to_superuser(self)
        create_instance(1, False)
        response = self.client.get('/blog')
        self.assertIn('TestTitle', response.content.decode())
class post_edit_view(TestCase):
    """Tests for the post-edit view (/blog/post/<id>/post_edit)."""
    fixtures = ['users.json']
    def test_unauthorized_user_redirected_to_front_blog_page(self):
        # Anonymous users are bounced back to the blog front page.
        response = self.client.get('/blog/post/1/post_edit')
        self.assertEqual('http://testserver/blog', response.url)
    def test_edit_redirects_to_post_edit_view(self):
        # The edit URL must resolve to the post_edit view function.
        # NOTE(review): hard-codes pk=7 -- confirm it matches the saved post.
        sample = Post()
        sample.save()
        found = resolve('/blog/post/7/post_edit')
        self.assertEqual(found.func, post_edit)
    def test_if_submit_is_pressed_redirects_to_specific_post(self):
        # Submitting the edit form redirects to the edited post's page.
        login_to_superuser(self)
        post = create_instance(1, False)
        response = self.client.post(
            '/blog/post/' + str(post[0].id) + '/post_edit',
            {
                'author':'Fadi Alnabolsi',
                'title':'Fadi',
                'text':'Testing out this blog',
                'active':False,
                'submit':'submit'
            },
            follow=True)
        self.assertRedirects(response, '/blog/post/' + str(post[0].id))
    def test_if_preview_is_pressed_redirects_to_preview_page(self):
        # Pressing preview renders the preview template instead of saving.
        login_to_superuser(self)
        post = create_instance(1, False)
        response = self.client.post(
            '/blog/post/' + str(post[0].id) + '/post_edit',
            {
                'author':'Fadi Alnabolsi',
                'title':'Fadi',
                'text':'Testing out this blog',
                'active':False,
                'preview':'preview'
            },
            follow=True)
        self.assertTemplateUsed(response, 'preview_post.html')
    def test_if_delete_if_pressed_redirects_to_front_page(self):
        # Deleting the post sends the user back to the blog front page.
        login_to_superuser(self)
        post = create_instance(1, False)
        response = self.client.post(
            '/blog/post/' + str(post[0].id) + '/post_edit',
            {
                'title':'Fadi',
                'text':'Testing out this blog',
                'active':False,
                'delete':'delete'
            },
            follow=True)
        self.assertRedirects(response, '/blog/')
#class post_new_view(TestCase):
# class BlogPageTest(TestCase):
# fixtures = ['users.json']
# def test_forms_show_on_blog_post_page(self):
# login_to_superuser(self)
# response = self.client.get('/blog/post/new')
# self.assertIsInstance(response.context['form'], initialPostForm)
| FadiAlnabolsi/Blog | serviceblog/blog/tests/test_views.py | test_views.py | py | 5,666 | python | en | code | 0 | github-code | 13 |
73924062419 | """"
Learning better flow control
"""
# Leap-year check. Fix: the original classified every century year as a
# non-leap year (e.g. it reported 2000 as "Not leap year."); the Gregorian
# rule also admits years divisible by 400.
year = int(input("Which year do you want to check? "))
if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):
    print("Leap year.")
else:
    print("Not leap year.")
# ===================================================================================================
# Pizza ordering exercise. Prices: S=$15, M=$20, L=$25; pepperoni +$2 (S)
# or +$3 (M/L); extra cheese +$1. Two implementations run back to back for
# comparison (both print a bill line).
print("Welcome to Python Pizza Deliveries!")
size = input("What size pizza do you want? S, M, or L ")
add_pepperoni = input("Do you want pepperoni? Y or N ")
extra_cheese = input("Do you want extra cheese? Y or N ")
# --- Version 1: fully nested conditionals ---------------------------------
total_bill = 0
if size == "S":
    if add_pepperoni == "Y":
        if extra_cheese == "Y":
            total_bill = 15 + 2 + 1
        else:
            total_bill = 15 + 2
    else:
        if extra_cheese == "Y":
            total_bill = 15 + 1
        else:
            total_bill = 15
elif size == "M":
    if add_pepperoni == "Y":
        if extra_cheese == "Y":
            total_bill = 20 + 3 + 1
        else:
            total_bill = 20 + 3
    else:
        if extra_cheese == "Y":
            total_bill = 20 + 1
        else:
            total_bill = 20
else:
    if size == "L":
        if add_pepperoni == "Y":
            if extra_cheese == "Y":
                total_bill = 25 + 3 + 1
            else:
                total_bill = 25 + 3
        else:
            if extra_cheese == "Y":
                total_bill = 25 + 1
            else:
                total_bill = 25
    else:
        # NOTE(review): after printing this, the $0 bill is still printed below.
        print("Invalid input.")
print(f"Your final bill is: ${total_bill}.")
# Better way to do this is................
# --- Version 2: accumulate surcharges on top of the base price ------------
bill = 0
if size == "S":
    bill += 15
elif size == "M":
    bill += 20
else:
    # NOTE(review): any unrecognized size is charged at the L price here --
    # confirm whether invalid input should be rejected instead.
    bill += 25
if add_pepperoni == "Y":
    if size == "S":
        bill += 2
    else:
        bill += 3
if extra_cheese == "Y":
    bill += 1
print(f"Your final bill is: ${bill}.")
# ======================================================================================
# The love calculator: the score's tens digit counts the letters of "TRUE"
# in the combined names, the units digit counts the letters of "LOVE".
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n").upper()
name2 = input("What is their name? \n").upper()
combined_names = name1 + name2

# Idiom fix: str.count works directly on strings; the original converted to
# a list for every single letter (eight list(...) round-trips).
first_digit = sum(combined_names.count(letter) for letter in "TRUE")
second_digit = sum(combined_names.count(letter) for letter in "LOVE")
true_love = int(str(first_digit) + str(second_digit))

if true_love < 10 or true_love > 90:
    print(f"Your score is {true_love}, you go together like coke and mentors.")
elif 40 <= true_love <= 50:
    print(f"Your score is {true_love}, you are alright together.")
else:
    print(f"Your score is {true_love}.")
# Treasure Island ====================================
# Treasure Island text adventure: survive three choices to win.
print("""Welcome to Treasure Island.
Your mission is to find the treasure. Good Luck!""")
while True:
    print("You're at a cross road. Where do you want to go?", 'Type "left" or "right"')
    choice = input().lower().strip()
    if choice == "right":
        print('You get robbed and killed. Game Over.')
        break
    print("You come to a lake. There is an island in the middle of the lake.",
          'Type "wait" to wait for a boat. Type "swim" to swim across.')
    choice = input().lower().strip()
    if choice == "swim":
        print('You get eaten by a shark. Game Over.')
        break
    print("You arrive at the island unharmed. There is a house with 3 doors. One red, one yellow, and one blue.",
          'Which color do you choose?')
    choice = input().lower().strip()
    # Fix: ``choice == "red" or "blue"`` was always truthy ("blue" is a
    # non-empty string), so the game could never be won. Test membership.
    if choice in ("red", "blue"):
        print('You enter a room full of beasts. Game Over.')
        break
    print("Hurray! You found the treasure.")
    break  # fix: winning previously restarted the loop forever
| kvngdre/100DaysofPythonCode | Day3.py | Day3.py | py | 3,820 | python | en | code | 0 | github-code | 13 |
37875428782 | import time
import openpyxl
from selenium import webdriver
from openpyxl import load_workbook
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import utils
def parse_page(range_start, range_stop, driver, df):
    """Scrape one page of YOK thesis search results into ``df``.

    Visits result-table rows ``range_start`` .. ``range_stop - 1`` (1-based
    positions inside the results table), opens each row's detail dialog,
    extracts the thesis metadata and prepends it as a new row of ``df``.
    Finally clicks the "next page" pagination link.

    Parameters
    ----------
    range_start, range_stop : int
        Row range, passed straight to ``range()``.
    driver : selenium WebDriver
        Live driver currently showing the search-results page.
    df : pandas.DataFrame
        Accumulator frame; mutated in place and also returned.

    Fixes: the original used ``find_element_by_*`` / ``switch_to_window``
    (removed in Selenium 4) and the private ``driver._switch_to`` attribute;
    the ``find_element(By, ...)`` / ``switch_to`` forms below work on both
    Selenium 3.x and 4.x.
    """
    for row_no in range(range_start, range_stop):
        # Make sure we are back on the main results window before reading.
        driver.switch_to.window(driver.window_handles[0])
        time.sleep(0.3)
        driver.switch_to.active_element
        time.sleep(0.3)

        row_css = ".watable > tbody:nth-child(2) > tr:nth-child(%d)" % row_no
        # Visible table cells: year, title, thesis type.
        yil2 = driver.find_element(By.CSS_SELECTOR, row_css + " > td:nth-child(3)").text
        baslik = driver.find_element(By.CSS_SELECTOR, row_css + " > td:nth-child(4)").text
        tur = driver.find_element(By.CSS_SELECTOR, row_css + " > td:nth-child(5)").text
        # Open the detail dialog for this row.
        driver.find_element(By.CSS_SELECTOR, row_css + " > td:nth-child(1) > span:nth-child(1)").click()
        time.sleep(0.5)

        sonuc_ust_baslik = driver.find_element(By.CSS_SELECTOR, "tr.renkp:nth-child(2) > td:nth-child(3)").text
        sonuc_alt_metin = driver.find_element(By.CSS_SELECTOR, "#td0").text

        # Slice the header text between its Turkish field labels
        # (author / advisor / location / subject / index / keywords).
        yaz = sonuc_ust_baslik.find("Yazar:")
        dan = sonuc_ust_baslik.find("Danışman:")
        yer = sonuc_ust_baslik.find("Yer Bilgisi:")
        kon = sonuc_ust_baslik.find("Konu:")
        diz = sonuc_ust_baslik.find("Dizin:")
        ana = sonuc_alt_metin.find("Kelimeler:")
        yazar = sonuc_ust_baslik[(yaz + 6): dan]
        danisman = sonuc_ust_baslik[(dan + 9): yer]
        yer_bilgisi = sonuc_ust_baslik[(yer + 12): kon]
        konu = sonuc_ust_baslik[(kon + 5): diz]
        dizin = sonuc_ust_baslik[(diz + 6):]
        # Keywords are optional; find() returns -1 when the label is absent.
        anahtar = sonuc_alt_metin[(ana + 10):] if ana != -1 else " "

        # Prepend the row: shifting the index keeps .loc[0] free for the
        # freshly scraped record.
        final = [baslik, yil2, tur, yazar, danisman, yer_bilgisi, konu, dizin, anahtar]
        df.index += 1
        df.loc[0] = final

        time.sleep(0.05)
        # Close the detail dialog; a second click is attempted because the
        # dialog occasionally needs it — failures mean it is already closed.
        driver.find_element(By.CSS_SELECTOR, ".ui-icon").click()
        try:
            driver.find_element(By.CSS_SELECTOR, ".ui-icon").click()
        except Exception:
            pass
        time.sleep(0.05)

    # Advance to the next results page.
    driver.find_element(By.CSS_SELECTOR, ".pagination > ul:nth-child(1) > li:nth-child(7) > a:nth-child(1)").click()
    return df
def main_scrap(yil):
    """Scrape one year of YOK thesis search results and pickle them.

    ``yil`` is the 1-based <option> index of the year dropdown, also used in
    the output file name.  Opens the search page, selects a subject (row 65
    of the subject popup — philosophy; religion would be row 49), restricts
    both year dropdowns to ``yil``, then pages through the results with
    parse_page() and saves the collected DataFrame via utils.save_var().

    NOTE(review): the ``find_element_by_*`` / ``switch_to_window`` calls
    were removed in Selenium 4 — this function only runs on Selenium 3.x.
    """
    driver = webdriver.Chrome()
    main_url = "https://tez.yok.gov.tr/UlusalTezMerkezi/tarama.jsp"
    counter = 0
    """
    workbook = load_workbook(filename = "new.xlsx")
    sheet = workbook.active
    """
    wait = WebDriverWait(driver, 10)
    # accumulator for the scraped thesis records
    df = pd.DataFrame(
        columns=['baslik', 'yil', 'tur', 'yazar', 'danisman', 'yer bilgisi', 'konu', 'dizin', 'anahtar'])
    # open the search page
    driver.get(main_url)
    main_page = driver.window_handles[0]
    # open the subject-selection popup
    select_button = driver.find_element_by_xpath("/html/body/div[2]/div[1]/table/tbody/tr[2]/td/div/div[1]/form/table/tbody/tr/td/table/tbody/tr[6]/td[2]/input[2]").click()
    # switch to the popup window
    konu = driver.window_handles[1]
    driver.switch_to_window(konu)
    time.sleep(1)
    # click the subject (religion = row 49, philosophy = row 65)
    my_konu = driver.find_element_by_css_selector("tr.renka:nth-child(65) > td:nth-child(1) > a:nth-child(1)").click()
    # back to the main window
    driver.switch_to_window(main_page)
    # select the same year in both "from" and "to" dropdowns
    driver.find_element_by_xpath("/html/body/div[2]/div[1]/table/tbody/tr[2]/td/div/div[1]/form/table/tbody/tr/td/table/tbody/tr[2]/td[6]/select[1]/option[%d]" % yil).click()
    driver.find_element_by_xpath("/html/body/div[2]/div[1]/table/tbody/tr[2]/td/div/div[1]/form/table/tbody/tr/td/table/tbody/tr[2]/td[6]/select[2]/option[%d]" % yil).click()
    # press the "Find" button
    driver.find_element_by_xpath("/html/body/div[2]/div[1]/table/tbody/tr[2]/td/div/div[1]/form/table/tbody/tr/td/table/tbody/tr[8]/td/input[3]").click()
    wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="divuyari"]')))
    # read the result count out of the notice banner
    results_text = driver.find_element_by_xpath('//*[@id="divuyari"]').text
    x1 = results_text.split(' ')
    sonuc_sayisi = x1[2]
    print(sonuc_sayisi)
    sonuc_sayisi = int(sonuc_sayisi)
    # the site shows at most 2000 results
    if sonuc_sayisi > 2000:
        sonuc_sayisi = 2000
        print("warning year" + str(yil))
    # Work out how many 30-row pages there are and how many rows the last
    # page holds; the +1 exists because range() excludes its stop value.
    # NOTE(review): both branches below compute identical values, and for
    # exact multiples of 30 `artan` becomes 0, so the final page visit
    # scrapes 0 rows (range(1, 1)) — the if/else is redundant and the
    # exact-multiple case loses the last page's rows.  Verify against a
    # year with a multiple-of-30 result count.
    if sonuc_sayisi >= 30:
        if sonuc_sayisi % 30 == 0 :
            sayfa_sayisi = int(sonuc_sayisi / 30) + 1
            son_sayfa = sayfa_sayisi
            artan = sonuc_sayisi % 30
        else:
            sayfa_sayisi = int( sonuc_sayisi / 30 ) + 1
            artan = sonuc_sayisi % 30
            son_sayfa = sayfa_sayisi
    elif sonuc_sayisi <= 30 and sonuc_sayisi > 0 :
        sayfa_sayisi = 1
        artan = (sonuc_sayisi % 30)
        son_sayfa = 1
    else:
        # NOTE(review): in this branch son_sayfa/artan stay unbound, so the
        # print below raises NameError before the loop's guard can help.
        sayfa_sayisi = "sonuc yok"
    print(sayfa_sayisi, son_sayfa, sonuc_sayisi, artan)
    # walk the result pages, scraping each one into df
    #iteration over total pages
    for sayfa in range(1, sayfa_sayisi+1):
        print(sayfa)
        #checkpoint for if any result exists
        if sayfa_sayisi != "sonuc yok":
            #checkpoint for if the page is last or contains 30 results
            if sayfa != son_sayfa:
                df = parse_page(1,31,driver,df)
            #iterartion over pages with 30 results
            elif sayfa == son_sayfa :
                #insert residual loop here
                df = parse_page(1,artan+1,driver,df)
            else:
                break
    counter += 1
    utils.save_var(df, 'result_dataframe'+str(yil))
    del df
    driver.close()
| unnamed-idea/scrapper-yok_tez_extract | scrapper_ilk.py | scrapper_ilk.py | py | 6,937 | python | en | code | 0 | github-code | 13 |
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge all overlapping intervals.

        Sorts by start point, then sweeps once, extending the last merged
        interval while the next one overlaps it.  O(n log n) time overall.

        The original nested-loop version re-scanned ranges with manual index
        bookkeeping and mutated the input sublists; this single pass is
        simpler and produces the same merged output (including for the
        empty and single-interval cases).
        """
        intervals.sort()
        merged: List[List[int]] = []
        for start, end in intervals:
            if merged and start <= merged[-1][1]:
                # Overlaps the previous interval: extend its right edge.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged
# 执行用时:
# 72 ms
# , 在所有 Python3 提交中击败了
# 18.31%
# 的用户
# 内存消耗:
# 14.5 MB
# , 在所有 Python3 提交中击败了
# 93.75%
# 的用户 | hwngenius/leetcode | learning/merge_intervals/56.py | 56.py | py | 977 | python | zh | code | 1 | github-code | 13 |
35514125355 | import numpy as np
import torch
import torch.nn as nn
from easydict import EasyDict
import os
import sys
src_dir = os.path.dirname(os.path.realpath(__file__))
while not src_dir.endswith("AR3D"):
src_dir = os.path.dirname(src_dir)
if src_dir not in sys.path:
sys.path.append(src_dir)
from utils.anchor_generator import AnchorGenerator
from utils.box_coder_utils import ResidualCoder
class DetHead(nn.Module):
    """Anchor-based 3D detection head.

    Predicts, per anchor, a class score, a 7-target box residual
    (x, y, z, dx, dy, dz, rz) and a direction class, then decodes the
    residuals against a pre-generated anchor grid.  The (x, y, z) targets
    are "decoupled": they come from the deeper conv_box1 branch while the
    remaining four come from the single-conv conv_box2 branch.
    """

    def __init__(self, cfgs):
        # cfgs must expose `point_range` and `grid_size` (consumed by the
        # anchor generator below).
        super().__init__()
        # Targets predicted by the deeper branch vs. the plain branch.
        self.decoupled_item = ['x', 'y', 'z']
        self.det_item = ['x', 'y', 'z', 'dx', 'dy', 'dz', 'rz']
        self.retain_item = [x for x in self.det_item if x not in self.decoupled_item]
        # Channel counts follow the per-target slicing in forward(): each
        # target gets 2 channels (presumably one per anchor rotation —
        # TODO confirm).  cls: 2; box1: 3 targets * 2; box2: 4 targets * 2;
        # dir: 2 bins * 2.
        self.conv_cls = nn.Conv2d(384, 2, kernel_size=1)
        self.conv_box1 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=1),
            nn.BatchNorm2d(384, eps=1e-3, momentum=0.01),
            nn.ReLU(),
            nn.Conv2d(384, 6, kernel_size=1)
        )
        self.conv_box2 = nn.Conv2d(384, 8, kernel_size=1)
        self.conv_dir_cls = nn.Conv2d(384, 4, kernel_size=1)
        # ~pi/4 offset applied when folding direction bins back into rz.
        self.dir_offset = 0.78539
        self.box_coder = ResidualCoder(len(self.det_item))
        self.point_range = cfgs.point_range
        self.grid_size = cfgs.grid_size
        # Single anchor class (car-sized boxes, two rotations).
        self.anchor_config = [
            EasyDict({
                'anchor_sizes': [[3.9, 1.6, 1.56]],
                'anchor_rotations': [0, 1.57],
                'anchor_bottom_heights': [-1.78],
                'align_center': False,
            })
        ]
        anchor_generator = AnchorGenerator(
            anchor_range=self.point_range,
            anchor_generator_config=self.anchor_config
        )
        # Head operates at half the BEV grid resolution.
        feature_map_size = [self.grid_size[:2] // 2]
        anchors_list, _ = anchor_generator.generate_anchors(feature_map_size)
        # NOTE(review): anchors are pinned to CUDA here, so the module
        # cannot run on CPU as written.
        self.anchors = [x.cuda() for x in anchors_list]

    def limit_period(self, val, offset=0.5, period=np.pi):
        # Wrap `val` into [-offset*period, (1-offset)*period).
        ans = val - torch.floor(val / period + offset) * period
        return ans

    def generate_predicted_boxes(self, batch_size, cls_preds, box_preds, dir_cls_preds):
        """Decode raw head outputs into per-anchor scores and boxes.

        Returns (batch_cls_preds, batch_box_preds) with shapes
        (batch, num_anchors, n_cls) and (batch, num_anchors, 7).
        """
        anchors = torch.cat(self.anchors, dim=-3)
        num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0]
        batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
        batch_cls_preds = cls_preds.view(batch_size, num_anchors, -1).float()
        batch_box_preds = box_preds.view(batch_size, num_anchors, -1)
        batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors)
        # Fold the predicted direction bin back into the decoded yaw: wrap
        # the residual, then add pi per direction label.
        dir_cls_preds = dir_cls_preds.view(batch_size, num_anchors, -1)
        dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
        dir_rot = self.limit_period(batch_box_preds[..., 6] - self.dir_offset, 0)
        batch_box_preds[..., 6] = dir_rot + self.dir_offset + np.pi * dir_labels.to(batch_box_preds.dtype)
        return batch_cls_preds, batch_box_preds

    def forward(self, x):
        # x: BEV feature map with 384 channels — assumed (N, 384, H, W);
        # TODO confirm against the backbone's output.
        batch_size = x.shape[0]
        cls_preds = self.conv_cls(x)
        box_preds1 = self.conv_box1(x)
        box_preds2 = self.conv_box2(x)
        dir_cls_preds = self.conv_dir_cls(x)
        # Re-interleave the two branches' channels back into det_item order
        # (2 channels per target).
        box_preds_list = []
        box_preds_dict = {}
        for i in range(len(self.decoupled_item)):
            box_preds_dict[self.decoupled_item[i]] = box_preds1[:, i * 2:(i + 1) * 2, ...]
        for i in range(len(self.retain_item)):
            box_preds_dict[self.retain_item[i]] = box_preds2[:, i * 2:(i + 1) * 2, ...]
        for k in self.det_item:
            box_preds_list.append(box_preds_dict[k])
        box_preds = torch.cat(box_preds_list, dim=1)
        cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]
        box_preds = box_preds.permute(0, 2, 3, 1).contiguous()  # [N, H, W, C]
        dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
        batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(batch_size, cls_preds, box_preds,
                                                                         dir_cls_preds)
        return batch_cls_preds, batch_box_preds
| weiyangdaren/ER3D | model/det_head.py | det_head.py | py | 4,161 | python | en | code | 0 | github-code | 13 |
23185699145 | import pandas as pd
'''
@alt(表データ=[データフレーム|データフレーム|表[データ|]])
@alt(カラム=[列|列|カラム])
@alt(インデックス|行)
@alt(欠損値|NaN|未入力値)
@alt(変更する|増やす|減らす)
@alt(抽出する|取り出す|[選択する|選ぶ])
@alt(全ての|すべての|全)
@alt(の名前|名)
@alt(の一覧|一覧|[|の]リスト)
@prefix(df;[データフレーム|表データ])
@prefix(ds;[データ列|データフレームの[列|カラム]])
@prefix(column;[列|カラム];[列|カラム])
@prefix(value;[文字列|日付])
'''
columns = ['列A', '列B', '列C']
column, column2, column3 = '列A', '列B', '列C'
df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=['列A', '列B', '列C'])
#df2 = pd.DataFrame(data={'列A': [1, 2], '列B': [2, 1]})
#ds, ds2 = df[column], df[column2]
def func(x): return '列A'
関数 = func
# グループ化
__X__ = '列A'
'''
@X('列A';['列A', '列B'];関数)
@Y([ある|指定した|]カラム;2つの[列|列|カラム];関数)
'''
df.groupby(__X__)
'''
@alt(グループ化する=[グループ化する|集[約|計]する|[グループ分け|分類]する])
@alt(グループ化した|集[約|計]した|まとめた)
@alt(表グループ=[グループ|表])
@alt(ごと|毎|)
@alt(それぞれの|各|)
{dfを|__Y__[の値|のカテゴリ|][によって|で]}グループ化する
{dfを|__Y__[|の値][によって|で]}まとめた表グループ[|を得る]
'''
df.groupby(__X__).describe()
'''
@alt(要約統計量|記述統計量|[|基本]統計量)
{dfを|__Y__[の値|][によって|で]}グループ化し、要約統計量を求める
'''
df.groupby('列A', dropna=False)
'''
{dfを|欠損値を含めて|あるカラム[の値|]で}グループ化する
'''
dropna = True
'''
option: 欠損値[は無視する|を含めない]
'''
dropna = True
'''
option: 欠損値[も無視しない|[も|を]含める]
'''
[(name, group_df) for name, group_df in df.groupby(__X__)]
'''
{dfを|__Y__[の値|][によって|ごとに|で]}グループ化して、列挙する
'''
[name for name, _ in df.groupby(__X__)]
'''
{dfを|__Y__[によって|ごとに|で]}グループ化して、グループ名を列挙する
'''
グループ名 = 'A'
df.groupby('列A').get_group(グループ名)
'''
@alt(のカテゴリ|の値|)
dfをあるカラムのカテゴリで_グループ化して、グループ名で取り出す
'''
df.groupby('列A').size()
'''
dfをあるカラムのカテゴリで_グループ化して、それぞれのグループごとの件数を知る
'''
df.groupby(column).size()[s]
'''
dfを各column毎にグループ化して、sというグループの[個数|大きさ]を求める
'''
df.groupby('列A').__X__
'''
@X(sum()|mean()|count()|max()|min()|var()|std())
@Y(合計;平均値;個数;最大値;最小値;分散;標準偏差)
指定したカラムのカテゴリで集計し、[それぞれの|]__Y__を求める
dfをグループ化し、[それぞれの|]__Y__を求める
あるカラムのカテゴリごとの__Y__[|を求める]
'''
df.groupby(['列A', '列B'], as_index=False).__X__
'''
[ふたつ|2つ|複数]のカラム[から|を組み合わせて|で_]グループ化し、__Y__を求める
'''
df.groupby('列A')['列B'].__X__
'''
dfをグループ化し、あるカラムに対し__Y__を求める
'''
df.groupby('列A').describe()['列B']
'''
dfをグループ化し、あるカラムの要約統計量を求める
'''
| KuramitsuLab/multiese | new_corpus/_pandas_groupby.py | _pandas_groupby.py | py | 3,498 | python | ja | code | 1 | github-code | 13 |
73859750738 | """Add error column to swaps
Revision ID: 92f28a2b4f52
Revises: 9b8ae51c5d56
Create Date: 2021-08-17 03:46:21.498821
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "92f28a2b4f52"
down_revision = "9b8ae51c5d56"
branch_labels = None
depends_on = None
def upgrade():
    # Add a nullable free-text column for recording swap-decoding errors.
    op.add_column("swaps", sa.Column("error", sa.String(256), nullable=True))
def downgrade():
    # Reverse of upgrade(): drop the error column (its data is lost).
    op.drop_column("swaps", "error")
| flashbots/mev-inspect-py | alembic/versions/92f28a2b4f52_add_error_column_to_swaps.py | 92f28a2b4f52_add_error_column_to_swaps.py | py | 459 | python | en | code | 750 | github-code | 13 |
34341129142 | import school_scores
lists = school_scores.get_all()
#1st element in data set
#print(lists)
#print(lists[0])
#each state and year
#for i in range(len(lists)):
#print(lists[i]["State"])
#print(lists[i]["Year"])
#same, but for each row
#for row in lists:
#info = row["State"]
#print(info["Name"], row["Year"])
#print(row["Year"])
#shorter Way
#for row in lists:
#print(row["State"]["Name"], row["Year"])
#print # of test takers per state per year
# Print each state's name with its total number of test takers, one line
# per (state, year) record in the dataset.
for num in lists:
    print(num["State"]["Name"], num["Total"]["Test-takers"])
| jjefferson34/projects | projects/schoolscores/schoolscores.py | schoolscores.py | py | 578 | python | en | code | 0 | github-code | 13 |
1949397494 | import pytest
from os import environ
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
    """Ensure the env vars the code under test reads are always set.

    Values already present in the real environment win; otherwise harmless
    fakes are patched in so tests run without configuration (e.g. in CI).
    """
    if environ.get('GOM_GITHUB_TOKEN') is None:
        monkeypatch.setenv('GOM_GITHUB_TOKEN', 'some-fake-token-123456')
    if environ.get('GOM_ORG') is None:
        monkeypatch.setenv('GOM_ORG', 'tinwhiskersband')
@pytest.fixture(scope='module')
def vcr_config():
    """pytest-vcr configuration: keep real auth tokens out of cassettes."""
    return {
        # Replace the Authorization request header with "DUMMY" in cassettes
        "filter_headers": [('Authorization', 'DUMMY')],
    }
| ianchesal/github-organization-manager | tests/conftest.py | conftest.py | py | 517 | python | en | code | 0 | github-code | 13 |
26925245876 | from typing import List
class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum of a rotated ascending array (no duplicates).

        Binary search against the right end: if nums[mid] > nums[r] the
        pivot (minimum) lies strictly to the right of mid, otherwise it is
        at mid or to its left.  O(log n).

        Fixes over the original: no mid±1 indexing (which relied on Python's
        negative-index wraparound) and no implicit ``return None``
        fall-through when the loop exits.
        """
        l, r = 0, len(nums) - 1
        while l < r:
            mid = (l + r) // 2
            if nums[mid] > nums[r]:
                l = mid + 1
            else:
                r = mid
        return nums[l]
# 执行用时:
# 44 ms
# , 在所有 Python3 提交中击败了
# 38.41%
# 的用户
# 内存消耗:
# 15.1 MB
# , 在所有 Python3 提交中击败了
# 5.10%
# 的用户 | hwngenius/leetcode | learning/Modified_Binary_Search/153.py | 153.py | py | 567 | python | zh | code | 1 | github-code | 13 |
9209658987 | import base64
import hashlib
import hmac
import json
import os
import time
import requests
lfasr_host = 'http://raasr.xfyun.cn/api'
# 请求的接口名
api_prepare = '/prepare'
api_upload = '/upload'
api_merge = '/merge'
api_get_progress = '/getProgress'
api_get_result = '/getResult'
# 文件分片大小10M
file_piece_sice = 10485760
# ——————————————————转写可配置参数————————————————
# 参数可在官网界面(https://doc.xfyun.cn/rest_api/%E8%AF%AD%E9%9F%B3%E8%BD%AC%E5%86%99.html)查看,根据需求可自行在gene_params方法里添加修改
# 转写类型
lfasr_type = 0
# 是否开启分词
has_participle = 'false'
has_seperate = 'true'
# 多候选词个数
max_alternatives = 0
# 子用户标识
suid = ''
class SliceIdGenerator:
    """Generator for the slice-id sequence the LFASR upload API expects.

    Ids behave like a 10-digit base-26 counter over lowercase letters:
    'aaaaaaaaaa', 'aaaaaaaaab', ..., 'aaaaaaaaaz', 'aaaaaaaaba', ...
    The seed ends in a backtick (the character just before 'a' in ASCII)
    so the very first call yields 'aaaaaaaaaa'.
    """

    def __init__(self):
        self.__ch = 'aaaaaaaaa`'

    def getNextSliceId(self):
        """Advance the counter and return the next slice id."""
        digits = list(self.__ch)
        pos = len(digits) - 1
        # Increment from the rightmost character, carrying past 'z'.
        while pos >= 0:
            if digits[pos] == 'z':
                digits[pos] = 'a'
                pos -= 1
            else:
                digits[pos] = chr(ord(digits[pos]) + 1)
                break
        self.__ch = ''.join(digits)
        return self.__ch
class RequestApi(object):
    """Client for the iFLYTEK LFASR (long-form ASR) REST API.

    Drives the full prepare -> upload -> merge -> poll -> result workflow
    for one audio file, then renames the file to its transcription text.
    """

    def __init__(self, appid, secret_key, upload_file_path):
        # appid / secret_key: iFLYTEK console credentials;
        # upload_file_path: local audio file to transcribe.
        self.appid = appid
        self.secret_key = secret_key
        self.upload_file_path = upload_file_path

    # Build the parameter dict for the given API endpoint.  Not every
    # documented parameter is used here; see the official REST docs for
    # the full list.
    def gene_params(self, apiname, taskid=None, slice_id=None):
        appid = self.appid
        secret_key = self.secret_key
        upload_file_path = self.upload_file_path
        ts = str(int(time.time()))
        m2 = hashlib.md5()
        m2.update((appid + ts).encode('utf-8'))
        md5 = m2.hexdigest()
        md5 = bytes(md5, encoding='utf-8')
        # signa = base64(HMAC-SHA1(key=secret_key, msg=md5(appid + ts)))
        signa = hmac.new(secret_key.encode('utf-8'), md5, hashlib.sha1).digest()
        signa = base64.b64encode(signa)
        signa = str(signa, 'utf-8')
        file_len = os.path.getsize(upload_file_path)
        file_name = os.path.basename(upload_file_path)
        param_dict = {}
        if apiname == api_prepare:
            # slice_num is the number of upload chunks; short audio can use
            # a single slice, hence the hard-coded 1.
            slice_num = 1 # int(file_len / file_piece_sice) + (0 if (file_len % file_piece_sice == 0) else 1)
            param_dict['app_id'] = appid
            param_dict['signa'] = signa
            param_dict['ts'] = ts
            param_dict['file_len'] = str(file_len)
            param_dict['file_name'] = file_name
            param_dict['slice_num'] = str(slice_num)
        elif apiname == api_upload:
            param_dict['app_id'] = appid
            param_dict['signa'] = signa
            param_dict['ts'] = ts
            param_dict['task_id'] = taskid
            param_dict['slice_id'] = slice_id
        elif apiname == api_merge:
            param_dict['app_id'] = appid
            param_dict['signa'] = signa
            param_dict['ts'] = ts
            param_dict['task_id'] = taskid
            param_dict['file_name'] = file_name
        elif apiname == api_get_progress or apiname == api_get_result:
            param_dict['app_id'] = appid
            param_dict['signa'] = signa
            param_dict['ts'] = ts
            param_dict['task_id'] = taskid
        return param_dict

    # POST to the endpoint and parse the JSON response.
    # NOTE(review): on error this calls exit(0) — it kills the whole
    # process (including the multiprocessing worker) and reports *success*
    # to the shell; an exception or non-zero exit would be correct.
    def gene_request(self, apiname, data, files=None, headers=None):
        response = requests.post(lfasr_host + apiname, data=data, files=files, headers=headers)
        result = response.json()
        if result["ok"] == 0:
            print("{} success:".format(apiname) + str(result))
            return result
        else:
            print("{} error:".format(apiname) + str(result))
            exit(0)
            return result

    # Step 1: create the transcription task.
    def prepare_request(self):
        return self.gene_request(apiname=api_prepare,
                                 data=self.gene_params(api_prepare))

    # Step 2: upload the audio in file_piece_sice chunks.
    def upload_request(self, taskid, upload_file_path):
        file_object = open(upload_file_path, 'rb')
        try:
            index = 1
            sig = SliceIdGenerator()
            while True:
                content = file_object.read(file_piece_sice)
                if not content or len(content) == 0:
                    break
                # NOTE(review): gene_params(api_upload) is called without a
                # slice_id, so .get("slice_id") is always None — the
                # "filename" form field is effectively empty.  Confirm
                # whether the API relies on it.
                files = {
                    "filename": self.gene_params(api_upload).get("slice_id"),
                    "content": content
                }
                response = self.gene_request(api_upload,
                                             data=self.gene_params(api_upload, taskid=taskid,
                                                                   slice_id=sig.getNextSliceId()),
                                             files=files)
                if response.get('ok') != 0:
                    # a chunk upload failed
                    print('upload slice fail, response: ' + str(response))
                    return False
                print('upload slice ' + str(index) + ' success')
                index += 1
        finally:
            # NOTE(review): this is a no-op string expression, not a print.
            'file index:' + str(file_object.tell())
            file_object.close()
        return True

    # Step 3: tell the server all slices are uploaded.
    def merge_request(self, taskid):
        return self.gene_request(api_merge, data=self.gene_params(api_merge, taskid=taskid))

    # Poll the task's processing progress.
    def get_progress_request(self, taskid):
        return self.gene_request(api_get_progress, data=self.gene_params(api_get_progress, taskid=taskid))

    # Fetch the finished transcription.
    def get_result_request(self, taskid):
        return self.gene_request(api_get_result, data=self.gene_params(api_get_result, taskid=taskid))

    def all_api_request(self):
        """Run the full workflow for self.upload_file_path.

        On success, renames the audio file to the first transcription
        segment's text (keeping the extension).
        """
        # 1. prepare
        pre_result = self.prepare_request()
        taskid = pre_result["data"]
        # 2. chunked upload
        self.upload_request(taskid=taskid, upload_file_path=self.upload_file_path)
        # 3. merge slices server-side
        self.merge_request(taskid=taskid)
        # 4. poll task progress until status 9 (finished)
        while True:
            # poll the task progress in a loop
            progress = self.get_progress_request(taskid)
            progress_dic = progress
            # err_no 26605 means "still processing" and is not fatal.
            if progress_dic['err_no'] != 0 and progress_dic['err_no'] != 26605:
                print('task error: ' + progress_dic['failed'])
                return
            else:
                data = progress_dic['data']
                task_status = json.loads(data)
                if task_status['status'] == 9:
                    print('task ' + taskid + ' finished')
                    break
                print('The task ' + taskid + ' is in processing, task status: ' + str(data))
                # NOTE(review): comment in the original said "every 20s"
                # but the actual interval is 0.5s — confirm intended rate.
                time.sleep(0.5)
        # 5. fetch the result and rename the audio file after it
        result = self.get_result_request(taskid=taskid)
        if result['err_no'] == 0:
            root, file = os.path.split(self.upload_file_path)
            file_split = file.split('.')
            print(file_split)
            word = json.loads(result['data'])
            try:
                if len(file_split) == 2 and word:
                    os.rename(self.upload_file_path, os.path.join(root, word[0]['onebest'] + '.' + file_split[1]))
                    # NOTE(review): the file was just renamed, so this
                    # remove of the old path raises FileNotFoundError,
                    # which the except below (FileExistsError) does not
                    # catch — confirm intent.
                    os.remove(self.upload_file_path)
            except FileExistsError as e:
                print(self.upload_file_path + "转换结果相同")
def change_music(p):
    """Walk FOLDER and submit every supported audio file to the pool.

    Each matching file is handed to RequestApi.all_api_request via the
    multiprocessing pool ``p`` (asynchronously; the caller is responsible
    for p.close()/p.join()).  Reads the module globals FOLDER and
    MUSIC_FORMAT set in the __main__ guard.

    Fixes over the original: (1) it pushed bare sub-directory *names* onto
    a work stack and re-walked them even though os.walk() already recurses,
    so files could be visited twice or the relative re-walks could silently
    miss them; (2) paths were built with a hard-coded Windows "\\\\"
    separator instead of os.path.join(); (3) extension-less file names
    raised IndexError on ``f_format[1]``.
    """
    for home, _dirs, files in os.walk(FOLDER):
        for f in files:
            parts = f.split('.')
            # Only convert files whose extension is a supported audio format.
            if len(parts) > 1 and parts[1] in MUSIC_FORMAT:
                path = os.path.join(home, f)
                # NOTE(review): credentials are hard-coded here; move them
                # to configuration / environment variables.
                api = RequestApi(appid="5e25f6bc", secret_key="edcee713d4612abf7ea503cbf7023d48",
                                 upload_file_path=path)
                p.apply_async(api.all_api_request)
from multiprocessing import Pool
if __name__ == '__main__':
p = Pool(8)
# 需要转换的文件夹路径
FOLDER = r"D:\CloudMusic"
# 音频格式
MUSIC_FORMAT = ['wav', 'mp3']
start = time.time()
change_music(p)
end = time.time()
p.close()
p.join()
print(end - start)
| hzeyuan/100-Python | lol语音转文字.py | lol语音转文字.py | py | 8,952 | python | en | code | 8 | github-code | 13 |
29860826649 |
from distutils.core import setup
from iterutils import iterutils_version
classifiers = [
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Topic :: Software Development :: Libraries :: Python Modules']
# This seems like a standard method.
long_description = open('README.rst').read()
# Package metadata; the version is imported from the module itself so the
# two can never drift apart.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — migrate
# to setuptools when the supported Python range allows.
setup(
    name = 'iterutils',
    version = iterutils_version,
    author = 'Raymond Hettinger and friends',
    author_email = '',
    license = 'http://docs.python.org/license.html',
    description = 'Itertools recipes.',
    url = 'http://pypi.python.org/pypi/iterutils',
    py_modules = ['iterutils'],
    long_description = long_description,
    classifiers = classifiers)
| argriffing/iterutils | setup.py | setup.py | py | 800 | python | en | code | 3 | github-code | 13 |
42659483394 | import socket
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
port= 4500
str= input('Give the value to be encrypted --> \n')
temp = input('Give the value to encrypt --> \n')
def generateKey(string, key):
    """Return *key* cycled to exactly the length of *string*.

    The Vigenère cipher needs a keystream as long as the plaintext, so the
    key is repeated: generateKey("HELLO", "KEY") -> "KEYKE".

    Fixes over the original: it returned a *list* when the lengths happened
    to match and a string otherwise (inconsistent return type), returned an
    over-long keystream when the key was longer than the text, and raised
    ZeroDivisionError for an empty key.  This version always returns a
    string of len(string) and rejects an empty key explicitly.
    """
    if not key:
        raise ValueError("key must be non-empty")
    repeats = -(-len(string) // len(key))  # ceiling division
    return (key * repeats)[:len(string)]
def encrypt(string, key):
    """Vigenère-encrypt *string* (uppercase A-Z) with keystream *key*.

    Each output letter is chr(ord('A') + (ord(plain) + ord(key)) % 26);
    because 2 * ord('A') = 130 is a multiple of 26, this equals the classic
    (p + k) mod 26 shift for uppercase input.  *key* must be at least as
    long as *string* (generateKey guarantees this).
    """
    cipher_text = [
        chr(ord('A') + (ord(string[i]) + ord(key[i])) % 26)
        for i in range(len(string))
    ]
    return ''.join(cipher_text)
key = generateKey(str, temp)
encrypted = encrypt(str, key)
print(f'generated key --> {key}')
print(f'encrypted string --> {encrypted}')
skt.connect((host, port))
skt.send(encrypted.encode('utf-8'))
str2 = skt.recv(2048).decode('utf-8')
print(f'the decrypted string recieved from the server --> {str2}')
if(str2 == str) :
print('Server decrypted the message correctly')
else :
print('Server did not decrypt the message correctly')
| AatirNadim/Socket-Programming | vigenere_cipher_v2/client.py | client.py | py | 1,135 | python | en | code | 0 | github-code | 13 |
1472998304 | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django import forms
from SU_Transportation.accounts.models import SuUser
user_model = get_user_model()
class SuUserCreateForm(UserCreationForm):
    """Sign-up form for the project's user model, exposing username/email."""

    # BUG FIX: Meta must extend UserCreationForm.Meta (the options holder);
    # the original inherited the *form class* itself, which is not a valid
    # Meta base and drops the parent form's Meta options.
    class Meta(UserCreationForm.Meta):
        model = user_model
        fields = ('username', 'email')
class SuUserEditForm(forms.ModelForm):
    """Profile-edit form exposing the user's basic fields with display labels."""

    class Meta:
        model = SuUser
        fields = ('username', 'first_name', 'last_name', 'email', 'profile_picture', 'gender')
        # Human-friendly labels rendered next to each field.
        labels = {
            'username': 'Username',
            'first_name': 'First Name',
            'last_name': 'Last Name',
            'email': 'Email',
            'profile_picture': 'Image',
            'gender': 'Gender'
        }
25041245109 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Use pretrained networks to detect cells in the images.
https://www.pyimagesearch.com/2017/09/11/object-detection-with-deep-learning-and-opencv/
First create a new train data, where each data point represent only ONE cell.
This data is programmatically generated using the image and masks provided.
Then, train the model with this new train data so that all the cells can be
detected in the test data set.
"""
import os
import cv2
import yaml
from data import Image
from logger import Logger
_ = Logger(__name__).create()
def file_path(id_, mask=None):
    """Build the path to an image (or one of its masks) under the cwd.

    With ``mask`` given, returns <cwd>/<id_>/masks/<mask>; otherwise
    <cwd>/<id_>/images/<id_>.png.
    """
    if mask is None:
        return os.path.join(os.getcwd(), id_, 'images', id_ + '.png')
    return os.path.join(os.getcwd(), id_, 'masks', mask)
def read_img(id_, mask=None):
    """Load the image (or the given mask) for ``id_`` as grayscale.

    Delegates path construction to file_path(); returns whatever
    cv2.imread yields (an array, or None if the file is unreadable).
    """
    path = file_path(id_) if mask is None else file_path(id_, mask)
    return cv2.imread(path, cv2.IMREAD_GRAYSCALE)
def load_data(id_):
    """Load one sample's image and all of its masks into an Image container.

    Reads <cwd>/<id_>/images/<id_>.png plus every file under
    <cwd>/<id_>/masks/ (all grayscale) and wraps them in the project's
    Image type.
    """
    _.debug('Reading image ID {}'.format(id_))
    mask_files = os.listdir(os.path.join(os.getcwd(), id_, 'masks'))
    image = read_img(id_)
    masks = [read_img(id_, mask=i) for i in mask_files]
    return Image(id_, image, masks)
def save_data(image):
    """
    Save individual cell images to the directory of the respective image IDs
    """
    cells = image.cells
    # ROOT_DIR is a module global assigned in the __main__ guard — this
    # function only works after that configuration has run.
    target_dir = os.path.join(ROOT_DIR, image.id, 'cells')
    current_dir = os.getcwd()
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Image.write appears to write relative to the cwd, hence the chdir
    # dance; restore the original directory afterwards.
    # NOTE(review): not exception-safe — an error in write() leaves the
    # process in target_dir.  Consider a try/finally.
    os.chdir(target_dir)
    Image.write(cells, template='cell')
    os.chdir(current_dir)
    return cells
def sample_background(image_entry, size=(25, 25)):
    """
    image_entry is an entry of the list returned by load_data().
    Use the masks to extract the parts of images that do not contain
    pixels that belong to a cell. Then, use padding to normalize the
    image sizes. They must be the same size as cell images.
    """
    # TODO: unimplemented stub — currently always returns None, so
    # train_cell_image() collects a list of Nones for background_images.
    return
def preprocess(*data):
    """Identity placeholder for future preprocessing.

    Accepts any number of image collections and returns them unchanged,
    as a tuple in the order given.
    """
    return tuple(data)
def train_cell_image(train_data):
    """
    original_data is an array of dictionaries returned by load_data().
    """
    # Persist per-cell crops for every training image.
    # Array of images of individual cells
    cell_images = [save_data(image) for image in train_data]
    # Background samples — sample_background() is still a stub returning
    # None, so this is currently a list of Nones.
    # Array of images of background that doesn't contain cells
    background_images = [sample_background(k) for k in train_data]
    # NOTE(review): no model is trained yet; this function only prepares
    # (and currently discards) the data.
    cell_images, background_images = preprocess(cell_images, background_images)
if __name__ == '__main__':
# Make sure to edit the config file to the proper root directory.
# It is the full path to the stage1_train folder.
with open('config.yml', 'r') as rf:
config = yaml.safe_load(rf)
ROOT_DIR = config['root-dir']
os.chdir(ROOT_DIR)
img_ids = os.listdir('.')
train_data = [load_data(i) for i in img_ids]
train_cell_image(train_data)
| tylerhslee/kaggle2018 | main.py | main.py | py | 2,881 | python | en | code | 0 | github-code | 13 |
28076971980 | import os
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
import torch.nn.functional as F
from ml.models.MNIST import MNIST
# class Network(nn.Module):
# def __init__(self):
# super().__init__()
#
# # define layers
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
# self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
#
# self.fc1 = nn.Linear(in_features=12*4*4, out_features=120)
# self.fc2 = nn.Linear(in_features=120, out_features=60)
# self.out = nn.Linear(in_features=60, out_features=10)
#
# # define forward function
# def forward(self, t):
# # conv 1
# t = self.conv1(t)
# t = F.relu(t)
# t = F.max_pool2d(t, kernel_size=2, stride=2)
#
# # conv 2
# t = self.conv2(t)
# t = F.relu(t)
# t = F.max_pool2d(t, kernel_size=2, stride=2)
#
# # fc1
# t = t.reshape(-1, 12*4*4)
# t = self.fc1(t)
# t = F.relu(t)
#
# # fc2
# t = self.fc2(t)
# t = F.relu(t)
#
# # output
# t = self.out(t)
# # don't need softmax here since we'll use cross-entropy as activation.
#
# return t
def get_acc(model):
    """Evaluate *model* on the global ``fashion_test`` loader and print accuracy.

    Runs in eval mode without gradients, on CUDA when available.
    NOTE(review): the accuracy denominator ``len(fashion_test) * 120``
    assumes every batch holds exactly 120 samples — the final partial batch
    makes this a slight under-estimate.  ``test_loss`` is likewise divided
    by the number of *batches*, not samples, and is never printed.  Also,
    nll_loss over raw (non-log-softmax) logits is unusual — confirm intent.
    """
    with torch.no_grad():
        model.eval()
        test_loss = 0
        test_corr = 0
        device_name = "cuda" if torch.cuda.is_available() else "cpu"
        device = torch.device(device_name)
        model.to(device)
        for data, target in fashion_test:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Summed loss over the batch, accumulated then averaged below.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            test_corr += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(fashion_test)
        test_acc = 100. * test_corr / (len(fashion_test) * 120)
        print(test_acc)
if __name__ == '__main__':
fashion = DataLoader(torchvision.datasets.MNIST(
root=os.path.join(os.path.dirname(__file__), '../data', 'train'), train=True, download=True, transform=ToTensor()
), batch_size=32, shuffle=True)
fashion_test = DataLoader(torchvision.datasets.MNIST(
root=os.path.join(os.path.dirname(__file__), '../data', 'test'), train=False, download=True, transform=ToTensor()
), batch_size=120, shuffle=True)
model = MNIST()
device_name = "cuda" if torch.cuda.is_available() else "cpu"
print(device_name)
device = torch.device(device_name)
model = model.to(device)
optimizer = torch.optim.SGD(
model.parameters(),
lr=0.001)
error = nn.CrossEntropyLoss()
print(f"Training for {3} epochs with dataset length: {len(fashion)}")
for _ in range(50):
model.train()
for i, data in enumerate(fashion):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss = error(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 100 == 0:
print(f"{i}/{len(fashion)} Loss: {loss.item()}")
# break
get_acc(model)
| ThomasWerthenbach/Sybil-Resilient-Decentralized-Learning | scripts/MNIST_trainer.py | MNIST_trainer.py | py | 3,263 | python | en | code | 0 | github-code | 13 |
19119121340 | # Input system to ask for heads or tails
# match user action for "heads" or "tails" response
# store the two words into variables and assign them
# stores the heads or tails in a .txt file
# the test is calculated based on the amount of times head is counted
while True:
    # Load the flip history; start fresh if the file does not exist yet
    # (the original crashed with FileNotFoundError on first run).
    try:
        with open("sides.txt", "r") as file:
            head_or_tail = file.readlines()
    except FileNotFoundError:
        head_or_tail = []
    coin_input = input("Throw the coin and enter heads or tails here: ").strip().lower()
    # Reject anything other than the two valid sides instead of polluting
    # the statistics with typos.
    if coin_input not in ("heads", "tails"):
        print("Please enter 'heads' or 'tails'.")
        continue
    head_or_tail.append(coin_input + "\n")
    # Rewrite the full history including the new flip.
    with open("sides.txt", "w") as file:
        file.writelines(head_or_tail)
    # Percentage of all recorded flips that landed on this side.
    nr_flips = head_or_tail.count(coin_input + "\n")
    percentage = nr_flips / len(head_or_tail) * 100
    print(f"{coin_input}: {percentage}%")
| akira-kujo/python101 | assessments/exercise8.py | exercise8.py | py | 732 | python | en | code | 0 | github-code | 13 |
72423644178 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Emprestimo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data_emprestimo', models.DateTimeField(auto_now=True)),
('data_devolucao', models.DateTimeField()),
('leitor', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('livro', models.ForeignKey(to='core.Livro')),
],
),
migrations.CreateModel(
name='Reserva',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', models.DateTimeField(auto_now=True)),
('ativo', models.BooleanField(default=True)),
('leitor', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('livro', models.ForeignKey(to='core.Livro')),
],
),
]
| LEDS/jediteca | jediteca/emprestimo/migrations/0001_initial.py | 0001_initial.py | py | 1,319 | python | en | code | 0 | github-code | 13 |
5615808224 | from chainer import training
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from chainer import datasets, iterators, optimizers
from chainer import training
from chainer.training import extensions
train, test = datasets.mnist.get_mnist()
batchsize = 128
train_iter = iterators.SerialIterator(train, batchsize)
test_iter = iterators.SerialIterator(test, batchsize, False, False)
class MLP(Chain):
    """Two-hidden-layer perceptron: in -> n_mid_units -> n_mid_units -> n_out.

    The input size is inferred lazily by Chainer (first Linear gets None).
    """

    def __init__(self, n_mid_units=100, n_out=10):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_mid_units)
            self.l2 = L.Linear(None, n_mid_units)
            self.l3 = L.Linear(None, n_out)

    def forward(self, x):
        # ReLU after each hidden layer; raw logits out (the loss/Classifier
        # wrapper applies softmax + cross-entropy).
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)
model = MLP()
model.to_cpu()
max_epoch = 10
model = L.Classifier(model)
optimizer = optimizers.Adam()
optimizer.setup(model)
updater = training.updaters.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (max_epoch, 'epoch'), out='mnist_result')
trainer.extend(extensions.LogReport())
trainer.extend(extensions.Evaluator(test_iter, model))
trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy', 'elapsed_time']))
trainer.run() | hackmylife/ml-study | flameworks/chainer/mnist.py | mnist.py | py | 1,352 | python | en | code | 0 | github-code | 13 |
25889012052 | from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from webapp.models import Tracker
from api.serializers import TrackerSerializer
from webapp.models import Project
from api.serializers import ProjectSerializer
class TrackerDetailView(APIView):
    """Read-only detail endpoint: one project plus all of its tracker rows."""

    def get(self, request, pk):
        # 404 if the project does not exist.
        project = get_object_or_404(Project, pk=pk)
        serializer_project = ProjectSerializer(project)
        tracker = Tracker.objects.filter(project_id=pk)
        serializer = TrackerSerializer(tracker, many=True)
        data = {'project': serializer_project.data, 'tracker': serializer.data}
        return Response(data, status=status.HTTP_200_OK)
class TrackerDeleteView(APIView):
    """Delete a project together with every tracker row attached to it."""

    def delete(self, request, pk):
        project = get_object_or_404(Project, pk=pk)
        tracker = Tracker.objects.filter(project_id=pk)
        # Delete children first, then the project itself.
        tracker.delete()
        project.delete()
        return Response({'pk': pk}, status=status.HTTP_204_NO_CONTENT)
class TrackerUpdateView(APIView):
    """Update a project and its first tracker row from one PUT payload."""

    def put(self, request, pk):
        project = get_object_or_404(Project, pk=pk)
        # NOTE(review): only the FIRST tracker row is updated, and `tracker`
        # may be None when the project has no rows -- confirm the serializer
        # tolerates that.
        tracker = Tracker.objects.filter(project_id=pk).first()
        serializer_project = ProjectSerializer(project, data=request.data)
        if serializer_project.is_valid():
            serializer_project.save()
        else:
            return Response(serializer_project.errors, status=400)
        # NOTE(review): the project was already saved above, so an invalid
        # tracker payload leaves a partial update behind.
        serializer = TrackerSerializer(tracker, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        else:
            return Response(serializer.errors, status=400)
| Novel1/homework_70_desyatskii_roman | source/api/views.py | views.py | py | 1,733 | python | en | code | 0 | github-code | 13 |
26039945350 | """
Created on May 23rd, 2021
This script handles the GET and POST requests to the register API endpoint http://localhost:8000/api/places/
'GET':
Returns the html for the search form.
'POST':
Using the location information provided by the user, first connects to the Google's Geocode API
to transform this location in text format into coordinates. Afterwards, using these coordinates
and the keywords provided, retrieves nearby information of nearby locations and passes it to the
Django template 'search_places.html' where the data is processed and displayed to the user.
JSON Format : { 'location': "", string, identifies the location
'keyword': "", string, contains relevant keywords about what type of place to search for
@author: Tolga Kerimoğlu
"""
import googlemaps
from django.shortcuts import render
from rest_framework.response import Response
from .forms import SearchPlacesForm
def places_api(request):
    """Handle GET/POST requests for the places search endpoint.

    GET  -- render the empty search form.
    POST -- geocode the user-supplied location, then query Google Places for
            nearby results matching the keyword and render them.

    Arguments:
        request (HttpRequest): the incoming request (DRF-style, exposes .data)
    Returns:
        HttpResponse rendering 'search_places.html'
    """
    if request.method == 'GET':
        form = SearchPlacesForm()  # Initialize the search form
        return render(request, 'search_places.html', {'form': form})
    if request.method == 'POST':
        location = request.data.get('location', '')
        keyword = request.data.get('keyword', '')
        # Bug fix: validate BEFORE calling the (billed) Google APIs; the
        # original geocoded first and only then rejected empty input.
        if location == "" or keyword == "":
            return render(request, 'search_places.html', {'fail': True}, status=400)
        # SECURITY(review): the API key is hard-coded in source control; it
        # should be loaded from settings or an environment variable.
        gmaps = googlemaps.Client(key='AIzaSyBTjZQUnMQtaGDI_M_6Zrv0tHTh2sY767c')
        # Single geocode call (the original issued two identical requests).
        geocode_results = gmaps.geocode(location)
        if not geocode_results:
            # Unknown location -- previously an unhandled IndexError (HTTP 500).
            return render(request, 'search_places.html', {'fail': True}, status=400)
        loc = geocode_results[0]['geometry']['location']
        # Bug fix: keep coordinates as floats; int() truncation could shift
        # the search centre by up to ~1 degree (~100 km).
        lat, lng = loc['lat'], loc['lng']
        # rank_by='distance' requires a keyword/name/type and no radius.
        search = gmaps.places_nearby(location=(lat, lng), rank_by='distance', keyword=keyword)
        return render(request, 'search_places.html', search, status=200)
| bounswe/2021SpringGroup4 | practice-app/api/places/places.py | places.py | py | 3,106 | python | en | code | 2 | github-code | 13 |
34005530314 | from agenda.Agenda import Agenda
agenda = Agenda()
dados = {}
dados['loja'] = 'Lojas XYZ'
dados['cnpj'] = '123456879'
dados['inicio_dia'] = '20'
dados['inicio_mes'] = '4'
dados['inicio_ano'] = '2022'
dados['inicio_hora'] = '8'
dados['inicio_minuto'] = '30'
dados['erp'] = 'ERP1'
dados['tempo_estimado'] = 60
dados['projeto'] = 'projeto'
dados['objetivo'] = 'Manutenção'
dados['responsavel'] = 'José da Silva'
agenda.adicionar_agendamento(**dados)
# Adicionar diversas Integrações em horários diversos
dados['objetivo'] = 'Integração'
dados['tempo_estimado'] = 180
dados['inicio_hora'] = 8
for dia in range(16, 21):
dados['loja'] = 'Nova Loja dia ' + str(dia)
dados['inicio_dia'] = dia
agenda.adicionar_agendamento(**dados)
# Adicionar diversas manutenções em dias e horários diversos
dados['loja'] = 'Lojas ABC'
dados['objetivo'] = 'Manutenção'
for dia in range(3, 7):
for hora in range(8, 11):
dados['inicio_dia'] = dia
dados['inicio_hora'] = hora
agenda.adicionar_agendamento(**dados)
# Adicionar diversas manutenções em um mesmo horário
dados['objetivo'] = 'Manutenção'
dados['inicio_dia'] = 10
dados['inicio_mes'] = 5
dados['inicio_hora'] = 14
dados['inicio_minutos'] = 30
for loja in range(15):
dados['loja'] = 'Loja ' + str(loja + 1) + ' manutencao '
agenda.adicionar_agendamento(**dados)
# Esperado NappError por estourar o limite de 15
# agenda.adicionar_agendamento(**dados)
# Filtros
agenda.filtro_por_data_hora(20, 4, 2022, 8, 35)
agenda.filtro_por_data_hora(20, 4, 2022, 15, 35)
agenda.filtro_por_data_hora(4, 4, 2022, 8, 35)
agenda.filtro_por_objetivo('Manutenção')
agenda.filtro_por_objetivo('Integração')
agenda.filtro_por_data_hora(10, 5, 2022, 14, 30)
# Adicionar Limite de Regras para um determinado horário
# Hipoteticamente, 17/05/2022 15:30 teremos NappAcademy e vamos
# reduzir o limite de atendimento para no máximo 7 atendimentos.
agenda.adicionar_regra(17, 5, 2022, 15, 30, 4, 5, 7)
dados['objetivo'] = 'Manutenção'
dados['inicio_dia'] = 17
dados['inicio_mes'] = 5
dados['inicio_hora'] = 15
dados['inicio_minutos'] = 30
for loja in range(7):
dados['loja'] = 'Loja ' + str(loja + 1) + ' manutencao '
agenda.adicionar_agendamento(**dados)
# Verificando se temos 7 atendimento
agenda.filtro_por_data_hora(17, 5, 2022, 15, 30)
# Descomente e teremos um NappError
# agenda.adicionar_agendamento(**dados) | orlandosaraivajr/agenda | main.py | main.py | py | 2,415 | python | pt | code | 0 | github-code | 13 |
26952804833 | import cv2
import threading
import numpy as np
import time
import os
import camera
import counter
class VideoRecorder:
    """Record frames from a ``camera.Camera`` into an MJPG-encoded AVI file.

    ``run()`` loops until ``terminate()`` is called, typically from another
    thread (see the ``__main__`` demo below this class).
    """

    def __init__(self, width, height, brightness, contrast, saturation, hue, gain, exposure, gamma, backlight, temperature, sharpness):
        # Polled by run(); cleared by terminate() to stop the capture loop.
        self._running = True
        self.width = width
        self.height = height
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
        self.gain = gain
        self.exposure = exposure
        self.gamma = gamma
        self.backlight = backlight
        self.temperature = temperature
        self.sharpness = sharpness
        # Threaded capture wrapper; .start() begins grabbing frames.
        self.cap = camera.Camera(self.width, self.height, self.brightness, self.contrast, self.saturation, self.hue, self.gain, self.exposure, self.gamma, self.backlight, self.temperature, self.sharpness).start()
        # The driver may not honour the requested size, so read back the
        # actual capture properties for the writer.
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.real_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.real_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.fourcc = cv2.VideoWriter_fourcc(*"MJPG")

    def terminate(self):
        """Signal the capture loop in run() to stop."""
        self._running = False

    def run(self, path):
        """Capture frames and write them to *path* until terminated."""
        out = cv2.VideoWriter(path, self.fourcc, self.fps, (self.real_width, self.real_height))
        # Rate limiter: only write frames at the camera's reported FPS.
        fp = counter.FPS(self.fps).start()
        while self._running:
            frame = self.cap.read()
            if fp.ready():
                out.write(frame)
                fp.update()
        fp.stop()
        print("[INFO] video saved in", path)
        print("[INFO] width:", self.real_width, 'height:', self.real_height)
        print("[INFO] elasped time: {:.2f}".format(fp.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fp.fps()))
        self.cap.stop()
        out.release()
if __name__ == '__main__':
vr = VideoRecorder(640, 480, 29, 40, 30, -23, 55, 25, 121, 1, 4690, 2)
vrt = threading.Thread(target = vr.run, args=('output.avi',))
vrt.start()
time.sleep(10)
vr.terminate()
vrt.join() | euxcet/record | backend/record.py | record.py | py | 1,812 | python | en | code | 0 | github-code | 13 |
71876016657 | """
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
import os
from flask import Flask, request, jsonify, url_for, json
from flask_migrate import Migrate
from flask_swagger import swagger
from flask_cors import CORS
from utils import APIException, generate_sitemap
from admin import setup_admin
from models import db, User, Character, Location, favorites
from models import Character
from models import Location
from models import favorites
app = Flask(__name__)
app.url_map.strict_slashes = False
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
MIGRATE = Migrate(app, db)
db.init_app(app)
CORS(app)
setup_admin(app)
# Handle/serialize errors like a JSON object
@app.errorhandler(APIException)
def handle_invalid_usage(error):
    """Serialize APIException instances as JSON with their status code."""
    return jsonify(error.to_dict()), error.status_code
# generate sitemap with all your endpoints
@app.route('/')
def sitemap():
    """Development index page listing every registered endpoint."""
    return generate_sitemap(app)
@app.route('/characters', methods=['GET', 'POST'])
def get_create_characters():
    """List all characters (GET) or create one from the JSON body (POST)."""
    if request.method == 'GET':
        character = Character.query.all()
        characters_list = list(map(lambda character: character.serialize(), character))
        return jsonify(characters_list), 200
    else:
        body = request.get_json()
        # NOTE(review): missing JSON fields raise KeyError (HTTP 500) --
        # consider validating the payload and returning 400 instead.
        character = Character()
        character.name = body['name']
        character.status = body['status']
        character.species = body['species']
        character.gender = body['gender']
        character.origin = body['origin']
        db.session.add(character)
        db.session.commit()
        response_body = {
            "created": True,
            "character": character.serialize()
        }
        return jsonify(response_body), 201
@app.route('/characters/<int:character_id>', methods=['GET'])
def get_single_character(character_id):
    """Return one character by primary key, or 404 if it does not exist.

    Bug fix: ``query.get`` returns None for unknown ids and the original
    then crashed with AttributeError (HTTP 500) on ``.serialize()``.
    """
    character = Character.query.get(character_id)
    if character is None:
        return jsonify({"msg": "Character not found"}), 404
    return jsonify(character.serialize()), 200
@app.route('/characters/<int:character_id>', methods=['PUT', 'DELETE'])
def edit_delete_character(character_id):
    """Partially update (PUT) or delete (DELETE) a character by id."""
    body = request.get_json()
    # NOTE(review): may be None for an unknown id -- the attribute writes /
    # delete below would then raise (HTTP 500); consider returning 404.
    update_character = Character.query.get(character_id)
    if request.method == 'PUT':
        # Only fields present in the body are changed (partial update).
        if 'name' in body:
            update_character.name = body['name']
        if 'status' in body:
            update_character.status = body['status']
        if 'species' in body:
            update_character.species = body['species']
        if 'gender' in body:
            update_character.gender = body['gender']
        if 'origin' in body:
            update_character.origin = body['origin']
        db.session.commit()
        response_body = {
            "modified": True,
            "character": update_character.serialize()
        }
        return jsonify(response_body), 200
    else:
        db.session.delete(update_character)
        db.session.commit()
        # Return the remaining characters after deletion.
        character = Character.query.all()
        characters_list = list(map(lambda character: character.serialize(), character))
        response_body = {
            "deleted": True,
            "characters": characters_list
        }
        return jsonify(response_body), 200
@app.route('/locations', methods=['GET', 'POST'])
def get_create_locations():
    """List all locations (GET) or create one from the JSON body (POST)."""
    if request.method == 'GET':
        location = Location.query.all()
        locations_list = list(map(lambda location: location.serialize(), location))
        return jsonify(locations_list), 200
    else:
        body = request.get_json()
        # NOTE(review): missing JSON fields raise KeyError (HTTP 500).
        location = Location()
        location.name = body['name']
        location.dimension = body['dimension']
        db.session.add(location)
        db.session.commit()
        response_body = {
            "created": True,
            "location": location.serialize()
        }
        return jsonify(response_body), 201
@app.route('/locations/<int:location_id>', methods=['GET'])
def get_single_location(location_id):
    """Return one location by primary key, or 404 if it does not exist.

    Bug fix: ``query.get`` returns None for unknown ids and the original
    then crashed with AttributeError (HTTP 500) on ``.serialize()``.
    """
    location = Location.query.get(location_id)
    if location is None:
        return jsonify({"msg": "Location not found"}), 404
    return jsonify(location.serialize()), 200
@app.route('/locations/<int:location_id>', methods=['PUT', 'DELETE'])
def edit_delete_location(location_id):
    """Partially update (PUT) or delete (DELETE) a location by id."""
    body = request.get_json()
    # NOTE(review): may be None for an unknown id -- would raise below
    # (HTTP 500); consider returning 404.
    update_location = Location.query.get(location_id)
    if request.method == 'PUT':
        # Only fields present in the body are changed (partial update).
        if 'name' in body:
            update_location.name = body['name']
        if 'dimension' in body:
            update_location.dimension = body['dimension']
        db.session.commit()
        response_body = {
            "modified": True,
            "location": update_location.serialize()
        }
        return jsonify(response_body), 200
    else:
        db.session.delete(update_location)
        db.session.commit()
        # Return the remaining locations after deletion.
        location = Location.query.all()
        locations_list = list(map(lambda location: location.serialize(), location))
        response_body = {
            "deleted": True,
            "locations": locations_list
        }
        return jsonify(response_body), 200
@app.route('/user', methods=['GET', 'POST'])
def get_create_user():
    """List all users (GET) or create one from the JSON body (POST)."""
    if request.method == 'GET':
        user = User.query.all()
        user_list = list(map(lambda user : user.serialize(), user))
        return jsonify(user_list), 200
    else:
        body = request.get_json()
        # SECURITY(review): the password is stored in plain text -- hash it
        # (e.g. werkzeug.security.generate_password_hash) before persisting.
        # NOTE(review): missing JSON fields raise KeyError (HTTP 500).
        user = User()
        user.username = body['username']
        user.email = body['email']
        user.password = body['password']
        user.is_active = body['is_active']
        db.session.add(user)
        db.session.commit()
        response_body = {
            "created": True,
            "user": user.serialize()
        }
        return jsonify(response_body), 201
@app.route('/user/<int:user_id>', methods=['GET'])
def get_single_user(user_id):
    """Return one user by primary key, or 404 if it does not exist.

    Bug fix: ``query.get`` returns None for unknown ids and the original
    then crashed with AttributeError (HTTP 500) on ``.serialize()``.
    """
    user = User.query.get(user_id)
    if user is None:
        return jsonify({"msg": "User not found"}), 404
    return jsonify(user.serialize()), 200
@app.route('/user/<int:user_id>/favorite/character/<int:character_id>', methods=['POST', 'GET'])
def add_fav_char(user_id, character_id):
    """Add (POST) or list (GET) a user's favorite characters.

    Bug fix: the GET branch assigned to the name ``favorites``, shadowing the
    imported model. Python resolves locals at compile time, so EVERY reference
    to ``favorites`` in this function (both branches) raised
    UnboundLocalError. The local result is now ``fav_records``.
    """
    user = User.query.get(user_id)
    if user is None:
        return jsonify({"msg": "User not found"}), 404
    if request.method == 'POST':
        fav_char = favorites(user_id=user_id, character_id=character_id)
        db.session.add(fav_char)
        db.session.commit()
        response_body = {
            "is_fav": True,
            "favorites": fav_char.serialize(),
            "user": user.username
        }
        return jsonify(response_body), 201
    else:
        fav_records = favorites.query.filter_by(user_id=user_id).all()
        all_characters = [record.serialize() for record in fav_records]
        response_body = {
            "favorites": all_characters,
            "user": user.username
        }
        return jsonify(response_body), 200
@app.route('/user/<int:user_id>/favorite/character/<int:character_id>', methods=['DELETE'])
def delete_fav_char(user_id, character_id):
    """Remove one character from a user's favorites.

    Bug fix: the original assigned the query result to the name ``favorites``,
    shadowing the imported model and raising UnboundLocalError on every
    request. Also returns 404 (instead of crashing) when the user or the
    favorite record does not exist.
    """
    user = User.query.get(user_id)
    if user is None:
        return jsonify({"msg": "User not found"}), 404
    fav_record = favorites.query.filter_by(user_id=user_id, character_id=character_id).first()
    if fav_record is None:
        return jsonify({"msg": "Favorite not found"}), 404
    db.session.delete(fav_record)
    db.session.commit()
    response_body = {
        "is_fav": False,
        "user": user.username
    }
    return jsonify(response_body), 200
@app.route('/user/<int:user_id>/favorite/location/<int:location_id>', methods=['POST', 'GET'])
def add_fav_loc(user_id, location_id):
    """Add (POST) or list (GET) a user's favorite locations.

    Bug fix: the GET branch assigned to the name ``favorites``, shadowing the
    imported model so every reference in the function raised
    UnboundLocalError. The local result is now ``fav_records``. A stray debug
    print was also removed.
    NOTE(review): POST answers 200 here but 201 in add_fav_char -- the
    original inconsistency is preserved for API compatibility.
    """
    user = User.query.get(user_id)
    if user is None:
        return jsonify({"msg": "User not found"}), 404
    if request.method == 'POST':
        fav_loc = favorites(user_id=user_id, location_id=location_id)
        db.session.add(fav_loc)
        db.session.commit()
        response_body = {
            "is_fav": True,
            "favorites": fav_loc.serialize(),
            "user": user.username
        }
        return jsonify(response_body), 200
    else:
        fav_records = favorites.query.filter_by(user_id=user_id).all()
        all_locs = [record.serialize() for record in fav_records]
        response_body = {
            "favorites": all_locs,
            "user": user.username
        }
        return jsonify(response_body), 200
@app.route('/user/<int:user_id>/favorite/location/<int:location_id>', methods=['DELETE'])
def delete_fav_loc(user_id, location_id):
    """Remove one location from a user's favorites.

    Bug fix: the original assigned the query result to the name ``favorites``,
    shadowing the imported model and raising UnboundLocalError on every
    request. Also returns 404 (instead of crashing) when the user or the
    favorite record does not exist.
    """
    user = User.query.get(user_id)
    if user is None:
        return jsonify({"msg": "User not found"}), 404
    fav_record = favorites.query.filter_by(user_id=user_id, location_id=location_id).first()
    if fav_record is None:
        return jsonify({"msg": "Favorite not found"}), 404
    db.session.delete(fav_record)
    db.session.commit()
    response_body = {
        "is_fav": False,
        "user": user.username
    }
    return jsonify(response_body), 200
@app.route('/user/<int:user_id>/favorites', methods=['GET'])
def get_all_favs(user_id):
    """List every favorite (characters and locations) for one user.

    Bug fix: the original assigned the query to the name ``favorites``,
    shadowing the imported model and raising UnboundLocalError on every
    request. Also 404s for an unknown user instead of crashing on
    ``user.username``.
    """
    user = User.query.get(user_id)
    if user is None:
        return jsonify({"msg": "User not found"}), 404
    fav_records = favorites.query.filter_by(user_id=user_id).all()
    all_favs = [record.serialize() for record in fav_records]
    response_body = {
        "user": user.username,
        "favorites": all_favs
    }
    return jsonify(response_body), 200
# this only runs if `$ python src/main.py` is executed
if __name__ == '__main__':
PORT = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=PORT, debug=False)
| Binkitubo/Rick-And-Morty-REST-API | src/main.py | main.py | py | 9,806 | python | en | code | 0 | github-code | 13 |
20451628978 | #%%importiamo le librerie
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Flatten, Bidirectional
from sklearn.cluster import KMeans
#%%carichiamo il file e diamogli una prima occhiata
Raw_data = pd.read_csv('database.csv')
print (Raw_data)
#Non possiamo utilizzare il .describe perché ci andrebbe solo a descrivere colonne che hanno float come dtype
Raw_data.shape
Raw_data.describe()
Raw_data.head()
#Andiamo ad analizzare le diverse colonne:
Raw_data.dtypes
#%%Partiamo dalle più semplici:
#Andremo ad esludere il Time e le ultime variabili in quanto il loro valore non è
#necessario per la rete:
Data = Raw_data
Data.drop(Data.columns[1], axis=1, inplace=True)
Data.drop(Data.columns[15:20], axis=1, inplace=True)
print(Data.dtypes)
#%% Encodiamo la Date:
#Dobbiamo trasformare per prima cosa l'object in datetime, per poi avere un modo
#per trasformarlo in giorni dal giorno 0 (primo terremoto).
from datetime import date
Data['Date'] = pd.to_datetime(Data['Date'], utc= 'True')
Data.dtypes
dummy_dates = list(Data.iloc[:,0])
for i in range(len(dummy_dates)):
if i == 0:
pass
else:
nanoseconds = dummy_dates[i] - dummy_dates[0]
nanoseconds = nanoseconds.days
dummy_dates[i] = nanoseconds
dummy_dates[0] = 0
Data['Date'] = dummy_dates
Data.head()
#%%Latitude e longitude
Dimensions = np.zeros((23412,2))
Dimensions[:,0] = Data.iloc[:,1]
Dimensions[:,1] = Data.iloc[:,2]
#%% Per trovare i centroidi
kmeans = KMeans(n_clusters=15, n_init=5, max_iter=50, precompute_distances=True).fit(Dimensions)
inertias = []
for i in range(10,40):
clusters = i
kmeans = KMeans(n_clusters=clusters, n_init=5, max_iter=280, verbose = 0, precompute_distances=True).fit(Dimensions)
inertias.append(kmeans.inertia_)
print(len(inertias))
#comandi a mia disposizione:
#labels_: label di ogni punto
#cluter_centers_ coordinate centroidi
#inertia_: somma distanze al quadrato di ogni sample rispetto ad ogni centroide
#Printiamo le inertias, quelle necessarie sono 8
plt.plot(range(30),inertias)
#%% One-hot encodiamo tutto in Data
Data_labels = []
Data_kmeans = KMeans(n_clusters=8, n_init=5, max_iter=280, verbose = 0, precompute_distances=True).fit(Dimensions)
Data_labels = Data_kmeans.labels_
Data_labels = np.array(Data_labels)
print(Data_labels)
Data['coordlabels'] = pd.Series(Data_labels, index=Data.index)
Data['coordlabels'] = pd.Categorical(Data['coordlabels'])
Zone = pd.get_dummies(Data['coordlabels'], prefix = 'zona')
Data.drop(Data.columns[15], axis=1, inplace=True)
Data = pd.concat([Data, Zone], axis=1)
Data.drop(Data.columns[1:3], axis=1, inplace=True)
Data.dtypes
#%%Occupiamoci degli object data, gli altri one-hot encodabili
Data.iloc[:,1].unique()
#cosa vogliamo fare con i dati che non sono Eathquake né Nuclear Explosion?
#%%cerchiamo di capire quanti Type ci sono con un ciclo if:
Type = Data.iloc[:,4]
Type = np.array(Type)
Earthquake = 0
for x in range(Type.size):
if Type[x] == 'Earthquake':
Earthquake = Earthquake+1
Earthquake
#dovremo one-hot encodare questa feature
Data.head()
#%%Occupiamoci della magnitude type:
Raw_data.iloc[:,8].unique()
# ML: magnitudo locale (Richter)
# MS: Surface wave magnitude scale
# MB: Body wave magnitude scale: "body-wave magnitude" developed to
# overcome the distance and magnitude limitations of the ML scale inherent
# in the use of surface waves. maximum amplitude of P waves in the first 10 seconds.
# MW: Moment magnitude scale: rigidità della Terra moltiplicata per il momento
# medio di spostamento della faglia e la dimensione dell'area dislocata.
# misurare le dimensioni dei terremoti in termini di energia liberata.
# MD: Duration magnitude signal
# li one-hot encodiamo per evitare di rovinare i dati.
# per fillare i nan metto MW.
#train 75 percento, y differenza in giorni
#%% One-hot encodiamo i Types:
Data['Type'] = pd.Categorical(Data['Type'])
Type_encoded = pd.get_dummies(Data['Type'], prefix = 'tipologia')
Data = pd.concat([Data, Type_encoded], axis=1)
Data.drop(Data.columns[1], axis=1, inplace=True)
#%% One-hot encodiamo i Magnitude Type:
Data['Magnitude Type'] = pd.Categorical(Data['Magnitude Type'])
Magnitude_encoded = pd.get_dummies(Data['Magnitude Type'], prefix = 'tipo_magnitudo')
Data = pd.concat([Data, Magnitude_encoded], axis=1)
Data.drop(Data.columns[5], axis=1, inplace=True)
Data.dtypes
#%%Normalizziamo il resto dei dati:
#qui abbiamo normalizzato la Depth
Data_scaled = Data
scaler_1 = MinMaxScaler()
Depth_sk = np.array(Data.iloc[:,1])
Depth_sk = Depth_sk.reshape(-1,1)
Depth_sk = scaler_1.fit_transform(Depth_sk)
Data_scaled.iloc[:,1] = Depth_sk
Data_scaled.head()
#%% Depth Error 4.993115
import math
Deptherr_sk = list(Data.iloc[:,2])
Deptherr_sk = [4.993115 if math.isnan(x) else x for x in Deptherr_sk]
scaler_2 = MinMaxScaler()
Deptherr_sk = np.array(Deptherr_sk)
Deptherr_sk = Deptherr_sk.reshape(-1,1)
Deptherr_sk = scaler_2.fit_transform(Deptherr_sk)
print(Deptherr_sk)
Data_scaled.iloc[:,2] = Deptherr_sk
Data_scaled.head()
#%%Dss 275.364098
Data.drop(Data.columns[2], axis=1, inplace=True)
Data_scaled.drop(Data_scaled.columns[2], axis=1, inplace=True)
Data.drop(Data.columns[3], axis=1, inplace=True)
Data_scaled.drop(Data_scaled.columns[3], axis=1, inplace=True)
Data.drop(Data.columns[5], axis=1, inplace=True)
Data_scaled.drop(Data_scaled.columns[5], axis=1, inplace=True)
Data.dtypes
#%%Magnitude
Magnitude = list(Data.iloc[:,2])
Magnitude = [5.8825 if math.isnan(x) else x for x in Magnitude]
scaler_3 = MinMaxScaler()
Magnitude = np.array(Magnitude)
Magnitude = Magnitude.reshape(-1,1)
Magnitude = scaler_3.fit_transform(Magnitude)
print(Magnitude)
Data_scaled.iloc[:,2] = Magnitude
Data_scaled.head()
#%%Azimuthal Gap
Data_scaled.describe()
AZgap = list(Data.iloc[:,3])
AZgap = [44.163532 if math.isnan(x) else x for x in AZgap]
scaler_4 = MinMaxScaler()
AZgap = np.array(AZgap)
AZgap = AZgap.reshape(-1,1)
AZgap = scaler_4.fit_transform(AZgap)
print(AZgap)
Data_scaled.iloc[:,3] = AZgap
Data.describe()
#%%Horizontal_distance
Hdis = list(Data.iloc[:,4])
Hdis = [3.992660 if math.isnan(x) else x for x in Hdis]
scaler_5 = MinMaxScaler()
Hdis = np.array(Hdis)
Hdis = Hdis.reshape(-1,1)
Hdis = scaler_5.fit_transform(Hdis)
print(Hdis)
Data_scaled.iloc[:,4] = Hdis
Data.head()
#%%Reshapiamo l'array
Data_scaled = Data_scaled.values
x_train = Data_scaled[:17560, 1:]
x_train = x_train.reshape(17560,26)
print(x_train.shape)
y_train = Data_scaled[:17560,0]
y_train = y_train.reshape(17560,1)
print(y_train.shape)
x_test = Data_scaled[17560:, 1:]
print(x_test.shape)
y_test = Data_scaled[17560:,0]
print(y_test.shape)
#%%Architecture
model = Sequential()
model.add(Dense(32, input_dim=26))
model.add(Dense(32))
model.add(Dense(32))
model.add(Dense(32))
model.add(Dense(32))
model.add(Dense(32))
model.add(Dense(1, activation = 'linear'))
model.compile(optimizer = 'Adam', loss='mean_squared_error', metrics=['accuracy','mean_squared_error'])
model.summary()
#%%trainiamo la rete
model.fit(x_train, y_train, verbose = 2, epochs = 300 )
| Hitomamacs/Web1 | cazzate/Terromoti/Rete.py | Rete.py | py | 7,188 | python | it | code | 0 | github-code | 13 |
9024299157 | #!/usr/bin/env python3
from flask import Flask, request, render_template, Response
import os, pickle, base64
from flask_limiter.util import get_remote_address
from flask_limiter import Limiter
app = Flask(__name__)
app.secret_key = os.urandom(32)
INFO = ['name', 'username', 'password']
limiter = Limiter(
get_remote_address,
app=app,
default_limits=["50000 per hour"],
storage_uri="memory://",
)
@app.route('/')
def index():
    """Landing page: renders the session-creation form."""
    return render_template('create_session.jinja2')
@app.route('/create_session', methods=['GET', 'POST'])
@limiter.limit("5/second")
def create_session():
    """Pickle the submitted form fields and return them base64-encoded as an
    opaque "session" token (this is a CTF challenge -- see check_session)."""
    if request.method == 'GET':
        return render_template('create_session.jinja2')
    elif request.method == 'POST':
        info = {}
        # Collect only the whitelisted fields (name/username/password).
        for _ in INFO:
            info[_] = request.form.get(_, '')
        try:
            data = base64.b64encode(pickle.dumps(info)).decode('utf8')
        except:
            data = "Invalid data!"
        return render_template('create_session.jinja2', data=data)
@app.route('/check_session', methods=['GET', 'POST'])
@limiter.limit("5/second")
def check_session():
    """Decode and unpickle a user-supplied "session" token and display it.

    SECURITY: ``pickle.loads`` on attacker-controlled data allows arbitrary
    code execution -- this is the intended vulnerability of the challenge.
    """
    if request.method == 'GET':
        return render_template('check_session.jinja2')
    elif request.method == 'POST':
        session = request.form.get('session', '')
        try:
            info = pickle.loads(base64.b64decode(session))
        except:
            info = "Invalid session!"
        return render_template('check_session.jinja2', info=info)
@app.route('/source')
def source():
    """Serve this file's own source code (intentional for the challenge)."""
    return Response(open(__file__).read(), mimetype="text/plain")
app.run(host='0.0.0.0', port=1337) | giangnamG/CTF-WriteUps | CookieArenaCTFWriteUps/Escape the session/source.py | source.py | py | 1,625 | python | en | code | 1 | github-code | 13 |
39448997985 | import pandas as pd
import requests
from bs4 import BeautifulSoup
def get_sp500_details():
    """Scrape the S&P 500 constituents table from Wikipedia.

    Returns:
        pandas.DataFrame with the "Symbol" and "Security" columns of the
        first wikitable on the page.
    Raises:
        requests.HTTPError: when Wikipedia answers with an error status.

    Fixes vs. the original: removed the unused ``table_class`` local, added a
    request timeout (the call could previously hang forever), and raise on
    non-2xx responses instead of parsing an error page.
    """
    wikiurl = "https://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
    response = requests.get(wikiurl, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    # First table with the "wikitable" class holds the constituents.
    sp500 = soup.find('table', {'class': "wikitable"})
    df = pd.DataFrame(pd.read_html(str(sp500))[0])
    return df[["Symbol", "Security"]]
11510512823 | #!/usr/bin/env python
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "lib"))
import dns.resolver
from dns import reversename
from collections import OrderedDict
from splunklib.searchcommands import \
dispatch, StreamingCommand, Configuration, Option, validators
resolver = dns.resolver.Resolver()
cache = OrderedDict()
CACHE_LIMIT = 100
def dnslookup(query, record_type='A'):
    """Resolve *query* for the given DNS record type.

    'FORWARD' is an alias for 'A'; 'REVERSE' is an alias for 'PTR' (the query
    is converted from an IP address to its reverse-lookup name when possible).

    Returns a list of answer strings with trailing root dots stripped, or a
    single-element ["Error:..."] list describing the failure.

    Fixes vs. the original: removed the duplicated ``.upper()`` call, and
    narrowed the bare ``except:`` / ``except BaseException`` clauses so that
    KeyboardInterrupt / SystemExit are no longer swallowed.
    """
    record_type = record_type.upper()
    try:
        if record_type == 'PTR' or record_type == 'REVERSE':
            record_type = 'PTR'
            try:
                query = reversename.from_address(query)
            except Exception:
                # Not an IP literal; query the name as given.
                pass
        elif record_type == 'FORWARD':
            record_type = 'A'
        answer = resolver.resolve(query, record_type)
        answers = []
        if hasattr(answer, 'rrset'):
            for a in answer.rrset:
                b = a.to_text()
                # Dot record is identical to the query
                if b == ".":
                    b = query
                # Strip the trailing root dot from absolute names.
                if b[-1] == '.':
                    b = b[0:-1]
                answers.append(b)
            return answers
        else:
            return ["Error:NoResults"]
    except dns.resolver.NoAnswer:
        sys.stderr.write("No answer: " + str(query) + "\n")
        return ["Error:NoAnswer"]
    except dns.resolver.NXDOMAIN:
        sys.stderr.write("Does not resolve: " + str(query) + "\n")
        return ["Error:NXDOMAIN"]
    except Exception as e:
        sys.stderr.write("query=" + query + " error=\"" + str(e) + "\"\n")
        return ["Error:"+str(e)]
def cache_lookup(input, search_type):
    """``dnslookup`` with a small bounded LRU cache.

    Fixes vs. the original:
    * the cache key now includes the record type -- previously an 'A' answer
      could be returned for an 'MX' query of the same name;
    * cache hits refresh recency (``move_to_end``), making eviction true LRU
      instead of FIFO.
    """
    key = (input, search_type)
    if key in cache:
        cache.move_to_end(key)
        return cache[key]
    result = dnslookup(input, record_type=search_type)
    if len(cache) >= CACHE_LIMIT:
        cache.popitem(last=False)
    cache[key] = result
    return result
@Configuration()
class DnslookupCommand(StreamingCommand):
    """ This allows custom dns requests to select servers from splunk
    ##Syntax
    .. code-block::
        dnslookup (recordtype=<string>) (input_field=<string>) | (output_field=<string>) | (search_suffix=<string>)*
    ##Description
    This streaming command resolves the host/ip or other string in from
    input_field in the data and outputs to output_field using the
    custom server and search suffix provided.
    ##Example
    This example resolves the name "www" using the first domain in the suffix
    list (google.com), and using the name servers in the server list.
    ..code-block::
        "| makeresults
        | eval hostname = "www",
            ip = "4.2.2.2",
            _raw = "www.google.com,4.2.2.2"
        | dnslookup recordtype="FORWARD" input_field=hostname
            output_field="xyz" search_suffix="google.com,yahoo.com
            server=8.8.8.8,8.8.4.4""
    """
    recordtype = Option(
        doc='''
        **Syntax:** **recordtype=** *A|PTR|forward|reverse|MX*
        **Description:** Type of dns record being requested''',
        require=True, validate=validators.Set("FORWARD","REVERSE","PTR","A","AAAA","CNAME"), default="A")
    input_field = Option(
        doc='''
        **Syntax:** **input_field=** *<fieldname>*
        **Description:** Name of the field that holds the input value to be looked up''',
        require=True, validate=validators.Fieldname())
    output_field = Option(
        doc='''
        **Syntax:** **output_field=** *<fieldname>*
        **Description:** Name of the field that will result of the command''',
        require=True, validate=validators.Fieldname())
    server = Option(
        doc='''
        **Syntax:** **server=** *hostname|IPv4|IPv6*
        **Description:** Custom DNS resolver to use ''',
        require=False)
    search_suffix = Option(
        doc='''
        **Syntax:** **server=** *server|IPv4|IPv6*
        **Description:** Comma separated list of DNS search suffix to use to resolve relative hostnames ''',
        require=False)

    def stream(self, events):
        """Resolve input_field for each event into output_field."""
        # NOTE(review): this mutates the module-level resolver, so the custom
        # server/suffix settings persist across invocations.
        if self.server:
            # Custom resolver list; tight timeouts so a dead server does not
            # stall the search pipeline.
            dns_servers = self.server.split(',')
            resolver.nameservers = dns_servers
            resolver.timeout = 2.5
            resolver.lifetime = 2.5
        if self.search_suffix:
            resolver.search = [dns.name.from_text(s) for s in self.search_suffix.split(',')]
            resolver.use_search_by_default = True
        for event in events:
            if self.input_field and self.output_field:
                # cache_lookup memoizes repeated lookups within this process.
                event[self.output_field] = cache_lookup(event[self.input_field], self.recordtype)
            yield event
dispatch(DnslookupCommand, sys.argv, sys.stdin, sys.stdout, __name__)
| seunomosowon/SA-dnslookup | bin/dnslookup.py | dnslookup.py | py | 4,810 | python | en | code | 0 | github-code | 13 |
18788950824 | from transcoding_cluster import task
from .task_view import TaskHumanView, TaskJsonView
from .task_list_view import TaskListListView, TaskListJsonView, TaskListTableView
class TaskManager( object ):
    """Facade over a transcoding-cluster client for loading, describing,
    creating and updating tasks.

    Most operations require a task to be loaded first via ``loadTask``;
    ``requireTask`` enforces that precondition.

    Fixes vs. the original: ``is None`` instead of ``== None`` comparisons,
    removed the dead ``newDeps`` local in ``updateDependencies``, and
    collapsed the duplicated view-map lookup/fallback in ``listTasks``.
    """

    def __init__(self, client):
        self.client = client
        # Currently loaded task, or None until loadTask()/create() is called.
        self.task = None

    def loadTask( self, taskId ):
        """Fetch the task with *taskId* from the cluster and keep it."""
        self.task = self.client.getTask( taskId )

    def requireTask( self ):
        """Raise RuntimeError unless a task has been loaded."""
        if self.task is None:
            raise RuntimeError( "Task must be loaded" )

    def listTasks(self, format):
        """Print all cluster tasks using the view matching *format*
        (json/table/list); unknown formats fall back to the list view."""
        viewMap = {
            "json" : TaskListJsonView,
            "table": TaskListTableView,
            "list" : TaskListListView
        }
        listV = viewMap.get( format, TaskListListView )()
        tList = self.client.getTasks()
        if tList is not None:
            listV.show( tList )

    def describe( self, format ):
        """Print the loaded task as JSON or in human-readable form."""
        self.requireTask()
        taskV = TaskJsonView() if format == "json" else TaskHumanView()
        taskV.show( self.task )

    def updateDependencies( self, depList, remove=False ):
        """Add (default) or remove (*remove*=True) each dependency in
        *depList* on the loaded task; client calls mutate self.task."""
        for d in depList:
            if not remove and d not in self.task.depends:
                self.client.addTaskDependency( self.task, d )
            elif remove and d in self.task.depends:
                self.client.removeTaskDependency( self.task, d )

    def updateAttributes( self, data ):
        """Copy the non-None affinity/priority/command values from *data*
        onto the loaded task; return the list of attribute names changed."""
        attrs = []
        for name in ( "affinity", "priority", "command" ):
            if data[name] is not None:
                setattr( self.task, name, data[name] )
                attrs.append( name )
        return attrs

    def update( self, data, removeDeps=False ):
        """Push attribute changes (and optional dependency changes) from
        *data* to the cluster for the loaded task."""
        self.requireTask()
        attrs = self.updateAttributes( data )
        self.task = self.client.updateTask( self.task, attrs )
        if "depends" in data:
            self.updateDependencies( data["depends"], removeDeps )

    def create( self, data ):
        """Create a new task from *data* and load it."""
        self.task = task.Task()
        self.updateAttributes( data )
        self.task = self.client.createTask( self.task )
        if "depends" in data:
            self.updateDependencies( data["depends"], False )
| ObviusOwl/transcoding-cluster | transcoding_cluster_cli/task_manager.py | task_manager.py | py | 2,542 | python | en | code | 0 | github-code | 13 |
36294911301 | import json, os, argparse, time
import numpy as np
from numpy import append
import torch
from utils.train import train_iter, save_model_ckp, validate
from utils.general import get_optimizer, get_scheduler, build_model, set_device, seed_everything, get_loss_func, initialize_epoch_info, load_dict
from utils.plots import plot_train_val_eval
from configs.config import update_config, save_exp_info
from datasets.build import get_dataloader
if __name__ == "__main__":
    import warnings
    warnings.filterwarnings("ignore")
    # --- CLI arguments -------------------------------------------------
    parser = argparse.ArgumentParser()
    # parser.add_argument("--config", required=True, type=str,
    #                     help="Path to the json config file.")
    # NOTE(review): the default below is a developer-machine absolute path;
    # it only works on that machine.
    parser.add_argument("--config", type=str,
                        default="C:/Users/jiang/Documents/AllLog/OMC/2022-Apr-27/16-06-28/Exp_Config.json",
                        help="Path to the json config file.")
    parser.add_argument("--machine", type=str,
                        default="pc")
    args = parser.parse_args()
    # --- Experiment setup: config, output dirs, model/optimizer ---------
    config = json.load(open(args.config, "r"))
    config = update_config(config, args)
    save_root, config = save_exp_info(config)
    fig_dir = os.path.join(save_root, "figures")
    os.makedirs(fig_dir, exist_ok=True)
    mv_ckp_dir = os.path.join(save_root, "min_val_ckp")
    os.makedirs(mv_ckp_dir, exist_ok=True)
    seed_everything(config)
    device = set_device(config)
    model = build_model(config, device)
    optimizer = get_optimizer(config, model)
    scheduler = get_scheduler(config, optimizer)
    loss_func = get_loss_func(config)
    # Get DataLoader
    train_loader = get_dataloader(config, mode="train")
    val_loader = get_dataloader(config, mode="val")
    num_train_iter_per_epoch = len(train_loader)
    print("Training Set Using %d samples" % (len(train_loader)*config["trainer"]["train_batch_size"]))
    print("Validation Set Using %d samples" % (len(val_loader)*config["trainer"]["test_batch_size"]))
    # Load Resume Training Configs
    if config["resume_training"]:
        checkpoint_path = os.path.join(config["ckp_dir"], "checkpoint.pth")
        cpk_content = torch.load(checkpoint_path)
        epoch = cpk_content["epoch"]
        total_iter = cpk_content["iter"]
        model_state_dict = cpk_content["model_state_dict"]
        model.load_state_dict(model_state_dict)
        opt_state_dict = cpk_content["optimizer_state_dict"]
        optimizer.load_state_dict(opt_state_dict)
        scheduler = cpk_content["scheduler"]
        print("CheckPoint Loaded")
        # === Create Dict to save Result
        summary = load_dict(config["ckp_dir"], name="summary.npy")
    else:
        epoch = 0
        total_iter = 0
        # === Create Dict to save Result
        # NOTE(review): min_val_metric starts at 0 and is only updated when
        # it is GREATER than the new val metric -- the "best" checkpoint is
        # therefore never saved for metrics that are always positive;
        # confirm whether the metric here is a loss that can go negative.
        summary = { # To save training history
            "train_loss": [],
            "val_metric": [],
            "epoch_info": [],
            "min_val_metric": 0,
        }
    # Auto Data Parallel depending on gpu count
    if config["data_parallel"]:
        print(" >>> Multiple GPU Exsits. Use Data Parallel Training.")
        model = torch.nn.DataParallel(model)
    # ==== Start Training ====
    time_start = time.time()
    while epoch < config["trainer"]["num_epoch"]:
        print("Epoch [%d] Starts Training" % (epoch))
        epoch_info = initialize_epoch_info(config)
        iter_time_start = time.time()
        model.train()
        for batch_idx, (inputs, labels) in enumerate(train_loader):
            # Periodic progress print (skips the very first iterations).
            if (batch_idx+1) % config["trainer"]["print_every"] == 0 and batch_idx > 1:
                print(" >> Iteration [%d]/%d, time consumed is [%.01fs] "%(batch_idx+1, num_train_iter_per_epoch, time.time()-iter_time_start))
                iter_time_start = time.time()
            iter_info = train_iter(inputs=inputs,
                                   labels=labels,
                                   loss_func=loss_func,
                                   model=model,
                                   optimizer=optimizer,
                                   config=config,
                                   device=device)
            total_iter += 1
            # Accumulate per-iteration stats into the epoch totals.
            for each_key in iter_info.keys():
                epoch_info[each_key] += iter_info[each_key]
        # ==== Print Training Stats
        epoch_info["train_loss"] = epoch_info["train_loss"] / num_train_iter_per_epoch
        print(" >> >> Training Loss [%.03f]" % (epoch_info["train_loss"]))
        summary["train_loss"].append(epoch_info["train_loss"])
        # ==== Reaching Checkpoint
        print(" >> Checkpoint Training time: [%.01fs]" % float(time.time()-time_start))
        time_start = time.time()
        # ==== Save checkpoint
        if not config["debug"]:
            save_model_ckp(config, model=model,
                           epoch=epoch, iter_num=total_iter,
                           optimizer=optimizer,
                           scheduler=scheduler,
                           save_dir=save_root)
        # ==== Epoch Evaluation ====
        print("Epoch [%d] starts evaluation." % (epoch))
        model.eval()
        # ==== Evaluate on validation set
        val_result = validate(val_loader, model, loss_func, device)
        # ==== Print Validation Stats
        print(" >> >> Validation Metric [%.03f]" % (val_result["val_metric"]))
        summary["val_metric"].append(val_result["val_metric"])
        # ==== Plot Figures
        plot_train_val_eval(summary_dict=summary, save_dir=fig_dir, save_name="Train_Val_Loss_Plot", config=config)
        # ==== Save summary
        file_name = os.path.join(save_root, "summary.npy")
        np.save(file_name, summary)
        # ==== Save checkpoint with least validation loss
        if summary["min_val_metric"] > val_result["val_metric"]:
            summary["min_val_metric"] = val_result["val_metric"]
            print("Minimal validation loss updated!")
            if not config["debug"]:
                save_model_ckp(config, model=model,
                               epoch=epoch, iter_num=total_iter,
                               optimizer=optimizer,
                               scheduler=scheduler,
                               save_dir=mv_ckp_dir)
        # ==== Learning rate scheduler ====
        # NOTE(review): "RedusceLROnPlateau" is misspelled; if the config
        # ever contains the correctly spelled "ReduceLROnPlateau" this
        # comparison will not match and scheduler.step() runs anyway.
        if config["optimizer"]["lr_scheduler"] != "RedusceLROnPlateau":
            scheduler.step()
        epoch += 1
| shoopshoop/OMC | models/swin_pyramid/main_train_swin.py | main_train_swin.py | py | 6,420 | python | en | code | 2 | github-code | 13 |
20215048755 | #-*- coding: utf-8 -*-
#word2vec.py
#this script uses gensim library to implement word2vec algorithm
import os
import re
import locale
from collections import Counter
import numpy as np
import gensim
#iteration class which will be used to train word2vec algorithm. Returns sentences as a list of words
class SentenceIterator:
    """Iterable over a news corpus laid out as one sub-directory per category.

    Each iteration yields one sentence as a list of lowercased words with
    digits, non-word characters, and stop-words removed -- the format
    gensim's Word2Vec expects.

    NOTE(review): this is Python 2 code -- ``str.decode`` is called on the
    raw file contents, which fails on Python 3.
    """
    def __init__(self,directory,sw):
        self.directory = directory
        self.stop_words = sw
        self.categorylist = []
        for filename in os.listdir(self.directory):
            self.categorylist.append(filename)
        # Drop the macOS Finder metadata entry if present.
        # NOTE(review): removing from the list while iterating it only works
        # reliably because at most one '.DS_Store' entry is expected.
        for category in self.categorylist:
            if category == '.DS_Store':
                self.categorylist.remove('.DS_Store')
    def __iter__(self):
        for category in self.categorylist:
            directory_category = self.directory+'/'+str(category)
            for textname in os.listdir(directory_category):
                directory_text = directory_category+'/'+str(textname)
                textfile = open(directory_text,'r').read()
                textfile = textfile.decode('utf-8','ignore')
                doclist = [ line for line in textfile ]
                docstr = '' . join(doclist)
                # Split the document into sentences on terminal punctuation.
                sentences = re.split(r'[.!?]', docstr)
                for s in range(len(sentences)):
                    sentence = sentences[s]
                    sentence = sentence.lower()
                    sentence = re.sub(r"\d+",'',sentence,flags=re.U) #remove numbers
                    sentence = re.sub(r"\W+",'\n',sentence,flags=re.U) #remove non alphanumerics with new line
                    sentence = sentence.split()
                    words = [w for w in sentence if w not in self.stop_words]
                    sentence = ' '.join(words)
                    wordlist = sentence.split()
                    yield wordlist
# Load the Turkish stop-word list (one word per line).
fileNameSW = '/Users/semihakbayrak/ConvNet4/turkish_stopwords.txt'
textfile = open(fileNameSW,'r').read()
textfile = textfile.decode('utf-8')
textfile = textfile.split()
stop_words = [w for w in textfile]
# Corpus root: one sub-directory per news category (42bin haber dataset).
direc = '/Users/semihakbayrak/ConvNet4/42bin_haber/news'
sentences = SentenceIterator(direc,stop_words)
# Train word2vec: 50-dim vectors, context window 4, ignore words seen < 5 times.
model = gensim.models.Word2Vec(sentences, size=50, window=4, min_count=5)
model.save("/Users/semihakbayrak/ConvNet4/42bin_haber_w2v_2")
| semihakbayrak/ConvolutionalNeuralNetworks | word2vec.py | word2vec.py | py | 1,975 | python | en | code | 2 | github-code | 13 |
15039289798 | from django.shortcuts import render
from funciones import analiticas, productos_por_categoria
# Create your views here.
def categoriaBuscada(request):
    """Render the category page.

    On POST, looks up the products of the submitted category; on any other
    method the page is rendered with an empty product list.

    Bug fix: previously non-POST requests fell through without returning
    anything, so Django raised "The view didn't return an HttpResponse".
    """
    top5 = analiticas()
    categorias = ["Electronicos", "Electrodomesticos", "Hogar"]
    if request.method == 'POST':
        categoria = request.POST.get("categorias")
        productos = productos_por_categoria(categoria)
    else:
        # A plain GET shows the page without a selected category.
        categoria = None
        productos = []
    dicc = {"productos_de_la_categoria": productos,
            "categoria": categoria, "categorias": categorias,
            "productos": top5}
    return render(request, 'pagina_de_categorias.html', dicc)
| msosav/Lookup | categorias/views.py | views.py | py | 583 | python | es | code | 2 | github-code | 13 |
35511123226 | import time
start_time = time.time()

# Memo table shared across recursive calls: (n, k) -> C(n, k).
md = {}


def comb(n, k):
    """Return the binomial coefficient C(n, k) via memoized Pascal recursion."""
    cached = md.get((n, k))
    if cached is not None:
        return cached
    if n == 0:
        # C(0, 0) == 1; C(0, k) == 0 for every other k.
        return 1 if k == 0 else 0
    if n < k:
        return 0
    value = comb(n - 1, k - 1) + comb(n - 1, k)
    md[(n, k)] = value
    return value


print(comb(700, 100))
print(time.time() - start_time, "seconds")
| rui1/leetcode | combination.py | combination.py | py | 355 | python | en | code | 0 | github-code | 13 |
18274192336 | #!/usr/bin/env python
import os
def install(alsi):
    """Install the CFR decompiler for the Espresso service.

    :param alsi: Assemblyline site-installer object (project type); provides
        fetch_package / install_oracle_java8 / milestone.  No return value.
    """
    # Fetch the CFR jar from the package repo into the local support dir.
    remote_cfr = 'cfr/cfr.jar'
    local_cfr = os.path.join(alsi.alroot, 'support/cfr/cfr.jar')
    alsi.fetch_package(remote_cfr, local_cfr)
    # CFR needs a Java runtime to execute.
    alsi.install_oracle_java8()
    alsi.milestone("Espresso install complete.")
if __name__ == '__main__':
    from assemblyline.al.install import SiteInstaller
    install(SiteInstaller())
| deeptechlabs/cyberweapons | assemblyline/alsvc_espresso/installer.py | installer.py | py | 388 | python | en | code | 78 | github-code | 13 |
46108614944 | from __future__ import print_function
import os
import subprocess
class CommandExecutor(object):
    """Base class for locating and launching an external executable.

    Subclasses set ``filename`` (the expected basename of the executable)
    and implement ``_get_default_exepaths`` / ``run_script``.
    """

    # Expected basename of the executable; overridden by subclasses.
    filename = None

    def __init__(self, exepath):
        if not exepath:
            exe = self._find_default_exepath()
        elif self._matches_and_isfile(exepath):
            exe = exepath
        else:
            raise ValueError(exepath)
        self.exe = exe
        self.root = os.path.dirname(exe)

    def _get_default_exepaths(self):
        raise NotImplementedError

    def run_script(self):
        raise NotImplementedError

    def _matches_and_isfile(self, exepath):
        """True when *exepath*'s basename equals ``filename`` and the file exists."""
        return os.path.basename(exepath) == self.filename and os.path.isfile(exepath)

    def _find_default_exepath(self):
        """Return the first existing default install location, else raise."""
        for candidate in self._get_default_exepaths():
            if self._matches_and_isfile(candidate):
                return candidate
        raise RuntimeError("{0} installation not found.".format(self.filename))
class MXSPyCOM(CommandExecutor):
    """Executor for MXSPyCOM.exe, used to send scripts to 3ds Max.

    exe - path to MXSPyCOM.exe
    """
    filename = "MXSPyCOM.exe"
    def __init__(self, exepath=None):
        # NOTE(review): the base __init__ already falls back to
        # _find_default_exepath() when exepath is falsy, so this
        # pre-resolution appears redundant (but harmless) -- confirm.
        if not exepath:
            exepath = self._find_default_exepath()
        CommandExecutor.__init__(self, exepath=exepath)
    def _get_default_exepaths(self):
        # Default install location: %PROGRAMFILES%\MXSPyCOM\MXSPyCOM.exe
        progfiles = os.environ["PROGRAMFILES"]
        default_path = os.path.join(progfiles, "MXSPyCOM", "MXSPyCOM.exe")
        return [default_path]
    def run_script(self, scriptpath):
        # Passes the script path via the -s flag; quotes guard spaces in paths.
        command = r'"{0}" -s "{1}"'.format(self.exe, scriptpath)
        subprocess.Popen(command, shell=True)
class UserMax(CommandExecutor):
    """Locate a targeted 3ds Max installation and launch it with a script.

    Installations are discovered through the Autodesk ADSK_3DSMAX_X64_<year>
    environment variables.
    """

    filename = "3dsmax.exe"
    supported_version_years = list(range(2012, 2019 + 1))

    def __init__(self, versionyear=None, exepath=None):
        # Exactly one of versionyear / exepath may be supplied.
        if exepath and versionyear:
            raise ValueError
        if versionyear:
            root = self._get_max_root(versionyear)
            exepath = os.path.join(root, self.filename)
        CommandExecutor.__init__(self, exepath=exepath)

    def run_script(self, scriptpath):
        """Launch 3ds Max with the given script via the PythonHost plugin."""
        command = '"{0}" -U PythonHost "{1}"'.format(self.exe, scriptpath)
        subprocess.Popen(command, shell=True)

    def _get_full_year(self, partial_year):
        """Expand a partial year (e.g. 18) to the newest matching full year."""
        suffix = str(partial_year)
        for candidate in reversed(self.supported_version_years):
            if str(candidate).endswith(suffix):
                return candidate

    def _get_max_root_env_var(self, year):
        return "ADSK_3DSMAX_X64_{0}".format(str(year))

    def _get_max_root(self, year):
        """Return the install root for *year* from the environment."""
        if len(str(year)) < 4:
            year = self._get_full_year(partial_year=year)
        return os.environ[self._get_max_root_env_var(year)]

    def _get_default_exepaths(self):
        """Return candidate executables for every installed version,
        newest first."""
        candidates = []
        for year in reversed(self.supported_version_years):
            env_var = self._get_max_root_env_var(year)
            if env_var in os.environ:
                candidates.append(os.path.join(os.environ[env_var], self.filename))
        return candidates
| garytyler/maxpytest | maxpytest/maxcom.py | maxcom.py | py | 3,413 | python | en | code | 6 | github-code | 13 |
34892369422 | __author__ = 'spotapov'
def count_units(number):
    """Return the number of 1-bits in the binary representation of *number*.

    Also prints the result ("Number of units is N") as the original did.
    Assumes ``number`` is a non-negative int (the original crashed with
    ``ValueError`` on negatives when converting the '-' sign to int).

    Fixes: drops the per-character debug prints and the manual character
    loop in favour of ``str.count`` over ``bin()``.
    """
    ones = bin(number).count("1")
    print("Number of units is", ones)
    return ones
| sergiypotapov/EoC | Mission1.py | Mission1.py | py | 366 | python | en | code | 0 | github-code | 13 |
26995465443 | import crypto_helpers as cr
def crypto_caesar(message,shift):
    '''
    Returns given message shifted by
    given number of characters along alphabet
    (str,int)-->str
    >>> crypto_caesar("klm",-10)
    'abc'
    >>> crypto_caesar("cats and dogs",4)
    'gexw erh hskw'
    >>> crypto_caesar("cow!!!",4)
    'gsa!!!'
    >>> crypto_caesar("help",5)
    'mjqu'
    >>> crypto_caesar("comp",0)
    'comp'
    '''
    # join() builds the result in one pass instead of the original
    # quadratic string += loop; per-character shifting stays in cr.shift_char.
    return ''.join(cr.shift_char(ch, shift) for ch in message)
def caesar(message,key,crypt):
    '''
    (str,int,int)--> str
    Encrypt (crypt=1) or decrypt (crypt=-1) *message* with a Caesar shift
    of *key* characters; any other mode raises ValueError.
    >>> caesar("alphabet",7,1)
    'hswohila'
    >>> caesar("lmtts",82,-1)
    'hippo'
    >>> caesar("help",0,1)
    'help'
    '''
    # Decryption is just encryption with the opposite shift.
    if crypt == 1:
        shift = key
    elif crypt == -1:
        shift = -key
    else:
        raise ValueError("mode not supported")
    return crypto_caesar(message, shift)
# takes the 'n' index of the key
def key_position(key,i):
    '''
    (str,int)-->int
    Return the alphabet offset of the character at index *i* of *key*.
    >>> key_position('alpha',0)
    0
    >>> key_position('alpha',3)
    7
    '''
    # cr.get_keys maps the whole key to its per-character offsets;
    # raises IndexError when i is out of range, ValueError for
    # non-alphabetic keys (behaviour inherited from cr.get_keys).
    offsets = cr.get_keys(key)
    return offsets[i]
def vigenere(message,key,crypt):
    '''
    (str,int,int)--> str
    Returns encrypted (crypt=1) / decrypted (crypt=-1) message under the
    Vigenere cipher with keyword *key*; other modes raise ValueError.
    >>> vigenere("How are you?","good",1)
    'nck gfs eci?'
    >>> vigenere("nck gfs eci?","good",-1)
    'how are you?'
    >>> vigenere("The grass is green","green",1)
    'zyi txrww oj kekvr'
    >>> vigenere("The sky is blue", "aaa",-1)
    'the sky is blue'
    '''
    # Repeat/trim the keyword to the message length (raises for empty or
    # non-alphabetic keys, preserving the original error behaviour).
    keyword = cr.pad_keyword(key, len(message))
    if crypt not in (1, -1):
        raise ValueError("mode not supported")
    # crypt is +1 to encrypt and -1 to decrypt, so the per-character shift
    # is just the (possibly negated) keyword offset at that position.
    # A single join replaces the original duplicated +/- loops with
    # quadratic string concatenation.
    return ''.join(
        cr.shift_char(ch, crypt * key_position(keyword, i))
        for i, ch in enumerate(message)
    )
| mgraiver/ciphers | ciphers.py | ciphers.py | py | 3,772 | python | en | code | 0 | github-code | 13 |
36325768105 | import os
from collections import Counter
from multiprocessing import Pool
from PlatformNlp import utils
class Dictionary(object):
    """A mapping from symbols to consecutive integers.

    Indices ``0 .. nspecial-1`` hold the special tokens passed to the
    constructor; regular vocabulary entries follow.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        begin="[CLS]",
        pad="[PAD]",
        sep="[SEP]",
        unk="[UNK]",
        mask="[MASK]",
        extra_special_symbols=None,
    ):
        self.unk_word, self.pad_word, self.sep_word, self.begin_word, self.mark_word = unk, pad, sep, begin, mask
        self.symbols = []  # index -> symbol string
        self.count = []  # index -> occurrence count
        self.indices = {}  # symbol string -> index
        # Special symbols are registered first so they occupy the lowest ids.
        self.begin_index = self.add_symbol(begin)
        self.pad_index = self.add_symbol(pad)
        self.sep_index = self.add_symbol(sep)
        self.unk_index = self.add_symbol(unk)
        self.mask_index = self.add_symbol(mask)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # Number of special (non-vocabulary) entries at the head of the table.
        self.nspecial = len(self.symbols)

    def __len__(self):
        """Return the number of symbols, specials included.

        Bug fix: ``finalize`` calls ``len(self)`` but no ``__len__`` was
        defined, which raised ``TypeError``.
        """
        return len(self.symbols)

    def index(self, sym):
        """Returns the index of the specified symbol (unk_index if unknown)."""
        assert isinstance(sym, str)
        if sym in self.indices:
            return self.indices[sym]
        return self.unk_index

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary and returns its index.

        A known word (unless ``overwrite``) just has its count bumped by *n*.
        """
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def finalize(self, threshold=-1, nwords=-1, padding_factor=4):
        """Sort symbols by frequency in descending order, ignoring special ones.
        Args:
            - threshold defines the minimum word count
            - nwords defines the total number of words in the final dictionary,
              including special symbols
            - padding_factor can be used to pad the dictionary size to be a
              multiple of 8, which is important on some hardware (e.g., Nvidia
              Tensor Cores).  NOTE(review): currently unused here.
        """
        if nwords <= 0:
            nwords = len(self)
        # Specials keep their original ids at the head of the table.
        new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[: self.nspecial]
        new_count = self.count[: self.nspecial]
        c = Counter(
            dict(
                sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
            )
        )
        for symbol, count in c.most_common(nwords - self.nspecial):
            if count >= threshold:
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                # most_common yields descending counts, so once one falls
                # below the threshold all remaining ones do too.
                break
        assert len(new_symbols) == len(new_indices)
        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format:
        ```
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        ```
        """
        d = cls()
        d.add_from_file(f)
        return d

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.  *f* may be a path or an open file object.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(
                    "Incorrect encoding detected in {}, please "
                    "rebuild the dataset".format(f)
                )
            return
        for line in f:
            try:
                line = line.strip("\n")
                line = line.strip("\r")
                line = line.strip(" ")
                word = line
                # NOTE(review): blank lines become the empty-string symbol.
                self.add_symbol(word, n=1, overwrite=True)
            except ValueError:
                raise ValueError(
                    "Incorrect dictionary format, expected '<token> <cnt> [flags]'"
                )

    @staticmethod
    def _add_file_to_dictionary(filename, dict, tokenize):
        """Count every token produced by *tokenize* over *filename* and
        merge the counts into *dict* (a Dictionary instance)."""
        counter = Counter()
        with open(filename, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip("\n")
                line = line.strip("\r")
                for word in tokenize(line):
                    counter.update([word])
        for w, c in sorted(counter.items()):
            dict.add_symbol(w, c)
| jd-aig/aves2_algorithm_components | src/nlp/PlatformNlp/data/dictionary.py | dictionary.py | py | 4,770 | python | en | code | 2 | github-code | 13 |
32561600092 | import socket
import os
import subprocess
import glob
s = socket.socket()
#host=raw_input("enter the host address")
host = '167.172.235.115'
#host='192.168.0.7'
#host="192.168.43.244"
port = 9899
# Module-level connect: the socket is opened as soon as the script starts.
s.connect((host, port))
print("connected")
def filer():
    """Receive a file over the open socket *s*, save it in the test
    directory, then delete every .py file found there.

    NOTE(review): Python 2 code (raw_input); also uses the module-level
    socket and changes the process working directory.
    """
    os.chdir("/home/pratik/Desktop/test")
    filename=raw_input("please enter the filename")
    with open(filename, 'wb') as f:
        print ('file opened')
        # Read until the peer closes the connection.
        while True:
            print('receiving data...')
            data = s.recv(1024)
            # print('data=%s', (data))
            if not data:
                break
            # write data to a file
            f.write(data)
    # Scan the directory for Python files.
    # NOTE(review): 'a' stays unbound when no .py file exists, so the
    # check below would raise NameError in that case -- confirm intent.
    for root, dirs, files in os.walk("/home/pratik/Desktop/test"):
        for file in files:
            if file.endswith(".py"):
                a="true"
                print(os.path.join(root, file))
    if(a=="true"):
        print("deleted")
        directory='test' # Write the name of the folder holding the files instead of 'test'; save this script outside that folder, or supply the directory path here.
        # os.chdir(directory)
        file=glob.glob('*.py')
        for filename in file:
            os.unlink(filename)
    # Redundant: the with-block already closed the file handle.
    f.close()
    print('Successfully got the file')
    s.close()
    print('connection closed')
filer()
| bhavika022/Antivirus-Software-with-VPN | client.py | client.py | py | 1,430 | python | en | code | 0 | github-code | 13 |
25746592780 | # coding=utf-8
"""
Created on 2017-07-14
@Filename: requests_toolbelt_demo
@Author: Gui
"""
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
# Build a streaming multipart/form-data body: two file parts plus one
# plain-text field.  MultipartEncoder avoids loading files into memory.
multipart_data = MultipartEncoder(
    fields={
        # a file upload field
        'file1': ('file.py', open(r'..\files\笔记.xls', 'rb'), 'text/plain'),
        'file2': ('file.py', open(r'..\files\a.html', 'rb'), 'text/plain'),
        # plain text fields
        'action': 'storeFile',
    }
)
# response = requests.post('http://httpbin.org/post', data=multipart_data,
#                          headers={'Content-Type': multipart_data.content_type})
# The Content-Type header carries the encoder's generated boundary string.
response = requests.post('http://192.168.30.101:10200', data=multipart_data,
                         headers={'Content-Type': multipart_data.content_type})
print(multipart_data.content_type)
print(response.content) | gy890/ep_test | tmp/requests_toolbelt_demo.py | requests_toolbelt_demo.py | py | 869 | python | en | code | 0 | github-code | 13 |
33081698040 | class Solution:
def longestPalindrome(self, s: str) -> str:
if len(s) == 0: return ""
max_length = 0
max_left = None
max_right = None
for i in range(len(s)):
left = i
right = i
while left-1>=0 and right+1 <= len(s)-1:
if s[left-1] != s[right+1]: break
left -= 1
right += 1
if right-left+1 > max_length:
max_length = right-left+1
max_left = left
max_right = right
if i+1 <= len(s)-1 and s[i] == s[i+1]:
left = i
right = i+1
while left - 1 >= 0 and right + 1 <= len(s) - 1:
if s[left - 1] != s[right + 1]: break
left -= 1
right += 1
if right - left + 1 > max_length:
max_length = right - left + 1
max_left = left
max_right = right
return s[max_left:max_right+1]
if __name__ == '__main__':
print(Solution().longestPalindrome("babad"))
| LNZ001/Analysis-of-algorithm-exercises | leetcode_ex/ex5-最长回文子串.py | ex5-最长回文子串.py | py | 1,133 | python | en | code | 0 | github-code | 13 |
32111458078 | # Import a library of functions called 'pygame'
import pygame
import random
# Initialize the game engine
pygame.init()
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
# Set the height and width of the screen
display_width = 800
display_height = 600
screen = pygame.display.set_mode((display_width,display_height))
class Scroller:
    # NOTE(review): everything below runs at class-definition time -- the
    # whole game loop executes when the class statement is evaluated and
    # Scroller is never instantiated.  Confirm this is intentional.
    Star_img = pygame.image.load('Star.png')
    # Create an empty array
    star_list = []
    # Loop 25 times and add a star in a random x,y position
    for i in range(25):
        x = random.randrange(0, display_width)
        y = random.randrange(0, display_height)
        star_list.append([x, y])
    clock = pygame.time.Clock()
    # Loop until the user clicks the close button.
    done = False
    while not done:
        for event in pygame.event.get(): # User did something
            if event.type == pygame.QUIT: # If user clicked close
                done = True # Flag that we are done so we exit this loop
        # Set the screen background
        screen.fill(BLACK)
        # Process each star in the list
        for i in range(len(star_list)):
            # Draw the star
            screen.blit(Star_img, star_list[i])
            # Move the star down five pixels
            star_list[i][1] += 5
            # If the star has moved off the bottom of the screen
            if star_list[i][1] > display_height:
                star_list[i][1] = 600 - display_height
                star_list[i][0] = random.randrange(0,display_width)
        # Go ahead and update the screen with what we've drawn.
        pygame.display.update()
        clock.tick(60)
    # Be IDLE friendly. If you forget this line, the program will 'hang'
    # on exit.
    pygame.quit()
| HightopJamal/Python | Bug Game/starScroller.py | starScroller.py | py | 1,901 | python | en | code | 0 | github-code | 13 |
35975906891 | import tensorflow as tf
import os
import numpy as np
from housing_3_minibatch_saver_tensorboard import reset_graph
## hide TF deprecation warnings and C++ log spam
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
## data for file management (timestamped TensorBoard log directories)
from datetime import datetime
now = datetime.utcnow().strftime("%Y_%m_%d_%H:%M:%S")
file_name = os.path.basename(__file__)
root_logdir = "tf_dzienniki/{}".format(file_name)
# NOTE(review): logdir is built but never used below -- FileWriter gets
# root_logdir instead; confirm which one was intended.
logdir = "{}/{}_przebieg-{}/".format(root_logdir, file_name ,now)
## define linear activation unit
## resetting graph
reset_graph()
tf.compat.v1.reset_default_graph()
def relu(X):
    """Build one ReLU unit max(Xw + b, 0) with its own weight variable.

    X is assumed to be a 2-D placeholder (batch, n_features); the second
    dimension sizes the weight vector.
    """
    with tf.name_scope("relu"):
        w_shape = (int(X.get_shape()[1]),1)
        w = tf.Variable(tf.random_normal(w_shape), name="wagi")
        b = tf.Variable(0.0, name="obciazenie")
        z = tf.add(tf.matmul(X,w), b, name="result")
        return tf.maximum(z, 0., name="relu")
## build 5 independent ReLU units over the same input and sum them
n_features = 5
X = tf.placeholder(dtype = tf.float32, shape=(None, n_features), name = "X")
relus = [relu(X) for i in range(5)]
output = tf.add_n(relus, name="wyjscie")
## saving graph for TensorBoard
file_writer = tf.summary.FileWriter(root_logdir, tf.get_default_graph()) | MateuszKozakGda/My-Data-Science-repository | Tensorflow Sandbox/Simple Examples/modulowosc1.py | modulowosc1.py | py | 1,165 | python | en | code | 0 | github-code | 13 |
14645584085 | from sqlalchemy import Column, Identity, Integer, Table
from . import metadata
DeletedExternalAccountJson = Table(
"deleted_external_accountjson",
metadata,
Column("id", Integer, primary_key=True, server_default=Identity()),
)
__all__ = ["deleted_external_account.json"]
| offscale/stripe-sql | stripe_openapi/deleted_external_account.py | deleted_external_account.py | py | 285 | python | en | code | 1 | github-code | 13 |
7321004695 | import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
# Index convention: 0 = rock, 1 = paper, 2 = scissors.
rps = [rock, paper, scissors]
choice = int(input("What do you choose? Type 0 for Rock, 1 for Paper, 2 for Scissors"))
if choice >= 3 or choice < 0:
    print("You type invalid number. You lose")
else:
    print(rps[choice])
    print("Computer choose:")
    x = len(rps)
    # Computer picks a uniformly random move.
    comp = random.randint(0, x - 1)
    print(rps[comp])
    # Explicit outcome table: each branch is (player, computer).
    if choice == 2 and comp == 1:
        print("You Win")
    elif choice == 0 and comp == 1:
        print("You Lose")
    elif choice == 0 and comp == 2:
        print("You Win")
    elif choice == 1 and comp == 0:
        print("You Win")
    elif choice == 1 and comp == 2:
        print("You Lose")
    elif choice == 2 and comp == 0:
        print("You Lose")
    # Remaining case is a tie (choice == comp).
    else:
print("Replay") | sugengpriyanto/rock-paper-scissors | main.py | main.py | py | 1,013 | python | en | code | 0 | github-code | 13 |
852611674 | #!/bin/python3.6
import subprocess,sys, os
from etcdgetpy import etcdget as get
from etcdput import etcdput as put
from broadcasttolocal import broadcasttolocal
from socket import gethostname as hostname
def delcifs(*args):
    """Remove docker containers (and their pacemaker resources) that use a
    given volume but do not belong to the given IP address.

    args[0] -- volume name, args[1] -- IP address whose containers are kept.
    """
    vol = args[0]
    ipaddr = args[1]
    # List containers attached to the volume.
    cmdline = 'docker ps -f volume='+vol
    print(cmdline)
    dockers = subprocess.run(cmdline.split(),stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')
    # Drop the header row, rows mentioning our own IP, and short/blank lines.
    dockers = [ x for x in dockers if ipaddr not in x and 'CONTAINER' not in x and len(x) > 10]
    print('dockers',dockers)
    print('###############3')
    for docker in dockers:
        # Last whitespace-separated column of `docker ps` is the container name.
        res = docker.split()[-1]
        cmdline = 'docker rm -f '+res
        result = subprocess.run(cmdline.split(),stdout=subprocess.PIPE).stdout.decode('utf-8')
        # Also delete the matching pacemaker resource of the same name.
        cmdline = '/sbin/pcs resource delete --force '+res
        result = subprocess.run(cmdline.split(),stdout=subprocess.PIPE).stdout.decode('utf-8')
if __name__=='__main__':
    delcifs(*sys.argv[1:])
| YousefAllam221b/TopStorDevOld | VolumeDockerChange.py | VolumeDockerChange.py | py | 940 | python | en | code | 0 | github-code | 13 |
14792904523 | from kubernetes import client, config
def main():
    """Patch the pocket-datanode-dram deployment in the default namespace
    to the spec built below (2 replicas of the DRAM datanode image)."""
    # Uses the local kubeconfig (i.e. runs outside the cluster).
    config.load_kube_config()
    api_instance = client.ExtensionsV1beta1Api()
    # NOTE(review): `dep` is created but never used -- likely leftover code.
    dep = client.ExtensionsV1beta1Deployment()
    container = client.V1Container(name="pocket-datanode-dram", image="anakli/pocket-datanode-dram", ports=[client.V1ContainerPort(container_port=50030)])
    template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(labels={"app": "pocket-datanode-dram"}), spec=client.V1PodSpec(containers=[container]))
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=2, template=template)
    deployment = client.ExtensionsV1beta1Deployment(api_version="extensions/v1beta1", kind="Deployment", metadata=client.V1ObjectMeta(name="pocket-datanode-dram-deployment"), spec=spec)
    # The patch result is currently unused.
    api_response = api_instance.patch_namespaced_deployment(name="pocket-datanode-dram-deployment", namespace="default", body=deployment)
if __name__ == '__main__':
    main()
| anakli/pocket-controller | kubernetes/modify_datanode_deployment.py | modify_datanode_deployment.py | py | 943 | python | en | code | 0 | github-code | 13 |
24749057393 | from pymatgen.core import Structure
import numpy as np,os, pandas as pd,sys, shutil
import matplotlib.pyplot as plt
from elastemp.base.strain_analysis import get_curvature,check_convergence,get_energy_volume,fit_parabola_bulk,plot_parabola
from elastemp.base.symmetry import get_symmetry,get_num_constants
from elastemp.base.makefiles import make_incar_dynamic, make_kpt_dynamic
from elastemp.dynamic.deformations import dynamic_deformations,dynamic_response
from elastemp.thermal.response import phonon_thermal,constants_thermal
def make_dim():
    """ Write dim.txt holding the default phonon supercell dimension [2, 2, 2].
    :param None
    :returns: None; creates/overwrites dim.txt in the current directory.
    """
    # `with` guarantees the handle is closed even if the write fails
    # (the original used an explicit open/close pair).
    with open('dim.txt', 'w') as f:
        f.write('{} {} {}'.format(2, 2, 2))
def make_tmax():
    """ Write tmax.txt holding the default maximum temperature (1000 K) for
    elastic-constant calculations.
    :param None
    :returns: None; creates/overwrites tmax.txt in the current directory.
    """
    # `with` guarantees the handle is closed even if the write fails
    # (the original used an explicit open/close pair).
    with open('tmax.txt', 'w') as f:
        f.write('1000\n')
def make_dynamic_wrapper(calc_bulk_dynamic):
    """ Wrapper for make_dynamic function to do preprocessing.
    Validates that POSCAR/POTCAR/KPOINTS_dynamic/INCAR_dynamic/dim.txt are
    present (creating generic versions where possible) before dispatching.
    :param calc_bulk_dynamic : Boolean value to determine if dynamic calculation is for deformation-bulk or other modes.
    :type calc_bulk_dynamic : Bool (True/False)
    :returns None. calls make_bulk_dynamic or make_dynamic functions.
    """
    struct = Structure.from_file('POSCAR')
    strain_values = list(pd.read_csv('strains.txt',header=None)[0].values)
    if os.path.exists('KPOINTS_dynamic'):
        pass
    else:
        print('KPOINTS_dynamic is missing. Making a generic KPOINTS_dynamic file')
        make_kpt_dynamic()
    if os.path.exists('INCAR_dynamic'):
        pass
    else:
        print('INCAR_dynamic is missing. Making a generic INCAR_dynamic file')
        make_incar_dynamic()
    # POTCAR cannot be auto-generated (license-bound), so abort without it.
    if not os.path.exists('POTCAR'):
        print('POTCAR file is missing! Cannot proceed with calculation.')
        sys.exit()
    try:
        # dim.txt's first line holds the supercell dimensions, e.g. "2 2 2".
        f = open('dim.txt').readlines()[0]
        dim = [int(f.split()[i]) for i in range(len(f.split()))]
        print('Read supercell dimension sucessfully!')
    except:
        # NOTE(review): bare except also swallows unrelated errors; any
        # failure falls back to the default 2x2x2 supercell.
        print('Unable to read dim file from Input_files. Adding a default supercell dimension of 2 2 2')
        dim = [2,2,2]
        make_dim()
    if calc_bulk_dynamic:
        make_bulk_dynamic(struct,strain_values,dim)
    else:
        make_dynamic(struct,strain_values,dim)
def make_bulk_dynamic(struct, strain_values, dim):
    """Create the volumetric (bulk) strain deformations for phonon runs.

        :param struct : pymatgen Structure object
        :param strain_values : list of strain magnitudes
        :param dim : supercell dimensions (list)
        :returns None. Delegates to dynamic_deformations.make_bulk_deformation().
    """
    print(os.getcwd())
    # First line of the 'symmetry' file names the crystal symmetry class.
    with open('symmetry') as handle:
        crystal_symmetry = handle.readlines()[0]
    deformer = dynamic_deformations(crystal_symmetry, strain_values, dim, struct)
    deformer.make_bulk_deformation()
def get_dynamic_wrapper(calc_bulk_dynamic):
    """Dispatch post-processing to the bulk or per-mode routine.

        :param calc_bulk_dynamic : True selects the bulk (volumetric) branch.
        :type calc_bulk_dynamic : Bool (True/False)
        :returns None. calls get_bulk_dynamic or get_dynamic functions.
    """
    strain_values = list(pd.read_csv('strains.txt', header=None)[0].values)
    handler = get_bulk_dynamic if calc_bulk_dynamic == True else get_dynamic
    handler(strain_values)
def get_bulk_dynamic(strain_values):
    """ Function to get thermal properties like thermal expansion coeff, Vols at different temp
        :param strain_values : list of strain values
        :type strain_values : list
        :return volume_temp.txt : file which stores vols at different temp, thermal expansion coeff, Cv and phi values
                Thermal_expansion_temperature.png : plot of thermal expansion at different temperatures
        :rtype volume_temp.txt : .txt file
               Thermal_expansion_temperature.png : .png file

    NOTE(review): this function navigates via repeated os.chdir and assumes it
    starts in the run's root directory (containing deformation-bulk/, dim.txt,
    tmax.txt, symmetry, results_dir/).
    """
    os.chdir('deformation-bulk')
    df_en_temp = pd.DataFrame()
    df_cv_temp = pd.DataFrame()
    # Pass 1: post-process each strained phonon run (writes results in place).
    for j in range(1, len(strain_values) + 1):
        os.chdir('strain-{}/Phonon_calculation'.format(j))
        print('Curr_dir :', os.getcwd())
        e = dynamic_response()
        os.chdir('../..')
    os.chdir('..')
    try:
        tmax = int(open('tmax.txt').readlines()[0])
    except:
        print('Unable to read max temperature from tmax.txt. Setting default value of 1000K')
        tmax = 1000
        make_tmax()
    f = open('dim.txt').readlines()[0]
    dim = [int(f.split()[i]) for i in range(len(f.split()))]
    e = phonon_thermal(dim, tmax)
    os.chdir('deformation-bulk')
    # Pass 2: run phonopy's thermal-property extraction in each strain dir.
    for j in range(1, len(strain_values) + 1):
        os.chdir('strain-{}/Phonon_calculation'.format(j))
        print('Curr_dir: ', os.getcwd())
        e.run_phonopy_thermal()
        e.extract_thermal_yaml()
        os.chdir('../..')
    # Merge per-strain energy and Cv tables on the shared temperature column.
    for i in range(1, len(strain_values) + 1):
        df_en = pd.read_csv('strain-{}/Phonon_calculation/energy_temp.dat'.format(i), usecols=[0, 1], names=['temperature', 'strain-{}_energy'.format(i)], skiprows=1, sep='\t')
        df_cv = pd.read_csv('strain-{}/Phonon_calculation/energy_temp.dat'.format(i), usecols=[0, 2], names=['temperature', 'strain-{}_cv'.format(i)], skiprows=1, sep='\t')
        if len(df_en_temp) == 0:
            df_en_temp = df_en.copy()
        else:
            df_en_temp = df_en_temp.merge(df_en, on='temperature')
        if len(df_cv_temp) == 0:
            df_cv_temp = df_cv.copy()
        else:
            df_cv_temp = df_cv_temp.merge(df_cv, on='temperature')
    df_en_temp.to_csv('energy_strain_temperature.csv', sep='\t', index=False)
    df_cv_temp.to_csv('Cv_strain_temperature.csv', sep='\t', index=False)
    # Energies at the highest temperature row, one per strain.
    energies_temp = df_en_temp.iloc[-1][1:].values
    plot_parabola(strain_values, energies_temp, 'Deformation-bulk-curve-tmax', per_atom=True)
    df3 = pd.read_csv('energies.txt', sep='\t')
    vols = df3['volumes']
    temp = []; e_min = []; vols_T = []; lin_exp = []
    os.chdir('..')
    natoms = len(Structure.from_file('POSCAR').sites)
    V0 = Structure.from_file('POSCAR').volume
    f = open('results_dir/volume_temp.txt', 'w')
    f.write('temperature \t Volume_T\tVolume_0\tVolume_thermal_expansion\tLinear_thermal_expansion\tCv_calc\tLinear_expansion_factor\tphi\n')
    # For each temperature: fit E(V), track the equilibrium volume, and derive
    # thermal expansion, Cv at the equilibrium strain, and the phi factor.
    for i in range(len(df_en_temp)):
        energies = df_en_temp.iloc[i][1:].values
        cvs = df_cv_temp.iloc[i][1:].values
        V_T1 = fit_parabola_bulk(energies, vols, calc_bulk_parabola=False)
        cv_fit = np.polyfit(strain_values, cvs, 2)
        temperature = df_en_temp.iloc[i][0]
        vols_T.append(V_T1)
        temp.append(temperature)
        vol_thermal_exp = 0
        vpa_T = V_T1 / natoms
        if i != 0:
            # Finite-difference volumetric expansion between consecutive temps.
            dV = (vols_T[i] - vols_T[i-1]) / vols_T[i-1]
            vol_thermal_exp = dV / (temp[i] - temp[i-1])
        # Isotropic assumption: linear expansion is one third of volumetric.
        lin_thermal_exp = vol_thermal_exp / 3
        lin_exp.append(lin_thermal_exp * 1e6)
        eq_strain = lin_thermal_exp * temp[i]
        cv_calc = cv_fit[0] * eq_strain**2 + cv_fit[1] * eq_strain + cv_fit[2]
        lin_expansion_factor = 1 + eq_strain
        phi = 0
        if i != 0 and cv_calc != 0:
            # 1e5/160.217 converts eV/A^3-based units -- TODO confirm units.
            phi = (1e5 / 160.217) * (temperature * vpa_T / cv_calc) * lin_thermal_exp**2
        f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(temperature, V_T1, V0, vol_thermal_exp, lin_thermal_exp, cv_calc, lin_expansion_factor, phi))
    f.close()
    fig = plt.figure()
    plt.plot(temp, lin_exp, 'r')
    plt.title('Linear_thermal_expansion vs temperature')
    plt.xlabel('Temperature (K)')
    plt.ylabel(' Linear_thermal_expansion * 1e6 (/K)')
    plt.savefig('results_dir/Thermal_expansion_temperature.png')
def make_T_Zpe():
    """ function to make files to run enthalpy calculations for structures with lattice parameters corresponding to tmax.
        :param None.
        :returns saves strained POSCAR files corresponding to each deformation/strain.
        :rtype POSCAR plaintext file
    """
    if not os.path.exists('results_dir/volume_temp.txt'):
        print('volume_temp file does not exist. Program will stop!')
        sys.exit()
    print('Getting linear expansion factor at tmax')
    symmetry = open('symmetry').readlines()[0]
    # Column 6 of the last row of volume_temp.txt is the linear expansion
    # factor at the highest temperature (written by get_bulk_dynamic).
    lin_expansion_factor = float(open('results_dir/volume_temp.txt').readlines()[-1].split()[6])
    print('linear_exp', lin_expansion_factor)
    num_constants = get_num_constants(symmetry)
    strain_values = list(pd.read_csv('strains.txt', header=None)[0].values)
    for i in range(1, num_constants + 1):
        for j in range(1, len(strain_values) + 1):
            os.chdir('deformation-{}/strain-{}'.format(i, j))
            if not os.path.exists('Hightemp_static'):
                os.mkdir('Hightemp_static')
            os.chdir('Hightemp_static')
            # Reuse the parent run's VASP inputs for the static calculation.
            shutil.copy('../INCAR', 'INCAR')
            shutil.copy('../KPOINTS', 'KPOINTS')
            shutil.copy('../POTCAR', 'POTCAR')
            shutil.copy('../POSCAR', 'POSCAR')
            with open('POSCAR', 'r') as file:
                data = file.readlines()
            # Line 2 of a POSCAR is the universal scaling factor; overwrite it
            # with the tmax linear expansion factor to inflate the lattice.
            data[1] = "{}\n".format(lin_expansion_factor)
            with open('POSCAR', 'w') as file:
                file.writelines(data)
            os.chdir('../../..')
    print('Created hightemp_static dir and corresponding files to obtain the ZPE at tmax')
def get_T_Zpe():
    """ function to return zero temp enthalpies of structures corresponding to lattice parameters at tmax
        :param None.Reads symmetry, num_constants, strain_values from stored files.
        :returns None. saves ZPE.txt files (enthalpy/atom) in plaintext.

    NOTE(review): `energy`/`volume` are computed but never written here, and
    the `convergence_val` check has no else-branch -- verify get_energy_volume
    performs the ZPE.txt write as a side effect.
    """
    symmetry = open('symmetry').readlines()[0]
    num_constants = get_num_constants(symmetry)
    strain_values = list(pd.read_csv('strains.txt', header=None)[0].values)
    for i in range(1, num_constants + 1):
        for j in range(1, len(strain_values) + 1):
            os.chdir('deformation-{}/strain-{}/Hightemp_static'.format(i, j))
            if not os.path.exists('OSZICAR'):
                print('OSZICAR file does not exist. Program will stop!')
                sys.exit()
            convergence_val, l = check_convergence('OSZICAR')
            if convergence_val:
                energy, volume = get_energy_volume('../Hightemp_static')
            os.chdir('../../..')
    print('Calculated ZPE at tmax. Please proceed to do phonon calculations for each deformation/strain')
def make_dynamic(struct, strain_values, dim):
    """Create phonon-calculation deformations for every elastic mode.

        :param struct : pymatgen Structure object
        :param strain_values : list of strain magnitudes
        :param dim : supercell dimensions (list)
        :returns None. Delegates to dynamic_deformations.make_deformation().
    """
    # First line of the 'symmetry' file names the crystal symmetry class.
    with open('symmetry') as handle:
        crystal_symmetry = handle.readlines()[0]
    deformer = dynamic_deformations(crystal_symmetry, strain_values, dim, struct)
    deformer.make_deformation()
def get_dynamic(strain_values):
    """Collect phonon results from every deformation/strain directory.

        :param strain_values : list of strain magnitudes
        :returns None. Runs dynamic_response() in each Phonon_calculation dir.
    """
    with open('symmetry') as handle:
        crystal_symmetry = handle.readlines()[0]
    n_constants = get_num_constants(crystal_symmetry)
    for mode in range(1, n_constants + 1):
        os.chdir('deformation-{}'.format(mode))
        for idx in range(1, len(strain_values) + 1):
            os.chdir('strain-{}/Phonon_calculation'.format(idx))
            dynamic_response()
            os.chdir('../..')
        os.chdir('..')
| Karthik-Balas/elastemp | elastemp/input/make_dynamic_input.py | make_dynamic_input.py | py | 11,922 | python | en | code | 2 | github-code | 13 |
42232427256 | #import recog_value
import cv2
def capture():
    """Capture one frame from the default camera (device 0).

    Returns the frame image, or None when the camera cannot be opened
    or no frame could be read.
    """
    # Candidate resolutions: 0 = best, 1 = normal, 2 = low quality.
    img_size = [[3200, 1800], [1920, 1080], [1280, 960]]
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        return
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, img_size[1][0])
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, img_size[1][1])
    ret, img = cap.read()
    # Release the device handle; it was previously leaked on every call.
    cap.release()
    if not ret:
        return
return img | kolbe-ryo/RecognitionVal | camera.py | camera.py | py | 508 | python | en | code | 0 | github-code | 13 |
32043460449 | from functools import partial
import elasticsearch
from models import v1, v2
es = elasticsearch.Elasticsearch(['leetroutwrw2-9200.terminal.com:80'])
# ES partials
# Pre-bind the index/doc_type arguments so call sites only pass query params.
es_search = partial(es.search, index="pizzas")
es_get_customer = partial(es.get, index="pizzas", doc_type="customer")
es_get_pizza = partial(es.get, index="pizzas", doc_type="pizza")
def get_orders():
    """Return order objects from es after gathering all related objects."""
    ret = []
    orders = es_search(body={"filter": {"type": {"value": "order"}}})
    for order in orders['hits']['hits']:
        data = order['_source']
        # Fetch the customer
        cust = es_get_customer(id=data['customer'])
        # Fetch the pizza(s): normalise a single id into a one-element list.
        if hasattr(data['pizzas'], '__iter__'):
            pizza_ids = data['pizzas']
        else:
            pizza_ids = [data['pizzas']]
        pizzas = []
        pizza_objs = []
        for pid in pizza_ids:
            pizza = es_get_pizza(id=pid)
            # NOTE(review): `pizzas` is collected but never used afterwards.
            pizzas.append(pizza)
            pizza_objs.append(v2.Pizza.new_message(**pizza['_source']))
        # Synthesize objects (v1 customer nested inside a v2 order).
        customer = v1.Customer.new_message(**cust['_source'])
        order_obj = v2.Order.new_message(customer=customer, pizzas=pizza_objs)
        ret.append(order_obj)
    return ret
| leetrout/escapnprotojunk | giovannis.py | giovannis.py | py | 1,293 | python | en | code | 0 | github-code | 13 |
71549591378 | import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class FormPendaftaran(QWidget):
    """PyQt5 registration form ("Avengers candidate registration").

    Collects name, gender, birth date, hobby and address, and shows a
    confirmation dialog echoing the entered data on submit.
    """

    def __init__(self):
        super().__init__()
        self.setupUi()

    def setupUi(self):
        """Create all widgets and arrange them in a grid layout."""
        self.resize(350, 200)
        self.move(300, 300)
        self.setWindowTitle('Form Pendaftaran')
        self.label1 = QLabel()
        self.label1.setText('<b> Pendaftaran Calon Anggota Avengers </b>')
        self.label2 = QLabel()
        self.label2.setText('Nama')
        self.textNama = QLineEdit()
        self.label3 = QLabel()
        self.label3.setText('Jenis Kelamin')
        # Gender radio buttons; "Laki-Laki" (male) is pre-selected.
        self.cekKelamin1 = QRadioButton()
        self.cekKelamin1.setText('&Laki-Laki')
        self.cekKelamin1.setChecked(True)
        self.cekKelamin2 = QRadioButton()
        self.cekKelamin2.setText('&Perempuan')
        self.label4 = QLabel()
        self.label4.setText('Tanggal Lahir')
        self.dateEdit = QDateEdit()
        self.dateEdit.setDisplayFormat('dd/MM/yyyy')
        self.dateEdit.setDate(QDate.currentDate())
        self.label5 = QLabel()
        self.label5.setText('Hobi')
        self.combo1 = QComboBox()
        self.combo1.addItem('Basket')
        self.combo1.addItem('Sepak bola')
        self.combo1.addItem('Voli')
        self.combo1.addItem('Catur')
        self.combo1.addItem('Lainnya')
        self.label6 = QLabel()
        self.label6.setText('Alamat')
        self.AlamatEdit = QTextEdit()
        self.SubmitButton = QPushButton('&Submit')
        self.CancelButton = QPushButton('&Cancel')
        # Grid layout: row 0 title, rows 1-6 inputs, row 7 buttons.
        layout = QGridLayout()
        layout.addWidget(self.label1, 0, 1, 1, 2)
        layout.addWidget(self.label2, 1, 0)
        layout.addWidget(self.textNama, 1, 1, 1, 2)
        layout.addWidget(self.label3, 2, 0)
        layout.addWidget(self.cekKelamin1, 2, 1,)
        layout.addWidget(self.cekKelamin2, 3, 1,)
        layout.addWidget(self.label4, 4, 0,)
        layout.addWidget(self.dateEdit, 4, 1, 1, 2)
        layout.addWidget(self.label5, 5, 0,)
        layout.addWidget(self.combo1, 5, 1, 1, 2)
        layout.addWidget(self.label6, 6, 0,)
        layout.addWidget(self.AlamatEdit, 6, 1, 1, 2)
        layout.addWidget(self.SubmitButton, 7, 1)
        layout.addWidget(self.CancelButton, 7, 2)
        self.setLayout(layout)
        self.SubmitButton.clicked.connect(self.submit)
        self.CancelButton.clicked.connect(self.cancel)

    def submit(self):
        """Show a confirmation dialog summarising the entered data."""
        nama = str(self.textNama.text())
        ttl = str(self.dateEdit.date().toString())
        hobi = str(self.combo1.currentText())
        alamat = str(self.AlamatEdit.toPlainText())
        if self.cekKelamin1.isChecked():
            QMessageBox.information(self, 'Pendaftaran Berhasil',
                                    'Nama : ' + nama + '\n' +
                                    'Jenis Kelamin : Laki-Laki\n' +
                                    'Tanggal Lahir : ' + ttl + '\n' +
                                    'Hobi : ' + hobi + '\n' +
                                    'Alamat : ' + alamat + '\n')
        else:
            QMessageBox.information(self, 'Pendaftaran Berhasil',
                                    'Nama : ' + nama + '\n' +
                                    'Jenis Kelamin : Perempuan\n' +
                                    'Tanggal Lahir : ' + ttl + '\n' +
                                    'Hobi : ' + hobi + '\n' +
                                    'Alamat : ' + alamat + '\n')

    def cancel(self):
        """Close the form window."""
        self.close()
| erpambudi/Pemrograman-GUI | Challange/FormPendaftaran.py | FormPendaftaran.py | py | 2,901 | python | en | code | 0 | github-code | 13 |
17314911334 | # -*- coding:utf-8 -*-
"""
__title__ = ''
__author__ = 'Administrator'
__time__ = '2018/6/7'
"""
from shapely.geometry import MultiPoint
from shapely.geometry import Point
import numpy as np
import pandas as pd
out_df = pd.DataFrame()
data = pd.read_csv('outPut/6MonthIMOxibo-singaporePOI', header=None)
data.columns = ['lon', 'lat', 'num']
# Keep only rows with a valid (non-negative) cluster number.
data = data[data.num >= 0]
# Sort by cluster number, descending.
# NOTE(review): the sorted result is discarded -- sort_values is not in-place;
# assign the result (or pass inplace=True) if the ordering matters.
data.sort_values(by=['num'], ascending=False)
print(data.head(10))
max2 = data['num'].max()
print(type(max2))
print(max2)
maxnum = int(max2)
print(maxnum)
# Write one CSV file per cluster id.
# NOTE(review): range(maxnum) stops at maxnum-1, so the largest cluster id
# never gets a file; range(maxnum + 1) looks intended -- confirm.
for j in range(maxnum):
    out_df = data[data['num'] == j]
    out_df.to_csv('singaporePOICu/' + 'cu' + "-" + str(j), index=False, header=False)
| Jondamer/MarineTraffic | python/泊位提取相关/按照簇号生成多个文件.py | 按照簇号生成多个文件.py | py | 699 | python | en | code | 0 | github-code | 13 |
27311933183 | ''' Contains functions for the logic needed to run the GUI of the application '''
import PySimpleGUI as sg
import os
from typing import List, AnyStr
from playsound import playsound
from utils.real_time_voice_cloning import main_tts
def filter_voice_sample_file_names(voice_sample_dir: AnyStr) -> List:
    '''
    Collects the names of all .wav voice samples in a directory, with the
    file extension stripped from each name.

    Parameters:
    -----------
    voice_sample_dir: The directory containing the voice samples

    Returns:
    --------
    A list of voice sample names with their file extensions stripped
    '''
    names = []
    for entry in os.listdir(voice_sample_dir):
        if entry.endswith('.wav'):
            root, _ext = os.path.splitext(entry)
            names.append(root)
    return names
def filter_text_sample_file_names(text_sample_dir: AnyStr) -> List:
    '''
    Collects the names of all .txt text samples in a directory, with the
    file extension stripped from each name.

    Parameters:
    -----------
    text_sample_dir: The directory containing the text samples

    Returns:
    --------
    A list of text sample names with their file extensions stripped
    '''
    names = []
    for entry in os.listdir(text_sample_dir):
        if entry.endswith('.txt'):
            root, _ext = os.path.splitext(entry)
            names.append(root)
    return names
def filter_text_sample_file_name(text_sample_filename: AnyStr) -> AnyStr:
    '''
    Strips the directory part and the file extension from a text sample
    path, leaving just the name of the piece of text.

    Parameters:
    -----------
    text_sample_filename: The filename of the text

    Returns:
    --------
    The name of the piece of text
    '''
    base = os.path.basename(text_sample_filename)
    root, _ext = os.path.splitext(base)
    return root
def filter_voice_sample_file_name(voice_sample_filename: AnyStr) -> AnyStr:
    '''
    Strips the directory part and the file extension from a voice sample
    path, leaving just the speaker's name.

    Parameters:
    -----------
    voice_sample_filename: The filename of the speaker's voice

    Returns:
    --------
    A speaker's name
    '''
    base = os.path.basename(voice_sample_filename)
    root, _ext = os.path.splitext(base)
    return root
def get_image_file_name(wav_filename: AnyStr) -> List:
    '''
    Maps a voice sample path to the path of the speaker's portrait image
    (same path with a .png extension).

    Parameters:
    -----------
    wav_filename: The path to the wav file containing a sample voice

    Returns:
    --------
    A filepath to the image corresponding to an image of the speaker
    '''
    root, _ext = os.path.splitext(wav_filename)
    return root + ".png"
def get_text_file_name(text_filename: AnyStr) -> List:
    '''
    Maps a text sample path to its true on-disk path (same path with a
    .txt extension).

    Parameters:
    -----------
    text_filename: The path to the text file

    Returns:
    --------
    A filepath to the text (including file extension)
    '''
    root, _ext = os.path.splitext(text_filename)
    return root + ".txt"
def read_text_file(filepath: AnyStr) -> AnyStr:
    '''
    Given a text file, reads the entire content of the file and returns that
    content as a single string.

    Parameters:
    -----------
    filepath: The path to the text file to read

    Returns:
    --------
    The file's full content as a single string
    '''
    # `with` closes the handle even if read() raises (the original leaked it).
    with open(filepath, mode='r') as text_file:
        return text_file.read()
def run_main_event_loop(main_window):
    '''
    Handles one iteration of the main event loop for the application:
    reads an event from the window and dispatches on it (exit, folder
    selection, voice/text list selection, and the TTS button).

    NOTE(review): the bare `except: pass` handlers silently swallow all
    errors (including typos) -- consider catching specific exceptions.

    Parameters:
    -----------
    main_window: The main window for the application

    Returns:
    --------
    None
    '''
    event, values = main_window.read()
    if event == "Exit" or event == sg.WIN_CLOSED:
        main_window.close()
        exit()
    # Folder name was filled in, so make a list of files in the folder
    if event == "-FOLDER-":
        folder = values["-FOLDER-"]
        try:
            # Get list of files in folder
            file_list = filter_voice_sample_file_names(folder)
        except:
            file_list = []
        main_window["-VOICE FILE LIST-"].update(file_list)
    elif event == "-VOICE FILE LIST-":  # A voice file was chosen from the listbox
        try:
            # Show the speaker's name and portrait for the chosen sample.
            filename = get_image_file_name(os.path.join('./voice_samples/', values["-VOICE FILE LIST-"][0]))
            main_window["-VOICE NAME-"].update(filter_voice_sample_file_name(filename))
            main_window["-IMAGE-"].update(filename=filename, size=(200, 200))
            # Update the TTS button: enabled once both lists have a selection.
            if values["-VOICE FILE LIST-"][0] and values["-TEXT FILE LIST-"][0]:
                main_window["-TTS-"].update(disabled=False)
                print('TTS Button enabled!')
        except:
            pass
    elif event == "-TEXT FILE LIST-":  # A text file was chosen from the listbox
        try:
            filename = get_text_file_name(os.path.join('./text_samples/', values["-TEXT FILE LIST-"][0]))
            # NOTE(review): this prints the literal text "(unknown)" -- the
            # f-string placeholder appears to have been lost.
            print(f'Text File: (unknown)')
            print(f'Voice File: {values["-VOICE FILE LIST-"][0]}')
            main_window["-TEXT SCRIPT-"].update(read_text_file(filename))
            # Update the TTS button: enabled once both lists have a selection.
            if values["-VOICE FILE LIST-"][0] and values["-TEXT FILE LIST-"][0]:
                main_window["-TTS-"].update(disabled=False)
                print('TTS Button enabled!')
        except:
            pass
    elif event == "Create New Voice":
        print('\'Create New Voice\' button clicked!')
    elif event == "Create New Text":
        print('\'Create New Text\' button clicked!')
    elif event == "-TTS-":
        # Perform TTS
        try:
            # Get path to speaker file
            speaker_path = "./voice_samples/" + values["-VOICE FILE LIST-"][0] + ".wav"
            # Get text content for speaker to vocalize
            text_content = read_text_file(get_text_file_name(os.path.join('./text_samples/', values["-TEXT FILE LIST-"][0])))
            # Get content name (for saving the spoken file)
            content_name = values["-TEXT FILE LIST-"][0]
            # Perform TTS
            main_tts.tts(text_content, speaker_path, content_name)
            print("Pressed the Button for TTS!")
            print(main_window["-TEXT SCRIPT-"])
        except:
            pass
| stephenhgregory/ReadToMe | ReadToMeApp/scripts/utils/gui_logic.py | gui_logic.py | py | 6,162 | python | en | code | 0 | github-code | 13 |
7831432190 | import socket
import environ
from ..django import DATABASES, INSTALLED_APPS, TESTING
from ..third_party.aws import AWS_S3_CUSTOM_DOMAIN
from ..third_party.sentry import SENTRY_REPORT_URI
env = environ.FileAwareEnv()

DEVELOPMENT = env.bool("DEVELOPMENT", default=True)

ALLOWED_HOSTS: list[str] = env(
    "ALLOWED_HOSTS", default=["www.courtlistener.com"]
)

EGRESS_PROXY_HOST = env(
    "EGRESS_PROXY_HOST", default="http://cl-webhook-sentry:9090"
)

# HSTS: two years, preloaded, covering subdomains.
SECURE_HSTS_SECONDS = 63_072_000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_CONTENT_TYPE_NOSNIFF = True
X_FRAME_OPTIONS = "DENY"
SECURE_REFERRER_POLICY = "same-origin"

RATELIMIT_VIEW = "cl.simple_pages.views.ratelimited"

if DEVELOPMENT:
    # Local development runs over plain HTTP, so cookies cannot be Secure.
    SESSION_COOKIE_SECURE = False
    CSRF_COOKIE_SECURE = False
    SESSION_COOKIE_DOMAIN = None
    # For debug_toolbar
    INSTALLED_APPS.append("debug_toolbar")
    # Allow the docker-network gateway addresses so the toolbar shows up.
    hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
    INTERNAL_IPS = [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips] + [
        "127.0.0.1"
    ]
    if TESTING:
        db = DATABASES["default"]
        db["ENCODING"] = "UTF8"
        db["TEST_ENCODING"] = "UTF8"
        db["CONN_MAX_AGE"] = 0
else:
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True
    # Trust the load balancer's X-Forwarded-Proto header for HTTPS detection.
    SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Standard Django password validators, with a 9-character minimum length.
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
        "OPTIONS": {
            "min_length": 9,
        },
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]

# CORS
# API endpoints are open to any origin (read-only methods), with credentials.
CORS_ALLOW_ALL_ORIGINS = True
CORS_URLS_REGEX = r"^/api/.*$"
CORS_ALLOW_METHODS = (
    "GET",
    "HEAD",
    "OPTIONS",
)
CORS_ALLOW_CREDENTIALS = True
# CSP
# Components:
# - hCaptcha: https://docs.hcaptcha.com/#content-security-policy-settings
# - Plausible: https://github.com/plausible/docs/issues/20
# - Stripe: https://stripe.com/docs/security/guide#content-security-policy
CSP_CONNECT_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",  # for embedded PDFs
    "https://hcaptcha.com/",
    "https://*.hcaptcha.com/",
    "https://plausible.io/",
    "https://api.stripe.com/",
)
CSP_FONT_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",
    "data:",  # Some browser extensions like this.
)
CSP_FRAME_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",  # for embedded PDFs
    "https://hcaptcha.com/",
    "https://*.hcaptcha.com/",
    "https://js.stripe.com/",
    "https://hooks.stripe.com/",
)
CSP_IMG_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",
    "https://portraits.free.law/",
    "data:",  # @tailwindcss/forms uses data URIs for images.
    "https://*.stripe.com/",
)
CSP_MEDIA_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",
    "data:",  # Some browser extensions like this.
)
CSP_OBJECT_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",  # for embedded PDFs
)
CSP_SCRIPT_SRC = (
    "'self'",
    "'report-sample'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",
    "https://hcaptcha.com/",
    "https://*.hcaptcha.com/",
    "https://plausible.io/",
    "https://js.stripe.com/",
)
CSP_STYLE_SRC = (
    "'self'",
    "'report-sample'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",
    "https://hcaptcha.com/",
    "https://*.hcaptcha.com/",
    "'unsafe-inline'",
)
CSP_DEFAULT_SRC = (
    "'self'",
    f"https://{AWS_S3_CUSTOM_DOMAIN}/",
)
CSP_BASE_URI = "'self'"
# Nonces let inline <script> tags run without 'unsafe-inline'.
CSP_INCLUDE_NONCE_IN = ["script-src"]
if not any(
    (DEVELOPMENT, TESTING)
):  # Development and test aren't used over HTTPS (yet)
    CSP_UPGRADE_INSECURE_REQUESTS = True
| freelawproject/courtlistener | cl/settings/project/security.py | security.py | py | 3,874 | python | en | code | 435 | github-code | 13 |
38658103012 | # -*- coding: cp936 -*-
"""
Created on Fri Apr 24 16:05:13 2015
@author: shuaiyi
"""
# from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
import numpy as np
import cv2
class SiftFeature(TransformerMixin):
    """
    Extract a SIFT descriptor from each square image patch.

    Input is a flattened img patch of shape size*size (default 16*16).
    Fits into an sklearn pipeline as a stateless transformer.
    """

    def __init__(self, size=16):
        # size: edge length (pixels) of the square input patch.
        self.size = size
        self.sift = cv2.SIFT()

    def fit(self, X=None, y=None):
        # Stateless: nothing to learn, returned unchanged for pipeline use.
        return self

    def transform(self, X):
        '''
        X: array like: n_samples, n_features (each row a flattened patch).
        Returns one L2-capped SIFT descriptor per row, stacked vertically.
        '''
        results = []
        for sample in X:
            tmp = np.require(sample.reshape(self.size, self.size), dtype=np.ubyte)
            # One keypoint at the patch centre with fixed size (angle -1,
            # i.e. unspecified orientation).
            kp = cv2.KeyPoint(self.size//2, self.size//2, self.size/4)
            _, desc = self.sift.compute(tmp, [kp])
            desc = self.normalizeSIFT(desc)
            results.append(desc)
        return np.vstack(results)

    def get_params(self, deep=True):
        return {"size": self.size}

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            self.__setattr__(parameter, value)
        return self

    def normalizeSIFT(self, descriptor):
        '''
        Normalize the SIFT descriptor: scale by the L2 norm, but only when
        the norm exceeds 1.0 (small descriptors are left untouched).
        '''
        descriptor = np.array(descriptor)
        norm = np.linalg.norm(descriptor)
        if norm > 1.0:
            descriptor /= float(norm)
        return descriptor
if __name__ == "__main__":
    # Smoke test: extract 100 random 16x16 patches from the skimage coins
    # image, flatten them, and run them through the transformer.
    from skimage.data import coins
    from sklearn.feature_extraction.image import extract_patches_2d
    img = coins()
    patches = extract_patches_2d(img, (16, 16), 100, np.random.RandomState())
    patches = patches.reshape(patches.shape[0], -1)
    patches = np.asarray(patches, 'float32')
    sift = SiftFeature()
    sift.fit()
feature = sift.transform(patches) | understar/CNN-detection-tracking | LULC/sift.py | sift.py | py | 1,985 | python | en | code | 26 | github-code | 13 |
37438755790 |
# time: o(n) ; space: o(n)
def maxSubsetSumNoAdjacent_sol1(array):
    """Max sum of non-adjacent elements, DP-table version.

    table[i] holds the best sum using elements up to index i.
    Time O(n); space O(n).
    """
    if not array:
        return 0
    if len(array) == 1:
        return array[0]
    table = list(array)
    table[1] = max(table[0], table[1])
    for idx in range(2, len(table)):
        # Either skip array[idx], or take it on top of the best sum two back.
        table[idx] = max(table[idx - 1], table[idx - 2] + array[idx])
    return table[-1]
def maxSubsetSumNoAdjacent_sol2(array):
    """Max sum of non-adjacent elements, rolling-variables version.

    Keeps only the best sums for the previous two positions.
    Time O(n); space O(1).
    """
    if not array:
        return 0
    if len(array) == 1:
        return array[0]
    prev, before_prev = max(array[0], array[1]), array[0]
    for value in array[2:]:
        prev, before_prev = max(prev, before_prev + value), prev
    return prev
array = [75, 105,120,75,90,135] # 75+120+135
print(maxSubsetSumNoAdjacent_sol1(array))
print(maxSubsetSumNoAdjacent_sol2(array)) | robinfelix25/DSA-with-python | Blind75/Dynamic_Programming/AlgoExpert/max_sum_non_adjacent.py | max_sum_non_adjacent.py | py | 819 | python | en | code | 0 | github-code | 13 |
3022180066 | from sys import *
from time import *
def chrput(c=0):
    """Interactively pick a character with the arrow keys.

    Up/Down cycle through character codes (the range 143-161 is skipped),
    Right confirms the selection.  Returns the chosen character when c == 0,
    otherwise the character code as an int.
    """
    l = True
    i = 138  # starting character code
    stdout.write(" ")
    while l:
        # Redraw the current candidate character with a cursor glyph.
        stdout.write("\b" + chr(i) + chr(131) + "\b")
        g = getkey()
        if g == " [A":  # up arrow: next code
            if i < 255:
                i += 1
                if i == 143:
                    i = 162
        elif g == " [B":  # down arrow: previous code
            if i >= 34:
                i -= 1
                if i == 161:
                    i = 142
        elif g == " [C":  # right arrow: confirm
            l = False
    if c == 0:
        stdout.write("\n")
        return chr(i)
    else:
        return i
def demo(tn=1):
    """Count key presses for tn seconds and report the total.

    Each getkey() call consumes 3 characters, hence the division by 3.
    Returns the raw captured character string.
    """
    t = monotonic()
    l = ""
    while (t + tn) > monotonic():
        l += getkey()
    print(len(l)/3, "touches appuyees en ", tn, "secondes")
    return l
def keyput(l):
    """Read l characters one at a time, echoing them as they are typed.

    Backspace triggers the interactive character picker instead.
    NOTE(review): `ch` is undefined in this module -- it is probably meant
    to be chrput(1), which returns a character code when called with 1.
    """
    s = ""
    c = ""
    for i in range(l):
        c = getkey(1)
        if c != "\b":
            s += c
            stdout.write(s[i])
        else:
            s += chr(ch(1))
            stdout.write(s[i] + "\b")
    stdout.write("\n")
    return s
def getkey(n=0):
    """Read a key press from stdin.

    With n == 0 (default) read a 3-character escape sequence, mapping the
    ESC byte to a space; otherwise read a single character.
    """
    if n != 0:
        return stdin.read(1)
    return stdin.read(3).replace(chr(27), " ")
def ispressed(key="up", keylist=[]):
    """Return True when the captured sequence `keylist` matches `key`.

    `key` may be a friendly name ("up", "down", "left", "right", "enter",
    "annul") or a raw escape sequence; friendly names are translated to
    their escape sequences before comparison.
    """
    aliases = {
        "enter": " [F",
        "up": " [A",
        "down": " [B",
        "right": " [C",
        "left": " [D",
        "annul": " [2",
    }
    key = aliases.get(key, key)
    return key == keylist
"""use like this :
g=getkey()
if ispressed("up",g): #if up key is pressed
#do if pressed
"""
| Manerr/TI-PYTHON-KEYPAD-LIBRARY | old_version_TIKEYLIB.py | old_version_TIKEYLIB.py | py | 1,284 | python | en | code | 0 | github-code | 13 |
16447738825 | from graph import Graph
from tarjans_biconnectivity import TarjansBiconnectivity
from polynomial_time_algorithm import PolynomialTimeAlgorithm
from graph_parser import GraphParser
import sys
def main(argv):
    """Build (or parse) an AT-free graph and run the 3-colouring algorithm.

    argv: command-line arguments; argv[0], when present, is a path to a
    graph description file.  Without it a hard-coded 8-vertex, 11-edge
    example graph is used.
    """
    file_path = None
    num_of_vertices = None
    num_of_edges = None
    edges = None
    AT_free_graph = None
    if argv != []:
        file_path = argv[0]
    if file_path is None:
        # Default demo graph: 8 vertices, 11 undirected edges.
        num_of_vertices = 8
        num_of_edges = 11
        edges = [
            (0, 1),
            (1, 2),
            (1, 5),
            (1, 3),
            (1, 4),
            (2, 6),
            (3, 6),
            (3, 4),
            (4, 7),
            (6, 7),
            (5, 7)
        ]
        AT_free_graph = Graph(num_of_vertices, num_of_edges, edges)
    else:
        AT_free_graph = GraphParser.parse_graph_from_file(file_path)
    AT_free_graph.show()
    algorithm = PolynomialTimeAlgorithm(AT_free_graph)
    algorithm.run()
algorithm.run()
if __name__ == "__main__":
main(argv=sys.argv[1:]) | DimitrisSintos/AT-Free-Graphs_3-Colouring | src/main.py | main.py | py | 1,053 | python | en | code | 0 | github-code | 13 |
30589577322 | # paura_lite:
# An ultra-simple command-line audio recorder with real-time
# spectrogram visualization
import numpy as np
import pyaudio
import struct
import scipy.fftpack as scp
import termplotlib as tpl
import os
# get window's dimensions
rows, columns = os.popen('stty size', 'r').read().split()

buff_size = 0.2  # window size in seconds
wanted_num_of_bins = 40  # number of frequency bins to display

# initialize soundcard for recording:
fs = 8000
pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16, channels=1, rate=fs,
                 input=True, frames_per_buffer=int(fs * buff_size))

while 1:  # for each recorded window (until ctr+c) is pressed
    # get current block and convert to list of short ints,
    block = stream.read(int(fs * buff_size))
    # NOTE(review): `format` shadows the builtin of the same name.
    format = "%dh" % (len(block) / 2)
    shorts = struct.unpack(format, block)

    # then normalize and convert to numpy array:
    x = np.double(list(shorts)) / (2**15)
    seg_len = len(x)

    # get total energy of the current window and compute a normalization
    # factor (to be used for visualizing the maximum spectrogram value)
    energy = np.mean(x ** 2)
    max_energy = 0.01  # energy for which the bars are set to max
    max_width_from_energy = int((energy / max_energy) * int(columns)) + 1
    if max_width_from_energy > int(columns) - 10:
        max_width_from_energy = int(columns) - 10

    # get the magnitude of the FFT and the corresponding frequencies
    # NOTE(review): scipy.fftpack is legacy; scipy.fft is the maintained API.
    X = np.abs(scp.fft(x))[0:int(seg_len/2)]
    freqs = (np.arange(0, 1 + 1.0/len(X), 1.0 / len(X)) * fs / 2)

    # ... and resample to a fix number of frequency bins (to visualize)
    wanted_step = (int(freqs.shape[0] / wanted_num_of_bins))
    freqs2 = freqs[0::wanted_step].astype('int')
    X2 = np.mean(X.reshape(-1, wanted_step), axis=1)

    # plot (freqs, fft) as horizontal histogram:
    fig = tpl.figure()
    fig.barh(X2, labels=[str(int(f)) + " Hz" for f in freqs2[0:-1]],
             show_vals=False, max_width=max_width_from_energy)
    fig.show()
    # add exactly as many new lines as they are needed to
    # fill clear the screen in the next iteration:
    print("\n" * (int(rows) - freqs2.shape[0] - 1))
| tyiannak/paura | paura_lite.py | paura_lite.py | py | 2,191 | python | en | code | 209 | github-code | 13 |
31234020539 | # -*- coding: utf-8 -*-
"""
Feature Extraction
Christian Rodriguez
crodriguez0874@gmail.com
07/10/19
Summary - In this script, we try multiple dimension reduction methods on the
base stats of the pokemon (HP, Attack, Sp. Attack, Defense, Sp. Defense,
Speed). The methods implemented are PCA, polynomial-kernal PCA, RBF-kernal PCA,
Cosine-kernal PCA, and Isomap. Prior to applying the dimension reduction
methods, we made sure to standardize the data via min-max
(x_i - min(x)/max(x)). Also, the parameters for each dimension reduction method
was chosen via grid search and observing which set of parameters best results
in the data being linearly seperable in 3 dimensions.
"""
###############################################################################
###Loading libraries and data
###############################################################################
import pandas as pd
import sklearn.decomposition as decomp
import sklearn.manifold as mani
import plotnine as p9
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# Load the training split of the pokemon data set (latin-1 handles the
# accented characters in some pokemon names).
pokemon = pd.read_csv('data/pokemon_training.csv', encoding='latin-1')
###############################################################################
###Standardizing Features of Interest (min-max)
###############################################################################
# The six base stats that every dimension-reduction method below operates on.
stats_features = ['hp', 'attack', 'defense', 'spattack', 'spdefense', 'speed']
pokemon_stats = pokemon.loc[:, stats_features]
pokemon_stats_min = pokemon_stats.min()
pokemon_stats_max = pokemon_stats.max()
# NOTE(review): this scales as (x - min(x)) / max(x), not the conventional
# min-max (x - min(x)) / (max(x) - min(x)). It matches the module docstring's
# formula, and every projection below depends on it, so confirm before
# changing.
pokemon_stats_scaled = pokemon_stats - pokemon_stats_min
pokemon_stats_scaled = pokemon_stats_scaled / pokemon_stats_max
is_legendary = pokemon.loc[:, 'is_legendary']
# Boolean masks used to colour legendary vs. non-legendary points in plots.
legendary_index = (is_legendary == 1)
non_legendary_index = (is_legendary == 0)
###############################################################################
###PCA
###############################################################################
###Conducting PCA on the base stats data
basestats_pca = decomp.PCA(n_components=None)
basestats_pca.fit(pokemon_stats_scaled)
basestats_pca_column_names = ['PC'+str(i) for i in
                              range(1, (basestats_pca.n_components_+1))]
# FIX: PCA.components_ has shape (n_components, n_features); transpose it so
# the rows line up with the feature index and the columns with the PC names,
# otherwise the printed loadings table is mislabeled.
basestats_pca_loadings = pd.DataFrame(data=basestats_pca.components_.T,
                                      index=stats_features,
                                      columns=basestats_pca_column_names)
print(basestats_pca_loadings)
###Base Stats PCA Screeplot (cumulative proportion of variance explained)
basestats_pca_exp_var = [(sum(basestats_pca.explained_variance_[:i])/
                          sum(basestats_pca.explained_variance_)) for i in
                         range(1, int(basestats_pca.n_components_)+1)]
basestats_screeplot_df = pd.DataFrame(columns=['number_of_components',
                                               'perc_of_var_explained'])
basestats_screeplot_df['number_of_components'] = range(1,7)
basestats_screeplot_df['perc_of_var_explained'] = basestats_pca_exp_var
basestats_pca_screeplot = (p9.ggplot(data=basestats_screeplot_df,
                                     mapping=p9.aes(x='number_of_components',
                                                    y='perc_of_var_explained')) +
                           p9.geoms.geom_point() +
                           p9.geoms.geom_line() +
                           p9.theme_bw() +
                           p9.labs(title='Proportion of Variance Explained (PCA)',
                                   x='Number of Principal Components Used',
                                   y='Percentage of Variance') +
                           p9.ylim(0, 1))
print(basestats_pca_screeplot)
###Transforming the base stats data via PCA and projecting to 2D
basestats_PCscores = pd.DataFrame(data=basestats_pca.transform(pokemon_stats_scaled),
                                  columns=['PC1', 'PC2', 'PC3',
                                           'PC4', 'PC5', 'PC6'])
basestats_PCscores['is_legendary'] = is_legendary
basestats_PCscores_plot = (p9.ggplot(data=basestats_PCscores,
                                     mapping=p9.aes(x='PC1',
                                                    y='PC2')) +
                           p9.geoms.geom_point(p9.aes(color='factor(is_legendary)'),
                                               alpha=0.30) +
                           p9.theme_bw() +
                           p9.labs(title='2D Projection of Base Stats via PCA',
                                   x='PC1',
                                   y='PC2') +
                           p9.scale_color_discrete(name='Class',
                                                   labels=['Non-legendary',
                                                           'Legendary']))
print(basestats_PCscores_plot)
###3D Representation
# FIX: plt.figure is a function object; it must be called to create a Figure
# (the bare reference silently bound the function itself).
fig = plt.figure()
ax = plt.axes(projection='3d')
# FIX: DataFrame.ix was deprecated and removed from pandas; .loc performs the
# same boolean-mask row selection with label-based column selection.
ax.scatter3D(basestats_PCscores.loc[legendary_index, ['PC1']],
             basestats_PCscores.loc[legendary_index, ['PC2']],
             basestats_PCscores.loc[legendary_index, ['PC3']],
             c='b',
             alpha=0.3,
             label='Legendary')
ax.scatter3D(basestats_PCscores.loc[non_legendary_index, ['PC1']],
             basestats_PCscores.loc[non_legendary_index, ['PC2']],
             basestats_PCscores.loc[non_legendary_index, ['PC3']],
             c='r',
             alpha=0.3,
             label='Non-legendary')
ax.set_title('3D Projection of Base Stats via PCA', fontweight='bold')
ax.set_xlabel('PC1', fontweight='bold')
ax.set_ylabel('PC2', fontweight='bold')
ax.set_zlabel('PC3', fontweight='bold')
ax.legend(loc='best', bbox_to_anchor= (-0.25, 0.40, 0.5,0.5))
###############################################################################
###Polynomial-Kernal PCA
###############################################################################
###Conducting polynomial-KPCA (parameters chosen via grid search)
basestats_kpca_poly = decomp.KernelPCA(kernel="poly",
                                       gamma=15,
                                       degree=3)
basestats_kpca_poly.fit(pokemon_stats_scaled)
###Base Stats polynomial-KPCA Screeplot
# NOTE(review): KernelPCA.lambdas_ was renamed eigenvalues_ in newer
# scikit-learn releases -- confirm against the pinned sklearn version.
basestats_kpca_poly_exp_var = [(sum(basestats_kpca_poly.lambdas_[:i])/
                                sum(basestats_kpca_poly.lambdas_)) for i in
                               range(1, len(basestats_kpca_poly.lambdas_)+1)]
basestats_kpca_poly_screeplot_df = pd.DataFrame(columns=['number_of_components',
                                                         'perc_of_var_explained'])
basestats_kpca_poly_screeplot_df['number_of_components'] = range(1,len(basestats_kpca_poly.lambdas_)+1)
basestats_kpca_poly_screeplot_df['perc_of_var_explained'] = basestats_kpca_poly_exp_var
basestats_kpca_poly_screeplot1 = (p9.ggplot(data=basestats_kpca_poly_screeplot_df,
                                            mapping=p9.aes(x='number_of_components',
                                                           y='perc_of_var_explained')) +
                                  p9.geoms.geom_point() +
                                  p9.geoms.geom_line() +
                                  p9.theme_bw() +
                                  p9.labs(title='Proportion of Variance Explained (Poly-KPCA)',
                                          x='Number of Principal Components Used',
                                          y='Percentage of Variance') +
                                  p9.ylim(0, 1))
print(basestats_kpca_poly_screeplot1)
###Base Stats polynomial-KPCA Screeplot - a closer look
basestats_kpca_poly_screeplot2 = (basestats_kpca_poly_screeplot1 +
                                  p9.scale_x_continuous(limits=[1,11],
                                                        breaks=range(1,11)))
print(basestats_kpca_poly_screeplot2)
###Transforming the base stats data via polynomial-KPCA and projecting to 2D
basestats_kpca_poly_column_names = ['PC'+str(i) for i in
                                    range(1, len(basestats_kpca_poly.lambdas_)+1)]
basestats_kPCscores_poly = pd.DataFrame(data=basestats_kpca_poly.transform(pokemon_stats_scaled),
                                        columns=basestats_kpca_poly_column_names)
basestats_kPCscores_poly['is_legendary'] = is_legendary
basestats_kPCscores_poly_plot = (p9.ggplot(data=basestats_kPCscores_poly,
                                           mapping=p9.aes(x='PC1',
                                                          y='PC2')) +
                                 p9.geoms.geom_point(p9.aes(color='factor(is_legendary)'),
                                                     alpha=0.30) +
                                 p9.theme_bw() +
                                 p9.labs(title='2D Projection of Base Stats via Poly-KPCA',
                                         x='PC1',
                                         y='PC2') +
                                 p9.scale_color_discrete(name='Class',
                                                         labels=['Non-legendary',
                                                                 'Legendary']))
print(basestats_kPCscores_poly_plot)
###3D Representation
# FIX: plt.figure must be called to create a Figure.
fig = plt.figure()
ax = plt.axes(projection='3d')
# FIX: .ix was removed from pandas; .loc is the equivalent selection.
ax.scatter3D(basestats_kPCscores_poly.loc[legendary_index, ['PC1']],
             basestats_kPCscores_poly.loc[legendary_index, ['PC2']],
             basestats_kPCscores_poly.loc[legendary_index, ['PC3']],
             c='b',
             alpha=0.3,
             label='Legendary')
ax.scatter3D(basestats_kPCscores_poly.loc[non_legendary_index, ['PC1']],
             basestats_kPCscores_poly.loc[non_legendary_index, ['PC2']],
             basestats_kPCscores_poly.loc[non_legendary_index, ['PC3']],
             c='r',
             alpha=0.3,
             label='Non-legendary')
ax.set_title('3D Projection of Base Stats via Poly-KPCA', fontweight='bold')
ax.set_xlabel('PC1', fontweight='bold')
ax.set_ylabel('PC2', fontweight='bold')
ax.set_zlabel('PC3', fontweight='bold')
ax.legend(loc='best', bbox_to_anchor= (-0.25, 0.40, 0.5,0.5))
###############################################################################
###RBF-Kernal PCA
###############################################################################
###Conducting RBF-KPCA (parameters chosen via grid search)
basestats_kpca_RBF = decomp.KernelPCA(kernel="rbf",
                                      gamma=13)
basestats_kpca_RBF.fit(pokemon_stats_scaled)
###Base Stats RBF-KPCA Screeplot
# NOTE(review): KernelPCA.lambdas_ was renamed eigenvalues_ in newer
# scikit-learn releases -- confirm against the pinned sklearn version.
basestats_kpca_RBF_exp_var = [(sum(basestats_kpca_RBF.lambdas_[:i])/
                               sum(basestats_kpca_RBF.lambdas_)) for i in
                              range(1, len(basestats_kpca_RBF.lambdas_)+1)]
basestats_kpca_RBF_screeplot_df = pd.DataFrame(columns=['number_of_components',
                                                        'perc_of_var_explained'])
basestats_kpca_RBF_screeplot_df['number_of_components'] = range(1,len(basestats_kpca_RBF.lambdas_)+1)
basestats_kpca_RBF_screeplot_df['perc_of_var_explained'] = basestats_kpca_RBF_exp_var
basestats_kpca_RBF_screeplot1 = (p9.ggplot(data=basestats_kpca_RBF_screeplot_df,
                                           mapping=p9.aes(x='number_of_components',
                                                          y='perc_of_var_explained')) +
                                 p9.geoms.geom_point() +
                                 p9.geoms.geom_line() +
                                 p9.theme_bw() +
                                 p9.labs(title='Proportion of Variance Explained (RBF-PCA)',
                                         x='Number of Principal Components Used',
                                         y='Percentage of Variance') +
                                 p9.ylim(0, 1))
print(basestats_kpca_RBF_screeplot1)
###Base Stats RBF-KPCA Screeplot - a closer look
basestats_kpca_RBF_screeplot2 = (basestats_kpca_RBF_screeplot1 +
                                 p9.scale_x_continuous(limits=[1,201]))
print(basestats_kpca_RBF_screeplot2)
###Transforming the base stats data via RBF-KPCA and projecting to 2D
basestats_kpca_RBF_column_names = ['PC'+str(i) for i in
                                   range(1, len(basestats_kpca_RBF.lambdas_)+1)]
basestats_kPCscores_RBF = pd.DataFrame(data=basestats_kpca_RBF.transform(pokemon_stats_scaled),
                                       columns=basestats_kpca_RBF_column_names)
basestats_kPCscores_RBF['is_legendary'] = is_legendary
basestats_kPCscores_RBF_plot = (p9.ggplot(data=basestats_kPCscores_RBF,
                                          mapping=p9.aes(x='PC1',
                                                         y='PC2')) +
                                p9.geoms.geom_point(p9.aes(color='factor(is_legendary)'),
                                                    alpha=0.30) +
                                p9.theme_bw() +
                                p9.labs(title='2D Projection of Base Stats via RBF-KPCA',
                                        x='PC1',
                                        y='PC2') +
                                p9.scale_color_discrete(name='Class',
                                                        labels=['Non-legendary',
                                                                'Legendary']))
print(basestats_kPCscores_RBF_plot)
###3D Representation
# FIX: plt.figure must be called to create a Figure.
fig = plt.figure()
ax = plt.axes(projection='3d')
# FIX: .ix was removed from pandas; .loc is the equivalent selection.
ax.scatter3D(basestats_kPCscores_RBF.loc[legendary_index, ['PC1']],
             basestats_kPCscores_RBF.loc[legendary_index, ['PC2']],
             basestats_kPCscores_RBF.loc[legendary_index, ['PC3']],
             c='b',
             alpha=0.3,
             label='Legendary')
ax.scatter3D(basestats_kPCscores_RBF.loc[non_legendary_index, ['PC1']],
             basestats_kPCscores_RBF.loc[non_legendary_index, ['PC2']],
             basestats_kPCscores_RBF.loc[non_legendary_index, ['PC3']],
             c='r',
             alpha=0.3,
             label='Non-legendary')
ax.set_title('3D Projection of Base Stats via RBF-KPCA', fontweight='bold')
ax.set_xlabel('PC1', fontweight='bold')
ax.set_ylabel('PC2', fontweight='bold')
ax.set_zlabel('PC3', fontweight='bold')
ax.legend(loc='best', bbox_to_anchor= (-0.25, 0.40, 0.5,0.5))
###############################################################################
###Cosine-Kernal PCA
###############################################################################
###Conducting Cosine-KPCA (parameters chosen via grid search)
basestats_kpca_cosine = decomp.KernelPCA(kernel="cosine",
                                         gamma=15)
basestats_kpca_cosine.fit(pokemon_stats_scaled)
###Base Stats Cosine-KPCA Screeplot
# NOTE(review): KernelPCA.lambdas_ was renamed eigenvalues_ in newer
# scikit-learn releases -- confirm against the pinned sklearn version.
basestats_kpca_cosine_exp_var = [(sum(basestats_kpca_cosine.lambdas_[:i])/
                                  sum(basestats_kpca_cosine.lambdas_)) for i in
                                 range(1, len(basestats_kpca_cosine.lambdas_)+1)]
basestats_kpca_cosine_screeplot_df = pd.DataFrame(columns=['number_of_components',
                                                           'perc_of_var_explained'])
basestats_kpca_cosine_screeplot_df['number_of_components'] = range(1,len(basestats_kpca_cosine.lambdas_)+1)
basestats_kpca_cosine_screeplot_df['perc_of_var_explained'] = basestats_kpca_cosine_exp_var
basestats_kpca_cosine_screeplot1 = (p9.ggplot(data=basestats_kpca_cosine_screeplot_df,
                                              mapping=p9.aes(x='number_of_components',
                                                             y='perc_of_var_explained')) +
                                    p9.geoms.geom_point() +
                                    p9.geoms.geom_line() +
                                    p9.theme_bw() +
                                    p9.labs(title='Proportion of Variance Explained (Cosine-PCA)',
                                            x='Number of Principal Components Used',
                                            y='Percentage of Variance') +
                                    p9.ylim(0, 1))
print(basestats_kpca_cosine_screeplot1)
###Base Stats Cosine-KPCA Screeplot - a closer look
basestats_kpca_cosine_screeplot2 = (basestats_kpca_cosine_screeplot1 +
                                    p9.scale_x_continuous(limits=[1,11],
                                                          breaks=range(1,11)))
print(basestats_kpca_cosine_screeplot2)
###Transforming the base stats data via Cosine-KPCA and projecting to 2D
basestats_kpca_cosine_column_names = ['PC'+str(i) for i in
                                      range(1, len(basestats_kpca_cosine.lambdas_)+1)]
basestats_kPCscores_cosine = pd.DataFrame(data=basestats_kpca_cosine.transform(pokemon_stats_scaled),
                                          columns=basestats_kpca_cosine_column_names)
basestats_kPCscores_cosine['is_legendary'] = is_legendary
basestats_kPCscores_cosine_plot = (p9.ggplot(data=basestats_kPCscores_cosine,
                                             mapping=p9.aes(x='PC1',
                                                            y='PC2')) +
                                   p9.geoms.geom_point(p9.aes(color='factor(is_legendary)'),
                                                       alpha=0.30) +
                                   p9.theme_bw() +
                                   p9.labs(title='2D Projection of Base Stats via Cosine-KPCA',
                                           x='PC1',
                                           y='PC2') +
                                   p9.scale_color_discrete(name='Class',
                                                           labels=['Non-legendary',
                                                                   'Legendary']))
print(basestats_kPCscores_cosine_plot)
###3D Representation
# FIX: plt.figure must be called to create a Figure.
fig = plt.figure()
ax = plt.axes(projection='3d')
# FIX: .ix was removed from pandas; .loc is the equivalent selection.
ax.scatter3D(basestats_kPCscores_cosine.loc[legendary_index, ['PC1']],
             basestats_kPCscores_cosine.loc[legendary_index, ['PC2']],
             basestats_kPCscores_cosine.loc[legendary_index, ['PC3']],
             c='b',
             alpha=0.3,
             label='Legendary')
# CONSISTENCY FIX: legend label normalized to 'Non-legendary' to match every
# other plot in this script (was 'Non-Legendary').
ax.scatter3D(basestats_kPCscores_cosine.loc[non_legendary_index, ['PC1']],
             basestats_kPCscores_cosine.loc[non_legendary_index, ['PC2']],
             basestats_kPCscores_cosine.loc[non_legendary_index, ['PC3']],
             c='r',
             alpha=0.3,
             label='Non-legendary')
ax.set_title('3D Projection of Base Stats via Cosine-KPCA', fontweight='bold')
ax.set_xlabel('PC1', fontweight='bold')
ax.set_ylabel('PC2', fontweight='bold')
ax.set_zlabel('PC3', fontweight='bold')
ax.legend(loc='best', bbox_to_anchor= (-0.25, 0.40, 0.5,0.5))
###############################################################################
###Isomap
###############################################################################
###Conducting the isomapping (parameters chosen via grid search)
basestats_isomap = mani.Isomap(n_components=3, n_neighbors=3)
basestats_isomap.fit(pokemon_stats_scaled)
###Projecting the data to 2D and plotting
basestats_isomap_df = pd.DataFrame(data=basestats_isomap.transform(pokemon_stats_scaled),
                                   columns=['1D', '2D', '3D'])
basestats_isomap_df['is_legendary'] = is_legendary
# NOTE(review): plotnine evaluates aes() strings as expressions; column names
# starting with a digit ('1D', '2D') may fail to parse on some plotnine
# versions -- confirm, and rename the columns if needed.
basestats_isomap_plot = (p9.ggplot(data=basestats_isomap_df,
                                   mapping=p9.aes(x='1D',
                                                  y='2D')) +
                         p9.geoms.geom_point(p9.aes(color='factor(is_legendary)'),
                                             alpha=0.30) +
                         p9.theme_bw() +
                         p9.scale_color_discrete(name='Class',
                                                 labels=['Non-legendary',
                                                         'Legendary']) +
                         p9.labs(title='2D Projection of Base Stats via Isomap'))
print(basestats_isomap_plot)
###3D Representation
# FIX: plt.figure must be called to create a Figure.
fig = plt.figure()
ax = plt.axes(projection='3d')
# FIX: .ix was removed from pandas; .loc is the equivalent selection.
ax.scatter3D(basestats_isomap_df.loc[legendary_index, ['1D']],
             basestats_isomap_df.loc[legendary_index, ['2D']],
             basestats_isomap_df.loc[legendary_index, ['3D']],
             c='b',
             alpha=0.3,
             label='Legendary')
ax.scatter3D(basestats_isomap_df.loc[non_legendary_index, ['1D']],
             basestats_isomap_df.loc[non_legendary_index, ['2D']],
             basestats_isomap_df.loc[non_legendary_index, ['3D']],
             c='r',
             alpha=0.3,
             label='Non-legendary')
ax.set_title('3D Projection of Base Stats via Isomap', fontweight='bold')
ax.set_xlabel('1D', fontweight='bold')
ax.set_ylabel('2D', fontweight='bold')
ax.set_zlabel('3D', fontweight='bold')
ax.legend(loc='best', bbox_to_anchor= (-0.25, 0.40, 0.5,0.5))
###PCA - use first three
###Poly KPCA - use first four
###RBF KPCA - use first thirty
###Cosine - use first five
###Isomap - use all three | crodriguez0874/Legendary-Pokemon | Feature_Extraction/Feature_Extraction.py | Feature_Extraction.py | py | 20,921 | python | en | code | 0 | github-code | 13 |
27542342545 | """Contains transformer configuration information
"""
# The version number of the transformer
TRANSFORMER_VERSION = '2.1'
# The transformer description
TRANSFORMER_DESCRIPTION = 'PLY to LAS conversion'
# Short name of the transformer
TRANSFORMER_NAME = 'terra.3dscanner.ply2las'
# The sensor associated with the transformer
TRANSFORMER_SENSOR = 'scanner3DTop'
# The transformer type (eg: 'rgbmask', 'plotclipper')
TRANSFORMER_TYPE = 'ply2las'
# The name of the author of the extractor
AUTHOR_NAME = 'Max Burnette'
# The email of the author of the extractor
AUTHOR_EMAIL = 'mburnet2@illinois.edu'
# Contributors to this transformer.
# FIX: the original name is misspelled; it is kept for backward compatibility
# with any consumer reading 'CONTRUBUTORS', and a correctly spelled alias is
# provided for new code.
CONTRUBUTORS = ['Zongyang Li', 'Solmaz Hajmohammadi']
CONTRIBUTORS = CONTRUBUTORS
# Repository URI of where the source code lives
REPOSITORY = 'https://github.com/AgPipeline/transformer-ply2las'
| AgPipeline/transformer-ply2las | configuration.py | configuration.py | py | 805 | python | en | code | 0 | github-code | 13 |
73210186898 | from fractions import Fraction
from sys import stdin, stdout
def main():
    """Read thermostat scales from stdin and answer conversion queries.

    The first line gives n (number of scales) and q (number of queries).
    Each of the next n lines gives a scale's two reference points; each of
    the q query lines asks to convert reading c on scale a to scale b.
    Every answer is printed as an exact rational, with an explicit '/1'
    denominator when the result is an integer.
    """
    header = stdin.readline().split(' ')
    scale_count, query_count = int(header[0]), int(header[1])
    scales = {}
    for index in range(1, scale_count + 1):
        parts = stdin.readline().split(' ')
        scales[index] = (int(parts[0]), int(parts[1]))
    for _ in range(query_count):
        parts = stdin.readline().split(' ')
        src, dst, reading = int(parts[0]), int(parts[1]), int(parts[2])
        src_lo, src_hi = scales[src]
        dst_lo, dst_hi = scales[dst]
        # Position of the reading on the source scale, as an exact percentage.
        percent = Fraction(reading - src_lo, src_hi - src_lo) * 100
        answer = dst_lo + percent * (dst_hi - dst_lo) / 100
        if answer == 0:
            stdout.write('0/1\n')
        elif answer.denominator == 1:
            stdout.write('{}/1\n'.format(answer))
        else:
            stdout.write(str(answer) + '\n')
if __name__ == '__main__':
main()
| heiseish/Competitive-Programming | kattis/set9/thermostat.py | thermostat.py | py | 632 | python | en | code | 5 | github-code | 13 |
2346261073 | import collections
from babel.messages import pofile
import cStringIO
import copy
import errno
import os
import shutil
import tempfile
import zipfile
# Maps an external (tool-facing) locale identifier to the list of Babel
# locale identifiers that translations should be written under; a single
# external id may fan out to several Babel locales.
default_external_to_babel_locales = collections.defaultdict(list)
# Built-in one-to-one mappings from common external locale identifiers to
# fully-qualified Babel locale names.
builtin_locales = {
    'en-GB': 'en_GB',
    'es-419': 'es_419',
    'fr-CA': 'fr_CA',
    'iw': 'he_IL',
    'no': 'nb_NO',
    'pt-BR': 'pt_BR',
    'pt-PT': 'pt_PT',
    'zh-CN': 'zh_Hans_CN',
    'zh-HK': 'zh_Hant_HK',
    'zh-TW': 'zh_Hant_TW',
}
# Seed the default mapping with the built-ins (Python 2 dict iteration).
for key, value in builtin_locales.iteritems():
    default_external_to_babel_locales[key].append(value)
def _mkdir(path):
    """Create *path* (including parents), tolerating an existing directory.

    Any other OSError -- including the path existing as a regular file --
    is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
class Error(Exception):
    """Base error raised for translation-import failures in this module."""
class Importer(object):
    """Imports translation catalogs (.po files) into a pod.

    Accepts a .zip archive, a single .po file, or a directory laid out as
    one sub-directory per locale.
    """
    def __init__(self, pod):
        # pod: the pod whose translations/ tree receives imported catalogs.
        self.pod = pod
    def _validate_path(self, path):
        # Fail fast with a module-level Error when path is not a file.
        if not os.path.isfile(path):
            raise Error('Not found: {}'.format(path))
    def import_path(self, path, locale=None):
        """Dispatch on the path type: .zip archive, .po file, or directory.

        ``locale`` is only needed for a bare .po file; archives and
        directories encode the locale in their directory structure.
        """
        if path.endswith('.zip'):
            self._validate_path(path)
            self.import_zip_file(path)
        elif path.endswith('.po'):
            self._validate_path(path)
            self.import_file(locale, path)
        elif os.path.isdir(path):
            self.import_dir(path)
        else:
            raise Error('Must import a .zip file, .po file, or directory.')
    def import_zip_file(self, zip_path):
        """Extract a zip archive to a temp dir and import it as a directory."""
        # FIX: mkdtemp() must run before the try block -- if it raised inside,
        # the finally clause would hit a NameError on temp_dir_path.
        temp_dir_path = tempfile.mkdtemp()
        try:
            with zipfile.ZipFile(zip_path, 'r') as zip_file:
                zip_file.extractall(temp_dir_path)
            self.import_dir(temp_dir_path)
        finally:
            shutil.rmtree(temp_dir_path)
    def import_dir(self, dir_path):
        """Import every <locale>/<name>.po file found under dir_path."""
        # TODO(jeremydw): Allow a custom syntax for translation importers.
        # Currently, assume one directory per locale.
        for locale in os.listdir(dir_path):
            locale_dir = os.path.join(dir_path, locale)
            # Skip hidden entries and stray top-level files.
            if locale.startswith('.') or os.path.isfile(locale_dir):
                continue
            for basename in os.listdir(locale_dir):
                po_path = os.path.join(locale_dir, basename)
                if basename.endswith('.po'):
                    self.import_file(locale, po_path)
                else:
                    self.pod.logger.warning('Skipping: {}'.format(po_path))
    def import_file(self, locale, po_path):
        """Import a single .po file for the given external locale."""
        if not os.path.exists(po_path):
            raise Error('Couldn\'t find PO file: {}'.format(po_path))
        content = open(po_path).read()
        return self.import_content(locale, content)
    def import_content(self, locale, content):
        """Merge raw .po content into the pod catalog(s) for a locale.

        The external locale may map to several Babel locales (built-in
        mappings plus podspec 'import_as' overrides); the content is merged
        into each target catalog, with imported strings overwriting existing
        translations for matching message ids.
        """
        if locale is None:
            raise Error('Must specify locale.')
        # Leverage user-defined locale identifiers when importing translations.
        external_to_babel_locales = copy.deepcopy(
            default_external_to_babel_locales)
        if self.pod.podspec.localization:
            if 'import_as' in self.pod.podspec.localization:
                import_as = self.pod.podspec.localization['import_as']
                for external_locale, babel_locales in import_as.iteritems():
                    for babel_locale in babel_locales:
                        external_to_babel_locales[external_locale].append(
                            babel_locale)
        # Unmapped locales import under their own identifier.
        babel_locales = external_to_babel_locales.get(locale, [locale])
        for babel_locale in babel_locales:
            pod_translations_dir = os.path.join(
                'translations', babel_locale, 'LC_MESSAGES')
            pod_po_path = os.path.join(pod_translations_dir, 'messages.po')
            if self.pod.file_exists(pod_po_path):
                # Merge into the existing catalog; imported messages win.
                existing_po_file = self.pod.open_file(pod_po_path)
                existing_catalog = pofile.read_po(existing_po_file, babel_locale)
                po_file_to_merge = cStringIO.StringIO()
                po_file_to_merge.write(content)
                po_file_to_merge.seek(0)
                catalog_to_merge = pofile.read_po(po_file_to_merge, babel_locale)
                for message in catalog_to_merge:
                    if message.id not in existing_catalog:
                        existing_catalog[message.id] = message
                    else:
                        existing_catalog[message.id].string = message.string
                existing_po_file = self.pod.open_file(pod_po_path, mode='w')
                pofile.write_po(existing_po_file, existing_catalog, width=80,
                                sort_output=True, sort_by_file=True)
                text = 'Imported {} translations: {}'
                message = text.format(len(catalog_to_merge), babel_locale)
                self.pod.logger.info(message)
            else:
                # No existing catalog: write the imported content verbatim.
                self.pod.write_file(pod_po_path, content)
                message = 'Imported new catalog: {}'.format(babel_locale)
                self.pod.logger.info(message)
| Yzupnick/grow | grow/pods/importers.py | importers.py | py | 4,929 | python | en | code | null | github-code | 13 |
20533380765 | import crypt
import base64
import json
import datetime
import logging
import uuid
from testrunner import testcase
from testutils import mock
from conary import conarycfg
from upsrv import config, app, db
from upsrv.views import records
class DatabaseTest(testcase.TestCaseWithWorkDir):
    """Schema-migration test for the upsrv download database."""
    def testMigrate(self):
        """Seed a version (0, 1) schema and verify migration to current.

        Creates the databaseversion table by hand at the oldest version,
        runs the schema updater, and asserts the recorded version matches
        the migration module's current Version tuple.
        """
        self.cfg = config.UpsrvConfig()
        # Use an on-disk sqlite database inside the per-test work dir.
        self.cfg.downloadDB = "sqlite:///%s/%s" % (self.workDir, "upsrv.sqlite")
        self.wcfg = app.configure(self.cfg)
        maker = self.wcfg.registry.settings['db.sessionmaker']
        # New maker, without extensions, we don't need transaction
        # management
        makerArgs = maker.kw.copy()
        del makerArgs['extension']
        maker = maker.__class__(**makerArgs)
        conn = maker()
        conn.execute("""
CREATE TABLE databaseversion (
    version integer NOT NULL,
    minor integer NOT NULL,
    PRIMARY KEY ( version, minor )
)""")
        conn.execute("""
INSERT INTO databaseversion (version, minor)
VALUES (0, 1)
""")
        # Run the migration path from (0, 1) up to the current version.
        db.schema.updateSchema(conn)
        conn.commit()
        versions = [ x for x in conn.execute("select version, minor from databaseversion") ]
        self.assertEquals(versions, [ db.migrate.Version ])
        conn.close()
class RecordTest(testcase.TestCaseWithWorkDir):
DefaultCreatedTime = '2013-12-11T10:09:08.080605'
DefaultUuid = '00000000-0000-0000-0000-000000000000'
DefaultSystemId = 'systemid0'
DefaultEntitlements = [
('a', 'aaa'),
('*', 'bbb'),
]
DefaultProducers = {
'conary-system-model' : {
'attributes' : {
'content-type' : 'text/plain',
'version' : '1',
},
# The system model has some unversioned artifacts
'data' : '''\
search "foo=cny.tv@ns:1/1-2-3"
install "group-university-appliance=university.cny.sas.com@sas:university-3p-staging/1-2-3[~!xen is: x86(i486,i586,i686) x86_64]"
install bar
update baz=/invalid.version.string@ns:1
''',
},
'system-information' : {
'attributes' : {
'content-type' : 'application/json',
'version' : '1',
},
'data' : {
'bootTime' : '2012-11-10T09:08:07',
'memory' : {
'MemFree' : '4142 kB',
'MemTotal' : '1020128 kB',
'SwapFree' : '4344 kB',
'SwapTotal' : '1048568 kB',
},
},
},
'string-encoded-json' : {
'attributes' : {
'content-type' : 'application/json',
'version' : '1',
},
'data' : json.dumps(dict(a=1, b=2)),
},
}
Username = 'records-reader'
Password = 'sikrit'
def setUp(self):
testcase.TestCaseWithWorkDir.setUp(self)
# Delete all root handlers
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
logging.basicConfig(level=logging.DEBUG)
self.logHandler = logging.root.handlers[0]
mock.mockMethod(self.logHandler.handle)
self.cfg = config.UpsrvConfig()
self.cfg.downloadDB = "sqlite:///%s/%s" % (self.workDir, "upsrv.sqlite")
salt = file("/dev/urandom").read(8).encode('hex')
self.cfg.configLine('password %s %s' % (
self.Username, crypt.crypt(self.Password, '$1$%s$' % salt)))
self.wcfg = app.configure(self.cfg)
maker = self.wcfg.registry.settings['db.sessionmaker']
# New maker, without extensions, we don't need transaction
# management
makerArgs = maker.kw.copy()
del makerArgs['extension']
maker = maker.__class__(**makerArgs)
conn = maker()
db.schema.updateSchema(conn)
conn.commit()
self.conn = conn
self.app = self.wcfg.make_wsgi_app()
# Mock the conary config object
self.conaryCfg = conarycfg.ConaryConfiguration(False)
self.conaryCfg.root = "%s/%s" % (self.workDir, "__root__")
mock.mock(conarycfg, 'ConaryConfiguration', self.conaryCfg)
def tearDown(self):
mock.unmockAll()
testcase.TestCaseWithWorkDir.tearDown(self)
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
logging.root.setLevel(logging.WARNING)
def _getLoggingCalls(self):
logEntries = [ (x[0][0].name, x[0][0].msg, x[0][0].args)
for x in self.logHandler.handle._mock.calls ]
return logEntries
def _resetLoggingCalls(self):
del self.logHandler.handle._mock.calls[:]
def _resetRecords(self):
self.conn.execute("delete from records")
self.conn.commit()
def _req(self, path, method='GET', entitlements=None, headers=None, body=None):
headers = headers or {}
if entitlements:
ents = " ".join("%s %s" % (x[0], base64.b64encode(x[1]))
for x in entitlements)
headers['X-Conary-Entitlement'] = ents
req = app.Request.blank(path, method=method, headers=headers or {},
environ=dict(REMOTE_ADDR='10.11.12.13'))
req.method = method
req.headers.update(headers or {})
req.body = body or ''
req.cfg = self.cfg
req._conaryClient = mock.MockObject()
mock.mockMethod(req.getConaryClient, returnValue=req._conaryClient)
return req
@classmethod
def _R(cls, uuid=None, system_id=None, producers=None,
created_time=None, updated_time=None, **kwargs):
uuid = uuid or cls.DefaultUuid
system_id = system_id or cls.DefaultSystemId
created_time = created_time or cls.DefaultCreatedTime
updated_time = updated_time or '1980-01-01T00:00:00.000000'
producers = producers or cls.DefaultProducers
return dict(uuid=uuid, system_id=system_id,
version=1, producers = producers,
created_time=created_time, updated_time=updated_time, **kwargs)
def testRecordCreate(self):
# No entitlement
resp = self._newRecord(entitlements=None)
self.assertEquals(resp.status_code, 401)
logEntries = self._getLoggingCalls()
self.assertEquals(len(logEntries), 5)
self.assertEquals(logEntries[3], ('upsrv.views.records', "Missing auth header `%s' from %s", ('X-Conary-Entitlement', '10.11.12.13')))
self._resetLoggingCalls()
resp = self._newRecord(
producers={
'system-information' : self.DefaultProducers['system-information']})
self.assertEquals(resp.status_code, 400)
logEntries = self._getLoggingCalls()
self.assertEquals(len(logEntries), 4)
self.assertEquals(logEntries[1], ('upsrv.views.records', 'Missing system model from %s', ('10.11.12.13',)))
self._resetLoggingCalls()
# Correct record
resp = self._newRecord()
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.json_body['entitlement_valid'], True)
self.assertEquals(resp.json_body['entitlements_json'],
'[["a", "aaa"], ["*", "bbb"]]')
now = datetime.datetime.utcnow()
# Decode the json coming back
record = json.loads(resp.body)
self.assertEquals(record['uuid'], self.DefaultUuid)
self.assertEquals(record['created_time'],
self.DefaultCreatedTime + "+00:00")
self.assertEquals(record['client_address'], '10.11.12.13')
req = resp.request
allRecords = records.records_view(req)
self.assertEquals(allRecords.status_code, 403)
# Let's add an auth header, with a bad username
req.headers['Authorization'] = 'Basic %s' % base64.b64encode(
'{username}:{password}'.format(username='faaaaake',
password=self.Password))
allRecords = records.records_view(req)
self.assertEquals(allRecords.status_code, 403)
# Let's add an auth header, with a good username and a bad password
req.headers['Authorization'] = 'Basic %s' % base64.b64encode(
'{username}:{password}'.format(username=self.Username,
password='faaaaaaake'))
allRecords = records.records_view(req)
self.assertEquals(allRecords.status_code, 403)
# Let's add an auth header, with a good username/password
req.headers['Authorization'] = 'Basic %s' % base64.b64encode(
'{username}:{password}'.format(username=self.Username,
password=self.Password))
# Make sure the record got persisted correctly
allRecords = records.records_view(req)['records']
self.assertEquals([ x['entitlement_valid'] for x in allRecords ],
[ True ])
self.assertEquals([ x['entitlements_json'] for x in allRecords ],
[ '[["a", "aaa"], ["*", "bbb"]]' ])
rec = allRecords[0]
self.assertEquals(rec['uuid'], self.DefaultUuid)
# Make sure updated_time got set by the server
rectime = datetime.datetime.strptime(rec['updated_time'],
"%Y-%m-%dT%H:%M:%S.%f")
delta = now - rectime
totalSeconds = 86400 * delta.days + delta.seconds + delta.microseconds / 1e6
self.assertTrue(0 <= totalSeconds)
self.assertTrue(totalSeconds < 2)
self.assertEquals(req.getConaryClient._mock.popCall(),
((), (('entitlements', ['aaa', 'bbb']),)))
self.assertEquals(req.getConaryClient._mock.calls, [])
# Remove all records
self._resetLoggingCalls()
self._resetRecords()
# Same deal, but make findTroves raise an exception
_calls = []
_exc = records.repoerrors.TroveNotFound('blah')
def fakeFindTroves(label, troves, *args, **kwargs):
_calls.append((label, troves, args, kwargs))
raise _exc
req._conaryClient.repos._mock.set(findTroves=fakeFindTroves)
resp = self.app.invoke_subrequest(req, use_tweens=True)
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp.json_body['entitlement_valid'], False)
self.assertEquals(resp.json_body['entitlements_json'],
'[["a", "aaa"], ["*", "bbb"]]')
logEntries = self._getLoggingCalls()
self.assertEquals(len(logEntries), 5)
self.assertEquals(logEntries[2],
('upsrv.views.records', '%s: bad entitlements %s for system model %s: %s',
('10.11.12.13', [('a', 'aaa'), ('*', 'bbb')],
self.DefaultProducers['conary-system-model']['data'],
_exc)))
self.assertEquals(len(_calls), 1)
self.assertEquals([str(x) for x in _calls[0][1]],
['foo=cny.tv@ns:1/1-2-3', 'group-university-appliance=university.cny.sas.com@sas:university-3p-staging/1-2-3[~!xen is: x86(i486,i586,i686) x86_64]'])
# Make sure the record got persisted correctly
allRecords = records.records_view(req)['records']
self.assertEquals([ x['entitlement_valid'] for x in allRecords ],
[ False ])
self.assertEquals([ x['entitlements_json'] for x in allRecords ],
[ '[["a", "aaa"], ["*", "bbb"]]' ])
# Remove all records
self._resetLoggingCalls()
self._resetRecords()
# Same deal, but with a 1M provider payload
content = '0123456789abcdef' * 64 * 1024
resp = self._newRecord(
producers={
'system-information' : content})
self.assertEquals(resp.status_code, 413)
logEntries = self._getLoggingCalls()
self.assertEquals(len(logEntries), 4)
self.assertEquals(logEntries[2],
('upsrv.views.records', 'Request too large from %s: %s bytes',
('10.11.12.13', 1048797)))
def testDecodeEntitlements(self):
tests = [
("", []),
("a", []),
("a YWFh", [('a', 'aaa')]),
("a YWFh *", [('a', 'aaa')]),
("a YWFh * YmJi", [('a', 'aaa'), ('*', 'bbb')]),
]
for entString, expected in tests:
self.assertEqual(records._decodeEntitlements(entString), expected)
def _newRecord(self, **kwargs):
url = '/registration/v1/records'
entitlements = kwargs.pop('entitlements', self.DefaultEntitlements)
# Correct record
req = self._req(url, method='POST',
entitlements=entitlements,
body=json.dumps(self._R(**kwargs)))
resp = self.app.invoke_subrequest(req, use_tweens=True)
resp.request = req
return resp
    def testRecordFiltering(self):
        """Create 10 records, one per day, then exercise the records_view
        filter syntax: ge() on updated_time (with an URL-encoded timezone
        suffix), a malformed timestamp (400), and an and(ge,le) bracket
        (APPENG-3387)."""
        now = datetime.datetime.utcnow()
        # 10 records with created_time 10..1 days ago (oldest first).
        recordsData = [ (str(uuid.uuid4()), now - datetime.timedelta(days=10-i)) for i in range(10) ]
        Record = db.models.Record
        for recUuid, createdTime in recordsData:
            resp = self._newRecord(uuid=recUuid,
                    created_time=createdTime.isoformat())
            self.assertEqual(resp.status_code, 200)
            self.assertEquals(resp.json['uuid'], recUuid)
            rec = self.conn.query(Record).filter_by(uuid=recUuid).one()
            self.assertEquals(rec.created_time, createdTime)
            # Set updated_time
            rec.updated_time = rec.created_time + datetime.timedelta(minutes=5)
            self.conn.add(rec)
            self.conn.commit()
        req = resp.request
        req.headers['Authorization'] = 'Basic %s' % base64.b64encode(
                '{username}:{password}'.format(username=self.Username,
                    password=self.Password))
        # Make sure the record got persisted correctly
        allRecordsResp = records.records_view(req)
        self.assertEquals(allRecordsResp['count'], 10)
        allRecords = allRecordsResp['records']
        self.assertEqual(
                [(x['uuid'], x['created_time'], x['updated_time']) for x in allRecords],
                [(x[0], x[1].isoformat(), (x[1] + datetime.timedelta(minutes=5)).isoformat()) for x in recordsData])
        # Now build query
        # Make sure we accept timezone specs too (%2B is an url-encoded '+')
        uTimeStamp = (now - datetime.timedelta(days=1)).isoformat() + "%2B00:00"
        nreq = self._req(req.url + '?filter=ge(updated_time,"%s")' % uTimeStamp,
                headers=req.headers)
        resp = self.app.invoke_subrequest(nreq, use_tweens=True)
        self.assertEquals(resp.status_code, 200)
        self.assertEquals(resp.json['count'], 1)
        url = 'http://localhost/registration/v1/records?start=0&limit=100&filter=ge(updated_time,"{updatedTime}")'.format(updatedTime=uTimeStamp)
        expectedLinks = [
                ('self', url),
                ('first', url),
                ]
        self.assertEquals(
                [ (x['rel'], x['href']) for x in resp.json['links'] ],
                expectedLinks)
        # Make sure we don't explode on a bad time spec
        nreq = self._req(req.url + '?filter=ge(updated_time,"AAAA")',
                headers=req.headers)
        resp = self.app.invoke_subrequest(nreq, use_tweens=True)
        self.assertEquals(resp.status_code, 400)
        # APPENG-3387
        # update_time is 5 minutes after create_time, and we want to bracket
        # one minute before and after, so 4 and 6.
        t0 = (recordsData[6][1] + datetime.timedelta(minutes=4)).isoformat() + "%2B00:00"
        t1 = (recordsData[8][1] + datetime.timedelta(minutes=6)).isoformat() + "%2B00:00"
        url = 'http://localhost/registration/v1/records?start=0&limit=100&filter=and(ge(updated_time,"{0}"),le(updated_time,"{1}"))'.format(t0, t1)
        nreq = self._req(url, headers=req.headers)
        resp = self.app.invoke_subrequest(nreq, use_tweens=True)
        expectedLinks = [
                ('self', url),
                ('first', url),
                ]
        self.assertEquals(
                [ (x['rel'], x['href']) for x in resp.json['links'] ],
                expectedLinks)
        self.assertEquals(resp.json['count'], 3)
        self.assertEqual(
                [x['uuid'] for x in resp.json['records']],
                [recordsData[6][0], recordsData[7][0], recordsData[8][0]])
| sassoftware/rbm | upsrv_test/record_test.py | record_test.py | py | 16,697 | python | en | code | 1 | github-code | 13 |
#!/usr/bin/python3
# https://www.hackerrank.com/challenges/python-division/problem
# Task
# Read two integers and print two lines. The first line should contain integer division, a//b. The second line should contain float division, a/b.
# You don't need to perform any rounding or formatting operations.
def division(a, b):
    """Print the floor division a//b, then the true division a/b."""
    for result in (a // b, a / b):
        print(result)
if __name__ == '__main__':
    # Read the two integers from stdin, one per line, then print both results.
    dividend, divisor = (int(input()) for _ in range(2))
    division(dividend, divisor)
| nasaa0528/hackerRank | Python/Introduction/pythonDivision.py | pythonDivision.py | py | 431 | python | en | code | 0 | github-code | 13 |
import importlib
import operator
from django.utils.html import format_html
def tuple_index_elements(theset, elemnum=1):
    """Return a tuple of ``item[elemnum]`` for each item in *theset*.

    *theset* is any iterable of indexable items (tuples, lists, ...);
    *elemnum* (default 1) selects which element to pull from each item.
    """
    # Direct iteration replaces the old index-based operator.getitem/__len__
    # dance; also works for any iterable, not just sequences.
    return tuple(item[elemnum] for item in theset)
def admin_changelist_link(
    attr,
    short_description,
    empty_description="-",
    query_string=None
):
    """Decorator factory: render an admin detail-page field as a link to the
    changelist of a related model.

    attr (str): name of the related field on the object.
    short_description (str): column label shown in the admin.
    empty_description (str): text rendered when the related field is None.
    query_string (callable): optional; given the object, returns a query
        string to append to the link.

    The decorated method receives the related object and returns the link
    text.

    Usage:
        @admin_changelist_link('credit_card', _('Credit Card'))
        def credit_card_link(self, credit_card):
            return credit_card.name

    Decorator code found on https://medium.com/@hakibenita/things-you-must-know-about-django-admin-as-your-app-gets-bigger-6be0b0ee9614 in section "admin_changelist_link"
    """
    def wrap(func):
        def field_func(self, obj):
            related_obj = getattr(obj, attr)
            if related_obj is None:
                return empty_description
            href = '?' + query_string(obj) if query_string else ''
            return format_html('<a href="{}">{}</a>', href, func(self, related_obj))
        field_func.short_description = short_description
        field_func.allow_tags = True
        field_func.admin_order_field = attr
        return field_func
    return wrap
def field_is_empty(field):
    """Return True when *field* is falsy or stringifies to whitespace only.

    Note: any falsy value (None, '', 0, False) counts as empty, so a
    legitimate boolean False field is also reported empty.
    """
    # FIXME: field_is_empty not valid response on field bool=True !
    # Single expression replaces the old redundant str()!='' / strip()!=''
    # double check; behavior is unchanged.
    return not (field and str(field).strip())
# From: https://djangosnippets.org/snippets/822
from django.shortcuts import render_to_response
from django.template import RequestContext
def render_to(template):
    """
    Decorator for Django views that sends returned dict to render_to_response
    function with given template and RequestContext as context instance.

    If view doesn't return dict then decorator simply returns output.
    Additionally view can return two-tuple, which must contain dict as first
    element and string with template name as second. This string will
    override template name, given as parameter

    Parameters:
     - template: template name to use
    """
    from functools import wraps

    def renderer(func):
        @wraps(func)  # preserve the view's __name__/__doc__ for introspection
        def wrapper(request, *args, **kw):
            output = func(request, *args, **kw)
            if isinstance(output, (list, tuple)):
                # (context_dict, template_name): the tuple's template wins.
                return render_to_response(output[1], output[0], RequestContext(request))
            elif isinstance(output, dict):
                return render_to_response(template, output, RequestContext(request))
            # Any other return value (e.g. an HttpResponse) passes through.
            return output
        return wrapper
    return renderer
def funcname():
    """Return the name of the calling function, or "<unknown>" on failure."""
    import sys
    try:
        # One frame up from funcname() itself is the caller.
        return sys._getframe(1).f_code.co_name
    except (IndexError, TypeError, AttributeError):
        # sys._getframe is CPython-specific; fail soft. The old version
        # returned from a `finally` block, which silently swallowed any
        # other exception as well.
        return "<unknown>"
# def get_required_fields(model_meta):
# """subset of get_fields where blank == False"""
# fields - model_meta.get_fields()
# reqd_fields = [f for f in fields
# if hasattr(f, 'blank') and f.blank == False]
# return reqd_fields
| cometsong/jaxid_generator | generator/utils.py | utils.py | py | 3,910 | python | en | code | 2 | github-code | 13 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import blf
import bgl
import gpu
from gpu_extras.batch import batch_for_shader
from .box import UVPM3_Box
from .utils import get_prefs, in_debug_mode, print_backtrace
from .overlay import OverlayManager
def disable_box_rendering(self, context):
    """Property-update callback: switch off box overlay rendering in the add-on prefs."""
    prefs = get_prefs()
    prefs.box_rendering = False
class TextChunk:
    """A piece of overlay text paired with the color used to draw it."""
    def __init__(self, text, color):
        self.text = text
        self.color = color
class BoxRenderInfo:
    """Per-box render state: geometry, colors, optional label text and depth."""
    def __init__(self, glob_idx, box, color, text, text_line_num=0, z_coord=0.0):
        # glob_idx: index of this box in the access object's array (used for selection).
        self.glob_idx = glob_idx
        self.box = box
        self.color = color
        # Outline defaults to the fill color; callers may override it.
        self.outline_color = color
        self.text_chunks = []
        if text is not None:
            self.text_chunks.append(TextChunk(text, color))
        # text_line_num: vertical line offset applied when drawing the label.
        self.text_line_num = text_line_num
        # z_coord: depth used to order overlapping boxes and their text.
        self.z_coord = z_coord
class BoxRenderer(OverlayManager):
    """Draws UVPM3 boxes (filled outline quads plus an active-box wireframe)
    and their text labels in the Image Editor via Blender draw handlers."""
    def __init__(self, context, box_access):
        # box_access: a BoxRenderAccess implementation providing the boxes.
        self.last_pixel_viewsize = None
        self.__draw_handler = None
        self.prefs = get_prefs()
        self.line_width = self.prefs.box_render_line_width
        self.context = context
        self.box_access = box_access
        self.box_info_array = None
        self.active_box_info = None
        self.shader = gpu.shader.from_builtin('3D_FLAT_COLOR')
        self.batch = None
        self.line_batch = None
        self.update_coords()
        handler_args = (self, context)
        # POST_VIEW handler draws the geometry; the text callback is
        # registered by the OverlayManager base class below.
        self.__draw_handler = bpy.types.SpaceImageEditor.draw_handler_add(render_boxes_draw_callback, handler_args, 'WINDOW', 'POST_VIEW')
        super().__init__(context, render_boxes_draw_text_callback)
    def finish(self):
        """Unregister the draw handler; must be called before discarding."""
        if self.__draw_handler is not None:
            bpy.types.SpaceImageEditor.draw_handler_remove(self.__draw_handler, 'WINDOW')
        super().finish()
    def get_pixel_viewsize(self):
        """Size of one screen pixel expressed in view (UV) coordinates."""
        tmp_coords0 = self.context.region.view2d.region_to_view(0, 0)
        tmp_coords1 = self.context.region.view2d.region_to_view(1, 1)
        return abs(tmp_coords0[0] - tmp_coords1[0])
    def coords_update_needed(self, event):
        """True when the view zoom changed since the last batch rebuild."""
        pixel_viewsize_changed = self.last_pixel_viewsize is None or (self.last_pixel_viewsize != self.get_pixel_viewsize())
        # print(ret_value)
        return pixel_viewsize_changed
        # return (event.type in {'WHEELDOWNMOUSE', 'WHEELUPMOUSE'}) or (event.type == 'MIDDLEMOUSE' and event.ctrl)
        # return event.type != 'MOUSEMOVE'
    def update_box_info_array(self):
        """Refresh box_info_array / active_box_info from the access object."""
        self.box_info_array = None
        box_info_array, self.active_box_info = self.box_access.impl_box_info_array()
        if box_info_array is None:
            return
        # MUSTDO: workaround for text drawing order. Can we avoid this?
        self.box_info_array = sorted(box_info_array, key=lambda box_info: box_info.z_coord)
    def update_coords(self):
        """Rebuild the GPU batches: one TRIS batch of thin quads forming each
        box outline, plus a LINES batch outlining the active box."""
        self.prefs.boxes_dirty = False
        self.update_box_info_array()
        if self.box_info_array is None:
            return
        self.batch = None
        self.line_batch = None
        batch_coords = []
        batch_colors = []
        # Append the two triangles covering the rectangle (p1, p2) to the batch.
        def append_box_to_batch(box, z_coord, color):
            p1 = (box.p1_x, box.p1_y)
            p2 = (box.p2_x, box.p2_y)
            coords = [
                (p1[0], p1[1], z_coord),
                (p1[0], p2[1], z_coord),
                (p2[0], p2[1], z_coord),
                (p1[0], p1[1], z_coord),
                (p2[0], p1[1], z_coord),
                (p2[0], p2[1], z_coord)
            ]
            nonlocal batch_coords
            nonlocal batch_colors
            batch_coords += coords
            batch_colors += [color] * len(coords)
        # Draw an edge as a quad: offset one endpoint along the fixed axis
        # by the (view-space) line width to give the edge its thickness.
        def append_line_to_batch(p1, p2, fixed_coord, offset, z_coord, color):
            nonlocal append_box_to_batch
            p2 = [p2[0], p2[1]]
            p2[fixed_coord] += offset
            append_box_to_batch(UVPM3_Box(p1[0], p1[1], p2[0], p2[1]), z_coord, color)
        self.last_pixel_viewsize = self.get_pixel_viewsize()
        # Convert the preference line width (pixels) into view units.
        line_width = self.line_width * self.last_pixel_viewsize
        for box_info in self.box_info_array:
            min_coords = box_info.box.min_corner
            max_coords = box_info.box.max_corner
            # max_coords = tuple(max(min_coords[i], max_coords[i]-pixel_viewsize[i]) for i in range(2))
            z_coord = box_info.z_coord
            color = box_info.color
            p1 = (min_coords[0], min_coords[1])
            p2 = (min_coords[0], max_coords[1])
            p3 = (max_coords[0], max_coords[1])
            p4 = (max_coords[0], min_coords[1])
            append_line_to_batch(p1, p2, 0, line_width, z_coord, color)
            append_line_to_batch(p2, p3, 1, -line_width, z_coord, color)
            append_line_to_batch(p3, p4, 0, -line_width, z_coord, color)
            append_line_to_batch(p4, p1, 1, line_width, z_coord, color)
        self.batch = batch_for_shader(self.shader, 'TRIS', {"pos": batch_coords, "color": batch_colors})
        if self.active_box_info is not None:
            min_coords = self.active_box_info.box.min_corner
            max_coords = self.active_box_info.box.max_corner
            z_coord = self.active_box_info.z_coord
            p1 = (min_coords[0], min_coords[1], z_coord)
            p2 = (min_coords[0], max_coords[1], z_coord)
            p3 = (max_coords[0], max_coords[1], z_coord)
            p4 = (max_coords[0], min_coords[1], z_coord)
            line_coords = [
                p1, p2,
                p2, p3,
                p3, p4,
                p4, p1
            ]
            line_colors = [self.active_box_info.outline_color] * len(line_coords)
            self.line_batch = batch_for_shader(self.shader, 'LINES', {"pos": line_coords, "color": line_colors})
        self.context.area.tag_redraw()
def render_boxes_draw_callback(self, context):
    """POST_VIEW draw handler: draw the box outline batch (depth-tested) and
    the active-box wireframe batch. `self` is the BoxRenderer instance."""
    try:
        self.box_access.is_alive()
        self.shader.bind()
        bgl.glEnable(bgl.GL_DEPTH_TEST)
        if self.batch is not None:
            self.batch.draw(self.shader)
        bgl.glDisable(bgl.GL_DEPTH_TEST)
        if self.line_batch is not None:
            bgl.glLineWidth(1.0)
            self.line_batch.draw(self.shader)
    except Exception as ex:
        # Draw callbacks must never raise into Blender; log only in debug mode.
        if in_debug_mode(debug_lvl=2):
            print_backtrace(ex)
def render_boxes_draw_text_callback(self, context):
    """Overlay text draw handler: print each box's text chunks near its
    min corner, offsetting chunks horizontally so they read as one line.
    `self` is the BoxRenderer instance."""
    try:
        self.box_access.is_alive()
        self.callback_begin()
        # box_info_array, active_box_info = self.box_access.impl_box_info_array()
        if self.box_info_array is None:
            return
        # bgl.glEnable(bgl.GL_DEPTH_TEST)
        # blf.shadow(self.font_id, 0, 1, 1, 1, 1)
        # blf.enable(self.font_id, blf.SHADOW)
        for box_info in self.box_info_array:
            if len(box_info.text_chunks) == 0:
                continue
            # min_corner = box.min_corner
            text_view_coords = box_info.box.min_corner
            # Nudge the label away from the box corner (view/UV units).
            COORD_OFFSET = 0.05
            text_view_coords = (text_view_coords[0] + COORD_OFFSET, text_view_coords[1] + COORD_OFFSET)
            # if box_info is self.active_box_info:
            #     text = "[ {} ]".format(box_info.text)
            # else:
            #     text = box_info.text
            text_region_coords = self.context.region.view2d.view_to_region(text_view_coords[0], text_view_coords[1], clip=False)
            text_region_coords = [text_region_coords[0], text_region_coords[1] + self.LINE_DISTANCE * box_info.text_line_num]
            for t_chunk in box_info.text_chunks:
                self.print_text(text_region_coords, t_chunk.text, t_chunk.color, box_info.z_coord)
                # Advance by the rendered width so the next chunk follows on.
                text_region_coords[0] += blf.dimensions(self.font_id, t_chunk.text)[0]
        bgl.glDisable(bgl.GL_DEPTH_TEST)
    except Exception as ex:
        # Draw callbacks must never raise into Blender; log only in debug mode.
        if in_debug_mode(debug_lvl=2):
            print_backtrace(ex)
class BoxRenderAccess:
    """Base interface giving BoxRenderer access to the boxes to draw."""
    ACTIVE_COLOR = None
    ACTIVE_COLOR_MULTIPLIER = 1.0
    ACTIVE_Z_COORD = None
    ACTIVE_OUTLINE_COLOR = (1,0.25,0,1)
    ACTIVE_TEXT_COLOR = None # (1,1,1,1)
    NON_ACTIVE_COLOR_MULTIPLIER = 1.0
    def is_alive(self):
        # Subclasses may raise here to tell the renderer to stop drawing.
        return True
class BoxArrayAccess:
    """Holds a non-empty box array, a draw color and an optional active index."""
    def init_access(self, box_array, color, active_idx=-1):
        """Store the boxes, color and active index (-1 = none); returns True."""
        assert len(box_array) > 0
        self.box_array = box_array
        self.color = color
        self.active_idx = active_idx
        return True
    def impl_active_box(self):
        """The currently active box, or None when no box is active."""
        return self.box_array[self.active_idx] if self.active_idx >= 0 else None
class BoxArrayRenderAccess(BoxArrayAccess, BoxRenderAccess):
    """Render access over a plain box array: one BoxRenderInfo per box."""
    def impl_box_info_array(self):
        # Every box shares self.color and carries no label text.
        box_info_array = [BoxRenderInfo(glob_idx, box, self.color, None) for glob_idx, box in enumerate(self.box_array)]
        active_box_info = None if self.active_idx < 0 else box_info_array[self.active_idx]
        return box_info_array, active_box_info
    def impl_select_box(self, box_info):
        # Selection simply tracks the picked box's global index.
        self.active_idx = box_info.glob_idx
class CustomTargetBoxAccess(BoxArrayRenderAccess):
    """Render access exposing the scene's single custom target box."""
    MAIN_TARGET_BOX_COLOR = (1, 1, 0, 1)
    def init_access(self, context, ui_drawing=False):
        # The custom target box is always the one (active) box in the array.
        return super().init_access([context.scene.uvpm3_props.custom_target_box], self.MAIN_TARGET_BOX_COLOR, active_idx=0)
| Tilapiatsu/blender-custom_config | scripts/addon_library/local/uvpackmaster3/box_utils.py | box_utils.py | py | 10,137 | python | en | code | 5 | github-code | 13 |
import typing
import re
import logging
from collections import namedtuple
from urllib.parse import urljoin, quote, unquote, urlsplit, urlunsplit
import lxml.etree
import lxml.html
from lxml.html import soupparser
from lxml.html.defs import safe_attrs as lxml_safe_attrs
from lxml.html.clean import Cleaner
from readability import Document as ReadabilityDocument
from django.utils.html import escape as html_escape
from validr import T, Invalid
from rssant_common.validator import compiler
from .importer import RE_URL
from .helper import lxml_call, LXMLError
LOG = logging.getLogger(__name__)
validate_url = compiler.compile(T.url)
# Captures the src of <img> tags (group 1) and the srcset of <source> tags (group 2).
RE_IMG = re.compile(
    r'(?:<img\s*[^<>]*?\s+src="([^"]+?)")|'
    r'(?:<source\s*[^<>]*?\s+srcset="([^"]+?)")',
    re.I | re.M)
# Captures the href of <a> tags.
RE_LINK = re.compile(r'<a\s*.*?\s+href="([^"]+?)"', re.I | re.M)
def story_image_count(content):
    """Count <img src>/<source srcset> occurrences in *content* (0 for falsy input)."""
    if not content:
        return 0
    return len(RE_IMG.findall(content))
def story_url_count(content):
    """Count URL-like substrings (bare or inside markup) in *content*.

    >>> content = '''
    ... <p><a class="xxx" href="https://rss.anyant.com/1">link1</a>
    ... http://www.example.com
    ... baidu.com asp.net
    ... <a href="https://rss.anyant.com/2" target="_blank">link2</a>
    ... </p>
    ... '''
    >>> story_url_count(content)
    3
    """
    if not content:
        return 0
    return len(RE_URL.findall(content))
def story_link_count(content):
    """Count <a href> links in *content* (0 for falsy input).

    >>> content = '''
    ... <p><a class="xxx" href="https://rss.anyant.com/1">link1</a>
    ... <a href="https://rss.anyant.com/2" target="_blank">link2</a>
    ... </p>
    ... '''
    >>> story_link_count(content)
    2
    """
    if not content:
        return 0
    return len(RE_LINK.findall(content))
# implement by regex negative lookahead and negative lookbehind
# see also: https://regexr.com/
# $...$ but not $10...$10, 10$...10$ and jQuery $
_RE_MATHJAX_DOLLAR = r'(?<![^\s>])\$[^$\n]+?\$(?![^\s<])'
# `...` but not ```...```
_RE_MATHJAX_ASCIIMATH = r'(?<![^\s>])\`[^`\n]+?\`(?![^\s<])'
# loose regex for check MathJax
RE_MATHJAX = re.compile((
r'(\$\$.+?\$\$)|' # $$...$$
r'(\\\[.+?\\\])|' # \[...\]
r'(\\\(.+?\\\))|' # \(...\)
fr'({_RE_MATHJAX_DOLLAR})|' # $...$
fr'({_RE_MATHJAX_ASCIIMATH})' # `...`
), re.I | re.M)
def story_has_mathjax(content):
    r"""Loosely detect MathJax/AsciiMath markup ($$..$$, \(..\), \[..\], $..$, `..`).

    >>> story_has_mathjax(r'hi $$x^2$$ ok?')
    True
    >>> story_has_mathjax(r'hi \(x^2\), ok?')
    True
    >>> story_has_mathjax(r'hi \[x^2\], ok?')
    True
    >>> story_has_mathjax(r'hi $$x^2$$ ok?')
    True
    >>> story_has_mathjax(r'hi $x^2$ ok?')
    True
    >>> story_has_mathjax(r'hi $10 invest $10 ok?')
    False
    >>> story_has_mathjax(r'hi `x^2` ok?')
    True
    """
    if not content:
        return False
    return bool(RE_MATHJAX.search(content))
StoryImageIndexItem = namedtuple('StoryImageIndexItem', 'pos, endpos, value')
def is_data_url(url):
    """Truthy when *url* is an inline data: URL (falsy input is returned as-is)."""
    return url and url.startswith('data:')
# Query-string marker identifying image URLs already replaced by the rssant proxy.
RSSANT_IMAGE_TAG = 'rssant=1'
def is_replaced_image(url):
    """Truthy when *url* carries the rssant replacement marker.

    >>> is_replaced_image('https://rss.anyant.com/123.jpg?rssant=1')
    True
    """
    return url and RSSANT_IMAGE_TAG in url
def _is_url(url):
return bool(re.match(r'^https?:\/\/', url))
def make_absolute_url(url, base_href):
    """Resolve *url* against *base_href* unless it is already absolute or
    no base is available."""
    if base_href and not _is_url(url):
        return urljoin(base_href, url)
    return url
TOP_DOMAINS = set("""
com
org
net
edu
gov
tk
de
uk
cn
info
ru
nl
im
me
io
tech
top
xyz
""".strip().split())
RE_STICK_DOMAIN = re.compile(r'^({})[^\:\/$]'.format('|'.join(TOP_DOMAINS)))
def normalize_url(url: str, base_url: str = None):
    """
    Normalize URL: repair common feed-data damage (missing scheme, doubled
    scheme, stuck top-level domain, backslash paths), drop needless default
    ports, collapse leading double slashes and re-quote the path once.

    Note: not support urn and magnet
        urn:kill-the-newsletter:2wqcdaqwddn9lny1ewzy
        magnet:?xt=urn:btih:28774CFFE3B4715054E192FF
    """
    url = (url or '').strip()
    if not url:
        return url
    # NOTE(review): the next line is a no-op as written — the original most
    # likely replaced a full-width/encoded '://' variant; confirm upstream.
    url = url.replace('://', '://')
    url = url.replace('%3A//', '://')
    if url.startswith('://'):
        url = 'http' + url
    if not _is_url(url):
        # ignore urn: or magnet:
        if re.match(r'^[a-zA-Z0-9]+:', url):
            return url
        if base_url:
            url = urljoin(base_url, url)
        else:
            # ignore simple texts
            if not re.match(r'^(\.|\:|\/)?[a-zA-Z0-9\/]+(\.|\:|\/)', url):
                return url
            url = 'http://' + url
    # fix: http://www.example.comhttp://www.example.com/hello
    if url.count('://') >= 2:
        matchs = list(re.finditer(r'https?://', url))
        if matchs:
            url = url[matchs[-1].start(0):]
        else:
            url = 'http://' + url.split('://')[-1]
    match = re.search(r'\.[^.]+?(\/|$)', url)
    if match:
        # fix: http://example.com%5Cblog
        match_text = unquote(match.group(0))
        match_text = match_text.replace('\\', '/')
        # fix: .comxxx -> .com/xxx
        stick_match = RE_STICK_DOMAIN.match(match_text[1:])
        if stick_match:
            top_domain = stick_match.group(1)
            pre_len = 1 + len(top_domain)
            match_text = match_text[:pre_len] + '/' + match_text[pre_len:]
        url = url[:match.start()] + match_text + url[match.end():]
    scheme, netloc, path, query, fragment = urlsplit(url)
    # remove needless port
    if scheme == 'http' and netloc.endswith(':80'):
        netloc = netloc.split(':')[0]
    if scheme == 'https' and netloc.endswith(':443'):
        netloc = netloc.split(':')[0]
    # fix: http://example.com//blog
    path = re.sub(r'^\/\/+', '/', path)
    # quote is not idempotent, can not quote multiple times
    path = quote(unquote(path))
    url = urlunsplit((scheme, netloc, path, query, fragment))
    return url
class StoryImageProcessor:
    """
    Locate image URLs inside story HTML (by regex, not DOM parsing) and
    substitute replacement URLs while leaving all other bytes untouched.

    >>> content = '''
    ... <picture class="kg-image lightness-target">
    ... <source srcset="/abc.webp" type="image/webp">
    ... <source
    ... srcset="/abc.jpg
    ... " type="image/jpeg">
    ... <img src="/abc.jpg" alt="Design System实践"><img src="https://image.example.com/2019/12/21/xxx.jpg" alt="xxx image">
    ... <img src="http://file///invalid.png">
    ... <img src="data:text/plain;base64,SGVsbG8sIFdvcmxkIQ%3D%3D" alt="DataURL">
    ... </picture>
    ... <img data-src="/error.jpg" src="/ok.jpg">
    ... '''
    >>> story_image_count(content)
    7
    >>> processor = StoryImageProcessor("https://rss.anyant.com/story/123", content)
    >>> image_indexs = processor.parse()
    >>> len(image_indexs)
    5
    >>> image_indexs[0].value
    'https://rss.anyant.com/abc.webp'
    >>> image_indexs[1].value
    'https://rss.anyant.com/abc.jpg'
    >>> image_indexs[2].value
    'https://rss.anyant.com/abc.jpg'
    >>> image_indexs[3].value
    'https://image.example.com/2019/12/21/xxx.jpg'
    >>> image_indexs[4].value
    'https://rss.anyant.com/ok.jpg'
    """ # noqa: E501
    def __init__(self, story_url, content):
        self.story_url = story_url
        self.content = content
    def fix_relative_url(self, url):
        """Resolve a relative image URL against the story URL."""
        return make_absolute_url(url, self.story_url)
    def parse(self) -> typing.List[StoryImageIndexItem]:
        """Scan the content for image URLs; return index items holding the
        (pos, endpos) span of each URL in the text plus its absolute value.
        Invalid URLs are skipped; data: and already-replaced URLs are kept
        as-is (not resolved)."""
        if not self.content:
            return []
        content = self.content
        image_indexs = []
        pos = 0
        while True:
            match = RE_IMG.search(content, pos=pos)
            if not match:
                break
            img_src, source_srcset = match.groups()
            # Span of whichever capture group matched (img src or source srcset).
            startpos, endpos = match.span(1) if img_src else match.span(2)
            img_url = (img_src or source_srcset).strip()
            if not is_data_url(img_url) and not is_replaced_image(img_url):
                img_url = self.fix_relative_url(img_url)
            try:
                validate_url(img_url)
            except Invalid:
                pass
            else:
                idx = StoryImageIndexItem(startpos, endpos, img_url)
                image_indexs.append(idx)
            pos = endpos
        return image_indexs
    def process(self, image_indexs, images) -> str:
        """Return the content with each indexed URL that appears (quoted) in
        the *images* mapping replaced by its mapped value."""
        images = {quote(k): v for k, v in images.items()}
        new_image_indexs = []
        for idx in image_indexs:
            new_url = images.get(quote(idx.value))
            if new_url:
                idx = StoryImageIndexItem(idx.pos, idx.endpos, new_url)
                new_image_indexs.append(idx)
        content = self.content
        # Splice replacements between the untouched chunks of the original.
        content_chunks = []
        beginpos = 0
        for pos, endpos, value in new_image_indexs:
            content_chunks.append(content[beginpos: pos])
            content_chunks.append(value)
            beginpos = endpos
        content_chunks.append(content[beginpos:])
        return ''.join(content_chunks)
IMG_EXT_SRC_ATTRS = ['data-src', 'data-original', 'data-origin']
RE_IMAGE_URL = re.compile(
'(img|image|pic|picture|photo|png|jpg|jpeg|webp|bpg|ico|exif|tiff|gif|svg|bmp)', re.I)
def is_image_url(url):
    """Heuristic: does *url* look like it points at an image resource?
    data: URLs and falsy input are rejected outright."""
    if not url or is_data_url(url):
        return False
    return bool(RE_IMAGE_URL.search(url))
def process_story_links(content, story_link):
    """
    Make all links in the story absolute, force <a> to open in a new tab
    with rel=nofollow, and promote lazy-load data-src attributes to src.

    NOTE: Don't process_story_links after StoryImageProcessor, the replaced
    image urls will broken.

    >>> x = '<a href="/story/123.html">汉字</a>'
    >>> result = process_story_links(x, 'http://blog.example.com/index.html')
    >>> expect = '<a href="http://blog.example.com/story/123.html" target="_blank" rel="nofollow">汉字</a>'
    >>> assert list(sorted(result)) == list(sorted(expect)), result
    >>> x = '<img data-src="/story/123.png">'
    >>> result = process_story_links(x, 'http://blog.example.com/index.html')
    >>> expect = '<img data-src="/story/123.png" src="http://blog.example.com/story/123.png">'
    >>> assert list(sorted(result)) == list(sorted(expect)), result
    """
    if not content:
        return content
    dom = lxml_call(lxml.html.fromstring, content)
    for a in dom.iter('a'):
        url = a.get('href')
        if url:
            a.set('href', make_absolute_url(url, story_link))
            a.set('target', '_blank')
            a.set('rel', 'nofollow')
    for x in dom.iter('img'):
        # Pick the first lazy-load attribute that looks like an image URL.
        ext_src = None
        for key in IMG_EXT_SRC_ATTRS:
            value = x.get(key)
            if is_image_url(value):
                ext_src = value
                break
        if ext_src:
            src = make_absolute_url(ext_src, story_link)
            x.set('src', src)
    # also make image, video... other links absolute
    if story_link:
        dom.make_links_absolute(story_link)
    result = lxml.html.tostring(dom, encoding='unicode')
    if isinstance(result, bytes):
        result = result.decode('utf-8')
    return result
def story_readability(content):
    """Extract the main article body from *content* via python-readability.

    >>> content = '<p>hello <b>world</b><br/>你好<i>世界</i></p>'
    >>> print(story_readability(content))
    <body id="readabilityBody"><p>hello <b>world</b><br/>你好<i>世界</i></p></body>
    """
    if (not content) or (not content.strip()):
        return ""
    doc = ReadabilityDocument(content)
    return doc.summary(html_partial=True) or ""
StoryAttach = namedtuple("StoryAttach", "iframe_url, audio_url")
def _normalize_validate_url(url, base_url=None):
    """Normalize *url* (resolving against *base_url*) then validate it;
    return None when the result is empty or invalid."""
    url = normalize_url(url, base_url=base_url)
    if not url:
        return None
    try:
        url = validate_url(url)
    except Invalid:
        url = None
    return url
def story_extract_attach(html, base_url=None) -> StoryAttach:
    """Extract the first iframe URL and the first audio URL from story HTML.

    The audio URL is taken from <audio src>, falling back to the first
    nested <source src>. Both URLs are normalized and validated (None when
    absent or invalid).
    """
    iframe_url = None
    audio_url = None
    dom = lxml_call(lxml.html.fromstring, html)
    iframe_el = dom.find('.//iframe')
    if iframe_el is not None:
        iframe_url = _normalize_validate_url(iframe_el.get('src'), base_url=base_url)
    audio_el = dom.find('.//audio')
    if audio_el is not None:
        audio_src = audio_el.get('src')
        if not audio_src:
            source_el = audio_el.find('source')
            if source_el is not None:
                audio_src = source_el.get('src')
        audio_url = _normalize_validate_url(audio_src, base_url=base_url)
    attach = StoryAttach(iframe_url, audio_url)
    return attach
RE_BLANK_LINE = re.compile(r'(\n\s*)(\n\s*)+')
lxml_html_parser = lxml.html.HTMLParser(
remove_blank_text=True, remove_comments=True, collect_ids=False)
lxml_text_html_cleaner = Cleaner(
scripts=True,
javascript=True,
comments=True,
style=True,
links=True,
meta=True,
page_structure=True,
processing_instructions=True,
embedded=True,
frames=True,
forms=True,
annoying_tags=True,
remove_tags=set(['body']),
kill_tags=set(['code', 'pre', 'img', 'video', 'noscript']),
)
def story_html_to_text(content, clean=True):
    """Convert story HTML to plain text; with clean=True, code/pre/img/video
    blocks are stripped first via lxml_text_html_cleaner.

    >>> content = '''<html><body>
    ... <pre>hello world</pre>
    ...
    ...
    ... <p>happy day</p>
    ... </body></html>
    ... '''
    >>> print(story_html_to_text(content))
    happy day
    >>> print(story_html_to_text(content, clean=False))
    hello world
    happy day
    >>> content = '<![CDATA[hello world]]>'
    >>> print(story_html_to_text(content))
    hello world
    >>> print(story_html_to_text('<pre><code>hi</code></pre>'))
    <BLANKLINE>
    >>> content = '''
    ... <?xml version="1.0" encoding="utf-8"?>
    ... <?xml-stylesheet type="text/xsl" href="/res/preview.xsl"?>
    ... <p>中文传媒精选</p>
    ... '''
    >>> print(story_html_to_text(content))
    中文传媒精选
    >>> story_html_to_text('') == ''
    True
    >>> # lxml can not parse below content, we handled the exception
    >>> content = "<?phpob_start();echo file_get_contents($_GET['pdf_url']);ob_flush();?>"
    >>> assert story_html_to_text(content)
    """
    if (not content) or (not content.strip()):
        return ""
    try:
        if clean:
            # https://bugs.launchpad.net/lxml/+bug/1851029
            # The html cleaner raise AssertionError when both
            # root tag and child tag in kill_tags set.
            if content.startswith('<pre'):
                content = '<div>' + content + '</div>'
            content = lxml_call(lxml_text_html_cleaner.clean_html, content).strip()
            if not content:
                return ""
        r = lxml_call(lxml.html.fromstring, content, parser=lxml_html_parser)
        content = r.text_content().strip()
    except LXMLError:
        # Fall back to the (slower) BeautifulSoup-based parser, then to
        # escaping the raw content if even that fails.
        try:
            content = lxml_call(soupparser.fromstring, content).text_content().strip()
        except LXMLError as ex:
            LOG.info(f'lxml unable to parse content: {ex} content={content!r}', exc_info=ex)
            content = html_escape(content)
    # Collapse runs of blank lines into single newlines.
    return RE_BLANK_LINE.sub('\n', content)
RSSANT_HTML_SAFE_ATTRS = set(lxml_safe_attrs) | set(IMG_EXT_SRC_ATTRS)
RSSANT_HTML_SAFE_ATTRS.update({'srcset'})
_html_cleaner_options = dict(
scripts=True,
javascript=True,
comments=True,
style=True,
links=True,
meta=True,
page_structure=True,
processing_instructions=True,
frames=True,
forms=True,
annoying_tags=True,
safe_attrs_only=True,
safe_attrs=RSSANT_HTML_SAFE_ATTRS,
add_nofollow=True,
remove_tags=set(['body']),
kill_tags=set(['noscript', 'iframe', 'embed']),
)
class FeedLooseHTMLCleaner(Cleaner):
    """Cleaner that keeps embedded content only for whitelisted tags.

    https://lxml.de/api/lxml.html.clean.Cleaner-class.html
    https://lxml.de/api/lxml.html.clean-pysrc.html#Cleaner.allow_embedded_url
    """
    def allow_embedded_url(self, el, url):
        """
        Decide whether a URL that was found in an element's attributes or text
        if configured to be accepted or rejected.

        :param el: an element.
        :param url: a URL found on the element.
        :return: true to accept the URL and false to reject it.
        """
        if self.whitelist_tags is not None and el.tag not in self.whitelist_tags:
            return False
        # Unlike the base Cleaner, accept any host once the tag is whitelisted.
        return True
lxml_story_html_cleaner = Cleaner(
**_html_cleaner_options,
embedded=True,
)
lxml_story_html_loose_cleaner = FeedLooseHTMLCleaner(
**_html_cleaner_options,
embedded=False, # allow iframe
whitelist_tags=['iframe'],
)
def story_html_clean(content, loose=False):
    """Sanitize story HTML; loose=True additionally allows <iframe> embeds
    (but still rejects <embed>).

    >>> content = '''<html><head><style></style></head><body>
    ... <pre stype="xxx">
    ...
    ... hello world</pre>
    ... <p><b>happy</b> day<br>你好<i>世界</i></p>
    ... </body></html>
    ... '''
    >>> print(story_html_clean(content))
    <div>
    <pre>
    <BLANKLINE>
    hello world</pre>
    <p><b>happy</b> day<br>你好<i>世界</i></p>
    </div>
    >>> content = '''
    ... <?xml version="1.0" encoding="utf-8"?>
    ... <?xml-stylesheet type="text/xsl" href="/res/preview.xsl"?>
    ... <p>中文传媒精选</p>
    ... '''
    >>> print(story_html_clean(content))
    <p>中文传媒精选</p>
    >>> # lxml can not parse below content, we handled the exception
    >>> content = '<!-- build time:Mon Mar 16 2020 19:23:52 GMT+0800 (GMT+08:00) --><!-- rebuild by neat -->'
    >>> assert story_html_clean(content)
    >>> # loose cleaner allow iframe, not allow embed flash
    >>> content = '<iframe src="https://example.com/123" width="650" height="477" border="0"></iframe>'
    >>> story_html_clean(content)
    '<div></div>'
    >>> 'iframe' in story_html_clean(content, loose=True)
    True
    >>> content = '<embed src="https://example.com/movie.mp4">'
    >>> story_html_clean(content, loose=True)
    '<div></div>'
    """
    if (not content) or (not content.strip()):
        return ""
    cleaner = lxml_story_html_loose_cleaner if loose else lxml_story_html_cleaner
    try:
        content = lxml_call(cleaner.clean_html, content).strip()
    except LXMLError as ex:
        # Unparseable input: escape it rather than drop the story.
        LOG.info(f'lxml unable to parse content: {ex} content={content!r}', exc_info=ex)
        content = html_escape(content)
    if not content:
        return ""
    return content
RE_HTML_REDIRECT = re.compile(r"<meta[^>]*http-equiv=['\"]?refresh['\"]?([^>]*)>", re.I)
RE_HTML_REDIRECT_URL = re.compile(r"url=['\"]?([^'\"]+)['\"]?", re.I)
def get_html_redirect_url(html: str, base_url: str = None) -> str:
    """
    Resolve HTML meta refresh client-side redirect

    https://www.w3.org/TR/WCAG20-TECHS/H76.html

    Example:
        <meta http-equiv="refresh" content="0;URL='http://example.com/'"/>

    Returns the normalized, validated redirect URL, or None when the page
    is not a small redirect stub (only documents up to 2048 chars are
    considered) or the URL is missing/invalid.
    """
    if not html or len(html) > 2048:
        return None
    match = RE_HTML_REDIRECT.search(html)
    if not match:
        return None
    match = RE_HTML_REDIRECT_URL.search(match.group(1))
    if not match:
        return None
    url = normalize_url(match.group(1).strip(), base_url=base_url)
    try:
        url = validate_url(url)
    except Invalid:
        url = None
    return url
RE_V2EX = re.compile(r'^http(s)?://[a-zA-Z0-9_\.\-]*\.v2ex\.com', re.I)
RE_HACKNEWS = re.compile(r'^http(s)?://news\.ycombinator\.com', re.I)
RE_GITHUB = re.compile(r'^http(s)?://github\.com', re.I)
RE_PYPI = re.compile(r'^http(s)?://[a-zA-Z0-9_\.\-]*\.?pypi\.org', re.I)
def is_v2ex(url):
    """True when *url* is on v2ex.com (any subdomain).

    >>> is_v2ex("https://www.v2ex.com/t/466888#reply0")
    True
    >>> is_v2ex("http://www.v2ex.com/t/466888#reply0")
    True
    >>> is_v2ex("http://xxx.cdn.v2ex.com/image/test.png")
    True
    >>> is_v2ex("https://www.v2ex.net/t/466888#reply0")
    False
    """
    return bool(RE_V2EX.match(url))
def is_hacknews(url):
    """True when *url* is on news.ycombinator.com.

    >>> is_hacknews("https://news.ycombinator.com/rss")
    True
    >>> is_hacknews("http://news.ycombinator.com/rss")
    True
    >>> is_hacknews("https://news.ycombinator.com/")
    True
    >>> is_hacknews("https://xxx.ycombinator.com/")
    False
    """
    return bool(RE_HACKNEWS.match(url))
def is_github(url):
    """True when *url* points at github.com itself (not subdomains/pages).

    >>> is_github("https://github.com/guyskk/rssant")
    True
    >>> is_github("http://github.com/guyskk")
    True
    >>> is_github("https://github.com")
    True
    >>> is_github("https://www.github.com/guyskk/rssant")
    False
    >>> is_github("http://guyskk.github.io/blog/xxx")
    False
    """
    pattern = r'^http(s)?://github\.com'
    return re.match(pattern, url, re.I) is not None
def is_pypi(url):
    """True when *url* points at pypi.org or one of its subdomains.

    >>> is_pypi("https://pypi.org/project/import-watch/1.0.0/")
    True
    >>> is_pypi("http://pypi.org")
    True
    >>> is_pypi("https://simple.pypi.org/index")
    True
    >>> is_pypi("https://pypi.python.org/index")
    False
    """
    pattern = r'^http(s)?://[a-zA-Z0-9_\.\-]*\.?pypi\.org'
    return re.match(pattern, url, re.I) is not None
| s1368816131/PY-rssant | rssant_feedlib/processor.py | processor.py | py | 20,320 | python | en | code | 1 | github-code | 13 |
20659762434 | import numpy as np
import math
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from teste import gaussClf
class NaiveBayesGaussiano():
    """Hand-rolled Gaussian Naive Bayes classifier.

    fit() learns per-class feature means/standard deviations and class
    priors; predict() returns the class with the highest (unnormalised)
    posterior for each sample.

    NOTE(review): several parts look fragile — see the inline notes in
    separar_classes(), fit() and predic_probabilidade() before trusting
    the results.
    """

    def __init__(self):
        # Per-class statistics, filled in by fit().
        self.medias = {}
        self.variancias = {}

    def separar_classes(self, X, y):
        """Split the dataset by class label.

        Returns {class_label: ndarray of that class's rows}.  Also stores
        self.classes (unique labels) and self.class_freq (class priors).
        """
        X2=X
        self.classes = np.unique(y)
        classes_index = {}
        subdatasets = {}
        X_ = np.c_[X, y]
        cls, counts = np.unique(y, return_counts=True)
        # raw frequency (count) of each class, normalised to priors below
        self.class_freq = dict(zip(cls, counts))
        for class_type in self.classes:
            classes_index[class_type] = np.argwhere(y == class_type)
            subdatasets[class_type] = X2[classes_index[class_type], :]
            # NOTE(review): the priors are normalised in place while
            # iterating, so later classes divide by a sum that mixes raw
            # counts and already-normalised values — verify this is intended.
            self.class_freq[class_type] = self.class_freq[class_type]/sum(list(self.class_freq.values()))
        dados = {}
        dados2 = []
        for class_type in self.classes:
            for i in range(len(y)):
                if y[i] == class_type:
                    dados2.append(X[i])
            dados[class_type] = np.array(dados2)
            dados2=[]
        return dados

    def fit(self, X, y):
        """Learn per-class means and standard deviations from (X, y)."""
        X_ = self.separar_classes(X, y)
        # NOTE(review): called while self.medias is still empty (set in
        # __init__), so the inner loop of predic_probabilidade runs zero
        # times — this looks like a leftover debugging call; verify intent.
        self.predic_probabilidade(X_)
        self.medias = {}
        self.desvios = {}
        for class_type in self.classes:
            self.medias[class_type] = np.mean(X_[class_type], axis=0)
            self.desvios[class_type] = self.desvioPadrao(X_[class_type])
        print(self.medias)

    def desvioPadrao(self, X):
        """Population standard deviation per feature (divides by N)."""
        X_ = X - X.mean(axis = 0)
        return np.sqrt((sum(X_ * X_)/(len(X))))

    def variancia(self, X):
        """Per-feature variance (square of desvioPadrao)."""
        return self.desvioPadrao(X)**2

    def probrabilidade(self, X):
        """Multivariate Gaussian density in matrix form.

        NOTE(review): not used by predict(); kept as-is.
        """
        X_=X-X.mean(axis=0)
        w,m = X.shape
        cov = self.matrizDeCovariancia(X)
        exp = np.exp((-1/2) * X_.T @ X_ @ np.linalg.pinv(cov) )
        return (1/(np.linalg.det(cov) **(1/2) * (2*np.pi)**(m/2))) * exp

    def prop2(self, x, media, desvio):
        """Univariate Gaussian density of x given mean and std deviation."""
        exp = math.exp(- ((x - media)** 2)/(2* desvio**2))
        return (1/(desvio* np.sqrt(2*np.pi))) * exp

    def predic_probabilidade(self, X):
        """Unnormalised posterior probability of each class for sample X.

        NOTE(review): the loop index i runs over len(self.medias) — the
        number of *classes* — but is used as a *feature* index into X and
        self.medias[c].  This only works by coincidence when the number of
        classes does not exceed the number of features, and silently
        ignores any extra features; verify.
        """
        self.class_prob = {c:math.log(self.class_freq[c],math.e) for c in self.classes}
        for c in self.classes:
            for i in range(len(self.medias)):
                self.class_prob[c] +=math.log(self.prop2(X[i], self.medias[c][i], self.desvios[c][i]), math.e)
        self.class_prob = {c: math.e**self.class_prob[c] for c in self.class_prob}
        return self.class_prob

    def matrizDeCovariancia(self, X):
        """Sample covariance matrix of X (divides by N-1)."""
        X_ = X - X.mean(axis=0)
        return X_.T @ X_ / (len(X) -1)

    def predict(self, X):
        """Return the most probable class for each row of X."""
        pred = []
        for x in X:
            pred_class = None
            max_prob = 0
            for c, prob in self.predic_probabilidade(x).items():
                if(prob > max_prob):
                    max_prob = prob
                    pred_class = c
            pred.append(pred_class)
        return np.array(pred)
# --- demo / smoke test -------------------------------------------------
# NOTE(review): hard-coded absolute Windows path — the script only runs on
# the author's machine.  Consider a CLI argument or a relative path.
data = np.loadtxt("c:/Users/bruno/Desktop/teste/ex2data1.txt", skiprows=1, delimiter=",")
np.random.shuffle(data)  # shuffle rows before the train/test split
X = data[:, 0: -1]  # all columns but the last are features
# NOTE(review): the label is taken from column index 2, which assumes the
# file has exactly 3 columns; data[:, -1] would match the X slice above.
y = data[: , 2]
n = X.shape[0]
n_train = int(n*0.7)  # 70/30 train/test split
n_test = n - n_train
X_train = X[:n_train]
X_test = X[-n_test:]
y_train = y[:n_train]
y_test = y[-n_test:]
# Compare the hand-rolled classifier with sklearn's QDA and the local gaussClf.
nb = NaiveBayesGaussiano()
nb.fit(X_train, y_train)
g = QuadraticDiscriminantAnalysis()
g2 = gaussClf()
g2.fit(X_train, y_train)
g.fit(X_train, y_train)
print(g2.predict(X_test))
print(nb.predict(X_test))
print(g.predict(X_test))
| brunopinho321/ML_Codigos | NaiveBayes/naive_bayes.py | naive_bayes.py | py | 3,590 | python | en | code | 0 | github-code | 13 |
15396459823 | from BaseHandler import BaseHandler, authenticated
from orm import QuestionNaireInfoTable
from typing import Text
import json
import datetime
from config import DEBUG
import time
class UserQuestionnaireListHandler(BaseHandler):
    """Serve the questionnaire list that belongs to the signed-in user."""

    @authenticated
    async def get(self, *args, **kwargs):
        # API contract:
        # https://github.com/Wh1isper/QuestionnaireSystemDoc/blob/master/%E6%8E%A5%E5%8F%A3%E5%AE%9A%E4%B9%89/%E6%8E%A5%E5%8F%A3%E8%AE%BE%E8%AE%A1-2020.05.17-V1.0.md#%E7%94%A8%E6%88%B7%E9%97%AE%E5%8D%B7%E5%88%97%E8%A1%A8api
        #
        # 1. the user id comes from self.current_user
        # 2. fetch that user's questionnaire rows from the database
        # 3. serialize them as a JSON array
        engine = await self.get_engine()
        async with engine.acquire() as conn:
            query = QuestionNaireInfoTable.select().where(
                QuestionNaireInfoTable.c.U_ID == self.current_user)
            result = await conn.execute(query)
            rows = await result.fetchall()
            payload = [
                {
                    "Q_ID": row.QI_ID,
                    "Q_Name": row.QI_Name,
                    "Q_creat_date": self.datetime_to_timestamp(row.QI_Creat_Date),
                    "Q_deadline_date": self.datetime_to_timestamp(row.QI_Deadline_Date),
                    "state": row.QI_State,
                }
                for row in rows
            ]
        self.write(json.dumps(payload))
# Route table picked up by the application factory.
default_handlers = [
    (r"/api/v1/userQuestionnaireList/", UserQuestionnaireListHandler),
]
| Wh1isper/QuestionnaireSystemBackend | APIHandler/UserQuestionnaire/UserQuestionnaireHandler.py | UserQuestionnaireHandler.py | py | 1,679 | python | en | code | 4 | github-code | 13 |
327300591 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 2.0, 0.01)  # sample points 0.00, 0.01, ..., 1.99 (data for plotting)
y = 1 + np.sin(x)              # sine curve shifted up by one
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set(xlabel='time (s)', ylabel='1+sin(x)', title='basic matplotlib example')
ax.grid()
fig.savefig("example1.png")  # write the figure next to the script
plt.show()                   # open an interactive window (blocks until closed)
| grexor/python-plotting-tutorial | examples/example1.py | example1.py | py | 308 | python | en | code | 0 | github-code | 13 |
29523481815 | import csv
import os
from googletrans import Translator
# Input/output directories and translation defaults used by main().
INPUT_DIR = 'csvs'
OUTPUT_DIR = 'csvs'
# Columns whose cells should be translated; each gets a '<name>-english'
# column inserted directly after it in the output.
COLUMN_LIST = ['fath', 'name']
INPUT_LANGUAGE = 'hindi'


class GoogleTranslator:
    """Translate selected CSV columns via googletrans, caching results.

    Reads `input_file` row by row and, for every column named in
    `column_list`, writes the original column followed by its translated
    copy to `output_file`.  Distinct cell values are cached in
    `self.translated_cells` so each value is sent to the API only once.
    """

    def __init__(self,
                 input_file=None,
                 output_file=None,
                 column_list=COLUMN_LIST,
                 input_language=INPUT_LANGUAGE
                 ):
        # Create Translator object
        self.translator = Translator()
        self.input_file = input_file
        self.output_file = output_file
        # BUG FIX: the parameter was previously ignored in favour of the
        # module-level COLUMN_LIST constant.
        self.column_list = column_list
        self.input_language = input_language
        # cache: original cell text -> translated text
        self.translated_cells = {}

    def Start(self):
        """Stream the input CSV, translating each data row into the output."""
        with open(self.input_file, mode='r', encoding='utf-8') as input_file:
            lines = csv.reader(input_file, delimiter=',')
            counter = 0
            for line in lines:
                if counter == 0:
                    # First row is the header: extend it with the
                    # '<column>-english' names and record column indexes.
                    self.WriteHeader(self.MakeNewHeaders(line))
                else:
                    print('line %s' % counter)
                    new_row = self.TranslateRow(line)
                    print(' %s' % new_row)
                    self.WriteData(new_row)
                counter += 1

    def WriteHeader(self, data):
        """Write the header row, truncating any existing output file."""
        # FIX: open via `with` so the file handle is closed (it previously
        # leaked: csv.writer(open(...)) never closed the file).
        with open(self.output_file, 'w', encoding='utf-8') as f:
            csv.writer(f, delimiter=',', lineterminator='\n').writerow(data)

    def WriteData(self, data):
        """Append one data row to the output file."""
        with open(self.output_file, 'a', encoding='utf-8') as f:
            csv.writer(f, delimiter=',', lineterminator='\n').writerow(data)

    def MakeNewHeaders(self, headers):
        """Return *headers* with '<name>-english' inserted after each
        translated column; records the matching indexes in self.column_idxs."""
        self.column_idxs = []
        new_headers = []
        for header_idx, header in enumerate(headers):
            new_headers.append(header)
            for column in self.column_list:
                if header == column:
                    new_headers.append(header + '-english')
                    self.column_idxs.append(header_idx)
        return new_headers

    def TranslateRow(self, row):
        """Return *row* with a translated copy spliced in after each
        translated column, batch-translating any uncached cells."""
        # collect the distinct cell values that still need translating
        input_values = []
        for column_idx, cell in enumerate(row):
            if (column_idx in self.column_idxs
                    and cell not in self.translated_cells
                    and cell not in input_values):
                input_values.append(cell)
        # FIX: skip the API call when every cell is already cached instead
        # of sending an empty batch.
        if input_values:
            translated_values = self.translator.translate(
                input_values, src=self.input_language, dest='english')
            for one in translated_values:
                self.translated_cells[one.origin] = one.text
        # rebuild the row, inserting a translation after each marked column
        new_row = []
        prev_idx = 0
        for idx in self.column_idxs:
            new_row += row[prev_idx:idx + 1]
            new_row.append(self.translated_cells[row[idx]])
            prev_idx = idx + 1
        new_row += row[prev_idx:]
        return new_row
def main():
    """Translate every CSV in INPUT_DIR, writing the results to
    '<stem>-goog-translate<ext>' files in OUTPUT_DIR."""
    for file_name in os.listdir(INPUT_DIR):
        input_file = INPUT_DIR + '/' + file_name
        stem, ext = os.path.splitext(os.path.basename(file_name))
        output_file = OUTPUT_DIR + '/' + '%s-goog-translate%s' % (stem, ext)
        # build a translator for this file and run it
        translator = GoogleTranslator(
            input_file=input_file,
            output_file=output_file,
            column_list=COLUMN_LIST,
            input_language=INPUT_LANGUAGE
        )
        translator.Start()


if __name__ == '__main__':
    main()
| in-rolls/table_cell_level_translator | google_translator.py | google_translator.py | py | 3,913 | python | en | code | 1 | github-code | 13 |
27632715680 | import cv2
import time
import numpy as np
from HandTrackingModule import HandDetector
from face_detection_video import FaceDetection
kernel = np.ones((5, 5), np.uint8)  # morphology kernel for mask clean-up
wCam, hCam = 1280, 720  # requested capture resolution
# NOTE(review): camera index 1 assumes an external webcam — verify.
cap = cv2.VideoCapture(1)
cap.set(3, wCam)  # CAP_PROP_FRAME_WIDTH
cap.set(4, hCam)  # CAP_PROP_FRAME_HEIGHT
detector = HandDetector(detectionCon=0.8, maxHands=2)
while cap.isOpened():
    success, img = cap.read()
    #img = cv2.flip(img, 1)
    start = time.time()
    # --- green-object detection via BGR range mask ---
    rangomax=np.array([50,255,50])
    rangomin=np.array([0,51,0])
    mascara = cv2.inRange(img, rangomin, rangomax)
    opening = cv2.morphologyEx(mascara, cv2.MORPH_OPEN, kernel)
    x, y, w, h = cv2.boundingRect(opening)
    # draw a green rectangle around the detected object
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
    # draw a red dot at the object's centre
    cv2.circle(img, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1)
    # slots: [left-hand fingers, right-hand fingers, face position]
    PosAbss = ["","",""]
    # --- hand detection ---
    hands, img = detector.findHands(img)
    if hands:
        hand1 = hands[0]
        lmList1 = hand1["lmList"]
        bbox1 = hand1["center"]
        handType1 = hand1["type"]
        fingers1 = detector.fingersUp(hand1)
        # case: exactly one hand detected
        if len(hands) == 1 and handType1 == "Right":
            PosAbss = ["",fingers1,""]
            #PosAbss = ["",handType1,""]
        else:
            PosAbss = [fingers1,"",""]
            #PosAbss = [handType1,"",""]
        # case: two hands detected
        if len(hands) == 2:
            hand2 = hands[1]
            lmList2 = hand2["lmList"]
            bbox2 = hand2["center"]
            handType2 = hand2["type"]
            fingers2 = detector.fingersUp(hand2)
            if handType1 == "Right":
                PosAbss = [fingers2,fingers1,""]
                #PosAbss = [handType2,handType1,""]
            else:
                PosAbss = [fingers1,fingers2,""]
                #PosAbss = [handType1,handType2,""]
    # --- face detection; its position fills the third slot ---
    face_pos,img = FaceDetection(img)
    PosAbss[2] = face_pos[0]
    print(PosAbss)
    end = time.time()
    totalTime = end - start
    fps = 1 / totalTime
    cv2.putText(img, f'FPS: {int(fps)}', (20,70), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0,255,0), 2)
    cv2.imshow("Hand and Face Detection", img)
    cv2.waitKey(1)
| benanxio/HANDFACE | Principal_HF.py | Principal_HF.py | py | 2,297 | python | en | code | 0 | github-code | 13 |
# All eight (row, col) offsets a knight may move by.
KNIGHT_OFFSETS = [[1, 2], [1, -2], [-1, 2], [-1, -2], [2, 1], [2, -1], [-2, 1], [-2, -1]]
# Board files a..h, mapped to row indexes 0..7.
FILES = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']


def count_knight_moves(pos):
    """Return how many of a knight's 8 moves stay on the 8x8 board.

    *pos* is algebraic notation like "a1": a file letter a-h followed by
    a rank digit 1-8.  (Extracted into a function, and guarded below, so
    the logic is importable and testable; running the script behaves as
    before.)
    """
    row = FILES.index(pos[0])  # file letter -> 0..7
    col = int(pos[1]) - 1      # rank digit -> 0..7
    count = 0
    for dr, dc in KNIGHT_OFFSETS:
        # count only destinations that stay on the board
        if 0 <= row + dr <= 7 and 0 <= col + dc <= 7:
            count += 1
    return count


if __name__ == "__main__":
    print(count_knight_moves(input()))
| wndjs803/Algorithm_with_Python | Implementaion/knight.py | knight.py | py | 480 | python | ko | code | 0 | github-code | 13 |
24283259685 | from dataclasses import dataclass
from datetime import datetime
from typing import List
from models.errors import BaseError
import psycopg2
from contextlib import contextmanager
from config import conf
# psycopg2 connection settings sourced from the application config.
connection_info = {
    'database': conf.db.database,
    'user': conf.db.user,
    'password': conf.db.password,
    'host': conf.db.host,
    'port': conf.db.port
}

# NOTE(review): the connection is opened at import time, so importing this
# package requires a reachable database; consider a lazy or pooled connection.
conn = psycopg2.connect(**connection_info)
@contextmanager
def new_transaction():
    """Yield a cursor scoped to a single transaction.

    `with conn:` commits on successful exit and rolls back on exception;
    the cursor is closed either way.  Any psycopg2 error is re-raised as
    a PersistError carrying the original exception.
    """
    try:
        with conn:
            with conn.cursor() as cur:
                yield cur
    except psycopg2.Error as e:
        raise PersistError("An unexpected database exception occured", e)
@dataclass(frozen=True)
class Table(object):
    """Static description of a database table: name, SQL alias, columns."""

    name: str
    alias: str
    fields: List[str]

    def all_fields(self):
        """Comma-separated column list, e.g. 'id, name'."""
        return ', '.join(self.fields)

    def all_fields_aliased(self):
        """Comma-separated alias-qualified columns, e.g. 'u.id, u.name'."""
        return ', '.join(f'{self.alias}.{field}' for field in self.fields)
# Static schema descriptions used to build SQL fragments.
Users = Table('users',
              'u',
              ['id', 'name', 'email', 'instruments', 'created_at', 'updated_at'])
Songs = Table('songs',
              's',
              ['id', 'title', 'instruments', 'created_at', 'updated_at'])
Notifications = Table('notifications',
                      'n',
                      ['id', 'song_id', 'user_id', 'message', 'created_at'])
# Every table known to this persistence layer.
tables = [Users, Songs, Notifications]
@dataclass
class InsertionError(BaseError):
    """Raised when an INSERT cannot be carried out."""
    message: str


@dataclass
class PersistError(BaseError):
    """Wraps an unexpected psycopg2 error; keeps the original exception."""
    message: str
    exception: Exception


@dataclass
class UpdateError(BaseError):
    """Raised when an UPDATE cannot be carried out."""
    message: str
def is_unique_constraint_violation(err):
    """True if *err* is a PostgreSQL unique_violation (SQLSTATE 23505)."""
    unique_violation = '23505'
    return err.pgcode == unique_violation
@dataclass(frozen=True)
class RangeQuery:
    """Datetime window with `furthest` <= `closest`."""

    furthest: datetime
    closest: datetime

    @staticmethod
    def from_query_string(query_string):
        """Parse 'ISO_ISO' into a RangeQuery, ordering the two datetimes."""
        first, second = query_string.split('_')
        start = datetime.fromisoformat(first)
        end = datetime.fromisoformat(second)
        if start > end:
            start, end = end, start
        return RangeQuery(furthest=start, closest=end)
@dataclass(frozen=True)
class PaginatedQuery:
    """Offset/count pair describing one page of a paginated query."""

    offset: int
    count: int

    @staticmethod
    def from_query_string(query_string):
        """Parse 'LOW_UP' bounds into offset=LOW, count=UP-LOW."""
        lower, upper = (int(part) for part in query_string.split('_'))
        return PaginatedQuery(offset=lower, count=upper - lower)
| whisust/jellynote-backend | api/persist/__init__.py | __init__.py | py | 2,343 | python | en | code | 1 | github-code | 13 |
3371901581 | '''
#not all test cases pass- 310 out 0f 313 passed- O(n**3)
resultList = list()
nums = sorted(nums)
print(nums)
for i in range(0,len(nums)):
loopVar1 = i + 1
for j in range(loopVar1, len(nums)):
loopVar2 = j + 1
for k in range(loopVar2, len(nums)):
if nums[i]+nums[j]+nums[k]==0:
if [nums[i],nums[j],nums[k]] not in resultList:
resultList.append([nums[i], nums[j], nums[k]])
return resultList
'''
class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets [a, b, c] from nums with a+b+c == 0.

        Sorts once (O(n log n)), then runs a two-pointer scan per anchor
        element for O(n^2) overall; duplicates are removed with a set of
        (already sorted) tuples.

        FIX: previously returned a set of tuples despite the declared
        List[List[int]] return type; now converts to a list of lists.
        """
        s = sorted(nums)
        output = set()
        for k in range(len(s)):
            target = -s[k]  # need s[i] + s[j] == -s[k]
            i, j = k + 1, len(s) - 1
            while i < j:
                sum_two = s[i] + s[j]
                if sum_two < target:
                    i += 1
                elif sum_two > target:
                    j -= 1
                else:
                    output.add((s[k], s[i], s[j]))
                    i += 1
                    j -= 1
        return [list(triplet) for triplet in output]
return output | akshayyd/ltcSolutions | 3Sum.py | 3Sum.py | py | 1,422 | python | en | code | 0 | github-code | 13 |
34601370607 | from __future__ import annotations
import json
import logging
import pathlib
import typing as t
from dataclasses import dataclass
import aiofiles
from analytix.types import SecretT
_log = logging.getLogger(__name__)


@dataclass(frozen=True)
class Secrets:
    """OAuth client secrets for a Google Developers project.

    Prefer the :meth:`from_file` / :meth:`afrom_file` constructors over
    building instances by hand.

    Args:
        client_id:
            The client ID.
        project_id:
            The name of the project.
        auth_uri:
            The authorisation server endpoint URI.
        token_uri:
            The token server endpoint URI.
        auth_provider_x509_cert_url:
            The URL of the public x509 certificate, used to verify the
            signature on JWTs (such as ID tokens) signed by the
            authentication provider.
        client_secret:
            The client secret.
        redirect_uris:
            Valid redirection endpoint URIs, matching those configured
            for this client ID in the Google APIs Console.
    """

    client_id: str
    project_id: str
    auth_uri: str
    token_uri: str
    auth_provider_x509_cert_url: str
    client_secret: str
    redirect_uris: list[str]

    def __str__(self) -> str:
        return self.project_id

    def __getitem__(self, key: str) -> SecretT:
        return t.cast(SecretT, getattr(self, key))

    @classmethod
    def from_file(cls, path: pathlib.Path | str) -> Secrets:
        """Load secrets from a file downloaded from the Google
        Developers Console.

        Args:
            path:
                The path to the secrets file.

        Returns:
            The loaded secrets.
        """
        path = pathlib.Path(path)
        _log.debug(f"Loading secrets from {path.resolve()}...")
        with open(path) as f:
            data = json.load(f)["installed"]
        _log.info("Secrets loaded!")
        return cls(**data)

    @classmethod
    async def afrom_file(cls, path: pathlib.Path | str) -> Secrets:
        """Asynchronously load secrets from a file downloaded from the
        Google Developers Console.

        Args:
            path:
                The path to the secrets file.

        Returns:
            The loaded secrets.
        """
        path = pathlib.Path(path)
        _log.debug(f"Loading secrets from {path.resolve()}...")
        async with aiofiles.open(path) as f:
            raw = await f.read()
        data = json.loads(raw)["installed"]
        _log.info("Secrets loaded!")
        return cls(**data)

    def to_dict(self) -> dict[str, SecretT]:
        """Return the secrets as a dictionary of field name to value."""
        keys = (
            "client_id",
            "project_id",
            "auth_uri",
            "token_uri",
            "auth_provider_x509_cert_url",
            "client_secret",
            "redirect_uris",
        )
        return {key: self[key] for key in keys}
| 81CuongVn/analytix | analytix/secrets.py | secrets.py | py | 3,374 | python | en | code | 0 | github-code | 13 |
72429894737 | #!/usr/bin/env python
__author__ = "Isidor Nygren"
__copyright__ = "Copyright 2018, Isidor Nygren"
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Isidor Nygren"
__email__ = "admin@isidor.co.uk"
import math
from .basesort import victorylap
def heapify(array, n, i):
    """Sift the element at index *i* down so the first *n* slots of
    *array* satisfy the max-heap property.

    *array* must expose peek(index) and swap(index_a, index_b).
    """
    largest = i
    left = 2 * i + 1
    right = 2 * i + 2
    if left < n and array.peek(largest) < array.peek(left):
        largest = left
    if right < n and array.peek(largest) < array.peek(right):
        largest = right
    if largest != i:
        array.swap(i, largest)
        heapify(array, n, largest)
def sort(array):
    """Heapsort sorting function.

    Sorts *array* in place using the heapsort algorithm, then runs the
    victorylap animation over the result.

    Parameters
    ----------
    array : ObjectList
        array of objects to sort.
    """
    size = array.len()
    # Build a max-heap bottom-up.
    for index in reversed(range(size + 1)):
        heapify(array, size, index)
    # Repeatedly move the current maximum to the end and re-heapify the rest.
    for boundary in range(size - 1, 0, -1):
        array.swap(0, boundary)
        heapify(array, boundary, 0)
    victorylap(array)
| isidornygren/sortware | algorithms/heapsort.py | heapsort.py | py | 930 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.