seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
19034241073 | from boat import Boat
from graph import Graph
from node import CrossingNode
import heapq
from argparse import ArgumentParser
import os
import sys
from time import time
def printSolution(file, currentNode, time, maxStackNodes, maxComputedNodes):
    """Print the solution path and the search statistics to stdout.

    Fix: the elapsed time passed in by every caller is ``round(endTime -
    startTime, 4)`` from ``time.time()``, i.e. SECONDS -- the label said
    "miliseconds", which was both a typo and the wrong unit.

    NOTE(review): *file* is accepted but never written to; the callers in
    allAlgo() pass an open output file handle -- confirm whether the report
    should go there instead of stdout.

    :param file: output file handle (currently unused)
    :param currentNode: final node of the solution path (shadows nothing)
    :param time: elapsed search time in seconds (shadows the imported
        ``time()``; name kept for interface compatibility)
    :param maxStackNodes: peak number of nodes held in memory
    :param maxComputedNodes: total number of generated nodes
    """
    print("Solution:\n")
    currentNode.printPath()
    print("Time:", str(time), "seconds \n")
    print("Maximum number of nodes in memory:", maxStackNodes, "\n")
    print("Maximum number of computed nodes:", maxComputedNodes, "\n")
    print("Cost: ", currentNode.f, "\n")
    print("Length: ", len(currentNode.getPath()), "\n")
def aStar(file):
    """A* search for the river-crossing puzzle.

    ``openList`` is kept sorted by ascending f-cost, so popping index 0
    always expands the currently cheapest node.

    NOTE(review): there is no closed list, only a duplicate check against
    the open list -- presumably CrossingNode.getSuccessors() avoids
    revisiting states; confirm, otherwise nodes may be re-expanded.

    :param file: output file handle, forwarded to printSolution
    """
    # The boat starts empty: nothing loaded in either capacity dict.
    boat = Boat({
        "wolves": 0,
        "goats": 0,
        "cabbages": 0
    },
    {
        "wolves": 0,
        "goats": 0,
        "cabbages": 0
    })
    startNode = CrossingNode(
        None, Graph.initialBoatPosition, Graph.east, Graph.west, Graph.store, boat
    )
    maxStackNodes = 1       # peak size of the open list (memory metric)
    maxComputedNodes = 1    # total number of generated nodes (work metric)
    startTime = time()
    openList = [startNode]
    while len(openList) > 0:
        currentIndex = 0
        # The list is maintained in ascending f order, so index 0 is the best node.
        currentNode = openList.pop(currentIndex)
        # Reached final state
        if currentNode.isFinalState():
            endTime = time()
            printSolution(file, currentNode, round(endTime - startTime, 4), maxStackNodes, maxComputedNodes)
            return
        successors = currentNode.getSuccessors()
        maxComputedNodes += len(successors)
        for successor in successors:
            # Skip states already waiting in the open list.
            if any(successor == item for item in openList):
                continue
            index = 0
            foundPlace = False
            # Find the first position whose f-cost exceeds the successor's,
            # keeping openList sorted by ascending f.
            for index in range(len(openList)):
                if openList[index].f > successor.f:
                    foundPlace = True
                    break
            if foundPlace:
                openList.insert(index, successor)
            else:
                # No more expensive node found: successor goes to the end.
                openList.append(successor)
        maxStackNodes = max(maxStackNodes, len(openList))
def astarOptimized(file):
    """A* with both an open and a closed list.

    ``lopen`` is kept ordered by ascending f (ties broken by descending g);
    expanded nodes move to ``lclose``.  A re-discovered state replaces the
    stored one only when the new path is cheaper.

    NOTE(review): both inner filters call ``successors.remove(...)`` while
    the surrounding ``for successor in successors`` loop iterates that same
    list, which can skip elements -- verify the intended filtering before
    relying on the reported metrics.

    :param file: output file handle, forwarded to printSolution
    """
    # The boat starts empty on both sides.
    boat = Boat( {
        "wolves": 0,
        "goats": 0,
        "cabbages": 0
    },
    {
        "wolves":0,
        "goats": 0,
        "cabbages": 0
    })
    startNode = CrossingNode(
        None, Graph.initialBoatPosition, Graph.east, Graph.west, Graph.store, boat
    )
    lopen = [startNode]     # frontier, sorted by ascending f / descending g
    lclose = []             # already-expanded nodes
    maxStackNodes = 1
    maxComputedNodes = 1
    startTime = time()
    while len(lopen) > 0:
        currentNode = lopen.pop(0)
        lclose.append(currentNode)
        if currentNode.isFinalState():
            endTime = time()
            printSolution(file, currentNode, round(endTime - startTime, 4), maxStackNodes, maxComputedNodes)
            # -1
            # 00->return
            return
        successors = currentNode.getSuccessors()
        maxComputedNodes += len(successors)
        for successor in successors:
            foundOpen = False
            for node in lopen:
                if node == successor:
                    foundOpen = True
                    if successor.f < node.f: # will eliminate from open and then add the current node in open
                        if node in lopen:
                            lopen.remove(node)
                    else:
                        # Existing open entry is at least as good: drop the successor.
                        if successor in successors:
                            successors.remove(successor)
                    break
            if foundOpen == False: # will search it in lclose
                for node in lclose:
                    if successor == node:
                        if successor.f < node.f:
                            # Cheaper path found: allow re-expansion.
                            lclose.remove(node)
                        else:
                            successors.remove(successor)
                        break
        for successor in successors: # add them in lopen as we keep the order --> order ascending f && desc g
            foundPlace = False
            for place in range(len(lopen)):
                if lopen[place].f > successor.f or (
                        lopen[place].f == successor.f and lopen[place].g <= successor.g):
                    foundPlace = True
                    break
            if foundPlace == True:
                lopen.insert(place, successor)
            else:
                lopen.append(successor)
        maxStackNodes = max(maxStackNodes, len(lopen) + len(lclose))
def BF(file, nrSearchedSolutions):
    """Breadth-first search for up to ``nrSearchedSolutions`` solutions.

    Fix: the original called ``queueNodes.pop()``, which removes the LAST
    element -- a LIFO stack, i.e. a depth-first search, despite the name
    and the "BFS" label used by allAlgo().  A ``collections.deque`` with
    ``popleft()`` restores FIFO order and dequeues in O(1).

    :param file: output file handle, forwarded to printSolution
    :param nrSearchedSolutions: how many solutions to report before stopping
    :return: 0 once the requested number of solutions was found, otherwise
        None when the search space is exhausted
    """
    from collections import deque  # local import: keeps module-level deps unchanged
    # The boat starts empty on both sides.
    boat = Boat({
        "wolves": 0,
        "goats": 0,
        "cabbages": 0
    },
    {
        "wolves": 0,
        "goats": 0,
        "cabbages": 0
    })
    startNode = CrossingNode(
        None, Graph.initialBoatPosition, Graph.east, Graph.west, Graph.store, boat
    )
    maxStackNodes = 1
    maxComputedNodes = 1  # all successors generated
    queueNodes = deque([startNode])
    startTime = time()
    while len(queueNodes) != 0:
        currentNode = queueNodes.popleft()  # FIFO -> true breadth-first order
        if currentNode.isFinalState():
            endTime = time()
            printSolution(file, currentNode, round(endTime - startTime, 4), maxStackNodes, maxComputedNodes)
            nrSearchedSolutions -= 1
            if nrSearchedSolutions == 0:
                return 0
            else:
                print("========================================\n")
        succesors = currentNode.getSuccessors()
        queueNodes.extend(succesors)
        maxComputedNodes += len(succesors)
        maxStackNodes = max(maxStackNodes, len(queueNodes))
def depthFirst(file, nrSearchedSolutions):
    """Entry point for the recursive depth-first search (see DF)."""
    searchStart = time()
    # Both sides of the boat start with nothing loaded.
    emptyLoad = {"wolves": 0, "goats": 0, "cabbages": 0}
    rootBoat = Boat(dict(emptyLoad), dict(emptyLoad))
    rootNode = CrossingNode(
        None, Graph.initialBoatPosition, Graph.east, Graph.west, Graph.store, rootBoat
    )
    # Both node counters start at 1 (the root node itself).
    DF(file, nrSearchedSolutions, searchStart, rootNode, 1, 1)
def DF(file, nrSearchedSolutions, startTime, currentNode, maxStackNodes, maxComputedNodes):
    """Recursive depth-first search step.

    Prints a solution whenever a final state is reached, then continues
    searching until ``nrSearchedSolutions`` solutions were reported.

    NOTE(review): maxStackNodes/maxComputedNodes are plain ints, so updates
    made inside recursive calls are invisible to the caller -- the reported
    metrics undercount; confirm whether that is intended.

    :return: number of solutions still to be found (0 stops the recursion)
    """
    if currentNode.isFinalState():
        endTime = time()
        printSolution(file, currentNode, round(endTime - startTime, 4), maxStackNodes, maxComputedNodes)
        nrSearchedSolutions -= 1
        if nrSearchedSolutions == 0:
            return nrSearchedSolutions
        else:
            print("========================================\n")
    successors = currentNode.getSuccessors()
    maxComputedNodes += len(successors)
    maxStackNodes = max(maxStackNodes, len(successors)) #df memorates just one path
    for successor in successors:
        # Stop descending as soon as enough solutions were found.
        if nrSearchedSolutions != 0:
            nrSearchedSolutions = DF(file, nrSearchedSolutions, startTime, successor, maxStackNodes, maxComputedNodes)
    return nrSearchedSolutions
def depth_first_iterative(file, nrSearchedSolutions, currentNode, depth, maxStackNodes, maxComputedNodes, startTime=None):
    """Depth-limited DFS step used by DFI (iterative deepening).

    Fix: the original referenced an undefined ``startTime`` and raised a
    NameError as soon as a solution was found at the depth limit.  It is
    now an optional keyword parameter (backward compatible with existing
    positional callers); when absent, the reported time falls back to 0.0.

    :param depth: remaining depth budget; a final state only counts when
        it is reached exactly at depth 1
    :param startTime: ``time.time()`` value taken when the search started,
        or None when the caller does not measure time
    :return: number of solutions still to be found (0 stops the recursion)
    """
    if depth == 1 and currentNode.isFinalState():  # at the last step for the current searched depth
        endTime = time()
        elapsed = round(endTime - startTime, 4) if startTime is not None else 0.0
        printSolution(file, currentNode, elapsed, maxStackNodes, maxComputedNodes)
        nrSearchedSolutions -= 1
        if nrSearchedSolutions == 0:
            return nrSearchedSolutions
        else:
            print("========================================\n")
    if depth > 1:
        successors = currentNode.getSuccessors()
        maxComputedNodes += len(successors)
        maxStackNodes = max(maxStackNodes, len(successors))
        for successor in successors:
            if nrSearchedSolutions != 0:
                nrSearchedSolutions = depth_first_iterative(file, nrSearchedSolutions, successor, depth - 1,
                                                            maxStackNodes, maxComputedNodes, startTime=startTime)
    return nrSearchedSolutions
# dfs but with a maximum depth --> combination with bfs
# it will remake all the previous trees but at least we will not have problems with the memory
def DFI(file, nrSearchedSolutions):
    """Iterative deepening: re-run a depth-limited DFS with growing depth.

    NOTE(review): ``startTime`` is computed but never forwarded to
    depth_first_iterative, which references an (otherwise undefined)
    ``startTime`` when a solution is found -- confirm/fix the timing
    plumbing.
    """
    startTime = time()
    maxComputedNodes = 1
    maxStackNodes =1
    for depth in range(1, 100): #the maximum length
        if nrSearchedSolutions == 0: #at the previous call we have finished all the the searches
            return
        # A fresh start state is rebuilt for every depth iteration.
        boat = Boat({
            "wolves": 0,
            "goats": 0,
            "cabbages": 0
        },
        {
            "wolves": 0,
            "goats": 0,
            "cabbages": 0
        })
        startNode = CrossingNode(None, Graph.initialBoatPosition, Graph.east, Graph.west, Graph.store, boat)
        nrSearchedSolutions = depth_first_iterative(file, nrSearchedSolutions, startNode, depth, maxStackNodes, maxComputedNodes)
def ida_star(nrSearchedSolutions = 1):
    """Iterative-deepening A*: repeatedly deepen the f-cost limit.

    Each pass explores every node with f <= limit via buildPath(); the
    limit is then raised to the smallest f that exceeded it.
    """
    emptyLoad = {"wolves": 0, "goats": 0, "cabbages": 0}
    root = CrossingNode(
        None, Graph.initialBoatPosition, Graph.east, Graph.west, Graph.store,
        Boat(dict(emptyLoad), dict(emptyLoad))
    )
    limit = root.f
    while True:  # keep deepening until all requested solutions are found
        nrSearchedSolutions, result = buildPath(root, limit, nrSearchedSolutions)
        if result == "finished":
            # Requested number of solutions reported.
            return
        if result == float('inf'):
            # No node exceeded the limit: the search space is exhausted.
            print("No more solutions")
            return
        limit = result
def buildPath(currentNode, limit, nrSearchedSolutions):
    """Recursive IDA* expansion bounded by an f-cost *limit*.

    :param currentNode: node being expanded
    :param limit: current f-cost bound
    :param nrSearchedSolutions: solutions still to be reported
    :return: tuple (remaining solutions, result) where result is either the
        sentinel string "finished", or the minimum f-cost that exceeded the
        limit (float('inf') when nothing did)
    """
    print(currentNode)
    if currentNode.f > limit: #we can not extend this node
        return nrSearchedSolutions, currentNode.f #the new limit --> will choose the highest from all
    if currentNode.isFinalState() and currentNode.f == limit:
        print("Solution:")
        currentNode.printPath()
        nrSearchedSolutions -= 1
        if nrSearchedSolutions == 0:
            return 0, "finished"
    successors = currentNode.getSuccessors()
    minim = float('inf')
    for successor in successors: #try to extend every child
        nrSearchedSolutions, result = buildPath(successor, limit, nrSearchedSolutions)
        if result == "finished":
            return 0, "finished" #to go back at the initial call
        # result is numeric here ("finished" already returned above).
        if result < minim:
            minim = result #calculate the minim of all limits
    return nrSearchedSolutions, minim
def allAlgo(file, nrsol):
    """Run the selected search algorithms and write headers to *file*.

    Only DFS is currently enabled; the other algorithms are kept
    commented out.  NOTE(review): ``Graph.veifyInput`` is the method's
    actual (misspelled) name in the project -- do not "fix" the call here
    without renaming the Graph method.

    :param file: open output file for section headers
    :param nrsol: number of solutions each algorithm should report
    """
    if not Graph.veifyInput():
        file.write("There is no solution")
        return
    # file.write("-------------------- BFS --------------------\n")
    # feedback = BF(file, nrsol)
    # file.write(str(feedback) + '\n')
    # file.write("-------------------- END BFS --------------------\n\n")
    file.write("-------------------- DFS --------------------\n")
    depthFirst(file, nrsol)
    file.write("-------------------- END DFS --------------------\n\n")
    # file.write("-------------------- ITERATIVE DFS --------------------\n")
    # DFI(file, nrsol)
    # file.write("-------------------- END ITERATIVE DFS --------------------\n\n")
    # file.write("-------------------- A* --------------------\n")
    # aStar(file)
    # file.write("-------------------- END A* --------------------\n\n")
    # file.write("-------------------- A* OPTIMIZED --------------------\n")
    # astarOptimized(file)
    # file.write("-------------------- END A* OPTIMIZED --------------------\n\n")
if __name__ == "__main__":
    # CLI driver: run the enabled algorithms on every file in the input
    # folder, writing one "outputN" file per input file.
    graph = None
    parser = ArgumentParser()
    parser.add_argument("-in_file", "--input_folder", dest = "inputFolder", help = "Path to input")
    parser.add_argument("-out_file", "--output_folder", dest = "outputFolder", help = "Path to output")
    parser.add_argument("-nsol", dest = "nsol", help = "The desired number of solutions")
    parser.add_argument("-heur", dest = "heuristic", help = "The heuristic used")
    #parser.add_argument("-t", dest = "timeout", help = "The timeout")
    args = vars(parser.parse_args())
    inputFolder = args["inputFolder"]
    outputFolder = args["outputFolder"]
    nsol = int(args["nsol"])
    #get all input files
    inputFiles = os.listdir(inputFolder)
    #create the output folder if it doesn't exist (makedirs also handles
    #nested paths, unlike the previous exists()+mkdir pair)
    os.makedirs(outputFolder, exist_ok=True)
    #concatenate / at the end
    if inputFolder[-1] != '/':
        inputFolder += '/'
    if outputFolder[-1] != '/':
        outputFolder += '/'
    startTime = time()
    for (index, currentFile) in enumerate(inputFiles):
        outPath = outputFolder + "output" + str(index + 1)
        inPath = inputFolder + currentFile
        # Fix: the output file handle was never closed; the context manager
        # closes it even when an algorithm raises.
        with open(outPath, "w+") as f:
            try:
                graph = Graph(inPath)
                allAlgo(f, nsol)
            except Exception as e:
                print(str(e))
| LupascuMiruna/IA-first-project | code/index.py | index.py | py | 12,609 | python | en | code | 0 | github-code | 13 |
13834210237 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys, os, shutil, json
from subprocess import call
import distutils.dir_util
import configparser
from distutils.version import LooseVersion
from urllib.request import Request, urlopen
# Resolve all paths relative to this script (forward slashes for portability).
GlobalScriptPath = os.path.dirname(os.path.realpath(__file__)).replace('\\','/')
GlobalReleasePath = GlobalScriptPath + "/BuildRelease/PortableApps/MachinaTrader"
GlobalGitPath = GlobalScriptPath + "/MachinaTraderGit"
GlobalSourceFolder = GlobalScriptPath
# The JSON build config lives next to this script and shares its base name.
GlobalConfigFileName=os.path.basename(__file__).replace('.py','.json')
GlobalConfigFile=GlobalScriptPath + "/" + GlobalConfigFileName
portableAppsPath=os.path.dirname(GlobalScriptPath)
# Load build options from the config file; fall back to defaults (all build
# stages enabled) when no config exists yet.
if os.path.exists(GlobalConfigFile):
    with open(GlobalConfigFile) as data_file:
        GlobalConfig = json.load(data_file)
else:
    print("Config not found - Creating")
    # NOTE(review): despite the message, the default config is never written
    # back to disk -- confirm whether it should be saved here.
    GlobalConfig = {}
    GlobalConfig["BuildOptions"] = {};
    GlobalConfig["BuildOptions"]["BuildSetupFolders"] = True
    GlobalConfig["BuildOptions"]["BuildAddPlugins"] = True
    GlobalConfig["BuildOptions"]["BuildEnableAllPlugins"] = True
    GlobalConfig["BuildOptions"]["BuildCompile"] = True
    GlobalConfig["BuildOptions"]["BuildCreateInstaller"] = True
    GlobalConfig["EnabledPlugins"] = []
# What to do
BuildSetupFolders = GlobalConfig["BuildOptions"]["BuildSetupFolders"]
BuildAddPlugins = GlobalConfig["BuildOptions"]["BuildAddPlugins"]
BuildEnableAllPlugins = GlobalConfig["BuildOptions"]["BuildEnableAllPlugins"]
BuildCompile = GlobalConfig["BuildOptions"]["BuildCompile"]
BuildCreateInstaller = GlobalConfig["BuildOptions"]["BuildCreateInstaller"]
ConfigValidPlugins=GlobalConfig["EnabledPlugins"]
# Stage 1: (re)create the release folder layout and copy static assets.
if (BuildSetupFolders):
    print('-------------------------------------------------------------------------')
    print('Setup Folders')
    print('-------------------------------------------------------------------------')
    # When the expected source tree is missing, build from the git checkout.
    if not os.path.isdir(GlobalSourceFolder + '/BuildRelease/App'):
        print ('Source Folder not found -> Fallback to Git Folder')
        GlobalSourceFolder = GlobalGitPath
    print (GlobalSourceFolder)
    # Start from a clean App folder.
    shutil.rmtree(GlobalReleasePath + "/App", ignore_errors=True)
    os.makedirs(GlobalReleasePath, exist_ok=True)
    os.makedirs(GlobalReleasePath +'/App/Plugins', exist_ok=True)
    distutils.dir_util.copy_tree(GlobalSourceFolder + '/BuildRelease/App/AppInfo', GlobalReleasePath + '/App/AppInfo')
    shutil.copy2(GlobalSourceFolder + '/BuildRelease/AppAdditions/help.html', GlobalReleasePath + '/help.html')
    distutils.dir_util.copy_tree(GlobalSourceFolder + '/MachinaTrader/wwwroot', GlobalReleasePath + '/App/wwwroot')
# When every plugin should ship, rebuild the plugin list by scanning the
# source tree for MachinaTrader.Plugin.* folders.
if BuildEnableAllPlugins:
    ConfigValidPlugins=[]
    for root, directories, files in os.walk(GlobalSourceFolder):
        for pluginFolder in directories:
            if pluginFolder.startswith('MachinaTrader.Plugin.'):
                ConfigValidPlugins.append(pluginFolder)
# Stage 2: copy each enabled plugin's static assets into the release.
if (BuildAddPlugins):
    print('-------------------------------------------------------------------------')
    print('Add Plugins')
    print('-------------------------------------------------------------------------')
    for validPlugin in ConfigValidPlugins:
        try:
            distutils.dir_util.copy_tree(GlobalSourceFolder + '/' + validPlugin + '/wwwroot', GlobalReleasePath + '/App/Plugins/' + validPlugin+ '/wwwroot')
        # NOTE(review): bare except deliberately treats a missing wwwroot as
        # non-fatal -- narrow it to the actual copy error type if possible.
        except:
            print("Error: " + validPlugin + " dont contains a wwwroot Folder")
# Stage 3: rewrite the .csproj to reference only the enabled plugins,
# compile with dotnet publish, then restore the original project file.
if (BuildCompile):
    print('-------------------------------------------------------------------------')
    print('Cleanup Project File')
    print('-------------------------------------------------------------------------')
    # Back up the project file before rewriting it in place.
    shutil.copy2(GlobalSourceFolder + '/MachinaTrader/MachinaTrader.csproj', GlobalSourceFolder + '/MachinaTrader/MachinaTrader.csproj.bak')
    # Rewrite trick: read all lines, seek(0), write the filtered content,
    # truncate() drops any leftover tail.
    f = open(GlobalSourceFolder + '/MachinaTrader/MachinaTrader.csproj','r+')
    d = f.readlines()
    f.seek(0)
    for i in d:
        if 'MachinaTrader.Globals' in i:
            f.write(i)
            #Add all enabled Plugins
            for validPlugin in ConfigValidPlugins:
                f.write('<ProjectReference Include="..\\' + validPlugin + '\\' + validPlugin + '.csproj" />\r\n')
        elif 'MachinaTrader.Plugin.' not in i:
            # Keep every line except stale plugin references.
            f.write(i)
    f.truncate()
    f.close()
    print('-------------------------------------------------------------------------')
    print('Compile')
    print('-------------------------------------------------------------------------')
    os.chdir(GlobalSourceFolder + '/MachinaTrader')
    os.system('dotnet restore')
    # Publish self-contained builds per platform.
    if sys.platform == "win32":
        os.system('dotnet publish --framework netcoreapp2.1 --self-contained --runtime win-x64 --output ' + GlobalReleasePath + '/App')
        #Copy Launcher
        shutil.copy2(GlobalSourceFolder + '/BuildRelease/AppAdditions/MachinaTraderLauncher.exe', GlobalReleasePath + '/MachinaTraderLauncher.exe')
    if sys.platform =="linux":
        os.system('dotnet publish --framework netcoreapp2.1 --self-contained --runtime linux-x64 --output ' + GlobalReleasePath + '/App')
    # Restore real project
    os.remove(GlobalSourceFolder + '/MachinaTrader/MachinaTrader.csproj')
    shutil.copy2(GlobalSourceFolder + '/MachinaTrader/MachinaTrader.csproj.bak', GlobalSourceFolder + '/MachinaTrader/MachinaTrader.csproj')
# Windows: Release Path is scriptpath + "/PortableApps"
# Stage 4: package the release folder as a 7z archive and a
# PortableApps.com installer (Windows tooling).
if BuildCreateInstaller:
    # Check if needed Build Tools are installed, we DONT check for netcore/vsbuild tools because they are hard dependency
    if not os.path.isdir(GlobalSourceFolder + "/BuildRelease"):
        os.makedirs(GlobalSourceFolder + "/BuildRelease", exist_ok=True)
    # NOTE(review): this existence check is duplicated verbatim -- probably
    # a copy/paste leftover; confirm before removing.
    if not os.path.isdir(GlobalSourceFolder + "/BuildRelease"):
        os.makedirs(GlobalSourceFolder + "/BuildRelease", exist_ok=True)
    # Fetch the installer tooling on first run.
    if not os.path.isfile(GlobalSourceFolder + "/BuildRelease/MachinaCore.comAppInstaller/PortableApps.comInstaller.exe"):
        print("Warning Build Tools dont exist - Downloading")
        os.chdir(GlobalSourceFolder + "/BuildRelease")
        os.system('git clone https://github.com/MachinaCore/MachinaCore.comAppInstaller.git ' + GlobalScriptPath + "/BuildRelease/AppInstaller")
        os.chdir(GlobalScriptPath)
    # Read app id/version from the PortableApps appinfo.ini.
    config = configparser.ConfigParser()
    config.read(GlobalSourceFolder + '/BuildRelease/App/AppInfo/appinfo.ini')
    fileName = config['Details']['AppID']
    fileVersion = config['Version']['PackageVersion']
    installerFileName = fileName + "_" + fileVersion + ".paf.exe"
    # Make sure data folder is deleted for release
    shutil.rmtree(GlobalReleasePath + "/App/Data", ignore_errors=True)
    # Create 7-zip
    os.system('7z.exe a -r -t7z -mx=9 '+ fileName +'_'+ fileVersion +'.7z ' + GlobalReleasePath.replace("/","\\") + '\\*')
    # Create Installer
    os.system(GlobalSourceFolder + '/BuildRelease/AppInstaller/PortableApps.comInstaller.exe "'+ GlobalReleasePath.replace("/","\\") + '"')
| MobileGuru1013/MachinaTrader | BuildRelease.py | BuildRelease.py | py | 6,960 | python | en | code | 16 | github-code | 13 |
24845770878 | """module to capture traffic signal information from parsed opendrive file"""
import iso3166
import numpy as np
import warnings
import enum
from typing import Union
from crdesigner.map_conversion.opendrive.opendrive_parser.elements.road import Road
from crdesigner.map_conversion.common.utils import generate_unique_id
from commonroad.scenario.traffic_sign import TrafficSign, TrafficLight, TrafficSignElement, TrafficSignIDZamunda, \
TrafficSignIDGermany, TrafficSignIDUsa, TrafficSignIDChina, TrafficSignIDSpain, TrafficSignIDRussia
from commonroad.scenario.lanelet import StopLine, LineMarking
__author__ = "Benjamin Orthen, Stefan Urban"
__copyright__ = "TUM Cyber-Physical Systems Group"
__credits__ = ["Priority Program SPP 1835 Cooperative Interacting Automobiles"]
__version__ = "0.5"
__maintainer__ = "Sebastian Maierhofer"
__email__ = "commonroad@lists.lrz.de"
__status__ = "Released"
def extract_traffic_element_id(signal_type: str, signal_subtype: str, traffic_sign_enum: enum) \
        -> Union[TrafficSignIDZamunda, TrafficSignIDGermany, TrafficSignIDUsa, TrafficSignIDChina,
                 TrafficSignIDSpain, TrafficSignIDRussia]:
    """Map an OpenDRIVE signal type/subtype to a CommonRoad traffic sign ID.

    Lookup order: the bare type first, then "type-subtype"; unknown signals
    fall back to the enumeration's UNKNOWN member (with a warning).

    :param signal_type: OpenDRIVE signal type string (e.g. "274")
    :param signal_subtype: OpenDRIVE signal subtype string (e.g. "5")
    :param traffic_sign_enum: country-specific CommonRoad traffic sign enum
    :return: the matching enum member, or ``traffic_sign_enum.UNKNOWN``
    """
    # Build the value set once instead of twice (the original recomputed it
    # for each membership test).
    known_values = set(item.value for item in traffic_sign_enum)
    if signal_type in known_values:
        element_id = traffic_sign_enum(signal_type)
    elif signal_type + "-" + signal_subtype in known_values:
        element_id = traffic_sign_enum(signal_type + "-" + str(signal_subtype))
    else:
        warnings.warn("OpenDRIVE/traffic_signals.py: Unknown {}"
                      " of ID {} of subtype {}!".format(traffic_sign_enum.__name__, signal_type, signal_subtype))
        element_id = traffic_sign_enum.UNKNOWN
    return element_id
def get_traffic_signals(road: Road):
    """Extract traffic lights, traffic signs and stop lines from an OpenDRIVE road.

    Fixes/changes versus the original:
    * The stop-line handling (signal type '294') was copy-pasted six times,
      once per country branch -- it is now a single code path driven by a
      country -> enum lookup table.  Behaviour is preserved, except that a
      spurious "unknown sign" warning is no longer emitted for non-German
      294 signals before they are discarded.
    * Dynamic-signal filtering used ``signal.type != ("1000002" or ...)``,
      which only ever filtered "1000002" (the parenthesised ``or`` chain
      evaluates to its first truthy operand); all three internal types are
      now excluded as the original comment intended.

    :param road: parsed OpenDRIVE road element
    :return: tuple (traffic_lights, traffic_signs, stop_lines)
    """
    traffic_signs = []
    traffic_lights = []
    stop_lines = []
    # Country code -> CommonRoad traffic sign enumeration; any other country
    # falls back to the generic Zamunda enumeration.
    country_enums = {
        'DEU': TrafficSignIDGermany,
        'USA': TrafficSignIDUsa,
        'CHN': TrafficSignIDChina,
        'ESP': TrafficSignIDSpain,
        'RUS': TrafficSignIDRussia,
    }
    # TODO: Stop lines are created and appended to the list for DEU and OpenDrive format.
    # This has been replicated for other countries but has not been tested with a test case.
    # Stop lines have a signal type of 294 and are handled differently in the commonroad format.
    for signal in road.signals:
        position, tangent, _, _ = road.planView.calc(signal.s, compute_curvature=False)
        # Offset the reference-line point laterally by signal.t.
        position = np.array([position[0] + signal.t * np.cos(tangent + np.pi / 2),
                             position[1] + signal.t * np.sin(tangent + np.pi / 2)])
        if signal.dynamic == 'no':
            # A value of -1/none means "no additional value" (e.g. no speed).
            if signal.value == '-1' or signal.value == '-1.0000000000000000e+00' \
                    or signal.value == 'none' or signal.value is None:
                additional_values = []
            else:
                if signal.unit == 'km/h':
                    # CommonRoad stores speeds in m/s.
                    additional_values = [str(float(signal.value) / 3.6)]
                else:
                    additional_values = [str(signal.value)]
            signal_country = get_signal_country(signal.country)
            traffic_sign_enum = country_enums.get(signal_country, TrafficSignIDZamunda)
            # German/generic sets: skip OpenDRIVE-internal marker types
            # (this filter only applied to those two branches originally).
            if traffic_sign_enum in (TrafficSignIDGermany, TrafficSignIDZamunda) \
                    and (signal.type == "1000003" or signal.type == "1000004"):
                continue
            # Signal type 294 denotes a stop line, converted to geometry
            # instead of a sign.  TODO: other countries may use another ID.
            if signal.type == '294':
                position_1, position_2 = calculate_stop_line_position(road.lanes.lane_sections, signal,
                                                                      position, tangent)
                stop_lines.append(StopLine(position_1, position_2, LineMarking.SOLID))
                continue
            element_id = extract_traffic_element_id(signal.type, str(signal.subtype), traffic_sign_enum)
            if element_id.value == "":
                continue
            traffic_sign_element = TrafficSignElement(
                traffic_sign_element_id=element_id,
                additional_values=additional_values
            )
            traffic_signs.append(TrafficSign(
                traffic_sign_id=generate_unique_id(),
                traffic_sign_elements=list([traffic_sign_element]),
                first_occurrence=None,
                position=position,
                virtual=False
            ))
        elif signal.dynamic == 'yes':
            # These three types are hard to interpret in CommonRoad; ignore
            # them in order not to cause trouble in traffic simulation.
            if signal.type not in ("1000002", "1000007", "1000013"):
                traffic_lights.append(TrafficLight(traffic_light_id=signal.id + 2000, cycle=[], position=position))
            else:
                continue
    return traffic_lights, traffic_signs, stop_lines
def get_signal_country(signal_country: str):
    """Resolve a country name or code to its ISO 3166 alpha-3 code.

    Tries the full country name, then the alpha-2 code, then accepts an
    alpha-3 code as-is; anything unrecognised maps to "ZAM".

    :param signal_country: country name, alpha-2 or alpha-3 code
    """
    code = signal_country.upper()
    by_name = iso3166.countries_by_name.get(code)
    if by_name is not None:
        return by_name.alpha3
    by_alpha2 = iso3166.countries_by_alpha2.get(code)
    if by_alpha2 is not None:
        return by_alpha2.alpha3
    if code in iso3166.countries_by_alpha3:
        return code
    return "ZAM"
def calculate_stop_line_position(lane_sections, signal, position, tangent):
    """
    Function to calculate the 2 points that define the stop line which
    is a straight line from one edge of the road to the other.

    Fix: the cubic term of the OpenDRIVE lane-width polynomial
    (a + b*s + c*s^2 + d*s^3) previously used ``signal.s ** 2``.

    NOTE(review): widths are summed over *all* lane sections and the
    polynomial is evaluated at the global ``signal.s`` rather than the
    section-local offset -- presumably acceptable for single-section
    roads; confirm for multi-section roads.

    Args:
        lane_sections: opendrive lane_sections list containing the lane_section parsed lane_section class
        signal: signal object in this case the stop line
        position: initial position as calculated in the get_traffic_signals function
        tangent: tangent value as calculated in the get_traffic_signals function
    """
    total_width = 0
    for lane_section in lane_sections:
        for lane in lane_section.allLanes:
            # Stop line width only depends on drivable lanes
            if lane.id != 0 and lane.type in ["driving", "onRamp", "offRamp", "exit", "entry"]:
                for width in lane.widths:
                    # Calculating total width of stop line
                    coefficients = width.polynomial_coefficients
                    lane_width = \
                        coefficients[0] + coefficients[1] * signal.s + coefficients[2] * signal.s ** 2 \
                        + coefficients[3] * signal.s ** 3
                    total_width += lane_width
    position_1 = position
    # Calculating second point of stop line using trigonometry
    position_2 = np.array([position[0] - total_width * np.cos(tangent + np.pi / 2),
                           position[1] - total_width * np.sin(tangent + np.pi / 2)])
    return position_1, position_2
def get_traffic_signal_references(road: Road):
    """Return all traffic signal references stored on *road*.

    Collecting them avoids re-defining predefined signals/lights and stop
    lines.  NOTE: currently unused -- signal references were ultimately not
    required to define traffic lights, signals and stop lines; verify they
    are not needed elsewhere before deleting.
    """
    return [reference for reference in road.signalReference]
| CommonRoad/crgeo | commonroad_geometric/external/map_conversion/opendrive/opendrive_conversion/plane_elements/traffic_signals.py | traffic_signals.py | py | 11,367 | python | en | code | 25 | github-code | 13 |
19207869813 | from flask_restful import Resource, reqparse, abort
from flask import request, jsonify
def abort_if_task_id_not_exists(task_id):
    """Abort the request with 404 when *task_id* is not a known task."""
    if task_id in tasks:
        return
    abort(404, message='The task does not exists.')
# Human-readable descriptions of the exposed API functions, keyed by the
# func_name accepted in the URL (used for validation in abort_if_func_not_exists).
dict_func_description = {
    '1': 'this is first api description',
    'helloworld': 'this is second api description',
    'pm_validation': 'this is a post mile validation function'
}
def abort_if_func_not_exists(func_name):
    """Abort the request with 404 when *func_name* is not a known API function."""
    if func_name in dict_func_description:
        return
    abort(404, message='The function does not exists.')
#setup db to persist the task data
#db.column, task_id, input_file_url, task_status, output_file_url
#check task
# In-memory task store (TODO: replace with the database described above;
# contents are lost on restart).
tasks = {}
# Request parser for post-mile payloads shared by the resources below.
pm_put_args = reqparse.RequestParser()
pm_put_args.add_argument('ID', type=int, help='unique ID of the post mile data.')
pm_put_args.add_argument('PMstring', type=str, help='Post mile DynSeg strings.')
pm_put_args.add_argument('Alignment', type=str, help='Roadway Alignment.')
class TaskManager(Resource):
    """REST resource that creates tasks (POST) and reports task status (GET).

    NOTE(review): task ids are derived from ``len(tasks) + 1``; if task
    deletion is ever added, ids can collide -- confirm before persisting.
    """
    def post(self, func_name):
        """Create a new task for *func_name* from the parsed PM arguments."""
        abort_if_func_not_exists(func_name)
        task_id = len(tasks)+1
        args = pm_put_args.parse_args()
        tasks[task_id] = args
        return tasks[task_id], 201
    #save file to local drive with unique name
    #log the task to table
    def get(self,task_id):
        """Report the status of *task_id*.

        NOTE(review): currently returns a hard-coded status and never looks
        up the task -- placeholder until the DB backend exists.
        """
        #get from db and return the status of task_id
        return {'status': 'The job is finished' }
class PostMile(Resource):
    """REST resource exposing a single post-mile task.

    GET returns the stored task arguments (404 when unknown); PUT stores
    the parsed post-mile arguments under *task_id*.

    Change: removed the large block of dead, commented-out earlier
    implementations of get/put that obscured the live code.
    """

    def get(self, task_id):
        """Return the stored arguments for *task_id*, or abort with 404."""
        abort_if_task_id_not_exists(task_id)
        return tasks[task_id]

    def put(self, task_id):
        """Create or replace the task identified by *task_id*."""
        args = pm_put_args.parse_args()
        tasks[task_id] = args
        return tasks[task_id], 201
36387452592 | """ Commerce views. """
import logging
from django.conf import settings
from django.views.decorators.cache import cache_page
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework.permissions import IsAuthenticated
from rest_framework.status import HTTP_406_NOT_ACCEPTABLE, HTTP_409_CONFLICT
from rest_framework.views import APIView
from commerce.api import EcommerceAPI
from commerce.constants import Messages
from commerce.exceptions import ApiError, InvalidConfigurationError, InvalidResponseError
from commerce.http import DetailResponse, InternalRequestErrorResponse
from course_modes.models import CourseMode
from courseware import courses
from edxmako.shortcuts import render_to_response
from enrollment.api import add_enrollment
from microsite_configuration import microsite
from openedx.core.lib.api.authentication import SessionAuthenticationAllowInactiveUser
from student.models import CourseEnrollment
from util.json_request import JsonResponse
log = logging.getLogger(__name__)
class BasketsView(APIView):
    """ Creates a basket with a course seat and enrolls users. """
    # LMS utilizes User.user_is_active to indicate email verification, not whether an account is active. Sigh!
    authentication_classes = (SessionAuthenticationAllowInactiveUser,)
    permission_classes = (IsAuthenticated,)
    def _is_data_valid(self, request):
        """
        Validates the data posted to the view.

        Arguments
            request -- HTTP request

        Returns
            Tuple (data_is_valid, course_key, error_msg)
        """
        course_id = request.DATA.get('course_id')
        if not course_id:
            return False, None, u'Field course_id is missing.'
        try:
            course_key = CourseKey.from_string(course_id)
            # Raises if the course does not exist; only the lookup matters here.
            courses.get_course(course_key)
        except (InvalidKeyError, ValueError)as ex:
            log.exception(u'Unable to locate course matching %s.', course_id)
            return False, None, ex.message
        return True, course_key, None
    def _enroll(self, course_key, user):
        """ Enroll the user in the course. """
        add_enrollment(user.username, unicode(course_key))
    def post(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Attempt to create the basket and enroll the user.

        Responds with 406 for invalid/ineligible requests, 409 when the user
        is already enrolled, and otherwise either payment data (JSON), an
        order confirmation, or a fallback direct honor enrollment.
        """
        user = request.user
        valid, course_key, error = self._is_data_valid(request)
        if not valid:
            return DetailResponse(error, status=HTTP_406_NOT_ACCEPTABLE)
        # Don't do anything if an enrollment already exists
        course_id = unicode(course_key)
        enrollment = CourseEnrollment.get_enrollment(user, course_key)
        if enrollment and enrollment.is_active:
            msg = Messages.ENROLLMENT_EXISTS.format(course_id=course_id, username=user.username)
            return DetailResponse(msg, status=HTTP_409_CONFLICT)
        # If there is no honor course mode, this most likely a Prof-Ed course. Return an error so that the JS
        # redirects to track selection.
        honor_mode = CourseMode.mode_for_course(course_key, CourseMode.HONOR)
        if not honor_mode:
            msg = Messages.NO_HONOR_MODE.format(course_id=course_id)
            return DetailResponse(msg, status=HTTP_406_NOT_ACCEPTABLE)
        elif not honor_mode.sku:
            # If there are no course modes with SKUs, enroll the user without contacting the external API.
            msg = Messages.NO_SKU_ENROLLED.format(enrollment_mode=CourseMode.HONOR, course_id=course_id,
                                                  username=user.username)
            log.debug(msg)
            self._enroll(course_key, user)
            return DetailResponse(msg)
        # Setup the API and report any errors if settings are not valid.
        try:
            api = EcommerceAPI()
        except InvalidConfigurationError:
            # E-commerce is not configured: fall back to a direct enrollment.
            self._enroll(course_key, user)
            msg = Messages.NO_ECOM_API.format(username=user.username, course_id=unicode(course_key))
            log.debug(msg)
            return DetailResponse(msg)
        # Make the API call
        try:
            response_data = api.create_basket(
                user,
                honor_mode.sku,
                payment_processor="cybersource",
            )
            payment_data = response_data["payment_data"]
            if payment_data is not None:
                # it is time to start the payment flow.
                # NOTE this branch does not appear to be used at the moment.
                return JsonResponse(payment_data)
            elif response_data['order']:
                # the order was completed immediately because there was no charge.
                msg = Messages.ORDER_COMPLETED.format(order_number=response_data['order']['number'])
                log.debug(msg)
                return DetailResponse(msg)
            else:
                # Enroll in the honor mode directly as a failsafe.
                # This MUST be removed when this code handles paid modes.
                self._enroll(course_key, user)
                msg = u'Unexpected response from basket endpoint.'
                log.error(
                    msg + u' Could not enroll user %(username)s in course %(course_id)s.',
                    {'username': user.id, 'course_id': course_id},
                )
                raise InvalidResponseError(msg)
        except ApiError as err:
            # The API will handle logging of the error.
            return InternalRequestErrorResponse(err.message)
@cache_page(1800)
def checkout_cancel(_request):
    """ Checkout/payment cancellation view. """
    # Response is cached for 30 minutes (1800 s) by the decorator above.
    # The support email falls back to the platform-wide setting when the
    # microsite does not override it.
    context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
    return render_to_response("commerce/checkout_cancel.html", context)
| escolaglobal/edx-platform | lms/djangoapps/commerce/views.py | views.py | py | 5,899 | python | en | code | 0 | github-code | 13 |
17675379406 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 23:38:29 2021
@author: pooja
"""
import numpy as np
import sys
def matrix_to_lst(mat):
    """Flatten a tile matrix and map each tile number (0-15) to a letter (A-P).

    Returns a plain Python list of single-character strings in row-major order.
    """
    tile_codes = np.asarray(mat).ravel()
    # chr(ord('A') + k) yields the k-th uppercase letter, replacing the
    # explicit 0->'A' ... 15->'P' lookup table.
    return [chr(ord('A') + int(code)) for code in tile_codes]
def get_path(predecessors, start, goal):
    """Walk the parent links from *goal* back to *start*, then print the
    start-to-goal path via print_path(). Always returns None."""
    trail = [goal]
    node = goal
    # follow each node's predecessor until the start (which has no parent)
    while node != start:
        node = predecessors[node]
        trail.append(node)
    trail.reverse()
    print_path(trail)
    return None
def print_path(path):
    """Write each board state of *path* to 'bfs.txt' as a 4x4 number matrix.

    Each step is a 16-character string of letters A-P; letters are mapped back
    to tile numbers 0-15 and printed via a temporary stdout redirect.
    """
    # replacement dictionary to print path
    print_repl = {'A': 0 , 'B': 1,'C': 2, 'D': 3,'E': 4, 'F': 5, 'G': 6, 'H': 7, 'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14, 'P': 15}
    # Save a reference to the original standard output
    original_stdout = sys.stdout
    with open('bfs.txt', 'w') as f:
        # Change the standard output to the file we created.
        # NOTE(review): stdout is only restored at the end of the loop body;
        # an exception while printing would leave sys.stdout pointing at the
        # (closed) file -- consider a try/finally. TODO confirm acceptable.
        sys.stdout = f
        for step in path:
            letterlist = list(step) # convert string to list
            numlist = np.array([print_repl[item] for item in letterlist]) # map letters to numbers, convert list to numpy array
            nummat = numlist.reshape(4,4) # convert the numpy array to matrix
            print(nummat)
            print()
        # Reset the standard output to its original value
        sys.stdout = original_stdout
    return None
| pooja-kabra/15-Puzzle-Problem | helpers.py | helpers.py | py | 1,869 | python | en | code | 0 | github-code | 13 |
19112447343 | import sys
sys.path.append("..")
import torch
import numpy as np
from .inversion_losses import _weighted_CS_SE_loss, _gradient_norm_weighted_CS_SE_loss, _squared_error_loss, \
_cosine_similarity_loss
from torch.nn.functional import conv1d
def _uniform_initialization(x_true, dataset=None, device=None):
"""
All features are initialized independently and uniformly on the interval [-1, 1].
:param x_true: (torch.tensor) The true datapoint that we are trying to win back.
:param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
:param device: (str) The device on which the tensors are stored.
:return: (torch.tensor) The initial guess for our reconstruction.
"""
if device is None:
device = x_true.device
x_init = (torch.rand(x_true.shape, device=device) - 0.5) * 2
return x_init
def _gaussian_initialization(x_true, dataset, device=None):
"""
All features are initialized independently according to a Gaussian with the same mean and variance as the feature.
:param x_true: (torch.tensor) The true datapoint that we are trying to win back.
:param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
:param device: (str) The device on which the tensors are stored.
:return: (torch.tensor) The initial guess for our reconstruction.
"""
if device is None:
device = x_true.device
x_init = torch.randn_like(x_true, device=device)
# if the dataset is standardized, we can leave these sample as is, if not, we perform reparametrization
if not dataset.standardized:
mean = dataset.mean
std = dataset.std
x_init *= torch.reshape(std, (1, -1))
x_init += torch.reshape(mean, (1, -1))
return x_init
def _mean_initialization(x_true, dataset, device=None):
"""
All features are initialized to their mean values.
:param x_true: (torch.tensor) The true datapoint that we are trying to win back.
:param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
:param device: (str) The device on which the tensors are stored.
:return: (torch.tensor) The initial guess for our reconstruction.
"""
if device is None:
device = x_true.device
if dataset.standardized:
x_init = torch.zeros_like(x_true, device=device)
else:
x_init = torch.ones_like(x_true, device=device)
mean = dataset.mean
x_init *= torch.reshape(mean, (1, -1))
return x_init
def _dataset_sample_initialization(x_true, dataset, device=None):
"""
The initial seed is a sample from the dataset.
:param x_true: (torch.tensor) The true datapoint that we are trying to win back.
:param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
:param device: (str) The device on which the tensors are stored.
:return: (torch.tensor) The initial guess for our reconstruction.
"""
if device is None:
device = x_true.device
Xtrain = dataset.get_Xtrain()
batch_size = x_true.size()[0]
batchindices = torch.tensor(np.random.randint(Xtrain.size()[0], size=batch_size)).to(device)
x_init = Xtrain[batchindices].clone().detach()
return x_init
def _likelihood_prior_sample_initialization(x_true, dataset, device=None):
    """
    The initial seed is a sample from the feature marginals for each feature independently.

    Continuous features are drawn from their (possibly discretized) histogram over the
    feature bounds; categorical features are drawn from their empirical category
    frequencies. The raw mixed-type batch is then encoded (and standardized if the
    dataset is standardized).

    :param x_true: (torch.tensor) The true datapoint that we are trying to win back.
    :param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
    :param device: (str) The device on which the tensors are stored.
    :return: (torch.tensor) The initial guess for our reconstruction.
    """
    if device is None:
        device = x_true.device
    batch_size = x_true.size()[0]
    # object dtype: the raw batch mixes numbers and category labels before encoding
    x_init = np.zeros((batch_size, len(dataset.train_features)), dtype='object')
    for i, (feature_name, feature_values) in enumerate(dataset.train_features.items()):
        if feature_values is None:
            # continuous feature: sample from its histogram over [lower, upper]
            lower, upper = dataset.continuous_bounds[feature_name]
            cont_histogram = dataset.cont_histograms[feature_name]
            if len(cont_histogram) < 100:
                feature_range = np.arange(lower, upper + 1)
            else:
                # discretize the range into 100 equally spaced support points
                delta = (upper - lower) / 100
                feature_range = np.array([lower + j * delta for j in range(100)])
            x_init[:, i] = np.random.choice(feature_range, batch_size, p=dataset.cont_histograms[feature_name])
        else:
            # categorical feature: sample labels by their empirical frequencies
            p = dataset.categorical_histograms[feature_name]
            x_init[:, i] = np.random.choice(feature_values, batch_size, p=p)
    x_init = dataset.encode_batch(x_init, standardize=dataset.standardized)
    # Bug fix: Tensor.to() is not in-place -- the original discarded its result,
    # so the requested device was silently ignored.
    x_init = x_init.to(device)
    return x_init
def _mixed_initialization(x_true, dataset, device=None):
    """
    The categorical features are initialized uniformly whereas the continuous features
    are initialized according to their marginals.

    :param x_true: (torch.tensor) The true datapoint that we are trying to win back.
    :param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
    :param device: (str) The device on which the tensors are stored.
    :return: (torch.tensor) The initial guess for our reconstruction.
    """
    if device is None:
        device = x_true.device
    # create feature masks: cat_mask is 1 on categorical columns, cont_mask on continuous ones
    index_map = dataset.train_feature_index_map
    cat_mask = torch.ones_like(x_true)
    for feature_type, feature_index in index_map.values():
        if feature_type == 'cont':
            cat_mask[:, feature_index] = 0.
    cont_mask = torch.ones_like(x_true) - cat_mask
    cat_unif_init = _uniform_initialization(x_true, dataset, device)
    cont_likelihood_init = _likelihood_prior_sample_initialization(x_true, dataset, device)
    # Bug fix: the continuous seed must be masked by multiplication. The original
    # returned `... + cont_mask + cont_likelihood_init`, which offset every
    # continuous feature by 1 and leaked the marginal samples into the
    # categorical positions as well.
    return cat_mask * cat_unif_init + cont_mask * cont_likelihood_init
def _best_sample_initialization(x_true, dataset, true_gradient, net, criterion, true_labels,
                                reconstruction_loss='cosine_sim', n_samples=1000, averaging_steps=2, weights=None,
                                alpha=1e-5, device=None):
    """
    Draw `n_samples` candidate seeds from the feature marginals and keep the one whose
    gradient best matches the observed client gradient, then add noise / smoothing to
    its categorical features.

    :param x_true: (torch.tensor) The true datapoint that we are trying to win back.
    :param dataset: (BaseDataset) The dataset with respect to which the reconstruction is happening.
    :param true_gradient: (list of torch.tensor) The observed gradient to match.
    :param net: (torch.nn.Module) The attacked network.
    :param criterion: (callable) Training loss used to produce candidate gradients.
    :param true_labels: (torch.tensor) Labels of the true batch.
    :param reconstruction_loss: (str) Key selecting the gradient-matching loss.
    :param n_samples: (int) Number of candidate seeds to draw.
    :param averaging_steps: (int) Number of conv1d smoothing passes over categorical blocks.
    :param weights: Optional weights forwarded to the matching loss.
    :param alpha: (float) Trade-off parameter forwarded to the combined losses.
    :param device: (str) The device on which the tensors are stored.
    :return: (torch.tensor) The best-scoring (noised/smoothed) initial guess.
    """
    if device is None:
        device = x_true.device
    # dispatch table over the loss implementations in inversion_losses
    rec_loss_function = {
        'squared_error': _squared_error_loss,
        'cosine_sim': _cosine_similarity_loss,
        'weighted_combined': _weighted_CS_SE_loss,
        'norm_weighted_combined': _gradient_norm_weighted_CS_SE_loss
    }
    best_sample = None
    best_score = None
    for _ in range(n_samples):
        # get the current candidate for the initialization
        current_candidate = _likelihood_prior_sample_initialization(x_true, dataset, device)
        # get its gradient and check how well it fits
        candidate_loss = criterion(net(current_candidate), true_labels)
        candidate_gradient = torch.autograd.grad(candidate_loss, net.parameters())
        candidate_gradient = [grad.detach() for grad in candidate_gradient]
        candidate_reconstruction_loss = rec_loss_function[reconstruction_loss](candidate_gradient, true_gradient, device, weights, alpha).item()
        # check if this loss is better than our current best, if yes replace it and the current datapoint
        if best_sample is None or candidate_reconstruction_loss < best_score:
            best_sample = current_candidate.detach().clone()
            best_score = candidate_reconstruction_loss
    # smoothen out the categorical features a bit -- helps optimization later
    # NOTE(review): this kernel lives on the CPU; if best_sample is on GPU the
    # conv1d below would raise a device mismatch -- TODO confirm intended usage.
    weight = torch.tensor([1/10, 1/10, 6/10, 1/10, 1/10]).unsqueeze(0).unsqueeze(1).float()
    # NOTE(review): _mixed_initialization iterates .values() of this same map and
    # unpacks (feature_type, feature_index); iterating .items() here makes
    # feature_type the feature NAME, so the 'cat' test below seems to never match
    # and the smoothing appears to be skipped. Left unchanged because the noise
    # lines also look shape-sensitive (broadcast of a (batch,) tensor onto a
    # (batch, k) slice) -- verify before enabling.
    for feature_type, feature_index in dataset.train_feature_index_map.items():
        if feature_type == 'cat':
            if len(feature_index) == 1:
                # just add a tiny bit of noise to the binary features
                best_sample[:, feature_index] += 0.2 * torch.rand(best_sample.size()[0]) - 0.1
            else:
                # add some noise
                best_sample[:, feature_index] += 0.3 * torch.rand(best_sample.size()[0]) - 0.15
                for _ in range(averaging_steps):
                    best_sample[:, feature_index] = conv1d(best_sample[:, feature_index].unsqueeze(1), weight, padding=2).squeeze(1)
    return best_sample
| eth-sri/tableak | attacks/initializations.py | initializations.py | py | 8,675 | python | en | code | 6 | github-code | 13 |
32252693950 | # 1 ############################
import os
os.chdir("DZ_files")
def domains_point_free(filename):
    """Read domains from *filename*, dropping the first character (the leading
    dot) of every stripped line. Returns an error string if the file is missing."""
    try:
        with open(filename, "r") as handle:
            raw_lines = handle.readlines()
    except FileNotFoundError as error:
        return f"No file {error}"
    cleaned = []
    for raw in raw_lines:
        cleaned.append(raw.strip()[1:])
    return cleaned
print(domains_point_free("domains.txt"))
# 2 ############################
def surnames(filename):
    """Return the second tab-separated column (the surname) of every line.

    Bug fix: the raw split field kept the trailing newline, so every surname
    except the last ended in '\\n'; the newline is now stripped first.
    """
    with open(filename, "r") as file:
        return [line.rstrip("\n").split("\t")[1] for line in file.readlines()]
print(surnames("names.txt"))
# 3 #############################
from datetime import datetime
def dates(filename):
    """Parse lines shaped like '<day><ordinal> <Month> <year> - ...' and return
    a list of dicts with the original date string and a DD/MM/YYYY rendering.

    Lines without ' - ' are skipped.
    """
    result = []
    with open(filename, "r") as file:
        for line in file.readlines():
            my_line = line.split(" - ")
            if len(my_line) > 1:
                date = my_line[0]
                day, month, year = date.split()
                # day[:-2] strips the two-letter ordinal suffix ("21st" -> "21").
                # NOTE(review): %B matches the month name in the current locale;
                # assumes English month names -- TODO confirm.
                my_date = datetime.strptime(f"{day[:-2]} {month} {year}", "%d %B %Y")
                result.append(
                    {
                        "date_original": date,
                        "date_modified": datetime.strftime(my_date, "%d/%m/%Y"),
                    }
                )
    return result
print(dates("authors.txt")) | ssocolov/IntroPython_socolov | DZ_lesson6_SSocolov.py | DZ_lesson6_SSocolov.py | py | 1,217 | python | en | code | 0 | github-code | 13 |
class Disjoint_Set:
    """Disjoint-set (union-find) over the given elements, with path compression
    and a 'union by value' rule: the numerically larger root becomes the parent.

    Note: find_path_compress() prints the root as a side effect.
    """
    def __init__(self,arr):
        # every element starts as its own root
        self.arr=arr
        self.parent={i:i for i in self.arr}
    def find_path_compress(self,ele):
        """Return the root of *ele*, compressing the path along the way.
        Prints the root when the recursion bottoms out."""
        if ele==self.parent[ele]:
            print(ele)
            return self.parent[ele]
        par=self.find_path_compress(self.parent[ele])
        # path compression: point ele directly at its root
        self.parent[ele]=par
        return par
    def union_byVal(self,ele1,ele2):
        """Merge the sets of ele1 and ele2; the larger-valued root wins."""
        parent_ele1=self.find_path_compress(ele1)
        parent_ele2=self.find_path_compress(ele2)
        if parent_ele1==parent_ele2:
            return
        elif parent_ele1>parent_ele2:
            self.parent[parent_ele2]=parent_ele1
            return
        elif parent_ele2>parent_ele1:
            self.parent[parent_ele1]=parent_ele2
            return
    def find_winner(self,ele1,ele2):
        """Print which element's set has the larger root; 'Draw...' if same set."""
        parent_ele1=self.find_path_compress(ele1)
        parent_ele2=self.find_path_compress(ele2)
        if parent_ele1==parent_ele2:
            print("Draw...")
            return
        elif parent_ele1>parent_ele2:
            print(ele1," wins...")
        elif parent_ele2>parent_ele1:
            print(ele2,' wins...')
# Interactive driver: 1 = find root, 2 = union, 3 = dump parent table,
# 4 = compare the roots of two elements ('owl fight'), anything else exits.
ds=Disjoint_Set([1,2,3,4,5,6,7])
while True:
    ch=int(input("Enter choice : "))
    if ch==1:
        ds.find_path_compress(int(input("Enter ele : ")))
    elif ch==2:
        ds.union_byVal(int(input('Enter ele1 : ')),int(input('Enter ele2 : ')))
    elif ch==3:
        print(ds.parent)
    elif ch==4:
        ds.find_winner(int(input('Enter ele1 : ')),int(input('Enter ele2 : ')))
    else:
        print('Terminating...')
        break
71304936657 | import pygame as pg
import os
import random
pg.init()
WIDTH = 1280
HEIGHT = 720
screen = pg.display.set_mode((WIDTH,HEIGHT))
clock = pg.time.Clock()
map_image = pg.image.load(os.path.join('assets', 'map.png'))
head_image = pg.image.load(os.path.join('assets', 'head.png'))
body_image = pg.image.load(os.path.join('assets', 'body.png'))
food_image = pg.image.load(os.path.join('assets', "food2.png"))
font = pg.font.Font('freesansbold.ttf', 32)
food_rect = pg.Rect(-100,-100, 40, 40)
class Body:
    """One snake segment: a rect on the 40 px grid plus a travel direction
    and the (rotated) sprite currently drawn for it."""
    def __init__ (self,x,y,w,h,direction):
        self.rect = pg.Rect(x,y,w,h)
        self.direction = direction
        # placeholder until set_image() assigns a rotated surface
        self.image = ""
    def set_image(self, image, anger=0):
        # 'anger' is presumably a typo for 'angle' (degrees CCW) -- kept as-is
        # because callers pass it positionally.
        self.image = pg.transform.rotate(image,anger)
def move(player):
    """Advance the snake one grid cell: each segment inherits the pose of the
    segment ahead of it (tail first), then the head steps 40 px in its
    current direction."""
    # pair every follower with its leader, walking from tail to head so each
    # pose is copied before it is overwritten
    for follower, leader in zip(reversed(player[1:]), reversed(player[:-1])):
        follower.rect.x = leader.rect.x
        follower.rect.y = leader.rect.y
        follower.direction = leader.direction
    head = player[0]
    head.rect.x += {"right": 40, "left": -40}.get(head.direction, 0)
    head.rect.y += {"down": 40, "up": -40}.get(head.direction, 0)
def handle_control(player):
    """Read WASD and update the head's direction; a 180-degree reversal into
    the snake's own body is disallowed by each guard."""
    keys = pg.key.get_pressed()
    if keys[pg.K_w] and player[0].direction != "down":
        player[0].direction = "up"
    if keys[pg.K_s] and player[0].direction != "up":
        player[0].direction = "down"
    if keys[pg.K_a] and player[0].direction != "right":
        player[0].direction = "left"
    if keys[pg.K_d] and player[0].direction != "left":
        player[0].direction = "right"
def set_player_image(player):
    """Assign the head sprite to segment 0 and the body sprite to the rest."""
    player[0].set_image(head_image,0)
    for i in range(1,len(player)):
        player[i].set_image(body_image)
def rotate_image(player):
    """Point every segment's sprite in its current travel direction."""
    # rotation (degrees CCW) per direction, shared by head and body sprites
    angles = {"right": 0, "left": 180, "up": 90, "down": -90}
    head = player[0]
    if head.direction in angles:
        head.set_image(head_image, angles[head.direction])
    for segment in player[1:]:
        if segment.direction in angles:
            segment.set_image(body_image, angles[segment.direction])
def check_border(player):
    """Wrap the head around the screen edges (1280x720 board, 40 px cells)."""
    head = player[0]
    # only one branch can apply per frame since the head has one direction
    if head.direction == "right" and head.rect.x >= 1280:
        head.rect.x = 0
    elif head.direction == "left" and head.rect.x < 0:
        head.rect.x = 1240
    elif head.direction == "down" and head.rect.y >= 720:
        head.rect.y = 0
    elif head.direction == "up" and head.rect.y < 0:
        head.rect.y = 680
def generate_food(food):
    """Place the food rect on a random cell of the 32x18 grid (40 px cells)."""
    col = random.randint(0, 31)
    row = random.randint(0, 17)
    food.x = col * 40
    food.y = row * 40
def eat_food(food, player, x,y,direction):
    """If the head overlaps the food: bump the global score, respawn the food,
    and grow the snake by appending a segment at (x, y) -- the tail's pose
    from before this frame's move."""
    if player[0].rect.colliderect(food):
        global point
        point += 1
        generate_food(food)
        player.append(Body(x,y,40,40,direction))
def check_end(player):
    """Return True if the head collides with a body segment, else None (falsy).

    NOTE(review): range(1, len(player)-1) never tests the LAST segment --
    possibly intentional (the tail vacates its cell each frame), but it also
    means a head-on-tail overlap is not detected. TODO confirm.
    """
    for i in range(1,len(player)-1):
        if player[0].rect.colliderect(player[i].rect):
            return True
point = 0
running = True
status = "menu"
snake = [Body(40,0,40,40,"right"), Body(0,0,40,40,"right")]
while running:
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
img = font.render("point: " + str(point),True,(0,0,0))
if status == "menu":
screen.fill("black")
menu_text = font.render("Play", True, (255,255,255), "green")
menu_textRect = menu_text.get_rect()
menu_textRect.center = (WIDTH/2,HEIGHT/2)
screen.blit(menu_text, menu_textRect)
pg.display.update()
if menu_textRect.collidepoint(pg.mouse.get_pos()) and pg.mouse.get_pressed()[0]==True :
status = "playing"
generate_food(food_rect)
if status == "gameover":
screen.fill("white")
quit_text = font.render("Quit", True, (0,0,0))
again_text = font.render("Play Again", True, (0,0,0))
quit_textRect = quit_text.get_rect()
again_textRect = again_text.get_rect()
pointRect = img.get_rect()
pointRect.center = (WIDTH/2, HEIGHT/3)
quit_textRect.center = (WIDTH/3*2,HEIGHT/3*2)
again_textRect.center = (WIDTH/3,HEIGHT/3*2)
screen.blit(img, pointRect)
screen.blit(again_text, again_textRect)
screen.blit(quit_text, quit_textRect)
pg.display.update()
if quit_textRect.collidepoint(pg.mouse.get_pos()) and pg.mouse.get_pressed()[0]==True :
running = False
if again_textRect.collidepoint(pg.mouse.get_pos()) and pg.mouse.get_pressed()[0]==True :
status = "playing"
point = 0
snake = [Body(40,0,40,40,"right"), Body(0,0,40,40,"right")]
generate_food(food_rect)
if status == "playing":
rotate_image(snake)
screen.blit(map_image,(0,0))
screen.blit(img,(0,360))
screen.blit(food_image, food_rect.topleft)
handle_control(snake)
last_x = snake[-1].rect.x
last_y = snake[-1].rect.y
last_direction = snake[-1].direction
move(snake)
check_border(snake)
for i in range(len(snake)):
screen.blit(snake[i].image, (snake[i].rect.x,snake[i].rect.y))
print(snake[0].rect.topleft, snake[0].direction)
eat_food(food_rect,snake,last_x,last_y,last_direction)
if check_end(snake):
status = "gameover"
pg.display.update()
clock.tick(10)
pg.quit() | rintarou07/python | python-project1/main.py | main.py | py | 5,791 | python | en | code | 0 | github-code | 13 |
20322570935 | with open("20-input.txt") as f:
lines = f.read().strip().split('\n')
def swap(nums, a, b):
    """Exchange the elements at indices *a* and *b* in place; returns the same list."""
    held = nums[a]
    nums[a] = nums[b]
    nums[b] = held
    return nums
def sol(p):
    """Solve AoC 2022 day 20 part *p* (1 or 2): mix the module-level `lines`
    of numbers (each tagged with its original index so duplicates stay
    distinct) and sum the values 1000, 2000 and 3000 places after the zero."""
    KEY = 811589153
    coords = [1000, 2000, 3000]
    if p == 1:
        nums = list(enumerate(map(int, lines)))
        n = len(nums)
        og = nums.copy()
        for i, x in og:
            # locate the current position of the element originally at index i
            for index in range(n):
                if nums[index][0] == i:
                    break
            if x < 0:
                # bubble the element left one slot at a time, wrapping around
                cur = index
                for _ in range(-x):
                    nums = swap(nums, cur, (cur - 1) % n)
                    cur = (cur - 1) % n
                continue
            cur = index
            for _ in range(x):
                nums = swap(nums, cur, (cur + 1) % n)
                cur = (cur + 1) % n
        p1 = 0
        for zero_index in range(n):
            if nums[zero_index][1] == 0:
                break
        for c in coords:
            p1 += nums[(zero_index + c) % n][1]
        return p1
    #part 2
    nums2 = list(map(int, lines))
    for i in range(len(nums2)):
        nums2[i] = (i, nums2[i] * KEY)
    n = len(nums2)
    og = nums2.copy()
    for _ in range(10):
        for i, x in og:
            for index in range(n):
                if nums2[index][0] == i:
                    break
            # reduce the (huge) shift modulo n-1: moving an element n-1 slots
            # in a ring of n returns it to the same relative position
            x %= (n - 1)
            if x > 0:
                cur = index
                for _ in range(x):
                    nums2 = swap(nums2, cur, (cur + 1) % n)
                    cur = (cur + 1) % n
    p2 = 0
    for zero_index in range(n):
        if nums2[zero_index][1] == 0:
            break
    for c in coords:
        p2 += nums2[(zero_index + c) % n][1]
    return p2
print(sol(1))
print(sol(2)) | TrlRizu/Advent_of_code | Day 20/20-Grove_positioning.py | 20-Grove_positioning.py | py | 1,753 | python | en | code | 0 | github-code | 13 |
15562588946 | from os import getenv
import logging
import time
from dotenv import load_dotenv
from os.path import realpath, dirname
import paho.mqtt.client as mqtt
def on_message(client, userdata, message):
    """paho-mqtt message callback: log the payload and metadata of each message.

    NOTE(review): relies on the module-global `logger` assigned in the
    __main__ block below; this callback is only wired up after that runs.
    """
    logger.info("message received %s", str(message.payload.decode("utf-8")))
    logger.info("message topic=%s", str(message.topic))
    logger.info("message qos=%s", str(message.qos))
    logger.info("message retain flag=%s", str(message.retain))
if __name__ == '__main__':
    logger = logging.getLogger(__name__)
    # project root is one directory above this file; its .env configures the app
    root_dir = realpath(dirname(realpath(__file__)) + "/..")
    load_dotenv(dotenv_path=f"{root_dir}/.env")
    logging.basicConfig(level=getenv("LOGLEVEL", "INFO").upper())
    logger.info("")
    # (env var name, default, whether to log the resolved value)
    env_configs = {("MQTT_HOST", "rabbitmq", True), ("MQTT_TOPIC", "detections", True), ("MQTT_ID", "consumer", True), }
    config = {}
    for key, default, log in env_configs:
        config[key] = getenv(key, default)
        if log:
            logger.info("%s='%s'", key, str(config[key]))
    client = mqtt.Client(config["MQTT_ID"])
    client.on_message = on_message
    client.connect(config["MQTT_HOST"])
    # background network loop; the main thread just idles until Ctrl-C
    client.loop_start()
    client.subscribe(config["MQTT_TOPIC"])
    while True:
        try:
            time.sleep(1)
            print(".", end="")
        except KeyboardInterrupt:
            break
    client.loop_stop()
| jeremy-share/rabbitmq-simple-mqtt | simple-consumer/src/main.py | main.py | py | 1,357 | python | en | code | 0 | github-code | 13 |
13336479347 |
import os
import io
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r"C:\Users\LENOVO\Documents\hackathon\Unique\ML\ku-hack-6b534f8e99ac.json"
folder_path = r"C:\Users\LENOVO\Documents\hackathon\Unique\ML"
image_path = 'test-KU.png'
path = os.path.join(folder_path, image_path)
def detect_text(path):
    """Detects text in the file.

    Sends the image at *path* to the Google Cloud Vision text-detection API,
    prints every annotation with its bounding polygon, and raises if the API
    reported an error. Requires GOOGLE_APPLICATION_CREDENTIALS to be set.
    """
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    print('Texts:')
    for text in texts:
        print('\n"{}"'.format(text.description))
        # corners of the detected text region, as "(x,y)" strings
        vertices = (['({},{})'.format(vertex.x, vertex.y)
                    for vertex in text.bounding_poly.vertices])
        print('bounds: {}'.format(','.join(vertices)))
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
detect_text(path)
| harshshaw/Unique | ML/app.py | app.py | py | 1,162 | python | en | code | 0 | github-code | 13 |
70766833618 | import itertools
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from models import *
import torch.nn.functional as F
from affectnet import get_dataloaders
# class_names = ["Anger", "Disgust", "Fear", "Happy", "Sad", "Surprised", "Neutral"] #fer2013
class_names = ["Neutral", "Happy", "Sad", "Surprise", "Fear", "Disgust", "Anger","Contempt"] #affectnet
checkpoint_name = 'CBAM'
def main():
    """Evaluate the CBAM ResNet18 checkpoint on the validation split and save
    a normalized confusion matrix to ./cm_<checkpoint_name>.pdf."""
    model = CBAM_ResNet18()
    model.load_state_dict(torch.load('/results/affect/affect1/CBAM_epoch200_bs32_lr0.1_momentum0.9_wd0.0001_seed0_smoothTrue_mixupTrue_schedulerreduce_affect1/checkpoints/best_checkpoint.tar')['model_state_dict'])
    model.cuda()
    model.eval()
    _,val_loader=get_dataloaders()
    count = 0
    correct = 0
    all_target = []
    all_output = []
    with torch.no_grad():
        for i,data in enumerate(val_loader):
            inputs, labels = data
            inputs, labels = inputs.cuda(), labels.cuda()
            # fuse crops and batchsize
            bs, ncrops, c, h, w = inputs.shape
            inputs = inputs.view(-1, c, h, w)
            # forward
            outputs = model(inputs)
            # combine results across the crops
            outputs = outputs.view(bs, ncrops, -1)
            outputs = torch.sum(outputs, dim=1) / ncrops
            _, preds = torch.max(outputs, 1)
            correct += torch.sum(preds == labels.data).item()
            count += labels.shape[0]
            all_target.append(labels.data.cpu())
            all_output.append(preds.data.cpu())
    # flatten the per-batch label/prediction lists into flat arrays
    all_target = np.concatenate(all_target)
    all_output = np.concatenate(all_output)
    matrix = confusion_matrix(all_target, all_output)
    np.set_printoptions(precision=2)
    plot_confusion_matrix(
        matrix,
        classes=class_names,
        normalize=True,
        # title='{} \n Accuracc: {:.03f}'.format(checkpoint_name, acc)
        title="Resnet18 With CBAM",
    )
    # plt.show()
    # plt.savefig('cm_{}.png'.format(checkpoint_name))
    plt.savefig("./cm_{}.pdf".format(checkpoint_name))
    plt.close()
# 生成混淆矩阵
def plot_confusion_matrix(
    cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Draws into the current matplotlib figure; the caller is responsible for
    showing or saving it.
    """
    if normalize:
        # convert counts to per-true-class fractions (rows sum to 1)
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print("Confusion matrix, without normalization")
    print(cm)
    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(title, fontsize=12)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = ".2f" if normalize else "d"
    thresh = cm.max() / 2.0
    # annotate every cell, flipping the text color for dark backgrounds
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(
            j,
            i,
            format(cm[i, j], fmt),
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black",
        )
    plt.ylabel("True label", fontsize=12)
    plt.xlabel("Predicted label", fontsize=12)
    plt.tight_layout()
if __name__ == "__main__":
main()
| Accci/OCFER | PreTrain/cm_cbam.py | cm_cbam.py | py | 3,372 | python | en | code | 0 | github-code | 13 |
28183802531 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 7 10:59:08 2017
@author: axel
"""
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
inputfile = open("Document.txt", "r").read()
inputfile = inputfile.replace('”','"').replace('“','"')
inputfile = re.sub(r'"\s*(?=[A-Z])',r'" .',inputfile)
inputfile = re.sub(r'"(?=\n)','" .',inputfile)
sentences = sent_tokenize(inputfile)
summ_phra = ["after all",
"all in all",
"all things considered",
"briefly",
"by and large",
"in any case",
"in any event",
"in brief",
"in conclusion",
"on the whole",
"in short",
"in summary",
"in the final analysis",
"in the long run, on balance",
"to sum up",
"to summarize",
"finally"]
stopWords = set(stopwords.words('english'))
word_dict = dict()
score = dict()
sumPhrase = dict()
namedEntities = dict()
numWords = dict()
# First pass: per-sentence feature extraction -- term frequencies over the
# whole document, named-entity counts, and a summary-phrase flag.
i=0
for sentence in sentences:
    namedEntities[i] = 0
    numWords[i] = 0
    words = word_tokenize(sentence)
    coreWords = [word for word in words if word.lower() not in stopWords]
    for word in coreWords:
        lword = word.lower()
        # global term frequency across the document
        word_dict[lword] = word_dict.get(lword, 0) + 1
        # crude named-entity heuristic: capitalized non-stopword
        if word[0].isupper():
            namedEntities[i] += 1
    score[i] = 0
    # Bug fix: the original re-assigned sumPhrase[i] inside the phrase loop,
    # so only the LAST phrase in summ_phra ("finally") could ever set the
    # flag. Check all phrases instead.
    lowered = sentence.lower()
    sumPhrase[i] = 1 if any(phrase in lowered for phrase in summ_phra) else 0
    i += 1
# Second pass: score each sentence from its own tokens.
i = 0
for sentence in sentences:
    # Bug fix: the original reused the stale `words` list left over from the
    # previous loop (the tokens of the LAST sentence), so every sentence was
    # scored against the same words. Tokenize the current sentence instead.
    words = word_tokenize(sentence)
    coreWords = [word for word in words if word.lower() not in stopWords]
    for word in coreWords:
        lword = word.lower()
        numWords[i] += word_dict.get(lword, 0)
    # weighted sum of term-frequency mass, named entities and summary phrases
    score[i] = numWords[i] * 0.1 + namedEntities[i] * 0.5 + sumPhrase[i] * 2
    i += 1
rank = sorted(score , key= score.get, reverse=True)
for i in range(4):
print(str(i)+'\t')
print(sentences[rank[i]])
print("\n")
| axelJames/DocumentSummary | doc.py | doc.py | py | 2,263 | python | en | code | 2 | github-code | 13 |
22983798608 | import pytest
from alertmanagermeshtastic.meshtastic import (
create_announcer,
DummyAnnouncer,
MeshtasticAnnouncer,
MeshtasticConfig,
MeshtasticServer,
)
# With a server configured we expect the real announcer; without one, the
# no-op dummy used for local/dry runs.
@pytest.mark.parametrize(
    'server, expected_type',
    [
        (MeshtasticServer('meshtastic.server.test'), MeshtasticAnnouncer),
        (None, DummyAnnouncer),
    ],
)
def test_create_announcer(server, expected_type):
    """create_announcer() picks the announcer class based on server presence."""
    config = MeshtasticConfig(
        server=server,
        nickname='nick',
        realname='Nick',
        commands=[],
        channels=set(),
    )
    announcer = create_announcer(config)
    # exact type on purpose: a subclass would indicate the wrong factory branch
    assert type(announcer) == expected_type
| Apfelwurm/alertmanagermeshtastic | tests/test_create_announcer.py | test_create_announcer.py | py | 647 | python | en | code | 0 | github-code | 13 |
5971369637 | from rdkit import Chem
from rdkit.Chem.ChemUtils import SDFToCSV
# input file format: *.sdf
# output file format: *.csv
# test
def _convert(sdf_path, csv_path):
    """Convert one SDF file to CSV via RDKit (all molecules, default options)."""
    supplier = Chem.SDMolSupplier(sdf_path)
    # 'with' guarantees the CSV handle is closed even if Convert raises
    with open(csv_path, 'w') as out_file:
        SDFToCSV.Convert(supplier, out_file, keyCol=None, stopAfter=- 1, includeChirality=False, smilesFrom='')

# test / validation / train splits, same conversion for each
_convert('input_file/testset.sdf', 'output_file/testset.csv')
_convert('input_file/validationset.sdf', 'output_file/validationset.csv')
_convert('input_file/trainingset.sdf', 'output_file/trainingset.csv')
24362211186 | from course import get_course, today
from tkinter import *
window = Tk()
window.title("Банк")
window.geometry("500x500")
window.resizable(width=False, height=False)
img_logo = PhotoImage(file=r"D:\Kod\modul 2\lesson8\logo.png")
logo = Label(window, image=img_logo)
logo.place(x=0, y=0)
title_label = Label(window, text="Банк Maxmum", font="TimesNewsRoman 34")
title_label.place(x=160, y=50)
course_label = Label(window, text=f"Курсы валют на {today} число:", font="TimesNewsRoman 20")
course_label.place(x=35, y=160)
dollar_info =f"{get_course('R01235').get('name')} {get_course('R01235').get('value')} руб."
dollar_label = Label(window, text=dollar_info, font="TimesNewsRoman 16")
dollar_label.place(x=80, y=215)
eur_info =f"{get_course('R01239').get('name')} {get_course('R01239').get('value')} руб."
eur_label = Label(window, text=eur_info, font="TimesNewsRoman 16")
eur_label.place(x=80, y=245)
# Bug fix: the yuan row previously showed the EUR rate (valute id R01239);
# use the CNY id R01375 for both the name and the value.
cny_info = f"{get_course('R01375').get('name')} {get_course('R01375').get('value')} руб."
cny_label = Label(window, text=cny_info, font="TimesNewsRoman 16")
cny_label.place(x=80, y=275)
entry = Entry(font="TimesNewsRoman 16", width=10)
entry.place(x=80, y=400)
y = 30
def search():
    """Fetch the rate for the currency id typed into the entry box and render
    it below the previously shown results."""
    global y
    currency_id = entry.get()
    currency_info = f"{get_course(currency_id).get('name')} {get_course(currency_id).get('value')} руб."
    currency_lable = Label(window, text=currency_info, font="TimesNewsRoman 16")
    currency_lable.place(x=80, y=245 + y)
    # Bug fix: y was declared global but never advanced, so every search
    # rendered its label on top of the previous one.
    y += 30
button = Button(text="Поиск", font="TimesNewsRoman 10", command=search)
button.place(x=200, y=400)
window.mainloop()
| Den4ik20020/modul4 | modul2/lesson8/main.py | main.py | py | 1,625 | python | en | code | 0 | github-code | 13 |
35747940154 | import json
from sqlalchemy.dialects.postgresql import JSONB
from datetime import datetime
from index import db
class Message(db.Model):
    """A raw analytics message with its JSONB payload.

    `ts` is parsed from the payload's 'ts' field (YYYYMMDDHHMMSS, right-padded
    with zeros when shorter); `received` records server arrival time.
    """
    __tablename__ = 'messages'
    id = db.Column(db.Integer, primary_key=True)
    # originating site (FK into sites.site_id)
    site_id = db.Column(db.ForeignKey('sites.site_id'), index=True)
    # when the server received the message (naive UTC)
    received = db.Column(db.DateTime, nullable=False)
    # event timestamp taken from the payload itself
    ts = db.Column(db.DateTime, nullable=False, index=True)
    # full original payload
    message = db.Column(JSONB, nullable=False)
    def __init__(self, message):
        self.message = message
        self.site_id = message['site_id']
        self.received = datetime.utcnow()
        # fill trailing zeros to make sure the ts string is the expected length
        self.ts = datetime.strptime(message['ts'] + '0' * (14 - len(message['ts'])), '%Y%m%d%H%M%S')
db.Index('message_type', Message.message['type'])
db.Index('message_content', Message.message, postgresql_using='gin')
| cliqz-oss/green-analytics | web/models/message.py | message.py | py | 907 | python | en | code | 8 | github-code | 13 |
35813245090 | import json
import cv2
import torch
import numpy as np
import torch.nn.functional as F
from LSTR.config import system_configs
from LSTR.nnet.py_factory import NetworkFactory
class LSTRPredict:
    """Wrapper around the LSTR end-to-end lane detector: loads the trained
    network on the GPU and turns BGR frames into per-lane pixel polylines.

    NOTE(review): requires CUDA (nnet.cuda() / .cuda() below) and the LSTR
    config at ./LSTR/config/LSTR.json.
    """
    def __init__(self):
        self.load_configs()
        # Load model
        self.nnet = NetworkFactory()
        test_iter = system_configs.max_iter
        self.nnet.load_params(test_iter)
        self.nnet.cuda()
        self.nnet.eval_mode()
    def load_configs(self):
        """Read LSTR.json, push it into system_configs and cache the input size."""
        print('loaded')
        with open('./LSTR/config/LSTR.json', "r") as f:
            self.configs = json.load(f)
        self.configs["system"]["snapshot_name"] = "LSTR"
        system_configs.update_config(self.configs["system"])
        # info
        self.input_size = self.configs['db']['input_size']
    def predict(self, image):
        """Run one frame through the network; returns the raw model outputs.
        Side effect: stores the frame's height/width for detect_lanes()."""
        # function is based on the original End-to-end implementation as used in LSTR github (https://github.com/liuruijin17/LSTR)
        self.height, self.width = image.shape[0:2]
        # Image: resize, scale to [0,1], then min-max normalize to [-1,1], CHW
        images = np.zeros(
            (1, 3, self.input_size[0], self.input_size[1]), dtype=np.float32)
        pad_image = image.copy()
        resized_image = cv2.resize(
            pad_image, (self.input_size[1], self.input_size[0]))
        resized_image = resized_image / 255.
        resized_image = cv2.normalize(
            resized_image, None, alpha=-1, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        resized_image = resized_image.transpose(2, 0, 1)
        images[0] = resized_image
        images = torch.from_numpy(images).cuda(non_blocking=True)
        # Masks: all-ones attention mask except where the (zero) pad mask lands
        masks = np.ones(
            (1, 1, self.input_size[0], self.input_size[1]), dtype=np.float32)
        pad_mask = np.zeros((self.height, self.width, 1), dtype=np.float32)
        resized_mask = cv2.resize(
            pad_mask, (self.input_size[1], self.input_size[0]))
        masks[0][0] = resized_mask.squeeze()
        masks = torch.from_numpy(masks).cuda(non_blocking=True)
        # results
        outputs, weights = self.nnet.test([images, masks])
        return outputs
    def detect_lanes(self, image):
        """Detect lanes in *image*; returns a list of (N, 2) int arrays of
        (x, y) pixel points, one per detected lane curve."""
        # getting results from model - function is based on the original End-to-end implementation as used in LSTR github (https://github.com/liuruijin17/LSTR)
        outputs = self.predict(image)
        out_logits, out_curves = outputs['pred_logits'], outputs['pred_curves']
        prob = F.softmax(out_logits, -1)
        scores, labels = prob.max(-1)
        # keep only queries classified as lane (label 1)
        labels[labels != 1] = 0
        results = torch.cat([labels.unsqueeze(-1).float(), out_curves], dim=-1)
        # making predictions
        pred = results[0].cpu().numpy()
        pred = pred[pred[:, 0].astype(int) == 1]
        lane_points = []
        for i, lane in enumerate(pred):
            lane = lane[1:] # remove conf
            lower, upper = lane[0], lane[1] # Get lower, upper positions
            lane = lane[2:] # remove upper, lower positions
            # lane point generation: evaluate the curve model at 100 normalized
            # y positions, then scale back to pixel coordinates
            ys = np.linspace(lower, upper, num=100)
            points = np.zeros((len(ys), 2), dtype=np.int32)
            points[:, 1] = (ys * self.height).astype(int)
            points[:, 0] = ((lane[0] / (ys - lane[1]) ** 2 + lane[2] / (ys - lane[1]) + lane[3] + lane[4] * ys -
                            lane[5]) * self.width).astype(int)
            # drop points outside the frame
            points = points[(points[:, 0] > 0) & (points[:, 0] < self.width)]
            lane_points.append(points)
        return lane_points
| HantsonAlec/Research-Project-CARLA | LstrPredict/lstr.py | lstr.py | py | 3,486 | python | en | code | 1 | github-code | 13 |
29195035592 | import time
import numpy as np
from pylive import liven_plotter
import matplotlib.pyplot as plt
def plot_soc_photon_data(r, key):
    """Stream battery-monitor CSV rows from *r* and live-plot them.

    r: handle with readline() and close() — pyserial USB (bytes) or a BLE
       wrapper that already yields str.
    key: substring that marks a data row worth parsing.
    Runs until interrupted; closes *r* and exits the process on any error.
    """
    # Column legend for the rows printed below.
    var_str = "unit, hm, cTime, dt, sat,sel,mod, Tb, vb, ib, vsat,dv_dyn,voc_stat,voc_ekf, y_ekf, soc_s,soc_ekf,soc,"
    count = 0
    i = 0
    cTime_last = None
    t_v = None
    T_actual = 0.
    T_actual_past = 0.
    time_span = 3600. * 4.  # second plot window setting
    # Plot buffers/handles are lazily created once the sample period is known.
    y_vec0 = None
    y_vec1 = None
    y_vec2 = None
    linen_x0 = None
    linen_x1 = None
    linen_x2 = None
    axx0 = None
    axx1 = None
    axx2 = None
    fig = None
    identifier = ''
    print(var_str)
    try:
        while True:
            count += 1
            time.sleep(0.01)
            try:
                data_r = r.readline().decode().rstrip()  # USB
            except IOError:
                data_r = r.readline().rstrip()  # BLE
            if data_r.__contains__(key):
                list_r = data_r.split(',')
                unit = list_r[0]
                hm = list_r[1]
                cTime = float(list_r[2])
                dt = float(list_r[3])
                sat = int(list_r[4])
                sel = int(list_r[5])
                mod = int(list_r[6])
                Tb = float(list_r[7])
                vb = float(list_r[8])
                ib = float(list_r[9])
                vsat = float(list_r[10])
                dv_dyn = float(list_r[11])
                voc_stat = float(list_r[12])
                voc_ekf = float(list_r[13])
                y_ekf = float(list_r[14])
                soc_s = float(list_r[15])
                soc_ekf = float(list_r[16])
                soc = float(list_r[17])
                print(count, unit, hm, cTime, dt, sat, sel, mod, Tb, vb, ib, vsat, dv_dyn, voc_stat, voc_ekf, y_ekf, soc_s,
                      soc_ekf, soc)
                i += 1
                # Plot when have at least 2 points available
                if i > 1:
                    # Setup
                    # On the second point the sample period is estimated and
                    # the rolling buffers are sized to cover `time_span`.
                    if i == 2:
                        T_maybe = cTime - cTime_last
                        T_actual_past = T_maybe
                        n_v = int(time_span / T_maybe)
                        t_v = np.arange(-n_v * T_maybe, 0, T_maybe)
                        y_vec0 = np.zeros((len(t_v), 3))
                        y_vec0[:, 0] = soc_s
                        y_vec0[:, 1] = soc_ekf
                        y_vec0[:, 2] = soc
                        y_vec1 = np.zeros((len(t_v), 1))
                        y_vec1[:, 0] = ib
                        y_vec2 = np.zeros((len(t_v), 4))
                        y_vec2[:, 0] = vb
                        y_vec2[:, 1] = voc_stat
                        y_vec2[:, 2] = voc_ekf
                        y_vec2[:, 3] = vsat
                        print('Point#', i, 'at cTime=', cTime, 'T may be=', T_maybe, 'N=', n_v)
                        # print('t_v=', t_v)
                        # print('y_vec1=', y_vec1, 'y_vec2=', y_vec2)
                    # Ready for plots
                    # Shift the time axis by the jitter between the nominal
                    # and the actual sample period, then store newest values
                    # in the last slot of each rolling buffer.
                    T_actual = cTime - cTime_last
                    dt = T_actual_past - T_actual
                    t_v[:] = t_v[:] + dt
                    y_vec0[-1][0] = soc_s
                    y_vec0[-1][1] = soc_ekf
                    y_vec0[-1][2] = soc
                    y_vec1[-1][0] = ib
                    y_vec2[-1][0] = vb
                    y_vec2[-1][1] = voc_stat
                    y_vec2[-1][2] = voc_ekf
                    y_vec2[-1][3] = vsat
                    if linen_x1 is None:
                        fig = plt.figure(figsize=(12, 5))
                    # NOTE(review): subplot 311 shows SOC fractions but is
                    # labeled y_label='Amps' — looks like a copy-paste slip;
                    # confirm against pylive.liven_plotter usage.
                    linen_x0, axx0 = liven_plotter(t_v, y_vec0, linen_x0, fig, subplot=311, ax=axx0, y_label='Amps',
                                                   title='Title: {}'.format(identifier), pause_time=0.01,
                                                   labels=['soc_s', 'soc_ekf', 'soc'])
                    linen_x1, axx1 = liven_plotter(t_v, y_vec1, linen_x1, fig, subplot=312, ax=axx1, y_label='Amps',
                                                   pause_time=0.01,
                                                   labels='ib')
                    linen_x2, axx2 = liven_plotter(t_v, y_vec2, linen_x2, fig, subplot=313, ax=axx2, y_label='Volts',
                                                   pause_time=0.01, labels=['vb', 'voc_stat', 'voc_ekf', 'vsat'])
                    # Roll the buffers: drop the oldest row, append a zero row
                    # that will be overwritten on the next sample.
                    y_vec0 = np.append(y_vec0[1:][:], np.zeros((1, 3)), axis=0)
                    y_vec1 = np.append(y_vec1[1:][:], np.zeros((1, 1)), axis=0)
                    y_vec2 = np.append(y_vec2[1:][:], np.zeros((1, 4)), axis=0)
                # Past values
                cTime_last = cTime
                T_actual_past = T_actual
    except Exception as err:
        print("Something went wrong: ", err)
        r.close()
        exit(1)
| davegutz/myStateOfCharge | SOC_Particle/py/plot_SOC_Photon_data.py | plot_SOC_Photon_data.py | py | 4,871 | python | en | code | 1 | github-code | 13 |
73996338258 | import pandas as pd
from model import *
data=pd.read_csv('seed.txt',sep=' ')
#get X and y
dataset=data.iloc[:].values
print('dataset.....................')
# Train the Kmeans model (from model.py) with 10 clusters.
# NOTE(review): a previous comment said "AdaboostClassifier" — wrong model.
kmeans=Kmeans(dataset,10,1)
kmeans.clutter()
| hry8310/ai | ml/kmeans/train.py | train.py | py | 235 | python | en | code | 2 | github-code | 13 |
5255681872 | import pytest
import numpy as np
from sklearn import datasets
from sklearn.svm import SVC
from models.svm import SVM
def load_dataset():
    """Create a small two-class blob dataset with labels in {-1, +1}."""
    features, labels = datasets.make_blobs(
        n_samples=50, n_features=2, centers=2, cluster_std=1.05, random_state=40)
    # Map sklearn's {0, 1} labels onto the SVM convention {-1, +1}.
    labels = np.where(labels == 0, -1, 1)
    return features, labels
def compare_svm():
    """Fit the scratch SVM and sklearn's linear SVC on the same data and
    print their predictions on five fresh points side by side."""
    features, labels = load_dataset()
    sk_clf = SVC(kernel='linear')
    my_clf = SVM()
    sk_clf.fit(features, labels)
    my_clf.fit(features, labels)
    # Five fresh points drawn with the same generator settings.
    X_test, _ = datasets.make_blobs(n_samples=5, n_features=2, centers=2, cluster_std=1.05, random_state=40)
    print('X: ', X_test)
    print('sklearn: ', sk_clf.predict(X_test))
    print('my SVM: ', my_clf.predict(X_test))
def visualize_svm():
    """Fit the scratch SVM on the blob data and plot the decision
    boundary together with its -1/+1 margin lines."""
    import matplotlib.pyplot as plt
    def get_hyperplane_value(x, w, b, offset):
        # Solve for the second coordinate on the line w·x = b + offset
        # (sign convention follows models.svm — confirm there).
        return (-w[0] * x + b + offset) / w[1]
    X, y = load_dataset()
    clf = SVM()
    clf.fit(X, y)
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    plt.scatter(X[:,0], X[:,1], marker='o', c=y)
    # Evaluate the lines at the leftmost and rightmost x of the data.
    x0_1 = np.amin(X[:,0])
    x0_2 = np.amax(X[:,0])
    # Decision boundary (offset 0) and the two margins (offset -1 / +1).
    x1_1 = get_hyperplane_value(x0_1, clf.w, clf.b, 0)
    x1_2 = get_hyperplane_value(x0_2, clf.w, clf.b, 0)
    x1_1_m = get_hyperplane_value(x0_1, clf.w, clf.b, -1)
    x1_2_m = get_hyperplane_value(x0_2, clf.w, clf.b, -1)
    x1_1_p = get_hyperplane_value(x0_1, clf.w, clf.b, 1)
    x1_2_p = get_hyperplane_value(x0_2, clf.w, clf.b, 1)
    ax.plot([x0_1, x0_2], [x1_1, x1_2], 'y--')
    ax.plot([x0_1, x0_2], [x1_1_m, x1_2_m], 'k')
    ax.plot([x0_1, x0_2], [x1_1_p, x1_2_p], 'k')
    # Give the plot a little vertical headroom.
    x1_min = np.amin(X[:,1])
    x1_max = np.amax(X[:,1])
    ax.set_ylim([x1_min-3, x1_max+3])
    plt.show()
@pytest.fixture
def datasets_for_pytest():
    # Shared (X, y) blob dataset for the pytest test below.
    return load_dataset()
def test_naive_bayes(datasets_for_pytest):
    """Check the scratch SVM predicts the same labels as sklearn's linear SVC.

    NOTE: the name says "naive_bayes" but this tests the SVM; the name is
    kept unchanged so existing pytest selections (-k) keep working.
    """
    X, y = datasets_for_pytest
    my_clf = SVM()
    sk_clf = SVC(kernel='linear')
    sk_clf.fit(X, y)
    my_clf.fit(X, y)
    # Five fresh test points from the same distribution.
    X_test, _ = datasets.make_blobs(n_samples=5, n_features=2, centers=2, cluster_std=1.05, random_state=40)
    # Compare element-wise without materialising an intermediate list.
    assert all(a == b for a, b in zip(sk_clf.predict(X_test), my_clf.predict(X_test)))
if __name__ == '__main__':
    # Manual run: print the prediction comparison, then show the margin plot.
    compare_svm()
    visualize_svm()
71945806418 | '''
Counter is a subclass of a dictionary where each dictionary key is a hashable
object and the associated value in an integer count of that object.
There are 3 ways to initialize a counter.
'''
from collections import Counter
c1 = Counter('anysequence')
c2 = Counter({'a':1, 'c':1, 'e':3})
c3 = Counter(a=1, c=1, e=3)
print(c1)
'''
We can also create an empty counter object and populate it by passing
its update method on iterable or a dictionary
'''
from collections import Counter
ct = Counter() #creates an empty counter object
ct.update('abca') #populates the object
print(ct) #Output: Counter({'a': 2, 'b': 1, 'c' : 1})
ct.update({'a':3}) #updates the 'a' count
print(ct) #Output: Counter({'a': 5, 'b': 1, 'c': 1})
for item in ct:
print('%s : %d' % (item, ct[item]))
'''
Notable difference between counter objects and dictionaries is that counter
objects return a zero count for missing items rather than raising a key error.
e.g: ct['x'] will return 0
'''
ct.update({'a':-3, 'b':-2, 'd':3, 'e':2}) #perform an update
sorted(ct.elements()) #returns a sorted list from the iterator
'''
Two other Counter methods: most_common() and subtract()
most_common() takes a positive integer argument that determines the number of
most common elements to return. Elements are returned as a list of (key, value).
subtract() works exactly like update except instead of adding values, it subtracts them.
'''
ct.most_common()
ct.subtract({'a':2})
| AniketKul/learning-python3 | counter.py | counter.py | py | 1,450 | python | en | code | 0 | github-code | 13 |
15871535531 | import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import pathlib
class SupervisedClassifier:
    """Small Conv-Conv-Dense Keras CNN trained fully supervised.

    Artifacts (model + training history) live under
    <save_path>/supervised_classifier/.
    """
    def __init__(self, args, num_classes, save_path, input_shape):
        # args: namespace carrying optimizer/loss/epoch hyper-parameters
        # (optimizer_supervised_classifier, batch_size, ...).
        self.args = args
        self.num_classes = num_classes
        self.save_path = save_path.joinpath("supervised_classifier")
        self.save_path.mkdir(exist_ok=True)
        self.input_shape = input_shape
        self.history_dict = dict()
        self.build_models()
    def build_models(self):
        """Construct and compile the CNN from the configured hyper-parameters."""
        self.model = keras.Sequential(
            [
                layers.Conv2D(32, input_shape=self.input_shape, kernel_size=3,
                              padding="same", activation="relu",),
                layers.MaxPooling2D(pool_size=2),
                layers.Conv2D(64, kernel_size=3, padding="same",
                              activation="relu"),
                layers.MaxPooling2D(pool_size=2),
                layers.Flatten(),
                layers.Dropout(0.5),
                layers.Dense(10, activation="relu"),
                layers.Dense(self.num_classes, activation="softmax"),
            ],
            name="supervised_classifier"
        )
        self.model.summary()
        # Resolve the optimizer by name, then optionally override its
        # learning rate from the args.
        optimizer = keras.optimizers.get(
            self.args.optimizer_supervised_classifier)
        if self.args.learning_rate_supervised_classifier is not None:
            optimizer.learning_rate.assign(
                self.args.learning_rate_supervised_classifier)
        self.model.compile(
            optimizer=optimizer,
            loss=self.args.loss_function_supervised_classifier,
            metrics=["accuracy"])
    def train(self, x_train, y_train, x_val, y_val):
        """Fit on (x_train, y_train); returns the Keras history dict."""
        history = self.model.fit(x_train, y_train,
                                 batch_size=self.args.batch_size,
                                 epochs=self.args.epochs_supervised_classifier,
                                 validation_data=(x_val, y_val))
        self.history_dict = history.history
        return self.history_dict
    def save_models(self):
        """Persist the model and the training-history dict to disk."""
        self.model.save(self.save_path.joinpath("model"))
        np.save(self.save_path.joinpath(
            "supervised_classifier_history_dict.npy"), self.history_dict)
    def load_models(self):
        """Restore the model and training-history dict saved by save_models()."""
        self.model = keras.models.load_model(
            self.save_path.joinpath("model"))
        self.history_dict = np.load(self.save_path.joinpath(
            "supervised_classifier_history_dict.npy"), allow_pickle=True).item()
    def evaluate(self, x_test, y_test, verbose=1):
        """Return Keras evaluate() output (loss, accuracy) on the test set."""
        return self.model.evaluate(x_test, y_test, verbose=verbose)
| Magnuti/IT3030-Deep-Learning | project_3/supervised_classifier.py | supervised_classifier.py | py | 2,624 | python | en | code | 0 | github-code | 13 |
34384271364 | """
Host fwlite jobs in a toolchain. **EXPERIMENTAL!**
"""
import subprocess
import time
from os.path import exists, join
from varial import analysis
from varial import diskio
from varial import monitor
from varial import settings
from varial import toolinterface
from varial import wrappers
class Fwlite(toolinterface.Tool):
    """
    This class hosts a series of fwlite processes.

    A 'fwlite_proxy' wrapper on disk records, per sample, which input files
    have been processed and which result files exist; it is the contract
    between this tool and the external fwlite_exe script.
    NOTE: Python-2 era code (raw_input, dict.itervalues) — do not run on 3.x.
    """
    def __init__(self,
                 fwlite_exe,
                 name=None):
        super(Fwlite, self).__init__(name)
        self.fwlite_exe = fwlite_exe
        self._proxy = None
        self._not_ask_execute = False
    def wanna_reuse(self, all_reused_before_me):
        """Return True if all samples/files/results of a previous run are
        present, so the expensive subprocess can be skipped."""
        samples = analysis.samples()
        proxy = diskio.get('fwlite_proxy')
        if settings.fwlite_force_reuse or settings.only_reload_results:
            self._proxy = proxy
            return True
        # has been working at all?
        if not proxy:
            self._not_ask_execute = True
            return False
        # check if all previous results are available
        if not all(
            exists(join(self.cwd, '%s.info' % res))
            for res in proxy.results
        ):
            self.message('INFO Not all results are found, running again.')
            return False
        # check if all samples are available
        files_done = proxy.files_done
        if not all(name in files_done for name in samples):
            self.message('INFO Not all samples are done, running again.')
            return False
        # check if all files are done
        if not all(
            f in files_done[smp.name]
            for smp in samples.itervalues()
            for f in smp.input_files
        ):
            self.message('INFO Not all files are done, running again.')
            return False
        self._proxy = proxy
        return True
    def reuse(self):
        """Skip execution but still register the result-file aliases."""
        super(Fwlite, self).reuse()
        self._finalize()
    def run(self):
        """Prepare the proxy, optionally confirm interactively, then run
        fwlite_exe as a subprocess and block until it finishes."""
        if settings.suppress_eventloop_exec:
            self.message(
                self, "INFO settings.suppress_eventloop_exec == True, pass...")
            return
        # prepare proxy file / ask for execution
        self._make_proxy()
        if not (self._not_ask_execute or settings.not_ask_execute or raw_input(
                "Really run fwlite jobs on these samples:\n   "
                + ",\n   ".join(map(str, self._proxy.due_samples))
                + ('\nusing %i cores' % settings.max_num_processes)
                + "\n?? (type 'yes') "
        ) == "yes"):
            return
        diskio.write(self._proxy)
        # start subprocess
        self.message("INFO Starting script: '%s'" % self.fwlite_exe)
        proc = subprocess.Popen(
            ['python', self.fwlite_exe],
            stdout=monitor.MonitorInfo.outstream,
            stderr=subprocess.STDOUT,
            cwd=self.cwd,
        )
        # block while finished
        # Poll loop: forward a SIGINT to the child exactly once.
        sig_kill_sent = False
        while None == proc.returncode:
            if settings.recieved_sigint and not sig_kill_sent:
                proc.kill()
                sig_kill_sent = True
            time.sleep(0.2)
            proc.poll()
        # final
        if proc.returncode:
            self.message('FATAL subprocess has non zero returncode')
            raise RuntimeError(
                'FwLite subprocess returned %d' % proc.returncode)
        self._proxy = diskio.read('fwlite_proxy')
        self.result = self._proxy
        self._finalize()
    def _make_proxy(self):
        """Build/refresh the on-disk proxy: event files per sample, stale
        results pruned, and the list of samples still due for processing."""
        samples = analysis.samples()
        for smpl in samples.itervalues():
            if not smpl.input_files:
                self.message(
                    self.name,
                    "WARNING input_files seems to be undefined for sample %s."
                    % smpl.name
                )
        self._proxy = diskio.get(
            'fwlite_proxy',
            wrappers.Wrapper(name='fwlite_proxy', files_done={}, results={})
        )
        self._proxy.max_num_processes = settings.max_num_processes
        self._proxy.do_profiling = settings.fwlite_profiling
        self._proxy.event_files = dict(
            (s.name, s.input_files)
            for s in samples.itervalues()
        )
        # if a result was deleted, remove all associated files from files_done
        files_done = self._proxy.files_done
        results = self._proxy.results
        for res in results.keys():
            if not exists(join(self.cwd, '%s.info' % res)):
                del results[res]
                # Result names are '<sample>!<...>'; the sample prefix keys
                # into files_done.
                smpl = res.split('!')[0]
                if smpl in files_done:
                    del files_done[smpl]
        due_samples = samples.keys()
        self._proxy.due_samples = due_samples
        for res in results.keys():
            smpl = res.split('!')[0]
            if smpl in due_samples:
                # A sample is no longer due when every input file appears in
                # its files_done record.
                if all(
                    f in files_done[smpl]
                    for f in samples[smpl].input_files
                ):
                    due_samples.remove(smpl)
    def _finalize(self):
        """Register filesystem aliases for every produced .root result."""
        if settings.recieved_sigint:
            return
        for res in self._proxy.results:
            samplename = res.split('!')[0]
            if samplename not in analysis.all_samples:
                continue
            analysis.fs_aliases += list(
                alias for alias in diskio.generate_fs_aliases(
                    join(self.cwd, '%s.root' % res),
                    analysis.all_samples[samplename]
                )
            )
# n = 1: 1 way, n = 2: 2 ways, n = 3: 4 ways, n = 4: 7 ways, n = 5: 13 ways
# For n > 3 the recurrence is f(n-1) + f(n-2) + f(n-3) = f(n)
t = int(input())  # number of test cases
def sol(n):
    """Count the ordered ways to write n as a sum of 1, 2 and 3.

    Iterative O(n) tribonacci-style recurrence replacing the original
    exponential triple recursion (which also recursed forever for n < 1).
    Raises ValueError for n < 1.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    # Base cases f(1), f(2), f(3).
    a, b, c = 1, 2, 4
    if n == 1:
        return a
    if n == 2:
        return b
    # Roll the window up to f(n); for n == 3 the loop body never runs.
    for _ in range(n - 3):
        a, b, c = b, c, a + b + c
    return c
# Answer each test case independently.
for i in range(t):
    n = int(input())
    print(sol(n))
| Mins00oo/PythonStudy_CT | BACKJOON/Python/S3/S3_9095_1,2,3 더하기.py | S3_9095_1,2,3 더하기.py | py | 481 | python | ko | code | 0 | github-code | 13 |
37183872910 | import os
import os.path
# Recognised image extensions (both lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
    """Return True if *filename* ends with a recognised image extension."""
    # str.endswith accepts a tuple of suffixes, replacing the Python-level
    # any() loop with a single C-level call.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(path_files):
    """Dispatch: a path containing ".txt" is read as a list file,
    anything else is walked as a directory of images."""
    if '.txt' in path_files:
        paths, size = make_dataset_txt(path_files)
    else:
        paths, size = make_dataset_dir(path_files)
    return paths, size
def make_dataset_txt(path_files):
    """Read one image path per line from a text file.

    Returns (list of stripped lines, number of lines).
    """
    with open(path_files) as handle:
        image_paths = [line.strip() for line in handle]
    return image_paths, len(image_paths)
def make_dataset_dir(dir):
    """Recursively collect every image file found under *dir*.

    Returns (list of file paths, number of files); filenames are visited
    in sorted order within each directory.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    image_paths = []
    for root, _, fnames in os.walk(dir):
        image_paths.extend(
            os.path.join(root, fname)
            for fname in sorted(fnames)
            if is_image_file(fname))
    return image_paths, len(image_paths)
26620318039 |
n = int(input())
x_list = []
y_list = []
z_list = []
# Read the x, y, z coordinate of every planet.
for i in range(n):
    x, y, z = map(int, input().split())
    x_list.append(x)
    y_list.append(y)
    z_list.append(z)
# Sort each axis independently: only coordinate-adjacent planets can be
# connected by a minimum per-axis edge, so these are the only MST candidates.
x_list.sort()
y_list.sort()
z_list.sort()
total_list = []
for i in range(n-1):
    # Candidate edge per axis: (distance, node i, node i+1).
    # NOTE(review): after sorting, index i refers to rank order on each
    # axis rather than the original planet — the total MST cost is still
    # correct for this problem, but the indices are not planet ids.
    total_list.append((x_list[i+1] - x_list[i], i, i+1))
    total_list.append((y_list[i + 1] - y_list[i], i, i + 1))
    total_list.append((z_list[i + 1] - z_list[i], i, i + 1))
# Sort candidate edges by cost for Kruskal's algorithm.
total_list.sort()
parent = [0]*(n)
for i in range(n):
    parent[i] = i
def find_p(p, x):
    """Return the root representative of x in the union-find array p,
    compressing the path along the way.

    Iterative rewrite of the original recursive version, which could hit
    Python's recursion limit on long parent chains.
    """
    root = x
    while p[root] != root:
        root = p[root]
    # Path compression: point every node on the walk directly at the root.
    while p[x] != root:
        p[x], x = root, p[x]
    return root
def union(p, a, b):
    """Merge the sets containing a and b; the smaller root index wins
    and becomes the parent of the larger one."""
    root_a = find_p(p, a)
    root_b = find_p(p, b)
    if root_a > root_b:
        p[root_a] = root_b
    else:
        p[root_b] = root_a
result = 0
for edge in total_list:
    # (cost, node a, node b)
    cost, a, b = edge
    # Union the endpoints only if they are in different components,
    # accumulating the Kruskal MST cost.
    if find_p(parent,a) != find_p(parent,b):
        union(parent, a, b)
        result += cost
print(result)
31939408373 | from django.shortcuts import render, redirect
from .models import *
import bcrypt, time
def index(request):
    """Render the landing page with the registration/login forms."""
    return render(request, "belt/index.html")
def reg(request):
    """Handle the registration POST: validate, create the user, log in.

    On validation failure the submitted values are stashed in the session
    so the form can be re-filled, and the user is redirected home.
    NOTE(review): `messages` is not imported here — presumably it reaches
    this module via `from .models import *`; verify in models.py.
    """
    errors = User.objects.validate_reg(request.POST)
    if errors:
        for key, val in errors.items():
            messages.info(request, val, extra_tags=key)
        request.session["first_name"] = request.POST["first_name"]
        request.session["last_name"] = request.POST["last_name"]
        request.session["email"] = request.POST["email"]
        return redirect("/") # failure
    else:
        request.session.clear()
        # Store the bcrypt hash (bytes) as the password.
        hash_pw = bcrypt.hashpw(request.POST["password"].encode(), bcrypt.gensalt())
        new_user = User.objects.create(first_name=request.POST["first_name"], last_name=request.POST["last_name"], email=request.POST["email"], password=hash_pw)
        request.session["user_id"] = new_user.id
        request.session["user_name"] = new_user.first_name + " " + new_user.last_name
        return redirect("/dashboard") # success
def login(request):
    """Handle the login POST.

    User.objects.validate_login returns (errors, user_data); on success the
    session is populated with the user id, display name and a per-login
    secret derived from the account's creation timestamp.
    """
    result = User.objects.validate_login(request.POST)
    errors = result[0]
    if len(errors):
        for val in errors:
            messages.warning(request, val)
        # Stash the email so the login form can be re-filled.
        request.session["email2"] = request.POST["email"]
        return redirect("/") # failure
    else:
        request.session["user_id"] = result[1]["id"]
        request.session["user_name"] = result[1]["first_name"] + " " + result[1]["last_name"]
        request.session["secret_key"] = bcrypt.hashpw(str(result[1]["created_at"]).encode(), bcrypt.gensalt()).decode('utf8')
        return redirect("/dashboard") # success
def dashboard(request):
    """Render the dashboard: all granted wishes (with like counts) plus the
    current user's own un-granted wishes. Anonymous users go home."""
    if "user_id" in request.session.keys():
        all_granted = Wish.objects.exclude(granted=False).order_by("-updated_at")
        wishes = Wish.objects.filter(user_id=request.session["user_id"]).exclude(granted=True)
        # Attach a like count to each granted wish for the template.
        for wish in all_granted:
            wish.count_likes = len(Like.objects.filter(wish_id=wish.id))
        context = {
            "all_granted" : all_granted,
            "wishes" : wishes
        }
        return render(request, "belt/dashboard.html", context)
    return redirect("/")
def stats(request):
    """Render aggregate wish statistics for the logged-in user.

    Counts all granted wishes site-wide, plus the current user's pending
    and granted wishes.
    """
    if "user_id" not in request.session.keys():
        # BUG FIX: consistent with every other view, anonymous users are
        # redirected home. Previously a logged-out request fell through
        # and returned None, which makes Django raise
        # "The view didn't return an HttpResponse object".
        return redirect("/")
    wishes = Wish.objects.filter(granted=True)
    current_wishes = Wish.objects.filter(user_id=request.session["user_id"]).exclude(granted=True)
    granted_wishes = Wish.objects.filter(user_id=request.session["user_id"], granted=True)
    context = {
        "wishes" : len(wishes),
        "current_wishes" : len(current_wishes),
        "granted_wishes" : len(granted_wishes)
    }
    return render(request, "belt/stats.html", context)
def wish(request, id=None):
    """Render the wish create/edit form; with an id, pre-load that wish."""
    if not "user_id" in request.session.keys():
        return redirect ("/")
    context = {}
    if id:
        wish = Wish.objects.get(id=id)
        context["wish"] = wish
    return render(request, "belt/wish.html", context)
def granted(request, id):
    """Mark a wish as granted and return to the dashboard."""
    if "user_id" not in request.session.keys():
        return redirect("/")
    target = Wish.objects.get(id=id)
    target.granted = True
    target.save()
    return redirect("/dashboard")
def like(request, id):
    """Record a like on a wish; a user may like a given wish at most once."""
    if "user_id" not in request.session.keys():
        return redirect("/")
    already_liked = Like.objects.filter(wish_id=id, user_id=request.session["user_id"])
    if not already_liked:
        Like.objects.create(wish_id=id, user_id=request.session["user_id"])
    return redirect("/dashboard")
def form(request, id=None):
    """Handle the wish form POST: validate, then update (id given) or
    create (no id) a wish.

    On validation failure, flashes the errors; for a create the submitted
    values are stashed in the session so the form can be re-filled.
    """
    if not "user_id" in request.session.keys():
        return redirect ("/")
    errors = {}
    if len(request.POST["wish"]) < 3:
        errors["wish"] = "Wish must be at least 3 characters"
    if len(request.POST["description"]) < 3:
        errors["description"] = "Description must be at least 3 characters"
    # Edit failure: back to the edit page for this wish.
    if errors and id:
        for key, val in errors.items():
            messages.info(request, val, extra_tags=key)
        return redirect("/wish/%s" % id)
    # Create failure: stash values and return to the blank form.
    elif errors:
        for key, val in errors.items():
            messages.info(request, val, extra_tags=key)
        request.session["wish"] = request.POST["wish"]
        request.session["description"] = request.POST["description"]
        return redirect("/wish")
    if id:
        wish = Wish.objects.get(id=id)
        wish.wish = request.POST["wish"]
        wish.description = request.POST["description"]
        wish.save()
        return redirect("/dashboard")
    else:
        # Drop any stashed form values from a previous failed attempt.
        request.session.pop("wish", None)
        request.session.pop("description", None)
        Wish.objects.create(user=User.objects.get(id=request.session["user_id"]), wish=request.POST["wish"], description=request.POST["description"], granted=False)
        return redirect("/dashboard")
def logout(request):
    """Clear the whole session and return to the landing page."""
    request.session.clear()
    return redirect("/")
def remove(request, id):
    """Delete a wish and return to the dashboard."""
    if "user_id" not in request.session.keys():
        return redirect("/")
    Wish.objects.get(id=id).delete()
    return redirect("/dashboard")
| jimisjames/wish_list | apps/belt/views.py | views.py | py | 5,142 | python | en | code | 0 | github-code | 13 |
32276578353 | # exercise 15: Display the Tail of a File
import os.path
import sys
NUM_LINES = 10  # how many trailing lines to display

# exactly 2 arguments will have to be passed from CLI: file.py file.txt
if len(sys.argv) != 2:
    print("you must provide the file name as a command line parameter")
    quit()

# showing the two arguments passed from CLI
print(sys.argv)

# the last element of the path to join is the second argument passed from CLI, namely the file name
file_to_open = os.path.join("..", "files", sys.argv[1])

try:
    # BUG FIX: the handle was never closed; `with` guarantees it is,
    # even if printing fails part-way through.
    with open(file_to_open, 'r') as inf:
        count = 0
        # NOTE: lines are printed newest-first (reverse file order),
        # preserving the original behaviour of this exercise — a true
        # `tail` would print them in file order.
        for line in reversed(inf.readlines()):
            print(line.rstrip())
            count += 1
            if count == NUM_LINES:
                break
except IOError:
    print('file not found')
41326774002 | from fastapi import APIRouter, UploadFile,Form, File
from src.core.models import VideoUpload
router = APIRouter(
prefix="/videos", tags=["Videos"]
)
import datetime
import shutil
#!/usr/bin/python
import httplib2
import os
import random
import sys
import time
from apiclient import discovery, http, errors
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
import google.oauth2.credentials
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google API Console at
# https://console.cloud.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the API Console
https://console.cloud.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(accessToken):
    """Build a YouTube Data API v3 client from an OAuth2 access token.

    accessToken: a bearer token already holding the youtube.upload scope.
    """
    credentials = google.oauth2.credentials.Credentials(accessToken)
    return discovery.build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                           credentials=credentials)
def resumable_upload(insert_request):
    """Drive a resumable YouTube upload with exponential-backoff retries.

    insert_request: a googleapiclient HttpRequest exposing next_chunk().
    Returns "Video enviado" on success, raises on a non-retriable error,
    and terminates the process after MAX_RETRIES retriable failures.
    """
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print("Uploading file...")
            status, response = insert_request.next_chunk()
            if response is not None:
                if 'id' in response:
                    return "Video enviado"
                else:
                    raise Exception("Não foi possível enviar o vídeo")
        except errors.HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
                                                                     e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = "A retriable error occurred: %s" % e
        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit("No longer attempting to retry.")
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            # BUG FIX: the original wrote `print("...") % sleep_seconds`,
            # which raises TypeError (None % float) on the retry path.
            print("Sleeping %f seconds and then retrying..." % sleep_seconds)
            time.sleep(sleep_seconds)
            # Reset so a later successful chunk does not keep printing,
            # sleeping, and burning the retry budget.
            error = None
@router.post("/", status_code=201, response_model=VideoUpload)
async def upload_video(
    access_token: str,
    media: UploadFile,
    title: str = Form(),
    description: str = Form(),
):
    """Receive an uploaded video and forward it to YouTube.

    The file is spooled to ./src/tmp-files/<timestamp> and handed to the
    YouTube videos.insert endpoint as a resumable upload.
    NOTE(review): the temp file is removed *before* resumable_upload runs,
    and resumable_upload's return value is discarded (the endpoint returns
    None despite response_model=VideoUpload) — verify both against the
    MediaFileUpload lifecycle and the FastAPI response validation.
    """
    youtube = get_authenticated_service(access_token)
    # Timestamped temp path avoids collisions between concurrent uploads.
    file_location = f"./src/tmp-files/{datetime.datetime.now().isoformat()}"
    with open(file_location, "wb+") as file_object:
        shutil.copyfileobj(media.file, file_object)
    body=dict(
        snippet=dict(
            title=title,
            description=description,
        ),
    )
    insert_request = youtube.videos().insert(
        part=",".join(body.keys()),
        body=body,
        media_body=http.MediaFileUpload(file_location, chunksize=-1, resumable=True)
    )
    os.remove(file_location)
    resumable_upload(insert_request)
| ttiagoestevaoo/youtube-video-upload | src/routes.py | routes.py | py | 4,656 | python | en | code | 0 | github-code | 13 |
14230492777 | from django.urls import path
from . import views
urlpatterns = [
    # Grade managers (rating administrators)
    path(
        "grade_managers/",
        views.ManagementGradeManagerViewSet.as_view({"get": "list", "post": "create"}),
        name="open.management.v1.grade_manager",
    ),
    path(
        "grade_managers/<int:id>/",
        views.ManagementGradeManagerViewSet.as_view({"put": "update"}),
        name="open.management.v1.grade_manager",
    ),
    # Grade manager members
    path(
        "grade_managers/<int:id>/members/",
        views.ManagementGradeManagerMemberViewSet.as_view({"get": "list", "post": "create", "delete": "destroy"}),
        name="open.management.v1.grade_manager_member",
    ),
    # User groups
    path(
        "grade_managers/<int:id>/groups/",
        views.ManagementGradeManagerGroupViewSet.as_view({"get": "list", "post": "create"}),
        name="open.management.v1.grade_manager_group",
    ),
    path(
        "groups/<int:id>/",
        views.ManagementGroupViewSet.as_view({"put": "update", "delete": "destroy"}),
        name="open.management.v1.group",
    ),
    # User group members
    path(
        "groups/<int:id>/members/",
        views.ManagementGroupMemberViewSet.as_view({"get": "list", "post": "create", "delete": "destroy"}),
        name="open.management.v1.group_member",
    ),
    # User group custom policies
    path(
        "groups/<int:id>/policies/",
        views.ManagementGroupPolicyViewSet.as_view({"post": "create", "delete": "destroy"}),
        name="open.management.v1.group_policy",
    ),
    # User group custom policies - action-level changes, resources not involved
    path(
        "groups/<int:id>/actions/policies/",
        views.ManagementGroupActionPolicyViewSet.as_view({"delete": "destroy"}),
        name="open.management.v1.group_action",
    ),
    # Users
    path(
        "users/grade_managers/",
        views.ManagementUserGradeManagerViewSet.as_view({"get": "list"}),
        name="open.management.v1.user_grade_manager",
    ),
    path(
        "users/grade_managers/<int:id>/groups/",
        views.ManagementUserGradeManagerGroupViewSet.as_view({"get": "list"}),
        name="open.management.v1.user_grade_manager_group",
    ),
    # User group applications
    path(
        "groups/applications/",
        views.ManagementGroupApplicationViewSet.as_view({"post": "create"}),
        name="open.management.v1.group_application",
    ),
]
| TencentBlueKing/bk-iam-saas | saas/backend/api/management/v1/urls.py | urls.py | py | 2,426 | python | en | code | 24 | github-code | 13 |
72298587859 | #Belle Pan
#260839939
import skimage.io as io
import numpy as np
from skimage.color import rgb2gray
from skimage import filters
# This function is provided to you. You will need to call it.
# You should not need to modify it.
def seedfill(im, seed_row, seed_col, fill_color, bckg):
    """
    im: The image on which to perform the seedfill algorithm
    seed_row and seed_col: position of the seed pixel
    fill_color: Color for the fill
    bckg: Color of the background, to be filled
    Returns: Number of pixels filled
    Behavior: Modifies image by performing seedfill

    Flood fill using a set-based front; neighbors are taken over the full
    3x3 window, i.e. 8-connectivity.
    """
    size=0 # keep track of patch size
    n_row, n_col = im.shape
    front={(seed_row,seed_col)} # initial front
    while len(front)>0:
        r, c = front.pop() # remove an element from front
        if im[r, c]==bckg:
            im[r, c]=fill_color # color the pixel
            size+=1
            # look at all neighbors
            # range bounds clamp the 3x3 window at the image border
            for i in range(max(0,r-1), min(n_row,r+2)):
                for j in range(max(0,c-1),min(n_col,c+2)):
                    # if background, add to front
                    if im[i,j]==bckg and\
                       (i,j) not in front:
                        front.add((i,j))
    return size
# QUESTION 4
def fill_cells(edge_image):
    """
    Args:
        edge_image: A black-and-white image, with black background and
                    white edges
    Returns: A new image where each closed region is filled with a different
             grayscale value
    """
    result = edge_image.copy()
    n_row, n_col = result.shape
    # The outside background (connected to the top-left corner) gets 0.1.
    seedfill(result, 0, 0, 0.1, 0.0)
    # Remaining 0.0 pixels are enclosed regions; give each its own shade,
    # starting at 0.5 and stepping by 0.001 per region.
    next_gray = 0.5
    for r in range(n_row):
        for c in range(n_col):
            if result[r, c] == 0.0:
                seedfill(result, r, c, next_gray, 0.0)
                next_gray += 0.001
    return result
# return None # REMOVE THIS WHEN YOU'RE DONE
# QUESTION 5
def classify_cells(original_image, labeled_image,
                   min_size=1000, max_size=5000,
                   infected_grayscale=0.5, min_infected_percentage=0.02):
    """
    Args:
        original_image: A graytone image
        labeled_image: A graytone image, with each closed region colored
                       with a different grayscale value
        min_size, max_size: The min and max size of a region to be called a cell
        infected_grayscale: Maximum grayscale value for a pixel to be called infected
        min_infected_percentage: Smallest fraction of dark pixels needed to call a cell infected
    Returns: A tuple of two sets, containing the grayscale values of cells
             that are infected and not infected
    """
    # BUG FIX: the original read module-level nrow/ncol that are never
    # defined anywhere (NameError at runtime); derive the dimensions
    # from the labeled image itself.
    nrow, ncol = labeled_image.shape
    all_grayscale_values = set()
    for row in range(nrow):
        for col in range(ncol):
            all_grayscale_values.add(labeled_image[row, col])
    infected = set()
    not_infected = set()
    for value in all_grayscale_values:
        count = 0
        infected_count = 0
        for row in range(nrow):
            for col in range(ncol):
                if labeled_image[row, col] == value:
                    count += 1
                    # A dark pixel in the original image counts as infected.
                    if original_image[row, col] <= infected_grayscale:
                        infected_count += 1
        # Only regions within the plausible cell-size range are classified.
        if min_size <= count <= max_size:
            if infected_count >= count * min_infected_percentage:
                infected.add(value)
            else:
                not_infected.add(value)
    return (infected, not_infected)
# QUESTION 6
def _touches_region_edge(labeled_image, row, col, nrow, ncol):
    """Return True when any valid 8-connected neighbour of (row, col) in
    labeled_image carries the edge value 1.0 (the white outline pixels
    produced by the thresholding step)."""
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            if dr == 0 and dc == 0:
                continue
            r, c = row + dr, col + dc
            if 0 <= r < nrow and 0 <= c < ncol and labeled_image[r, c] == 1.0:
                return True
    return False


def annotate_image(color_image, labeled_image, infected, not_infected):
    """Highlight cell outlines in a colour image.

    Args:
        color_image: A color image
        labeled_image: A graytone image, with each closed region colored
                       with a different grayscale value
        infected: A set of graytone values of infected cells
        not_infected: A set of graytone values of non-infected cells
    Returns: A color image, with infected cells highlighted in red
             and non-infected cells highlighted in green
    """
    # BUG FIXES vs. the original:
    #  * nrow/ncol were read from module globals; derive them locally.
    #  * the column bound checks in the row-1 branch used range(nrow)
    #    instead of range(ncol);
    #  * two diagonal checks compared "< 0.5" instead of "== 1.0"
    #    (copy-paste from the Q3 cleanup loop).
    # The original's nested if/elif ladder reduces to: colour a pixel
    # when it belongs to a classified cell AND any 8-neighbour is an
    # edge pixel — i.e. the pixel sits on the cell's outline.
    new_colour_image = color_image.copy()
    nrow, ncol = labeled_image.shape
    for row in range(nrow):
        for col in range(ncol):
            value = labeled_image[row, col]
            if value in infected:
                colour = [255, 0, 0]      # red outline for infected cells
            elif value in not_infected:
                colour = [0, 255, 0]      # green outline for healthy cells
            else:
                continue                  # background / unclassified region
            if _touches_region_edge(labeled_image, row, col, nrow, ncol):
                new_colour_image[row, col] = colour
    return new_colour_image
if __name__ == "__main__": # do not remove this line
    # QUESTION 1: WRITE YOUR CODE HERE
    # Load the photomicrograph, convert to graytone, and run a Sobel edge
    # detector.  (io / rgb2gray / filters / np are presumably the usual
    # skimage / numpy imports made earlier in the file — confirm.)
    image = io.imread ("malaria-1.jpg")
    grey_image = rgb2gray(image)
    grey_sobel= filters.sobel(grey_image)
    io.imsave ("Q1_Sobel.jpg", grey_sobel)
    # QUESTION 2: WRITE YOUR CODE HERE
    # Binarise the edge image: Sobel responses above 0.05 become 1.0.
    black_and_white = np.where(grey_sobel>0.05, 1.0, 0)
    io.imsave("Q2_Sobel_T_0.05.jpg", black_and_white)
    # QUESTION 3: WRITE YOUR CODE HERE
    # nrow/ncol become module-level globals here; the functions defined
    # above read them, so they only work after this section has run.
    nrow, ncol, colour = image.shape
    # Cleanup: blank any edge pixel that is dark (< 0.5) in the graytone
    # image, or that has an 8-connected neighbour that is dark.
    # NOTE(review): the three "in range(nrow)" guards on col-1/col+1 in the
    # row-1 branch look like typos for range(ncol) — confirm.
    for row in range(nrow):
        for col in range(ncol):
            if (grey_image[row, col])<0.5:
                black_and_white[row, col] = 0.0
            else:
                if row-1 in range(nrow):
                    if grey_image[row-1, col]<0.5:
                        black_and_white[row, col] = 0.0
                    if col-1 in range(nrow):
                        if grey_image[row, col-1]<0.5:
                            black_and_white[row, col] = 0.0
                        elif grey_image[row-1, col-1]<0.5:
                            black_and_white[row, col] = 0.0
                    if col+1 in range(nrow):
                        if grey_image[row, col+1]<0.5:
                            black_and_white[row, col] = 0.0
                        elif grey_image[row-1, col+1]<0.5:
                            black_and_white[row, col] = 0.0
                if row+1 in range(nrow):
                    if grey_image[row+1, col]<0.5:
                        black_and_white[row, col] = 0.0
                    if col+1 in range(ncol):
                        if grey_image[row, col+1]<0.5:
                            black_and_white[row, col] = 0.0
                        elif grey_image[row+1, col+1]<0.5:
                            black_and_white[row, col] = 0.0
                    if col-1 in range(ncol):
                        if grey_image[row, col-1]<0.5:
                            black_and_white[row, col] = 0.0
                        elif grey_image[row+1, col-1]<0.5:
                            black_and_white[row, col] = 0.0
    io.imsave("Q3_Sobel_T_0.05_clean.jpg", black_and_white)
    # QUESTION 4: WRITE YOUR CODE CALLING THE FILL_CELLS FUNCTION HERE
    # Flood-fill each closed outline with its own grayscale label.
    fill_cell_image = fill_cells(black_and_white)
    io.imsave("Q4_Sobel_T_0.05_clean_filled.jpg", fill_cell_image)
    # QUESTION 5: WRITE YOUR CODE CALLING THE CLASSIFY_CELLS FUNCTION HERE
    infected, not_infected = classify_cells(grey_image, fill_cell_image)
    # QUESTION 6: WRITE YOUR CODE CALLING THE ANNOTATE_IMAGE FUNCTION HERE
    annotated_image = annotate_image(image, fill_cell_image, infected, not_infected)
    io.imsave("Q6_annotated.jpg", annotated_image)
| bpan4/COMP204_Fall2019_Computer-Programming-for-Life-Sciences | Assignment 5/cell_counting.py | cell_counting.py | py | 10,730 | python | en | code | 0 | github-code | 13 |
73719195856 | ### Imports
from bs4 import BeautifulSoup
import random as random
import re
import requests
import sys
# Notes:
# base url: https://transcripts.foreverdreaming.org/viewtopic.php?f=292&t={url substrings}
# season 6 url substrings (must include &sid=f24ccd5eea5bfc2086ee09ad73943b29 after "t={}")
## 18278 - 18289
# season 7 url substrings
## 18290 - 18301
# season 8 url substrings
## 18302 - 18311
# Setting up variables for URL formatting
base_url, start, end, tag = "https://transcripts.foreverdreaming.org/viewtopic.php?f=292&t={}{}", 18278, 18311, "&sid=f24ccd5eea5bfc2086ee09ad73943b29"
# redirecting output to file (so we can have "data points")
# NOTE: every print() below therefore writes into transcripts.txt until
# stdout is restored at the bottom of the script.
orig_stdout = sys.stdout
f = open('transcripts.txt', 'w')
sys.stdout = f
# Collect all those urls
# Seasons 6 pages (<= 18289) require the &sid=... suffix; later ones do not.
urls_to_scrape = []
for i in range(start, end+1):
    urls_to_scrape.append(base_url.format(i, tag if i <= 18289 else ""))
# just doin a lil scrapin, u kno wut im sayin?
for URL in urls_to_scrape:
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, "html.parser")
    # Transcript lines live in <p> tags inside the "postbody" div.
    results = soup.find("div", class_="postbody").find_all("p")
    for element in results:
        # want to clean this up a bit.
        # one thing we can do is remove of the instances of "[name]:" in strings.
        # # i don't think names are that useful to have in language models.
        # NOTE(review): r'\w*:' also matches a bare ":" and things like
        # "10:30", so more than speaker names gets stripped — confirm.
        current_line = element.text
        current_line = re.sub(r'\w*:', '', current_line)
        # # When we do the above ^ there's an empty white space at the front the line...
        # # we can get rid of it (also let's get of punctuation too)
        # NOTE(review): inside [...] the parentheses are literal characters,
        # so '(' and ')' survive this punctuation strip — confirm intent.
        current_line = current_line.strip()
        current_line = re.sub(r'[^(\w\s)]', '', current_line)
        print(current_line)
# redirecting output to o.g.output
sys.stdout = orig_stdout
f.close()
| bjmedina/bachelorette | preproc.py | preproc.py | py | 1,798 | python | en | code | 0 | github-code | 13 |
15123040154 | import random
import copy
is_first=True
def setup(is_first):
    """(Re)load all word lists and ending tables used by the generator.

    Randomly picks a tense for this sentence, loads the matching verb and
    adjective files plus the shared word lists, and — when *is_first* is
    truthy — publishes every local name into the module globals via
    ``globals().update(locals())`` so nounphrase()/sentance() can see them.
    NOTE(review): that update also flips the module-level ``is_first`` to
    False, so the globals are only published on the first call — confirm.
    """
    # 1 is present, 2 is future, 3 is past imperfect
    lmnop=random.randint(1,3)
    ### VERBS INIT ###
    # hic/haec/hoc demonstrative endings (f/m/n), indexed like the noun
    # ending tables below.
    hicend1=['aec','uius','uic','anc','āc','ae','ārum','īs','ās','īs']
    hicend2=['ic','uius','uic','onc','ōc','ī','ōrum','īs','ōs','īs']
    hicend3=['oc','uius','uic','oc','ōc','aec','ōrum','īs','aec','īs']
    if lmnop==1:
        # Present tense: personal endings per conjugation, plus esse.
        verbendings1 = ["ō","as","at","amus","atis","ant","a","āte","āre"]
        verbendings2 = ["eō","es","et","emus","etis","ent","e","ēte","ēre"]
        verbendings3 = ["ō","is","it","imus","itis","unt","e","ite","ēre"]
        verbendingsspecial=["sum","es","est","sumus","estis","sunt","es","este"]
        verbs = []
        # Word files: one entry per line, '#' lines are comments.
        f=open("verbs.txt","rt", encoding="utf-8")
        for line in f:
            if line[0]!="#":
                verbs.append(line[:-1].split())
        f.close()
        ### ADJECTIVES INIT ###
        adjectives = []
        f=open("adjectives.txt","rt", encoding="utf-8")
        for line in f:
            if line[0]!="#":
                adjectives.append(line.strip().split(" "))
        f.close()
    elif lmnop==2:
        # Future tense endings.
        verbendings1 = ["bō","bis","bit","bimus","bitis","bunt",'te','re']
        verbendings2 = ["bō","bis","bit","bimus","bitis","bunt",'te','re']
        verbendings3 = ["am","ēs","et","ēmus","ētis","ent","ite","ēre"]
        verbendingsspecial=["ō","is","it","imus","itis","unt"]
        verbs = []
        f=open("verbs2.txt","rt", encoding="utf-8")
        for line in f:
            if line[0]!="#":
                verbs.append(line[:-1].split())
        f.close()
        adjectives = []
        f=open("adjectives2.txt","rt", encoding="utf-8")
        for line in f:
            if line[0]!="#":
                adjectives.append(line.strip().split(" "))
        f.close()
    else:
        # Past imperfect endings.
        verbendings1 = ["bām","bās","bāt","bāmus","bātis","bānt",'te','re']
        verbendings2 = ["bām","bās","bāt","bāmus","bātis","bānt",'te','re']
        verbendings3 = ["ēbām","ēbās","ēbāt","ēbāmus","ēbātis","ēbānt",'te','re']
        verbendingsspecial=['am','ās','at','āmus','ātis','ant']
        verbs = []
        f=open("verbs2.txt","rt", encoding="utf-8")
        for line in f:
            if line[0]!="#":
                verbs.append(line[:-1].split())
        f.close()
        adjectives = []
        f=open("adjectives2.txt","rt", encoding="utf-8")
        for line in f:
            if line[0]!="#":
                adjectives.append(line.strip().split(" "))
        f.close()
    ### ADVERBS INIT ###
    adverbs = []
    f=open("adverbs.txt","rt", encoding="utf-8")
    for line in f:
        if line[0]!="#":
            adverbs.append(line[:-1].split())
    f.close()
    ### CONJUNCTIONS INIT ###
    # NOTE(review): this reads adverbs.txt again, not a conjunctions file.
    conjunctions = []
    f=open("adverbs.txt","rt", encoding="utf-8")
    for line in f:
        if line[0]!="#":
            conjunctions.append(line[:-1].split())
    f.close()
    ### PREPOSITIONS INIT ###
    prepositions = []
    f=open("prepositions.txt","rt", encoding="utf-8")
    for line in f:
        if line[0]!="#":
            prepositions.append(line[:-1].split())
    f.close()
    ### PRONOUNS INIT ###
    pronouns = []
    f=open("pronouns.txt","rt", encoding="utf-8")
    for line in f:
        if line[0]!="#":
            pronouns.append(line[:-1].split())
    f.close()
    ### NOUNS INIT ###
    # Declension ending tables: 12 entries each, ordered
    # sg nom/voc/gen/dat/acc/abl then pl nom/voc/gen/dat/acc/abl.
    nounendings1 = ["a","a","ae","ae","am","ā","ae","ae","ārum","īs","ās","īs"]
    nounendings2 = ["us","e","ī","ō","um","ō","ī","ī","ōrum","īs","ōs","īs"]
    nounendings2er = ["r","r","rī","rō","rum","rō","rī","rī","rōrum","rīs","rōs","rīs"]
    nounendings3 = ["um","um","ī","ō","um","ō","a","a","ōrum","īs","as","īs"]
    # NOTE(review): thirddecn is never used; the third-declension forms are
    # built inline below.
    thirddecn=['is',"ī","em","e","ēs","ēs","um","ibus","ēs","ibus"]
    # Maps (case, number) to an index into the ending tables above.
    nounendinglocation = lambda case,num:["sg","pl"].index(num)*6+["nom","voc","gen","dat","acc","abl"].index(case)
    nouns = []
    f=open("nouns.txt","rt", encoding="utf-8")
    for line in f:
        if line[0]!="#" and line[0]!='~':
            nouns.append(line.strip().split(" "))
        # '~'-prefixed lines are irregular third-declension nouns: expand
        # their full paradigm eagerly and store it as a list in slot 1.
        if line[0]!='#' and "~" in line:
            specialnoun=line.strip().split(" ")[1:]
            nombase=specialnoun[1]
            thirddec=[specialnoun[1]]
            thirddec.append(nombase)
            if specialnoun[2]=='m' or specialnoun[2]=='f':
                [thirddec.append(i) for i in [specialnoun[0]+'is',specialnoun[0]+"ī",specialnoun[0]+"em",specialnoun[0]+"e",specialnoun[0]+"ēs",specialnoun[0]+"ēs",specialnoun[0]+"um",specialnoun[0]+"ibus",specialnoun[0]+"ēs",specialnoun[0]+"ibus"]]
            elif specialnoun[2]=='n':
                [thirddec.append(i) for i in [specialnoun[0]+'is',specialnoun[0]+"ī",nombase,specialnoun[0]+"e",specialnoun[0]+"a",specialnoun[0]+"um",specialnoun[0]+"ibus",specialnoun[0]+"a",specialnoun[0]+"ibus"]]
            else:
                pass
            specialnoun[1]=thirddec[:]
            # Empty stem marks "forms already fully expanded" for nounphrase().
            specialnoun[0]=""
            nouns.append(specialnoun)
    f.close()
    if is_first:
        is_first=False
        globals().update(locals())
### GENERATE A NOUN PHRASE ###
# I should make -are things appear here... later
maxnounrecursiondeapth=3
def nounphrase(case,num=None,nounrecursiondeapth=0):
    """Generate a random Latin noun phrase declined for *case*.

    Args:
        case: one of "nom","voc","gen","dat","acc","abl".
        num: "sg" or "pl"; a fresh random choice when omitted.
        nounrecursiondeapth: current recursion depth; bounds the nested
            "X et Y" and genitive expansions.
    Returns:
        The phrase as a string.  Relies on the module globals (nouns,
        pronouns, adjectives and the ending tables) published by setup().
    """
    # BUG FIX: the original default ``num=random.choice(["sg","pl"])`` was
    # evaluated once at definition time, so every call that omitted num
    # reused the same grammatical number forever.
    if num is None:
        num = random.choice(["sg","pl"])
    if num == "pl":
        # Sometimes render a plural as "singular et singular".
        if random.randint(0,1)==1 and nounrecursiondeapth<maxnounrecursiondeapth:
            return nounphrase(case,num="sg",nounrecursiondeapth = nounrecursiondeapth+1)+" et "+nounphrase(case,num="sg",nounrecursiondeapth=nounrecursiondeapth+1)
    # Pronouns are only candidates when they exist for this case/number.
    localnouns = copy.copy(nouns)
    for pronoun in pronouns:
        if case in eval(pronoun[1]) and num in eval(pronoun[2]):
            localnouns.append(pronoun)
    chosennoun = random.choice(localnouns)
    if chosennoun in pronouns:
        return chosennoun[0]
    else:
        if chosennoun[0]=="":
            # Irregular third-declension noun: forms are pre-expanded.
            phrase=str(chosennoun[1][nounendinglocation(case,num)])
        else:
            phrase = chosennoun[0]+eval(chosennoun[1])[nounendinglocation(case,num)]
        if random.randint(0,1)==1:
            if random.randint(0,3)!=1:
                phrase += " "+random.choice(adjectives)[0]+{"f":nounendings1,"m":nounendings2,"n":nounendings3}[chosennoun[2]][nounendinglocation(case,num)]
            else:
                pass
                # BUG FIX: the original read ``choosenoun`` here (NameError),
                # so this hic/haec/hoc branch always crashed and was silently
                # retried by the caller's bare except.
                # NOTE(review): random.choice('h')[0] is always 'h'.
                phrase+=" "+random.choice('h')[0]+{"f":hicend1,"m":hicend2,"n":hicend3}[chosennoun[2]][nounendinglocation(case,num)]
        # Sometimes attach a genitive modifier ("of X").
        if random.randint(0,1)==1 and nounrecursiondeapth<maxnounrecursiondeapth:
            phrase += " "+nounphrase("gen",nounrecursiondeapth = nounrecursiondeapth+1)
        return phrase
###sentance order
#0 address
#1 if
#2 preposition
#3 subject
#4 indirect object
#5 direct object
#6 ablative instrument
#7 adverbs
#8 verb
#9predicate
#10 linkednominative
def sentance(si=False): #si is there so that there aren't nested ifs or multiple addresses
    """Generate one random Latin sentence (string).

    Builds a list of [text, slot] pairs — the slot numbers encode the word
    order documented in the comments above — then sorts by slot and joins.
    Relies on the module globals loaded by setup(); may raise (and is
    retried by the caller) when random pools are empty.
    """
    setup(is_first)
    # NOTE(review): updating locals() is a no-op in CPython; this line has
    # no effect.
    locals().update({'nounendings3':nounendings3})
    verb = random.choice(verbs)
    # Ending index 0-5 = person/number, 6-7 = imperatives.
    ending = random.randint(0,7)
    phrase=[]
    # 3rd-person endings get an explicit nominative subject.
    if ending == 2:
        phrase.append([nounphrase("nom",num="sg"),3])
    if ending == 5:
        phrase.append([nounphrase("nom",num="pl"),3])
    if verb[2] == "link":
        #let this also be an adjective also
        # Linking verb: add a predicate nominative agreeing in number.
        if ending in [0,1,2,6]:
            num = "sg"
        else:
            num = "pl"
        phrase.append([nounphrase("nom",num=num),10])
    if verb[2] == "t" and verb[3]!="v":
        # Transitive verb: add a direct object.
        phrase.append([nounphrase("acc"),5])
    if verb[3]=='v':
        # "v" verbs take a complementary infinitive from another verb of
        # the same type.
        phrase.append([verb[0]+eval(verb[1])[ending],8])
        debug= [tempverb for tempverb in verbs if tempverb[2] == verb[2] and tempverb != verb and tempverb[3]!='v']
        otherverb = random.choice(debug)
        phrase.append([otherverb[0]+eval(otherverb[1])[8],8])
    # Verbs of giving/serving may take an indirect object.
    if verb[0] in ["d","serv"] and random.randint(0,1)==1:
        phrase.append([nounphrase("dat"),4])
    if random.randint(0,1)==1:
        prep = random.choice(prepositions)
        phrase.append([prep[0]+" "+nounphrase(prep[1]),2])
    # Direct address only in the top-level sentence, never inside "sī".
    if not si and random.randint(0,1)==1:
        phrase.append(["ō "+nounphrase("voc")+",",0])
    if random.randint(0,1)==1:
        phrase.append([nounphrase("abl"),6])
    if random.randint(0,1)==1:
        phrase.append([random.choice(adverbs)[0],7])
    # Optionally produce a compound predicate "V1 et V2".
    # NOTE(review): when verb[3] != 'v' and this branch is skipped, the
    # sentence may end up with no main verb at all — confirm intent.
    if random.randint(0,1)==1 and verb[2]!='link':
        debug=[tempverb for tempverb in verbs if tempverb[2] == verb[2] and tempverb != verb and tempverb[3]!='v']
        otherverb = random.choice(debug)
        phrase.append([verb[0]+eval(verb[1])[ending]+" et" ,9])
        phrase.append([otherverb[0]+eval(otherverb[1])[ending],9])
    # Optionally prepend a conditional clause (only one level deep).
    if random.randint(0,1)==1 and si!=True:
        phrase.append(["sī "+sentance(si=True)+",",1])
    phrase.sort(key=lambda x:x[1])
    return " ".join([subphrase[0] for subphrase in phrase])
j=0
##while j<1000:
##
##
##
##
##    print(sentance())
##    print("------")
##    j+=1
# Emit 100 acceptable sentences.  sentance() raises fairly often (empty
# random pools, the broken hic/haec branch in nounphrase, ...), so every
# failure is silently swallowed and the draw is retried.
while j<100:
    try:
##
        z=sentance()
        # NOTE(review): this bare expression is a no-op.
        z
        # Filter out degenerate outputs: nested "sī et", empty strings and
        # sentences of two words or fewer.
        if 'sī et' not in z and z!='' and len(z.split())>2 :
            print(z)
            print("------")
            j+=1
##        else:
##            if random.randint(0,100)==1:
##                print(z)
##                print("------")
##
##                j+=1
    # NOTE(review): bare except hides every error, including real bugs;
    # narrowing it would make failures in the word files visible.
    except:
        pass
##
##
####XX=True
####
##while XX:
## setup()
## yy=sentance()
## if len(yy.split())>=500:
## print(yy)
## XX=False
| my-name-here/prgl | prgl2.py | prgl2.py | py | 9,729 | python | en | code | 0 | github-code | 13 |
70765708498 | """Public views tests"""
# run these tests like:
#
# python -m unittest test_user_model.py
import os
from unittest import TestCase
from sqlalchemy import exc
from flask import session
from models import db, User, Phrasebook, Translation, PhrasebookTranslation
from bs4 import BeautifulSoup
os.environ["DATABASE_URL"] = "postgresql:///translator-test"
from app import app, CURR_USER_KEY
app.config['WTF_CSRF_ENABLED'] = False
app.config['TESTING'] = True
app.config['DEBUG_TB_HOSTS'] = ['dont-show-debug-toolbar']
with app.app_context():
db.create_all()
class PublicViewsTestCase(TestCase):
    """Testing app view functions."""

    # Fixture layout built by setUp():
    #   u1 (111) owns public phrasebook p1 (111, EN->ES) with t1 and t2.
    #   u2 (222) owns public phrasebook p2 (222, EN->FR) with t2 (+ note).
    #   t3 (333) belongs to no phrasebook ("orphaned").

    def setUp(self):
        """Create test client & mock data.
        2 users each with one phrasebook. User1 contains 2 translations. The first is only in user1's phrasebook, the 2nd is in both user1 and user2's phrasebooks"""
        db.drop_all()
        db.create_all()
        self.client = app.test_client()
        # create user 1
        u1 = User.signup("testuser", "password")
        uid1 = 111
        u1.id = uid1
        db.session.commit()
        u1 = User.query.get(uid1)
        self.u1 = u1
        self.uid1 = uid1
        # create user 2
        u2 = User.signup("testuser2", "password")
        uid2 = 222
        u2.id = uid2
        db.session.commit()
        u2 = User.query.get(uid2)
        self.u2 = u2
        self.uid2 = uid2
        # create a public phrasebook for user 1
        p1 = Phrasebook(
            name="phrasebook",
            user_id=self.uid1,
            public=True,
            lang_from="EN",
            lang_to="ES",
        )
        pid1 = 111
        p1.id = pid1
        db.session.add(p1)
        db.session.commit()
        p1 = Phrasebook.query.get(pid1)
        self.p1 = p1
        self.pid1 = pid1
        # create phrasebook for user 2
        p2 = Phrasebook(
            name="french phrases",
            user_id=self.uid2,
            public=True,
            lang_from="EN",
            lang_to="FR",
        )
        pid2 = 222
        p2.id = pid2
        db.session.add(p2)
        db.session.commit()
        p2 = Phrasebook.query.get(pid2)
        self.p2 = p2
        self.pid2 = pid2
        # Create translation and add to user 1's only phrasebook.
        t1 = Translation(
            lang_from="EN",
            lang_to="ES",
            text_from="What's going on, pumpkin?",
            text_to="¿Qué te pasa, calabaza?",
        )
        tid1 = 111
        t1.id = tid1
        db.session.add(t1)
        db.session.commit()
        t1 = Translation.query.get(tid1)
        self.t1 = t1
        self.tid1 = tid1
        self.p1.translations.append(t1)
        db.session.commit()
        # create second translation and add to user1's only phrasbook and user2's only phrasebook.
        t2 = Translation(
            lang_from="EN",
            lang_to="FR",
            text_from="What a test!",
            text_to="Quel test!",
        )
        tid2 = 222
        t2.id = tid2
        db.session.add(t2)
        db.session.commit()
        t2 = Translation.query.get(tid2)
        self.t2 = t2
        self.tid2 = tid2
        self.p1.translations.append(t2)
        self.p2.translations.append(t2)
        db.session.commit()
        # Attach a per-phrasebook note to u2's copy of t2 via the
        # association object.
        p2_t2 = PhrasebookTranslation.query.get((pid2, tid2))
        p2_t2.note = "Tesing is happening! testuser2's testing note."
        db.session.commit()
        self.p2_t2 = p2_t2
        # create a third translation that belongs to no phrasebooks.
        t3 = Translation(
            lang_from="EN",
            lang_to="ES",
            text_from="I'm orphaned data",
            text_to="Soy datos huérfanos",
        )
        tid3 = 333
        t3.id = tid3
        db.session.add(t3)
        db.session.commit()
        t3 = Translation.query.get(tid3)
        self.t3 = t3
        self.tid3 = tid3
        db.session.commit()

    def tearDown(self):
        """Clean up any fouled transaction."""
        db.session.rollback()

    ##################################################
    # Public routes tests
    ##################################################

    # NOTE(review): method name has a typo ("phraebooks").
    def test_show_public_phraebooks(self):
        """If logged in, does route show public phrasebooks that do not belong to the current user?"""
        p3 = Phrasebook(name="secondbook", user_id=self.uid1, lang_from="EN", lang_to="ES", public=True)
        p3.id = 333
        db.session.add(p3)
        p3.translations.append(self.t3)
        db.session.commit()
        self.assertIn(self.t3, p3.translations)
        with self.client as c:
            """Access should be blocked and user redirected home if not logged in."""
            resp = c.get("/public", follow_redirects=True)
            html = resp.get_data(as_text=True)
            self.assertEqual(resp.status_code, 200)
            self.assertIn('Access unauthorized', str(resp.data))
            # NOTE(review): this "session" shadows the flask.session import.
            with c.session_transaction() as session:
                session[CURR_USER_KEY] = self.uid2
            # If logged in...
            resp = c.get("/public", follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            soup = BeautifulSoup(str(resp.data), 'html.parser')
            # Only non-user public phrasebooks should be shown
            phrasebooks = soup.find_all("a", {"class": "pb-button"})
            self.assertEqual(len(phrasebooks), 2)
            self.assertIn("phrasebook", phrasebooks[0].text)
            self.assertIn("secondbook", phrasebooks[1].text)
            self.assertNotIn("french phrases", phrasebooks[0].text)
            self.assertNotIn("french phrases", phrasebooks[1].text)
            # Phrasebooks should contain the text of their translations and no others
            translations_from = [t.get_text() for t in soup('td', {"class": "from"})]
            # NOTE(review): the next three lines *assign* into the list
            # instead of asserting; the intended comparisons never run
            # (should be assertEqual / assertCountEqual).
            translations_from[0] = "What's going on, pumpkin?"
            translations_from[1] = 'What a test!'
            translations_from[2] = "I'm orphaned data"
            self.assertEqual(len(translations_from), 3)

    def test_add_public_translation(self):
        """Does route add public translation to users's selected phrasebooks.
        If no data is submitted, appropriate alret should be shown and redirected to public page."""
        p3 = Phrasebook(name="secondbook", user_id=self.uid1, lang_from="EN", lang_to="ES")
        p3.id = 333
        db.session.add(p3)
        db.session.commit()
        with self.client as c:
            self.assertEqual(len(self.u1.phrasebooks), 2)
            self.assertEqual(len(p3.translations), 0)
            with c.session_transaction() as session:
                session[CURR_USER_KEY] = self.uid1
            resp = c.post(f"/public/translation/{self.tid3}/add",
                          data={"phrasebooks": [self.pid1, p3.id]},
                          follow_redirects=True)
            html = resp.get_data(as_text=True)
            p1 = Phrasebook.query.get(self.pid1)
            t3 = Translation.query.get(self.tid3)
            p3 = Phrasebook.query.get(p3.id)
            self.assertIn('Translation saved', html)
            # NOTE(review): "200 or 202" evaluates to 200, so only 200 is
            # actually accepted here (same below).
            self.assertEqual(resp.status_code, 200 or 202)
            self.assertEqual(len(p1.translations), 3)
            self.assertIn(t3, p1.translations)
            self.assertEqual(len(p3.translations), 1)
            self.assertIn(t3, p3.translations)
            # Failing case / no data
            resp = c.post(f"/public/translation/{self.tid3}/add",
                          data={"phrasebooks": []},
                          follow_redirects=True)
            html = resp.get_data(as_text=True)
            self.assertIn('No data submitted', html)
            self.assertEqual(resp.status_code, 200 or 202)

    def test_copy_public_phrasebook(self):
        """Does route copy public phrasebook and all of it's translations to the user's phrasebook."""
        self.assertNotIn(self.t2, self.u1.phrasebooks)
        with self.client as c:
            # Access should be blocked and user redirected home if not logged in.
            resp = c.post("/public/phrasebook/1/add", follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            self.assertIn('Access unauthorized', str(resp.data))
            # Show that phrasebook2 is not in user1's phrasebooks
            self.assertNotIn(self.p2, self.u1.phrasebooks)
            self.assertEqual(len(self.u1.phrasebooks), 1)
            # Route should sucessfully add public phrasebook2 and it's translations to user1.
            with c.session_transaction() as session:
                session[CURR_USER_KEY] = self.uid1
            u1 = User.query.get(self.uid1)
            u1_pbs = u1.phrasebooks
            p2 = Phrasebook.query.get(self.pid2)
            self.assertEqual(len(p2.translations), 1)
            resp = c.post(f"/public/phrasebook/{self.pid2}/add", follow_redirects=True)
            u1 = User.query.get(self.uid1)
            # there should be one more phrasebook for user1
            self.assertEqual(len(u1.phrasebooks), 2)
            # the new phrasebook should contian all of the same data and translationsas the coppied one, except phrasebook id and user_id
            new_p = u1.phrasebooks[1]
            self.assertEqual(new_p.name, p2.name)
            self.assertEqual(new_p.lang_from, p2.lang_from)
            self.assertEqual(new_p.lang_to, p2.lang_to)
            self.assertNotEqual(new_p.user_id, p2.user_id)
            self.assertNotEqual(new_p.id, p2.id)
            self.assertEqual(new_p.user_id, self.uid1)
            # new phrasebook should contain the same translations as the copied phrasebook
            p2 = Phrasebook.query.get(self.pid2)
            self.assertEqual(new_p.translations, p2.translations)
| adamnyk/capstone-1 | app/tests/test_public_views.py | test_public_views.py | py | 10,118 | python | en | code | 0 | github-code | 13 |
71308410577 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 19 17:32:10 2019
@author: dougpalmer
"""
from __future__ import division
import cv2
import numpy as np
import time
import vision
# Setup classes
cap = cv2.VideoCapture('test_videos/output3.avi')
vis = vision.droidVision()
class droidThresh():
    """Interactive HSV colour-threshold tuner backed by OpenCV trackbars.

    NOTE(review): this class is not used by the script's main loop (which
    uses vision.droidVision); the original version crashed with NameError
    on the undefined module-level names ``icol``, ``nothing``, ``frameHSV``
    and ``mask`` — all fixed below.
    """

    def __init__(self):
        """Create the tuning window with one trackbar per HSV bound."""
        # BUG FIX: the original read module-level `icol` and `nothing`,
        # neither of which existed; use the instance attribute and a no-op
        # callback instead.
        self.icol = (0, 0, 0, 255, 255, 255)  # low H,S,V then high H,S,V
        cv2.namedWindow('colorTest')
        # Lower range colour sliders.
        cv2.createTrackbar('lowHue', 'colorTest', self.icol[0], 255, self._nothing)
        cv2.createTrackbar('lowSat', 'colorTest', self.icol[1], 255, self._nothing)
        cv2.createTrackbar('lowVal', 'colorTest', self.icol[2], 255, self._nothing)
        # Higher range colour sliders.
        cv2.createTrackbar('highHue', 'colorTest', self.icol[3], 255, self._nothing)
        cv2.createTrackbar('highSat', 'colorTest', self.icol[4], 255, self._nothing)
        cv2.createTrackbar('highVal', 'colorTest', self.icol[5], 255, self._nothing)

    @staticmethod
    def _nothing(value):
        """No-op trackbar callback (OpenCV requires one)."""

    def setGUI(self, frame=None):
        """Read the slider positions into self.colorLow / self.colorHigh.

        Args:
            frame: optional BGR image; when given it is converted to HSV
                and the in-range mask for the current bounds is returned.
        Returns:
            The binary mask for *frame*, or None when no frame was given.
        """
        # Get HSV values from the GUI sliders.
        lowHue = cv2.getTrackbarPos('lowHue', 'colorTest')
        lowSat = cv2.getTrackbarPos('lowSat', 'colorTest')
        lowVal = cv2.getTrackbarPos('lowVal', 'colorTest')
        highHue = cv2.getTrackbarPos('highHue', 'colorTest')
        highSat = cv2.getTrackbarPos('highSat', 'colorTest')
        highVal = cv2.getTrackbarPos('highVal', 'colorTest')
        # HSV values to define a colour range we want to create a mask from.
        self.colorLow = np.array([lowHue, lowSat, lowVal])
        self.colorHigh = np.array([highHue, highSat, highVal])
        if frame is None:
            return None
        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        return cv2.inRange(frameHSV, self.colorLow, self.colorHigh)

    def thresholdImage(self, frame):
        """Mask *frame* with the current bounds and box the biggest blob.

        BUG FIX: the original referenced undefined `mask`/`frameHSV`
        globals; the mask is now computed here via setGUI(frame).
        """
        mask = self.setGUI(frame)
        # Show the first mask
        cv2.imshow('mask-plain', mask)
        # NOTE(review): the 3-tuple return matches OpenCV 3.x; OpenCV 4.x
        # returns (contours, hierarchy) — confirm the installed version.
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
        biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
        x, y, w, h = cv2.boundingRect(biggest_contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# k = cv2.waitKey(5) & 0xFF
# if k == 27:
# break
# print('fps - ', 1/(time.time() - timeCheck))
# cv2.destroyAllWindows()
# vidCapture.release()
# Open frame
# Feed the test video through the vision pipeline, frame by frame,
# displaying the annotated output and the per-frame processing rate.
while(cap.isOpened()):
    t0 = time.time()
    ret, frame = cap.read()
    if frame is not None:
        # Rescale image to 416, 304
        frame = cv2.resize(frame,(416,304))
        vis.processFrame(frame)
        print('fps, ',1.0/(time.time() - t0))
        cv2.imshow("Vision Testing", vis.frame_edited)
        cv2.waitKey(1)
    else:
        # End of stream: release the capture and close the windows
        # (the break on the next line then exits the loop).
        print ('releasing resources')
        cap.release()
        cv2.destroyAllWindows()
break | krishanrana/droidracer | droidThresh.py | droidThresh.py | py | 2,854 | python | en | code | 0 | github-code | 13 |
42382919306 | import logging
from typing import Callable
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.db.models.signals import pre_migrate, post_migrate
from django.dispatch import receiver
from django.http import HttpResponse
from applications.multi_tenant.db import DatabaseAlias
logger = logging.getLogger(__name__)
@receiver(pre_migrate)
def switch_database_pre_migrate(sender, **kwargs):
    """Bind the thread-local database alias to the one being migrated."""
    DatabaseAlias.set(kwargs.get('using'))
@receiver(post_migrate)
def switch_database_post_migrate(sender, **kwargs):
    """Clear the thread-local database alias once the migration finishes."""
    DatabaseAlias.clear()
class ThreadLocalMiddleware:
    """Binds the per-request database alias to thread-local storage.

    Must be placed before any middleware that runs database queries: the
    alias is installed before the downstream middleware/views execute and
    removed again once they have all returned.
    """

    def __init__(self, get_response: Callable):
        """Store Django's *get_response* callable.

        Called once, at application start-up.

        Args:
            get_response: Callable producing the response for a request.
        """
        self.get_response = get_response

    def __call__(self, request: WSGIRequest) -> HttpResponse:
        """Process one request: install the alias, delegate, then clear it.

        Args:
            request: Django request object.

        Returns: HttpResponse object produced by the downstream chain.
        """
        if DatabaseAlias.is_set():
            raise Exception('Database alias already set')
        # Resolve the alias from the request header and publish it before
        # any downstream middleware touches the database.
        self._save_to_thread_local(self._get_database_alias_from_header(request))
        # Run the remainder of the middleware chain; once the response is
        # back, the alias is removed from the thread-local variable.
        response = self.get_response(request)
        DatabaseAlias.clear()
        return response

    @staticmethod
    def _get_database_alias_from_header(request):
        """Return the alias header value, raising when it is absent."""
        header = settings.DATABASE_ALIAS_HEADER
        if header in request.META:
            return request.META[header]
        raise Exception('Database alias not found in request', request.META)

    @staticmethod
    def _save_to_thread_local(database_alias: str):
        """Publish *database_alias* in thread-local storage."""
        DatabaseAlias.set(database_alias)
| CardoAI/django-multi-tenant | multi_tenant/middleware/thread_local.py | thread_local.py | py | 2,640 | python | en | code | 0 | github-code | 13 |
4682776243 | from collections import namedtuple
import sydpy
mapping = namedtuple('Mapping', ['m', 'cs', 'slice'])
class JesdPackerAlgo:
    """Reference JESD204-style sample packer.

    Packs M samples of N bits (plus CS control bits each) into L lanes of
    F octets, honouring the control-field (CF) and high-density (HD)
    link-configuration parameters.  ``dtype`` supplies the bit-vector type
    (width -> constructor) used for words, nibbles and padding.
    """

    def __init__(self, dtype = None, M=1, N=8, S=1, CS=0, CF=0, L=1, F=1, HD=0):
        """Record the link-configuration parameters verbatim."""
        self.dtype = dtype
        self.M = M          # converters (samples per frame set)
        self.N = N          # bits per sample
        self.S = S          # samples per converter per frame
        self.CS = CS        # control bits per sample
        self.CF = CF        # control-word fields per frame
        self.L = L          # lanes
        self.F = F          # octets per frame per lane
        self.HD = HD        # high-density packing flag

    def form_words(self, samples):
        """Merge (sample, control-bits) pairs into words.

        With CF == 0 the control bits are appended to their own sample;
        otherwise all control bits are concatenated into one trailing
        control word.
        """
        if self.CF == 0:
            return [data % ctrl for data, ctrl in samples]
        words = [data for data, _ in samples]
        control_word = self.dtype(0)()
        for _, ctrl in samples:
            control_word %= ctrl
        words.append(control_word)
        return words

    def form_nibble_groups(self, words):
        """Slice every word into 4-bit nibbles, most-significant first,
        zero-padding the final partial nibble on the right."""
        groups = []
        for word in words:
            nibbles = []
            remaining = len(word)
            while remaining >= 4:
                nibbles.append(word[remaining - 4:remaining - 1])
                remaining -= 4
            if remaining != 0:
                nibbles.append(word[0:remaining - 1] % self.dtype(4 - remaining)(0))
            groups.append(nibbles)
        return groups

    def form_lane_groups(self, ng):
        """Distribute nibble groups over the L lanes and pair nibbles
        into octets.

        HD == 0 keeps each group whole within a lane (padding the rest of
        the lane with zero nibbles); HD == 1 streams the flattened nibble
        sequence across lane boundaries.
        """
        lane_nibbles = []
        if self.HD == 0:
            cursor = 0
            for _ in range(self.L):
                lane = []
                while len(lane) < self.F * 2 and cursor < len(ng):
                    if len(lane) + len(ng[cursor]) <= self.F * 2:
                        lane.extend(ng[cursor])
                        cursor += 1
                    else:
                        break  # next group would straddle the lane boundary
                while len(lane) < self.F * 2:
                    lane.append(self.dtype(4)(0))
                lane_nibbles.append(lane)
        else:
            flat = [nib for group in ng for nib in group]
            cursor = 0
            for _ in range(self.L):
                lane = flat[cursor:cursor + self.F * 2]
                while len(lane) < self.F * 2:
                    lane.append(self.dtype(4)(0))
                cursor += self.F * 2
                lane_nibbles.append(lane)
        # Merge adjacent nibbles (high % low) into octets per lane.
        return [[lane[i] % lane[i + 1] for i in range(0, len(lane), 2)]
                for lane in lane_nibbles]

    def pack(self, samples):
        """Run the full pipeline and return the per-lane frame, printing
        the intermediate stages for inspection."""
        words = self.form_words(samples)
        print('Words: ', words)
        nibble_groups = self.form_nibble_groups(words)
        print('NG: ', nibble_groups)
        frame = self.form_lane_groups(nibble_groups)
        print()
        print('Output Frame:')
        print()
        for lane in frame:
            print(lane)
        return frame
def SymbolicBit(w):
    """Return a SymbolicBitABC subclass whose width is fixed at *w*."""
    return type('symbit', (SymbolicBitABC,), {'w': w})
class SymbolicBitABC:
    """Bit-vector whose "bits" may be arbitrary Python objects (e.g. the
    symbolic (sample, field, bit) tags used by calc_pack_matrix) so that
    the origin of every packed bit can be traced.

    Concrete widths are produced by the SymbolicBit(w) factory, which
    subclasses this class with a specific ``w``.
    """
    # Width in bits; overridden by SymbolicBit(w) subclasses.
    w = 1

    def __init__(self, val=None, vld=None, defval=0):
        """Initialise from a sequence of per-bit values, padded to ``w``.

        Args:
            val: sequence of bit values; anything without len() — including
                the default None — means "no bits yet".
            vld: unused; kept for call compatibility.  # NOTE(review)
            defval: padding value for the remaining high bits.
        """
        # BUG FIX: the original used a mutable default argument (val=[])
        # and a bare except; None plus a targeted TypeError check is
        # equivalent, and list(val) additionally accepts tuples.
        try:
            length = len(val)
        except TypeError:
            val = []
            length = 0
        self.val = list(val) + [defval] * (self.w - length)

    def __mod__(self, other):
        """Concatenate: self supplies the high bits, other the low bits."""
        return SymbolicBit(self.w + other.w)(val=other.val + self.val)

    def __len__(self):
        return self.w

    def __str__(self):
        return str(self.val)

    def __repr__(self):
        return repr(self.val)

    def __getitem__(self, key):
        """Bit or inclusive bit-range access, returning a new vector.

        NOTE(review): slice access assumes both bounds are given; open
        slices such as [2:] would crash on max(None, ...).
        """
        if isinstance(key, slice):
            high = max(key.start, key.stop)
            low = min(key.start, key.stop)
        elif isinstance(key, int):
            high = low = int(key)
        else:
            raise TypeError("Invalid argument type.")
        return SymbolicBit(high - low + 1)(val=self.val[low:(high + 1)])
def pack_samples(samples, M, CF, CS, F, HD, L, S, N):
    """Pack concrete sydpy.Bit samples with the given link configuration.

    Args:
        samples: list of (data_bits, control_bits) pairs.
        M, CF, CS, F, HD, L, S, N: JESD link-configuration parameters,
            forwarded to JesdPackerAlgo.
    Returns:
        The packed per-lane frame from JesdPackerAlgo.pack().
    """
    dtype = sydpy.Bit
    # BUG FIX: the original printed the module-level samples_conv instead
    # of its argument, and instantiated the non-existent PackerTlAlgo.
    print('Samples: ', samples)
    packer = JesdPackerAlgo(dtype = dtype, M=M, N=N, S=S, CS=CS, CF=CF, L=L, F=F, HD=HD)
    return packer.pack(samples)
def calc_pack_matrix(M, CF, CS, F, HD, L, S, N):
    """Build the symbolic packing matrix for a link configuration.

    Each sample bit is tagged (sample_index, 0, bit) and each control bit
    (sample_index, 1, bit); packing these SymbolicBit vectors yields, per
    output bit, the coordinates of the input bit it came from.

    Returns:
        The symbolic per-lane frame from JesdPackerAlgo.pack().
    """
    # The original also built an unused L x F zero matrix `m`; dropped.
    dtype = SymbolicBit
    sym_samples = []
    for i in range(M):
        sym_samples.append((dtype(N)([(i, 0, j) for j in range(N)]),
                            dtype(CS)([(i, 1, j) for j in range(CS)])))
    print('Samples: ', sym_samples)
    # BUG FIX: the original instantiated the non-existent PackerTlAlgo;
    # the packer class in this file is JesdPackerAlgo.
    packer = JesdPackerAlgo(dtype = dtype, M=M, N=N, S=S, CS=CS, CF=CF, L=L, F=F, HD=HD)
    return packer.pack(sym_samples)
if __name__ == "__main__":
    # Cross-check: pack concrete 11-bit samples (with 2 control bits each)
    # and verify every packed bit against the symbolic packing matrix.
    samples = [(0x660,0x0), (0x189,0x2), (0x000,0x3), (0x0ef,0x0), (0x3cb,0x1), (0x0a0,0x1), (0x53f,0x1), (0x432,0x1), (0x553,0x0), (0x21e,0x2), (0x02a,0x3), (0x38d,0x0), (0x779,0x2), (0x32f,0x2), (0x347,0x0), (0x2d9,0x3)]
    samples_conv = []
    for (d, cs) in samples:
        samples_conv.append((sydpy.Bit(11)(d), sydpy.Bit(2)(cs)))
    m = calc_pack_matrix(M=16, N=11, S=1, CS=2, CF=1, L=7, F=4, HD=1)
    frame = pack_samples(samples_conv, M=16, N=11, S=1, CS=2, CF=1, L=7, F=4, HD=1)
    for f_lane, m_lane in zip(frame, m):
        for f_byte, m_byte in zip(f_lane, m_lane):
            for f_bit, m_bit in zip(f_byte, m_byte.val):
                # m_bit is a (sample, field, bit) tag, or 0 for padding
                # bits (which are skipped by the truthiness test).
                if m_bit:
                    assert int(f_bit) == samples_conv[m_bit[0]][m_bit[1]][m_bit[2]]
| bogdanvuk/sydpy | tests/packer_coef_calc.py | packer_coef_calc.py | py | 5,566 | python | en | code | 12 | github-code | 13 |
4543934777 | """
Given a string and a non-negative int n, return a larger string that is n copies of the original string.
string_times('Hi', 2) → 'HiHi'
string_times('Hi', 3) → 'HiHiHi'
string_times('Hi', 1) → 'Hi'
"""
def string_times(given_str, times):
    """Return *times* copies of *given_str* concatenated together.

    Args:
        given_str: the string to repeat.
        times: non-negative repeat count; any value <= 0 yields ''.
    Returns:
        The repeated string, e.g. string_times('Hi', 2) -> 'HiHi'.
    """
    # str * n is the idiomatic repeat-concatenation and already treats
    # counts <= 0 as an empty string, exactly like the original loop.
    return given_str * times
if __name__ == "__main__":
    # Quick manual check: expect "HiHiHi" on stdout.
    given_str = "Hi"
    times = 3
    print(string_times(given_str,times))
| avisionx/Must-Know-Programming-Codes | String Functions/string_times.py | string_times.py | py | 458 | python | en | code | 19 | github-code | 13 |
37175509699 | import sqlite3
import logging
class DatabaseHandler():
    """Handles the hearthstone database.

    All SQL now goes through parameter binding (? placeholders) instead of
    string formatting, so user-supplied values such as deck names and
    search terms cannot inject SQL or break on quotes.
    """
    def __init__(self):
        """Open the connection to Database/Hearthstone.db."""
        logging.debug("DatabaseHandler trying to connect to database")
        try:
            self.db = sqlite3.connect('Database/Hearthstone.db')
            self.cursor = self.db.cursor()
            logging.info("DatabaseHandler is connected")
        except sqlite3.Error as err:
            logging.warning("sqlite3.Error : {0}".format(err))
    def runQuery(self, query, params=()):
        """Execute `query` (with optional bound `params`) and commit.

        :param query: SQL text, optionally with ? placeholders
        :param params: values bound to the placeholders (default: none)
        """
        logging.debug("Running query: " + query)
        try:
            self.cursor.execute(query, params)
            self.db.commit()
        except sqlite3.Error as err:
            logging.warning("sqlite3.Error : {0}".format(err))
    def getCardName(self, cardId):
        """Return the card name for `cardId`, or None when unknown."""
        logging.debug("called DatabaseHandler.getCardName")
        try:
            self.cursor.execute("SELECT name FROM cards WHERE id=?;", (cardId,))
            name = self.cursor.fetchone()
            if name:
                return name[0]
        except sqlite3.Error as err:
            logging.warning("sqlite3.Error : {0}".format(err))
    def addDeck(self, name):
        """Insert a new deck called `name`."""
        logging.debug("called DatabaseHandler.addDeck")
        self.runQuery("INSERT INTO decks (name) VALUES(?);", (name,))
    def getDecks(self):
        """Return every deck as a (rowid, name) tuple."""
        logging.debug("called DatabaseHandler.getDecks")
        self.runQuery("SELECT rowid, * FROM decks;")
        return self.cursor.fetchall()
    def changeDeckName(self, oldName, newName):
        """Rename the deck called `oldName` to `newName`.

        Bug fix: the original statement had the arguments swapped
        (SET name=oldName WHERE name=newName), so renaming was a no-op.
        """
        logging.debug(
            "called DatabaseHandler.changeDeckName from {0} to {1}".format(oldName, newName))
        self.runQuery("UPDATE decks SET name=? WHERE name=?", (newName, oldName))
    def addCardToDeck(self, cardid, deckid, numberof):
        """Add a card row to a deck, or re-flag an existing row as in-deck."""
        logging.debug("called DatabaseHandler.addCardToDeck with {0},{1},{2}".format(
            cardid, deckid, numberof))
        self.runQuery(
            "SELECT * FROM cardsInDeck WHERE cardid=? AND deckid=? AND numberof=?;",
            (cardid, deckid, numberof))
        exist = self.cursor.fetchone()
        if not exist:
            self.runQuery(
                "INSERT INTO cardsInDeck VALUES (?,?,?,0,0,0)",
                (cardid, deckid, numberof))
        else:
            self.runQuery(
                "UPDATE cardsInDeck SET indeck=1 WHERE cardid=? AND deckid=? AND numberof=?;",
                (cardid, deckid, numberof))
    def removeCardFromDeck(self, cardid, deckid, numberof):
        """Flag the matching card row as no longer in the deck."""
        logging.debug("called DatabaseHandler.removeCardFromDeck with {0},{1},{2}".format(
            cardid, deckid, numberof))
        self.runQuery(
            "UPDATE cardsInDeck SET indeck=0 WHERE cardid=? AND deckid=? AND numberof=?;",
            (cardid, deckid, numberof))
    def findCards(self, searchTerm):
        """Find minions/spells whose name contains `searchTerm` or whose cost equals it."""
        logging.debug(
            "called DatabaseHandler.getCardByCost with " + searchTerm)
        self.runQuery(
            "SELECT id, name FROM cards WHERE (name LIKE ? OR cost=?) "
            "AND (type='Minion' OR type='Spell');",
            ('%' + searchTerm + '%', searchTerm))
        return self.cursor.fetchall()
    def shutdown(self):
        """Close the database connection."""
        logging.debug("called DatabaseHandler.shutdown")
        self.db.close()
| Nieo/HSHelper | Database/dbhandler.py | dbhandler.py | py | 3,391 | python | en | code | 1 | github-code | 13 |
12553493160 | #!/usr/bin/env python3
import curses
from random import randint
def main():
    """Run the curses snake game until the snake hits a wall or itself."""
    # set up the window
    screen = curses.initscr()
    curses.curs_set(0)
    n_rows, n_cols = screen.getmaxyx()
    win = curses.newwin(n_rows, n_cols, 0, 0)
    win.keypad(1)
    # 100 ms input timeout doubles as the frame rate of the game loop.
    win.timeout(100)
    # draw a game boundary
    boundary_char = '#'
    boarder_top, boarder_bottom = 3, n_rows - 2
    boarder_left, boarder_right = 1, n_cols - 2
    board_h, board_w = boarder_bottom - boarder_top, boarder_right - boarder_left
    for x in range(boarder_left, boarder_right + 1):
        win.addch(boarder_top, x, boundary_char)
        win.addch(boarder_bottom, x, boundary_char)
    for y in range(boarder_top, boarder_bottom + 1):
        win.addch(y, boarder_left, boundary_char)
        win.addch(y, boarder_right, boundary_char)
    # initialize the snake: head first, three segments in a horizontal row
    snake_y, snake_x = board_h // 2, board_w // 4
    snake = [[snake_y, snake_x], [snake_y, snake_x - 1], [snake_y, snake_x - 2]]
    # set the first food location (drawn as the pi glyph)
    food = [board_h // 2, board_w // 2]
    win.addch(food[0], food[1], curses.ACS_PI)
    # set the initial movement direction
    key = curses.KEY_RIGHT
    # initialize the score
    score = 0
    score_y, score_x = 1, 2
    win.addstr(score_y, score_x, f"Score: {score}")
    # main game loop
    while True:
        # get the next key press; -1 means the timeout elapsed with no key,
        # in which case the snake keeps its current direction
        next_key = win.getch()
        key = key if next_key == -1 else next_key
        # modify the snake movement direction
        new_head = [snake[0][0], snake[0][1]]
        if key == curses.KEY_DOWN:
            new_head[0] += 1
        if key == curses.KEY_UP:
            new_head[0] -= 1
        if key == curses.KEY_LEFT:
            new_head[1] -= 1
        if key == curses.KEY_RIGHT:
            new_head[1] += 1
        snake.insert(0, new_head)
        # check for game ending collisions with the wall and snake
        if (
            snake[0][0] in [boarder_top, boarder_bottom] or
            snake[0][1] in [boarder_left, boarder_right] or
            snake[0] in snake[1:]
        ):
            return
        # check for food consumption and move the snake:
        # eating grows the snake (tail kept); otherwise the tail is erased
        if snake[0] == food:
            food = None
            # rejection-sample a fresh food cell that is not on the snake
            while food is None:
                nf = [randint(boarder_top + 1, boarder_bottom - 1),
                      randint(boarder_left + 1, boarder_right - 1)]
                food = nf if nf not in snake else None
            win.addch(food[0], food[1], curses.ACS_PI)
            # modify the score display
            score += 1
            win.addstr(score_y, score_x, f"Score: {score}")
        else:
            tail = snake.pop()
            win.addch(tail[0], tail[1], ' ')
        win.addch(snake[0][0], snake[0][1], curses.ACS_CKBOARD)
if __name__ == '__main__':
    # Always restore the terminal, even when the game crashes — a curses
    # error that skips endwin() leaves the shell in raw mode.  The old
    # bare `except: pass` also hid genuine bugs; now only Ctrl-C exits
    # quietly and real errors surface after cleanup.
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to quit the game.
        pass
    finally:
        # cleanup
        curses.napms(300)
        curses.endwin()
| keseribp/snake | snake.py | snake.py | py | 2,903 | python | en | code | 0 | github-code | 13 |
10560851369 | #!/usr/bin/env python
# coding: utf-8
# # Week 1 Discussion - Descriptive Statistics
# ### Download the train.csv dataset and run some basic descriptive statistics and graphs for two or more variables of your choosing using Python. Provide your Python code here, perhaps as a Jupyter notebook .html file on GitHub. Embed at least one graph in your discussion by using the "Files" link in the upper right portion of Canvas.
# In[1]:
# Week 1 Discussion
# MSDS 422-DL-55
# Brandi Beals
# In[9]:
# Load packages
import pandas as pd
import matplotlib.pyplot as plt
# In[5]:
# Read data
data = pd.read_csv('train.csv',sep=',')
# Show a sample of the first 10 rows of data
data.head(10)
# In[7]:
# Understand the data types of the data
data.info()
# In[8]:
# Summarize the numerical fields in the data
data.describe()
# In[48]:
# Exploratory data analysis
pd.plotting.scatter_matrix(data, diagonal='kde', color='black', alpha=0.3, figsize=(15,15))
# In[55]:
plt.figure(figsize=(10,10))
plt.hist(data['Pclass'], 3, color='black', alpha=0.3)
plt.xticks(data['Pclass'])
plt.title('Class Count')
plt.xlabel('Class')
plt.ylabel('Frequency')
# In[56]:
plt.figure(figsize=(10,10))
age = data['Age']
plt.hist(age.dropna(), 20, color='black', alpha=0.3)
plt.title('Histogram of Age')
plt.xlabel('Age')
plt.ylabel('Frequency')
# In[57]:
plt.figure(figsize=(10,10))
plt.scatter(data['SibSp'], data['Parch'], color='black', alpha=0.3)
plt.title('Scatterplot')
plt.xlabel('Number of Sibling/Spouse Aboard')
plt.ylabel('Number of Parent/Child Aboard')
# In[58]:
plt.figure(figsize=(10,10))
plt.boxplot(data['Fare'], vert=False)
plt.title('Boxplot of Fare')
| brandibeals/NW422-Week1-Titanic | Week 1 Discussion - Descriptive Statistics.py | Week 1 Discussion - Descriptive Statistics.py | py | 1,681 | python | en | code | 0 | github-code | 13 |
4410370705 | from collections import Counter
from msilib import type_short
import time
import numpy as np
from QuickSort2 import FIXED_RANDOM
BASELINE_PIVOT = 0
FIXED_PIVOT = 1
INSERTION_LIMIT = 10
# Function to perform the insertion sort
def insertion_sort(arr):
    """Sort `arr` in place with insertion sort (stable, O(n^2))."""
    print("insertion started")
    # Element 0 is trivially sorted; insert each later element into the
    # sorted prefix to its left.
    for idx in range(1, len(arr)):
        key = arr[idx]
        pos = idx
        # Shift every larger element of the sorted prefix one slot to the
        # right until key's insertion point is exposed.
        while pos > 0 and arr[pos - 1] > key:
            arr[pos] = arr[pos - 1]
            pos -= 1
        # pos is now either 0 or just after an element <= key.
        arr[pos] = key
# The following two functions are used
# to perform quicksort on the array.
# Partition function for quicksort
def partition(arr, low, high, sort_mode):
    """Lomuto-partition arr[low..high] in place; return the pivot's final index.

    Bug fixes relative to the original:
    - operates on the `arr` parameter instead of the module-level global `a`,
    - moves the chosen pivot element to arr[high] before scanning, so the
      final swap really places the pivot between the two halves (the old
      code compared against a value that was not sitting at `high`),
    - the baseline pivot index is low + (high - low) // 2; the old
      expression `high - low // 2` parsed as high - (low // 2) because of
      operator precedence.

    :param arr: list being partitioned (mutated in place)
    :param low: first index of the slice
    :param high: last index of the slice
    :param sort_mode: BASELINE_PIVOT -> midpoint pivot;
                      FIXED_PIVOT -> element ~60% into the slice
    :return: final index of the pivot, or None for an unknown mode
    """
    if sort_mode == BASELINE_PIVOT:
        pivot_index = low + (high - low) // 2
    elif sort_mode == FIXED_PIVOT:
        pivot_index = int(low + 0.6 * (high - low + 1))
    else:
        # Unknown mode: mirror the original's implicit None return.
        return None
    # Park the pivot at the end so the classic Lomuto scan applies.
    arr[pivot_index], arr[high] = arr[high], arr[pivot_index]
    pivot = arr[high]
    j = low
    for i in range(low, high):
        if arr[i] < pivot:
            arr[i], arr[j] = arr[j], arr[i]
            j += 1
    arr[j], arr[high] = arr[high], arr[j]
    return j
# Function to call the partition function
# and perform quick sort on the array
def quick_sort(arr, low, high):
    """Recursively quicksort arr[low..high] in place and return arr."""
    # Guard clause: slices of zero or one element are already sorted.
    if low >= high:
        return arr
    split = partition(arr, low, high, 0)
    quick_sort(arr, low, split - 1)
    quick_sort(arr, split + 1, high)
    return arr
# Hybrid function -> Quick + Insertion sort
def hybrid_quick_sort(arr, low, high):
    """Quicksort arr[low..high] in place, falling back to insertion sort
    for slices smaller than INSERTION_LIMIT.

    Only the smaller partition is handled recursively; the larger one is
    processed by the surrounding loop, which bounds the recursion depth.
    """
    while low < high:
        # Small slice: finish with insertion sort.  (Kept as in the
        # original: insertion_sort sorts the *entire* array, which is
        # correct but not restricted to the slice.)
        if high - low + 1 < INSERTION_LIMIT:
            insertion_sort(arr)
            return
        split = partition(arr, low, high, 0)
        if split - low < high - split:
            hybrid_quick_sort(arr, low, split - 1)
            low = split + 1
        else:
            hybrid_quick_sort(arr, split + 1, high)
            high = split - 1
# Driver code
a = np.load("Uniform.npy")
c = Counter(a)
print(c)
# NOTE: this counts distinct *values* that occur more than once, not the
# total number of duplicated elements.
total = sum(count > 1 for count in c.values())
print("there are %i duplicates elements in the array " %total)
# count duplicate elements
time1 = time.time()
hybrid_quick_sort(a, 0, len(a) -1)
time2 = time.time()
# Wall-clock sort time in seconds (the message below mislabels the unit).
sort_time=(time2-time1)
print('time to sort the array: %f' %sort_time)
print('the length of array is ' ,len(a))
print(a) | spyrosgeo13/TheBeg | hybrid_quicksort.py | hybrid_quicksort.py | py | 3,161 | python | en | code | 0 | github-code | 13 |
47006456244 | import sys
import traceback
from telegram import ParseMode
from telegram.utils.helpers import mention_html
from config import ownerId
# error handler sends the message to owner on error
def onError(update, context):
    """Handle any unhandled bot error: warn the user, then send the owner an
    HTML report with the traceback and as much update context as available."""
    # Reply to the user only when the update actually carries a message.
    if update.effective_message:
        text = "Произошла ошибка. Передам бате."
        update.effective_message.reply_text(text)
    # sys.exc_info()[2] is the traceback object of the active exception;
    # format_tb returns a list of already-newlined strings, so joining on
    # the empty string reassembles the full traceback text.
    trace = "".join(traceback.format_tb(sys.exc_info()[2]))
    # Gather whatever context the update exposes into payload fragments.
    fragments = []
    # Normally there is a user; if not, it is a channel or a poll update.
    if update.effective_user:
        fragments.append(
            f' with the user {mention_html(update.effective_user.id, update.effective_user.first_name)}')
    # More update types lack a chat than lack a user.
    if update.effective_chat:
        fragments.append(f' within the chat <i>{update.effective_chat.title}</i>')
        if update.effective_chat.username:
            fragments.append(f' (@{update.effective_chat.username})')
    # A poll is the only case that would otherwise leave the payload empty.
    if update.poll:
        fragments.append(f' with the poll id {update.poll.id}.')
    payload = "".join(fragments)
    # Compose the owner-facing report and send it as HTML.
    text = f"Hey.\n The error <code>{context.error}</code> happened{payload}. The full traceback:\n\n<code>{trace}" \
           f"</code>"
    context.bot.send_message(ownerId, text, parse_mode=ParseMode.HTML)
    # Re-raising here would let the logging module record the error too.
    # raise
31084168792 | '''
Desenvolvido por:
- Lucas Azevedo Zortea
- Marcelo Dalvi
- Rhayane Couto Fabres
- Victor Luis Moreira Rosa
'''
from listNode import ListNode
class DoublyLinkedListIterator:
    """Doubly linked list driven by an internal iterator (cursor).

    `iterator` always points at the current node (or None when the list is
    empty or the cursor ran off the end); insertions and removals happen
    relative to it.  `firstNode`/`lastNode` track the ends and `size` the
    element count.

    Bug fixes relative to the original: `elimNode` and `posNode` now
    return False (instead of falling through to None) when the list is
    empty, matching their documented True/False contract.
    """
    def __init__(self, _firstNode = None):
        """Create the list, optionally seeded with a single ListNode."""
        self.firstNode = _firstNode
        self.lastNode = _firstNode
        self.iterator = _firstNode
        # Seeding with a node yields a one-element list.
        if self.firstNode:
            self.size = 1
        else:
            self.size = 0
    def first_Node(self):
        '''
        Put the iterator under the first node
        '''
        self.iterator = self.firstNode
    def last_Node(self):
        '''
        Put the iterator under the last node
        '''
        self.iterator = self.lastNode
    def nextNode(self):
        '''
        Advances the iterator to the next node (no-op when undefined)
        '''
        if self.iterator:
            self.iterator = self.iterator.nextNode
    def undefinedIterator(self):
        '''
        Checks if the iterator is undefined (empty list or past the end)
        '''
        if self.iterator == None:
            return True
        else:
            return False
    def printNode(self):
        '''
        Displays list items
        '''
        print(f'[ ', end='')
        currentNode = self.firstNode
        while currentNode:
            print(f'{currentNode.data}', end=' ')
            currentNode = currentNode.nextNode
        print(f']')
    def addNode(self, data):
        '''
        Insert a node after the iterator and leave the iterator on it
        :param data: int, string...
        :return: True (insertion after the cursor always succeeds)
        '''
        newNode = ListNode(data, None, None)
        if self.size == 0:
            # First node: simultaneously first, last and current.
            self.firstNode = newNode
            self.lastNode = newNode
            self.iterator = newNode
        elif self.iterator == self.lastNode:
            # Appending at the tail.
            newNode.nextNode = None
            newNode.antNode = self.iterator
            self.iterator.nextNode = newNode
            self.lastNode = newNode
            self.iterator = newNode
        else:
            # Splicing between the cursor and its successor.
            newNode.antNode = self.iterator
            newNode.nextNode = self.iterator.nextNode
            self.iterator.nextNode.antNode = newNode
            self.iterator.nextNode = newNode
            self.iterator = newNode
        self.size += 1
        return True
    def insNode(self, data):
        '''
        Insert a node before the iterator and place the iterator on it
        :param data: int, string...
        :return: True (insertion before the cursor always succeeds)
        '''
        newNode = ListNode(data, None, None)
        if self.size == 0:
            # First node: simultaneously first, last and current.
            self.firstNode = newNode
            self.lastNode = newNode
            self.iterator = newNode
        elif self.iterator == self.firstNode:
            # Prepending at the head.
            newNode.nextNode = self.iterator
            self.iterator.antNode = newNode
            self.firstNode = newNode
            self.iterator = newNode
        else:
            # Splicing between the cursor's predecessor and the cursor.
            newNode.nextNode = self.iterator
            newNode.antNode = self.iterator.antNode
            self.iterator.antNode.nextNode = newNode
            self.iterator.antNode = newNode
            self.iterator = newNode
        self.size += 1
        return True
    def elimNode(self):
        '''
        Drop the current node; the iterator moves to the successor (or to
        the predecessor when the tail was removed)
        :return: True on success, False when the list is empty
        '''
        if self.size == 0:
            print(f'list is empty!')
            # Bug fix: the original fell through and returned None here.
            return False
        if self.iterator == self.lastNode:
            if self.firstNode == self.lastNode:
                # Removing the only element empties the list.
                self.firstNode = None
                self.lastNode = None
                self.iterator = None
            else:
                # Removing the tail: the cursor falls back to the predecessor.
                self.iterator.antNode.nextNode = None
                self.lastNode = self.iterator.antNode
                self.iterator = self.iterator.antNode
        elif self.iterator == self.firstNode:
            # Removing the head: the cursor moves to the successor.
            self.iterator.nextNode.antNode = None
            self.firstNode = self.iterator.nextNode
            self.iterator = self.iterator.nextNode
        else:
            # Removing an interior node: unlink it and move to the successor.
            self.iterator.antNode.nextNode = self.iterator.nextNode
            self.iterator.nextNode.antNode = self.iterator.antNode
            self.iterator = self.iterator.nextNode
        self.size -= 1
        return True
    def posNode(self, position):
        '''
        Put the iterator on the 1-based `position`-th node
        :param position: int
        :return: True on success, False for an empty list or a position
                 outside 1..size
        '''
        if self.size == 0:
            print(f'List is empty!')
            # Bug fix: the original returned None here instead of False.
            return False
        if position >= 1 and position <= self.size:
            # Walk forward from the head; the bounds check above
            # guarantees nextNode exists until `position` is reached.
            i = 1
            self.iterator = self.firstNode
            while i < position:
                if self.iterator.nextNode != None:
                    self.iterator = self.iterator.nextNode
                i += 1
            return True
        else:
            return False
27204096569 | import asyncio
from aiogram import types, Dispatcher
from aiogram.dispatcher import DEFAULT_RATE_LIMIT
from aiogram.dispatcher.handler import CancelHandler, current_handler
from aiogram.dispatcher.middlewares import BaseMiddleware
from aiogram.utils.exceptions import Throttled
from datetime import datetime, timedelta
class ThrottlingMiddleware(BaseMiddleware):
    """
    Anti-flood middleware: rate-limits incoming messages through
    `Dispatcher.throttle`.  A handler may override the limit or the
    throttle key via `throttling_rate_limit` / `throttling_key`
    attributes; otherwise the middleware defaults apply.
    """
    def __init__(self, limit=DEFAULT_RATE_LIMIT, key_prefix='antiflood_'):
        # Default rate limit and the prefix used to build throttle keys.
        self.rate_limit = limit
        self.prefix = key_prefix
        super(ThrottlingMiddleware, self).__init__()
    async def on_process_message(self, message: types.Message, data: dict):
        # The resolved handler (if any) may carry per-handler overrides.
        handler = current_handler.get()
        dispatcher = Dispatcher.get_current()
        if handler:
            limit = getattr(handler, "throttling_rate_limit", self.rate_limit)
            key = getattr(handler, "throttling_key", f"{self.prefix}_{handler.__name__}")
        else:
            # No handler resolved: fall back to the global limit and key.
            limit = self.rate_limit
            key = f"{self.prefix}_message"
        try:
            await dispatcher.throttle(key, rate=limit)
        except Throttled as t:
            # Over the limit: optionally warn the user, then stop processing
            # this update entirely.
            await self.message_throttled(message, t)
            raise CancelHandler()
    async def message_throttled(self, message: types.Message, throttled: Throttled):
        # Only warn for the first couple of violations so the warning itself
        # does not become spam.
        if throttled.exceeded_count <= 2:
            await message.reply("Too many requests!")
class TimeMiddleware(BaseMiddleware):
    """
    Expires inline-keyboard callbacks: queries for messages older than
    20 minutes are answered with an "expired" notice and cancelled
    before reaching any handler.
    """
    def __init__(self):
        super(TimeMiddleware, self).__init__()
    async def on_process_callback_query(self, call: types.CallbackQuery, data: dict):
        # print(call.message.date < (datetime.now() + timedelta(minutes=1)))
        # NOTE(review): datetime.now() is naive local time while
        # call.message.date comes from Telegram — confirm both use the same
        # timezone/awareness, otherwise the 20-minute window is skewed.
        if datetime.now() > (call.message.date + timedelta(minutes=20)):
            await call.answer('У этой кнопки истекло время жизни.')
            raise CancelHandler()
def fizzbuzz(number):
    """Print the FizzBuzz sequence from 1 through `number`, one per line.

    Multiples of 3 print 'fizz', multiples of 5 print 'buzz', multiples of
    both print 'fizzbuzz', and everything else prints its own value.
    """
    for value in range(1, number + 1):
        label = ('fizz' if value % 3 == 0 else '') + ('buzz' if value % 5 == 0 else '')
        # An empty label means the value is divisible by neither 3 nor 5.
        print(label if label else value)
if __name__ == '__main__':
    # Prompt for the upper bound; int() raises ValueError on non-numeric input.
    user_input = int(input('Please enter a positive number:\n'))
    fizzbuzz(user_input)
| jdsmith04/Katas | fizzbuzz.py | fizzbuzz.py | py | 325 | python | en | code | 0 | github-code | 13 |
3634749660 | # Reverse Words in a String
s = 'Python IS awesome'
# Reverse the characters of every word and flip each letter's case, then
# stitch the words back together with single spaces.  rstrip() mirrors the
# original trailing-space cleanup.
new_s = ' '.join(word[::-1].swapcase() for word in s.split(' ')).rstrip()
print(new_s)
21381271544 | from django.conf.urls import url, include
from rest_framework_nested import routers
from .views import (
DiagnosisViewSet,
PatientViewSet,
PictureView
)
router = routers.SimpleRouter()
router.register(r'patients', PatientViewSet)
diagnosis_router = routers.NestedSimpleRouter(router, r'patients', lookup='patient')
diagnosis_router.register(r'diagnosis', DiagnosisViewSet, base_name='patient-diagnosis')
urlpatterns = (
url(r'^', include(router.urls)),
url(r'^', include(diagnosis_router.urls)),
url(r'^patients/(?P<pk>[\w-]+)/picture/$', PictureView.as_view())
) | wott86/dacardioapp | apps/patients/api_patients/urls.py | urls.py | py | 591 | python | en | code | null | github-code | 13 |
36487921300 | import hashlib
import requests
import sys
import time
# api key and urls
API_KEY = ""
url = "https://api.metadefender.com/v4/"
# constants
BLOCK_SIZE = 8192
# Function calculate hash of a file
def hash_func(filename):
    """Return the hex SHA-256 digest of the file at `filename`."""
    digest = hashlib.sha256()
    with open(filename, "rb") as handle:
        # Stream the file in BLOCK_SIZE chunks so arbitrarily large files
        # never need to fit in memory; iter() stops at the b"" sentinel.
        for chunk in iter(lambda: handle.read(BLOCK_SIZE), b""):
            digest.update(chunk)
    return digest.hexdigest()
# function to check if hash exists already:
def hash_check(hash, filename):
    """Look up `hash` on MetaDefender; print results and return True if known.

    :param hash: hex digest to query
    :param filename: original file name, sent along in the request headers
    :return: True when the hash is already known (results are printed),
             False otherwise
    """
    try:
        response = requests.request(
            "GET", url + "hash/" + hash,
            # Bug fix: the original used the *variable* `filename` as the
            # header name; the header key must be the literal "filename".
            headers={"apikey": API_KEY, "filename": filename})
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
    if response.status_code == 200:
        print_results(response)
        return True
    else:
        return False
# function to print scan results
def print_results(result):
    """Pretty-print a MetaDefender scan response (`result` has a .json())."""
    report = result.json()
    print("filename: ", report["file_info"]["display_name"])
    print("overall_status: ", report["scan_results"]["scan_all_result_a"])
    # One entry per antivirus engine, keyed by engine name.
    engine_details = report["scan_results"]["scan_details"]
    for engine, details in engine_details.items():
        print("engine: ", engine)
        # An empty threat string means the engine found nothing.
        print("threat_found: ", details["threat_found"] or "CLEAN")
        print("scan_result: ", details["scan_result_i"])
        print("def_time: ", details["def_time"])
# function to upload file if hash not found and returns the id of the file
def upload_file(filename):
    """POST the file to MetaDefender for scanning and return its data_id.

    Exits the process with status 1 when the API rejects the upload.
    """
    try:
        response = requests.request(
            "POST", url + "file",
            headers={"apikey": API_KEY,
                     "Content-Type": "application/octet-stream",
                     "filename": filename},
            data=open(filename, "rb"))
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
    # Guard clause: anything but 200 is treated as a failed upload.
    if response.status_code != 200:
        print("Error: Upload unsuccessful, bad request", file=sys.stderr)
        exit(1)
    return response.json()["data_id"]
#repeatedly pulls on the id until its complete then prints the results
def pull_id(id):
    """Poll MetaDefender every 5 s until the scan for `id` completes, then
    print the results.

    Relies on the module-level `filename` assigned in the __main__ block.
    Exits the process with status 1 on a non-200 response.
    """
    while True:
        try:
            response = requests.request(
                "GET", url + "file/" + str(id),
                # Bug fix: the header name must be the literal string
                # "filename"; the original used the variable as the key.
                headers={"apikey": API_KEY, "filename": filename})
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        if response.status_code == 200:
            responsejson = response.json()
            if responsejson["scan_results"]["progress_percentage"] == 100:
                break
            else:
                time.sleep(5)
                print("Progress:" + str(responsejson["scan_results"]["progress_percentage"]) +"%")
        else:
            print("Connection error while pulling", file = sys.stderr)
            exit(1)
    print_results(response)
if __name__ == "__main__":
    # check for proper usage
    if len(sys.argv) != 2:
        print("Usage: python3 upload_file.py SampleFile.txt", file=sys.stderr)
        exit(1)
    filename = sys.argv[1]
    # NOTE: `hash` shadows the builtin of the same name; kept for compatibility.
    hash = hash_func(filename)
    ishash = hash_check(hash, filename)
    #upload the file if not hash and get the id
    if not ishash:
        data_id = upload_file(filename)
        #pull on data id and retrieve results
        pull_id(data_id)
| youngman-droid/File-Scanner | upload_file.py | upload_file.py | py | 3,294 | python | en | code | 0 | github-code | 13 |
7750379801 | '''
Aim: collect all relevant FITS files from /mnt/astrophysics/muchogalfit-output/ and port them to the all_input_fits located in /mnt/astrophysics/kconger_wisesize/. These files will be necessary when running the build_html_website.py script (in my_mucho_galfit) either locally (will have to scp the folder) or on the virtual machine.
'''
import os
import glob
import numpy as np
from astropy.table import Table
from astropy.io import fits
#convert cutouts from .fz to .fits, then save .fits to target_folder. not currently needed (all input files are already in the VFID output directories).
def fz_to_fits(path_to_im, galaxy_name, target_folder, group_name=None, group_flag=False):
    """Convert a galaxy's W3 and r-band .fits.fz cutouts to .fits in target_folder.

    :param path_to_im: directory holding the .fits.fz cutouts
    :param galaxy_name: galaxy identifier used to build the filenames
    :param target_folder: destination directory for the converted files
    :param group_name: group identifier; must be given when group_flag is True
    :param group_flag: when True, read the group's cutouts instead of the
        galaxy's (output filenames still use galaxy_name)
    """
    galaxy_w3 = galaxy_name+'-custom-image-W3.fits.fz'
    galaxy_r = galaxy_name+'-custom-image-r.fits.fz'
    galaxies = [galaxy_w3,galaxy_r]
    if group_name is not None:
        group_w3 = group_name+'-custom-image-W3.fits.fz'
        group_r = group_name+'-custom-image-r.fits.fz'
        groups = [group_w3,group_r]
    # n = 0 -> W3 cutout, n = 1 -> r-band cutout
    for n in range(2):
        galaxy_path = path_to_im+galaxies[n]
        if group_flag==True:
            # NOTE(review): if group_flag is True but group_name is None,
            # `groups` is undefined and this raises NameError — confirm
            # callers always pass both together.
            galaxy_path = path_to_im+groups[n]
        try:
            fz = fits.getdata(galaxy_path)
            # Header read from HDU 1 — presumably where the fpack-compressed
            # image lives; confirm against the cutout files' layout.
            fz_header = (fits.open(galaxy_path)[1]).header
            print('Adding '+target_folder+galaxies[n][:-3])
            fits.writeto(target_folder+galaxies[n][:-3],fz,header=fz_header,overwrite=True) #[:-3] removes the .fz from filename string
        except:
            # Bare except: any failure (missing file, bad HDU, ...) is
            # reported and skipped so the batch keeps going.
            print('Unable to pull galaxy cutout file.')
#should be two per galaxy - rband and w3
def grab_input_cutouts(catalog, cutouts_path, target_folder):
    """Copy the r-band and W3 input cutouts of every catalog galaxy.

    Expects up to two files per galaxy (r-band and W3) inside
    `cutouts_path/<VFID>/`; whatever is found is copied into
    `target_folder`.

    :param catalog: table with 'VFID' and 'objname' columns, one row per galaxy
    :param cutouts_path: directory containing one subdirectory per VFID
    :param target_folder: destination directory for the copies
    """
    import shutil  # local import keeps the module header untouched

    VFIDs = catalog['VFID']
    objnames = catalog['objname']
    for i in range(len(catalog)):
        print(VFIDs[i]+' input time')
        galaxy_folder = cutouts_path+VFIDs[i]+'/'
        input_r = glob.glob(galaxy_folder+'*-custom-image-r.fits')
        input_w3 = glob.glob(galaxy_folder+'*-custom-image-W3.fits')
        # Plain list concatenation: np.concatenate was overkill for two
        # lists of path strings.
        input_im = input_w3 + input_r
        print(input_im)
        for im in input_im:
            print('Moving '+im)
            # shutil.copy instead of os.system('cp ...'): no shell, so paths
            # with spaces or metacharacters cannot break (or inject into)
            # the command.
            shutil.copy(im, target_folder)
#should be four per galaxy - rband (nopsf, psf) and w3 (nopsf, psf)
#if galfit 'failed', then out* images will not appear in the folder.
def grab_output_cutouts(catalog, host_folder_path, target_folder):
    """Copy GALFIT output mosaics (r-band and W3, psf and nopsf) per galaxy.

    If GALFIT failed for a galaxy there are no 'out*' files in its folder
    and nothing is copied for it.

    :param catalog: table with 'VFID' and 'objname' columns, one row per galaxy
    :param host_folder_path: directory containing one subdirectory per VFID
    :param target_folder: destination directory for the copies
    """
    import shutil  # local import keeps the module header untouched

    VFIDs = catalog['VFID']
    objnames = catalog['objname']
    for i in range(len(catalog)):
        print('Moving '+VFIDs[i]+' output file, if any.')
        galaxy_folder = host_folder_path+VFIDs[i]+'/'
        output_mosaics_r = glob.glob(galaxy_folder+'*r-out*')
        output_mosaics_w3 = glob.glob(galaxy_folder+'*W3-out*')
        # Plain list concatenation instead of np.concatenate.
        output_mosaics = output_mosaics_r + output_mosaics_w3
        for im in output_mosaics:
            print('Moving '+im)
            # shutil.copy instead of os.system('cp ...'): shell-free and
            # robust to spaces/metacharacters in paths.
            shutil.copy(im, target_folder)
def grab_mask_images(catalog, host_folder_path, target_folder):
    """Copy the r-band and WISE mask images for each catalog galaxy.

    :param catalog: table with 'VFID' and 'objname' columns, one row per galaxy
    :param host_folder_path: directory containing one subdirectory per VFID
    :param target_folder: destination directory for the copies
    """
    import shutil  # local import keeps the module header untouched

    VFIDs = catalog['VFID']
    objnames = catalog['objname']
    for i in range(len(catalog)):
        print('Moving '+VFIDs[i]+' mask(s), if any.')
        galaxy_folder = host_folder_path+VFIDs[i]+'/'
        rmask = glob.glob(galaxy_folder+'*r-mask.fits')
        w3mask = glob.glob(galaxy_folder+'*wise-mask.fits')
        # Plain list concatenation instead of np.concatenate.
        masks = rmask + w3mask
        for im in masks:
            print('Moving '+im)
            # shutil.copy instead of os.system('cp ...'): shell-free and
            # robust to spaces/metacharacters in paths.
            shutil.copy(im, target_folder)
if __name__ == '__main__':
    homedir=os.getenv("HOME")
    # Galaxy catalog driving the whole run.
    vf = Table.read(homedir+'/sgacut_coadd.fits') #contains objnames, RAs, and VFIDs
    host_folder_path = '/mnt/astrophysics/muchogalfit-output/'
    # Kept for reference; not used below.
    input_cutouts_path = '/mnt/virgofilaments-data/'
    onefolder_path = '/mnt/astrophysics/kconger_wisesize/vf_html_mask/all_input_fits/'
    print('Creating target directory '+onefolder_path)
    # NOTE(review): plain `mkdir` fails when parents are missing or the
    # directory already exists; os.makedirs(..., exist_ok=True) would be safer.
    os.system('mkdir '+onefolder_path)
    print('Moving postage stamp cutouts for rband and W3...')
    grab_input_cutouts(vf, host_folder_path, onefolder_path)
    print('Moving GALFIT output mosaics for rband and w3...')
    grab_output_cutouts(vf, host_folder_path, onefolder_path)
    print('Moving r-band and W3 mask images...')
    grab_mask_images(vf, host_folder_path, onefolder_path)
| Kyssuber/research | my_mucho_galfit/website/move_fits_one_folder.py | move_fits_one_folder.py | py | 5,501 | python | en | code | 1 | github-code | 13 |
37620648212 | #!/usr/bin/env python
"""
List all package names in the repository.
"""
from __future__ import print_function
import pprint
import os
import io
import re
REPO_SRC = os.path.abspath(os.path.join(__file__, "..", "..", "..", ".."))
RGX = re.compile(r"\s*<name>(?P<pkg_name>[_\w]+)</name>")
def _get_xmls():
ret = []
for root, _, files in os.walk(REPO_SRC):
for fname in files:
if fname != "package.xml":
continue
ret.append(os.path.join(root, fname))
return ret
def _get_package_name(xml_file):
ret = ""
with io.open(xml_file) as _fp:
lines = _fp.read().splitlines()
for line in lines:
match = RGX.match(line)
if match:
ret = match.expand(r"\g<pkg_name>")
if not ret:
print("Cannot find package name: {}".format(xml_file))
return ret
def main():
    """Print every package name in the repository, mapping first."""
    xmls = _get_xmls()
    packages = {_get_package_name(path): path for path in xmls}
    # Dump the full name -> path mapping, then the sorted names alone.
    pprint.pprint(packages)
    for pkg in sorted(packages):
        print("{}".format(pkg))
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| wasn-lab/Taillight_Recognition_with_VGG16-WaveNet | src/scripts/ci/list_package_names.py | list_package_names.py | py | 1,102 | python | en | code | 2 | github-code | 13 |
27763611189 | """Represent models for near-Earth objects and their close approaches.
The `NearEarthObject` class represents a near-Earth object. Each has a unique
primary designation, an optional unique name, an optional diameter, and a flag
for whether the object is potentially hazardous.
The `CloseApproach` class represents a close approach to Earth by an NEO. Each
has an approach datetime, a nominal approach distance, and a relative approach
velocity.
A `NearEarthObject` maintains a collection of its close approaches, and a
`CloseApproach` maintains a reference to its NEO.
The functions that construct these objects use information extracted from the
data files from NASA, so these objects should be able to handle all of the
quirks of the data set, such as missing names and unknown diameters.
"""
from typing import List
from helpers import cd_to_datetime, datetime_to_str
class NearEarthObject:
    """A near-Earth object (NEO).

    Wraps the semantic and physical parameters of one object: its unique
    primary designation, an optional IAU name, an optional diameter in
    kilometers (NaN when unknown), and a potentially-hazardous flag.
    Close approaches accumulate in `approaches`, which starts empty and is
    populated by the `NEODatabase` constructor.
    """

    def __init__(
            self,
            pdes: str,
            name: str = None,
            diameter: float = float('nan'),
            hazardous: bool = False,
            **info):
        """Create a new `NearEarthObject`.

        :param pdes: Primary designation (required, unique).
        :param name: IAU name; may be None or an empty string.
        :param diameter: Diameter in kilometers; NaN when unknown.
        :param hazardous: Whether the object is potentially hazardous.
        :param info: Extra keyword arguments, accepted and ignored.
        """
        self.designation = pdes
        self.name = name
        self.diameter = diameter
        self.hazardous = hazardous
        # Filled in later (by NEODatabase) with CloseApproach instances.
        self.approaches = []

    @property
    def fullname(self):
        """Designation-plus-name label for this NEO."""
        return f"{self.designation} ({self.name})"

    def serialize(self):
        """Return a dict of this NEO's fields for CSV/JSON output."""
        return {
            'designation': self.designation,
            'name': self.name,
            'diameter_km': self.diameter,
            'potentially_hazardous': self.hazardous
        }

    def __str__(self):
        """Return `str(self)`."""
        return (
            f"NEO {self.fullname} has a diameter of {self.diameter:.3f} km and "
            f"{'is' if self.hazardous else 'is not'} potentially hazardous.")

    def __repr__(self):
        """Return `repr(self)`, a computer-readable string representation of this object."""
        return (
            f"NearEarthObject(designation={self.designation!r}, name={self.name!r}, "
            f"diameter={self.diameter:.3f}, hazardous={self.hazardous!r})")
class CloseApproach:
    """A close approach to Earth by an NEO.

    Captures the UTC datetime of closest approach, the nominal approach
    distance in astronomical units, and the relative velocity in km/s.
    The `neo` reference starts as whatever was supplied (usually None) and
    is wired to the real `NearEarthObject` by the `NEODatabase`
    constructor; until then the NEO's primary designation is held in a
    private attribute.
    """

    def __init__(
            self,
            pdes: str,
            time: str,
            distance: float = float('nan'),
            velocity: float = float('nan'),
            neo: 'NearEarthObject' = None,
            **info):
        """Create a new `CloseApproach`.

        :param pdes: Primary designation of the approaching object.
        :param time: Calendar date/time string of the close approach.
        :param distance: Nominal approach distance in au.
        :param velocity: Relative velocity at close approach in km/s.
        :param neo: The matching `NearEarthObject`, when already known.
        :param info: Extra keyword arguments, accepted and ignored.
        """
        self._designation = pdes
        # Parse NASA's calendar-date string into a datetime once, up front.
        self.time = cd_to_datetime(time)
        self.distance = distance
        self.velocity = velocity
        self.neo = neo

    @property
    def designation(self):
        """Accessor for the `_designation` property."""
        return self._designation

    @property
    def time_str(self):
        """Formatted approach time without the spurious seconds that a raw
        datetime's default representation would include."""
        return datetime_to_str(self.time)

    def serialize(self):
        """Return a dict of this approach's fields for CSV/JSON output."""
        return {
            'datetime_utc': self.time_str,
            'distance_au': self.distance,
            'velocity_km_s': self.velocity,
            'neo': self.neo.serialize()
        }

    def __str__(self):
        """Return `str(self)`."""
        return (
            f"On {self.time_str}, '{self.neo.fullname}' approaches Earth "
            f"at a distance of {self.distance:.2f} au and a velocity of {self.velocity:.2f} km/s.")

    def __repr__(self):
        """Return `repr(self)`, a computer-readable string representation of this object."""
        return (
            f"CloseApproach(time={self.time_str!r}, distance={self.distance:.2f}, "
            f"velocity={self.velocity:.2f}, neo={self.neo!r})")
| saltamay/Udacity_Intermediate_Python_NEO | models.py | models.py | py | 6,230 | python | en | code | 1 | github-code | 13 |
6368615489 | # !/usr/bin/env python3
# Author: ALP CANER SATI, May 2021
from pathlib import Path
import os
from datetime import datetime as dt
import time
import json
from traceback import format_exc
import requests
import pwd
CONFIG_PATH = "/home/pi/Desktop/config.json"
LOG_FOLDER_PATH = "/home/pi/Desktop/camera_logs/"
def create_Log_File(log: list):
    """Append log rows to today's camera log file, creating it with a header if new.

    :param log: list of rows; row 0 is a header (skipped), the rest are
        3-element [date, user, message] lists written as comma-separated lines.

    Fix: the original append branch stamped the time with microseconds while
    the create branch stripped them; both branches also duplicated the write
    logic. Both paths now share one code path and a consistent timestamp.
    """
    now = str(dt.now())
    date_str = now.split(" ")[0]
    time_str = now.split(" ")[1].split(".")[0]  # HH:MM:SS, no microseconds
    log_file_name = "camera_" + date_str + ".log"
    log_path = LOG_FOLDER_PATH + log_file_name
    is_new = log_file_name not in os.listdir(LOG_FOLDER_PATH)
    # "a" creates the file when missing, so one open() covers both cases.
    with open(log_path, "a", encoding="utf-8") as f:
        if is_new:
            f.write("******************** " + date_str + " Camera Script Logu ********************\n")
        f.write("\n\n" + time_str + " saatinde loglandi;\n")
        f.writelines([row[0] + "," + row[1] + "," + row[2] + "\n" for row in log[1:]])
def send_slack_message(payload, webhook):
    """POST a Slack message payload to a webhook URL.

    Args:
        payload (dict): Slack message body, e.g. {"text": "This is a test"}
        webhook (str): Full Slack webhook URL for the target channel.

    Returns:
        The HTTP response object, e.g. <Response [503]>
    """
    body = json.dumps(payload)
    return requests.post(webhook, body)
class SlackMessageTemplate:
    """Slack mrkdwn message bodies; `{}` slots are (file name, timestamp)."""
    motion_detected = """\
    :warning: Hi, motion detected from the camera !!
    - *File name*: `{}`
    - *Motion start*: `{}`
    """
    motion_ended = """\
    :exclamation: This is a recently detected motion from the camera. Please check your _OneDrive/camera_ folder.
    - *File name*: `{}`
    - *Motion end*: `{}` \
    """
def file_check(path_to_watch):
    """Block until at least one new file appears in *path_to_watch*.

    :param path_to_watch: directory to poll with os.listdir.
    :return: list of newly appeared file names (in directory-listing order).

    Fix: the original spun on os.listdir with no pause (100% CPU) and built
    dicts of None values just for membership tests; a set is the right
    structure and a short sleep keeps the poll loop cheap.
    """
    before = set(os.listdir(path_to_watch))
    while True:
        after = os.listdir(path_to_watch)
        added = [f for f in after if f not in before]
        if added:
            return added
        before = set(after)
        time.sleep(0.5)  # brief pause so polling doesn't busy-spin the CPU
if __name__=="__main__":
    # Accumulated log rows; row 0 is the CSV-style header.
    log = [["Date","User","Message"]]
    try:
        # Load config (watched path, slack webhook, motion log path) once at startup.
        f = open(CONFIG_PATH, "r")
        config = json.load(f)
        f.close()
        while True:
            # NOTE(review): ls1 is assigned but never used - candidate for removal.
            ls1 = os.listdir(config["path"])
            log = [["Date","User","Message"]]
            # Blocks until the motion software drops new files into the folder.
            added_file_names = file_check(config["path"])
            for i in added_file_names:
                # "lastsnap.jpg" is a rolling snapshot, not a motion event.
                if not i.endswith("lastsnap.jpg"):
                    # NOTE(review): path is assigned but never used.
                    path = config["path"]+i
                    rs = send_slack_message({"text":SlackMessageTemplate.motion_detected.format(i,dt.now())},config["slack_webhook"])
                    if rs.status_code != 200:
                        log.append([str(dt.now()),pwd.getpwuid(os.geteuid())[0],"Motion detected but Slack message could not be sent.. File Name: {}".format(i)])
                        create_Log_File(log)
                    # Busy-wait (0.2s steps) until the motion daemon writes its
                    # end-of-event marker to the tail of its log file.
                    while (not "mlp_actions: End of event" in os.popen("tail -1 {}".format(config["log_path"])).read()):
                        time.sleep(0.2)
                    send_slack_message({"text":SlackMessageTemplate.motion_ended.format(i,dt.now())},config["slack_webhook"])
            time.sleep(2)
            added_file_names = None
    except:
        # Any failure (including config errors) is logged with the traceback.
        # NOTE(review): bare except also swallows KeyboardInterrupt - confirm intended.
        log.append([str(dt.now()),pwd.getpwuid(os.geteuid())[0],format_exc()])
        create_Log_File(log)
| acsati/motion_detected_with_webcam | scripts/motion_detection.py | motion_detection.py | py | 3,512 | python | en | code | 0 | github-code | 13 |
1146478225 | from dotenv import load_dotenv
import os
import telebot
load_dotenv()
BOT_KEY = os.getenv('BOT_KEY')
bot = telebot.TeleBot(BOT_KEY)
@bot.message_handler(commands=['start'])
def start(m, res=False):
    # Reply to the /start command with a liveness message.
    # NOTE(review): `res` is never used in this handler - confirm before removing.
    bot.send_message(m.chat.id, 'I am online!')
@bot.message_handler(content_types=['text'])
def handle_text(message):
    # Echo any plain-text message back to the sender
    # (the reply prefix is Russian for "You wrote: ").
    bot.send_message(message.chat.id, 'Вы написали: ' + message.text)
# Start long-polling Telegram for updates; this call blocks the main thread.
bot.polling(none_stop=True, interval=0)
| DevOps-spb-org/python-telegram-bot-examples | echo-bot/main.py | main.py | py | 446 | python | en | code | 1 | github-code | 13 |
17182124861 | from PyQt5.QtWidgets import*
from PyQt5.QtPrintSupport import *
from PyQt5 import QtCore, QtGui, uic
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import time
import cv2
import os
import sys
from PIL import Image
import threading
import inceptiontest
ui_MainWindow = uic.loadUiType("mainwindow.ui")[0]
class Fruitdetection(ui_MainWindow ,QMainWindow):
    """Main window: pick a fruit image, run the inception model, show results."""
    def __init__(self, *args, **kwargs):
        """Initialize window state, load the Designer UI, and build menus."""
        ui_MainWindow.__init__(self, *args, **kwargs)
        QMainWindow.__init__(self)
        self.path = None        # currently selected input image path
        self.inceppath= None    # path of the model's annotated output image
        self.calory = None      # calorie estimate returned by the model
        self.category =None     # fruit category returned by the model
        self.title = 'Fruit detection'
        self.left = 10
        self.top = 10
        self.width = 600
        self.height = 500
        self.status = QStatusBar()
        self.setStatusBar(self.status)
        self.setupUi(self)
        self.setinit()
    def setinit(self):
        """Wire up the window geometry, toolbar, File menu, and show the window."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.startButton.clicked.connect(self.gotomainpro)
        # status bar config
        file_toolbar = QToolBar("File")
        file_toolbar.setIconSize(QSize(14, 14))
        self.addToolBar(file_toolbar)
        file_menu = self.menuBar().addMenu("&File")
        open_file_action = QAction("Open...", self)
        open_file_action.setStatusTip("Open file")
        open_file_action.triggered.connect(self.file_open)
        file_menu.addAction(open_file_action)
        file_toolbar.addAction(open_file_action)
        saveas_file_action = QAction("Save...", self)
        saveas_file_action.setStatusTip("Save current page to specified file")
        saveas_file_action.triggered.connect(self.file_save)
        file_menu.addAction(saveas_file_action)
        file_toolbar.addAction(saveas_file_action)
        self.show()
    def gotomainpro(self):
        """Run the model on the selected image and display image + calories."""
        if not self.path :
            self.s= "Oh no!\n Please choose your fruit :)"
            self.dialog_critical(self.s)
        else:
            # mprogram populates self.inceppath / self.calory / self.category.
            self.mprogram(self.path)
            if self.inceppath :
                self.outputimage(self.inceppath)
            else:
                return
            if self.calory:
                self.setcalory(self.calory)
            else:
                return
    def outputimage(self,mnpath):
        """Load the model's output image into the result label (label_2)."""
        if mnpath:
            try:
                image1 = QImage(mnpath)
                if image1.isNull():
                    self.dialog_critical(str("Cannot load"))
                    return
                self.label_2.setPixmap(QPixmap.fromImage(image1))
            except Exception as e:
                self.dialog_critical(str(e))
        else:
            return
    def setcalory(self,ce):
        """Show the calorie estimate and (if known) the fruit category."""
        if ce:
            self.textEdit.setText(str(ce))
        if self.category:
            self.textEdit_2.setText("Fruit category :\n {}".format(str(self.category)))
        else:
            return
    def dialog_critical(self, s):
        """Show a non-modal critical message box with text *s*."""
        dlg = QMessageBox(self)
        dlg.setText(s)
        dlg.setIcon(QMessageBox.Critical)
        dlg.show()
    def file_open(self):
        """Let the user pick an image; downscale to 120x120 and preview it."""
        self.textEdit_2.setText("Processing ...")
        options = QFileDialog.Options()
        self.path, _ = QFileDialog.getOpenFileName(self, "Open file", "",
                                                   'Images (*.png *.jpeg *.jpg *.bmp *.gif *.JPG)',options=options)
        if self.path:
            try:
                image = QImage(self.path)
                if image.isNull():
                    self.dialog_critical(str("Cannot load"))
                    return
                # Resize oversized images to the model's 120x120 input size,
                # saving a temp copy in the working directory.
                image1 = Image.open(self.path).convert("RGB")
                h,w=image1.size
                print(h,w)
                if h >= 120 or w >= 120:
                    image1 = image1.resize((120,120))
                    image1.save("MY_new_resize.jpg")
                    self.path = "MY_new_resize.jpg"
                image = QImage(self.path)
                self.label.setPixmap(QPixmap.fromImage(image))
                #self.gotomainpro()
            except Exception as e:
                self.dialog_critical(str(e))
        else:
            self.dialog_critical(str("select image"))
        #self.path = path
        return
    def file_save(self):
        """Ask for a save destination.

        NOTE(review): the chosen `path` is never used and nothing is written;
        the guard also checks self.path instead of `path` - looks unfinished.
        """
        path, _ = QFileDialog.getSaveFileName(self, "Save Image", "", "Images (*.png *.jpeg *.jpg *.bmp *.gif *.JPG)")
        if not self.path:
            # If dialog is cancelled, will return ''
            return
    def mprogram(self,path):
        """Invoke the inception model and store its (calory, image, category) results."""
        if path :
            self.calory,self.inceppath,self.category = inceptiontest.inceptionfunc(path)
        else:
            self.dialog_critical(str("We have problem to run inception.py"))
if __name__ == '__main__':
    # Create the Qt application and the main window (shown by setinit()).
    app = QApplication(sys.argv)
    window = Fruitdetection()
app.exec_() | zsrabbani/Fruit-Detection | runfile.py | runfile.py | py | 5,288 | python | en | code | 0 | github-code | 13 |
33346415750 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxDepth(self, root: Optional[TreeNode]) -> int:
        """Return the maximum depth (number of levels) of a binary tree.

        Iterative BFS: one outer iteration per tree level, counting levels.
        Fix: the original collected every node value into `lvl`/`depths`
        lists only to return len(depths) - O(n) wasted memory; a simple
        counter gives the same answer.

        :param root: root node (or None for an empty tree).
        :return: number of nodes on the longest root-to-leaf path.
        """
        if not root:
            return 0
        queue = collections.deque([root])
        depth = 0
        while queue:
            depth += 1
            # Drain exactly the nodes that form the current level.
            for _ in range(len(queue)):
                node = queue.popleft()
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
        return depth
| BradleyGenao/LeetCode-Solutions | maximum-depth-of-binary-tree/maximum-depth-of-binary-tree.py | maximum-depth-of-binary-tree.py | py | 834 | python | en | code | 0 | github-code | 13 |
25585964730 | import logging
import rpyc
from utils.strings import genRandomFilename
# exfil data over RPC
def exfilRPC(server, port, data, File=False):
    """Exfiltrate *data* to server:port over an rpyc classic connection.

    With File=True, *data* is a local file path whose bytes are written to a
    remote file; otherwise *data* is a string (a list is joined with commas)
    appended to a remote text file. Returns True on success, False on any
    logged failure.
    """
    logging.debug("Using RPC exfiltration")
    # Lists are flattened to a comma-separated string before sending.
    if not File and isinstance(data, list):
        logging.debug("data is a list, converting to string")
        data = ','.join(data)
    try:
        logging.debug("making connection over RPC")
        remote = rpyc.classic.connect(server, port=port)
        logging.debug("RPC connection made")
        if File:
            logging.debug("data is a file")
            # NOTE(review): computed but never used below.
            filename = data.split("\\")[-1]
            raw = open(data, "rb").read()
            logging.debug(f"writing file to remote host as dlpd_rpc_(unknown)")
            remote.execute(f'printf "%b" "{raw}" > dlpd_rpc_(unknown)')
        else:
            logging.debug("data is a string")
            logging.debug(f"writing data to remote host as dlpd_rpc_{genRandomFilename()}.txt")
            remote.execute(f'echo "{data}" >> dlpd_rpc_{genRandomFilename()}.txt')
        logging.debug("data exfiltration over RPC successful!")
        return True
    except Exception as exc:
        logging.error(f"Failed to exfiltrate data over RPC: {exc}")
        return False
16808790624 | import unicodedata
import pytest
from hypothesis.errors import InvalidArgument
from hypothesis.strategies import characters
from tests.common.debug import assert_no_examples, find_any, minimal
from tests.common.utils import fails_with
@fails_with(InvalidArgument)
def test_nonexistent_category_argument():
    """An unknown Unicode category name is rejected with InvalidArgument."""
    characters(exclude_categories=["foo"]).example()
def test_bad_codepoint_arguments():
    """min_codepoint greater than max_codepoint is rejected."""
    with pytest.raises(InvalidArgument):
        characters(min_codepoint=42, max_codepoint=24).example()
def test_exclude_all_available_range():
    """Excluding the only character in the codepoint range is rejected."""
    with pytest.raises(InvalidArgument):
        characters(
            min_codepoint=ord("0"), max_codepoint=ord("0"), exclude_characters="0"
        ).example()
def test_when_nothing_could_be_produced():
    """A category/codepoint combination with no members is rejected."""
    with pytest.raises(InvalidArgument):
        characters(
            categories=["Cc"], min_codepoint=ord("0"), max_codepoint=ord("9")
        ).example()
def test_characters_of_specific_groups():
    """Both listed categories are produced, and nothing outside them."""
    strat = characters(categories=("Lu", "Nd"))
    find_any(strat, lambda c: unicodedata.category(c) == "Lu")
    find_any(strat, lambda c: unicodedata.category(c) == "Nd")
    assert_no_examples(strat, lambda c: unicodedata.category(c) not in ("Lu", "Nd"))
def test_characters_of_major_categories():
    """Major-category initials select every subcategory beginning with them."""
    strat = characters(categories=("L", "N"))
    find_any(strat, lambda c: unicodedata.category(c)[0] == "L")
    find_any(strat, lambda c: unicodedata.category(c)[0] == "N")
    assert_no_examples(strat, lambda c: unicodedata.category(c)[0] not in ("L", "N"))
def test_exclude_characters_of_specific_groups():
    """Excluded categories never appear; everything else still can."""
    strat = characters(exclude_categories=("Lu", "Nd"))
    find_any(strat, lambda c: unicodedata.category(c) != "Lu")
    find_any(strat, lambda c: unicodedata.category(c) != "Nd")
    assert_no_examples(strat, lambda c: unicodedata.category(c) in ("Lu", "Nd"))
def test_exclude_characters_of_major_categories():
    """Excluding major categories removes all their subcategories."""
    strat = characters(exclude_categories=("L", "N"))
    find_any(strat, lambda c: unicodedata.category(c)[0] != "L")
    find_any(strat, lambda c: unicodedata.category(c)[0] != "N")
    assert_no_examples(strat, lambda c: unicodedata.category(c)[0] in ("L", "N"))
def test_find_one():
    """A single-codepoint range shrinks to its only character, "0"."""
    found = minimal(characters(min_codepoint=48, max_codepoint=48), lambda _: True)
    assert found == "0"
def test_find_something_rare():
    """The sparse Zs category above U+3000 is still found, and nothing else is."""
    strat = characters(categories=["Zs"], min_codepoint=12288)
    find_any(strat, lambda c: unicodedata.category(c) == "Zs")
    assert_no_examples(strat, lambda c: unicodedata.category(c) != "Zs")
def test_whitelisted_characters_alone():
    """include_characters without any other constraint is rejected."""
    with pytest.raises(InvalidArgument):
        characters(include_characters="te02тест49st").example()
def test_whitelisted_characters_overlap_blacklisted_characters():
    """Overlapping include/exclude character sets raise, naming both sets."""
    included = "te02тест49st"
    excluded = "ts94тсет"
    with pytest.raises(InvalidArgument) as excinfo:
        characters(
            min_codepoint=ord("0"),
            max_codepoint=ord("9"),
            include_characters=included,
            exclude_characters=excluded,
        ).example()
    assert repr(included) in str(excinfo)
    assert repr(excluded) in str(excinfo)
def test_whitelisted_characters_override():
    """include_characters extends the codepoint range instead of replacing it."""
    extras = "teтестst"
    strat = characters(
        min_codepoint=ord("0"),
        max_codepoint=ord("9"),
        include_characters=extras,
    )
    find_any(strat, lambda c: c in extras)
    find_any(strat, lambda c: c in "0123456789")
    assert_no_examples(strat, lambda c: c not in extras + "0123456789")
def test_blacklisted_characters():
    """Excluded digits are never generated; the minimal survivor is "1"."""
    excluded = "te02тест49st"
    strat = characters(
        min_codepoint=ord("0"), max_codepoint=ord("9"), exclude_characters=excluded
    )
    assert "1" == minimal(strat, lambda c: True)
    assert_no_examples(strat, lambda c: c in excluded)
def test_whitelist_characters_disjoint_blacklist_characters():
    """Disjoint include/exclude sets coexist; excluded characters never appear."""
    included = "123abc"
    excluded = "456def"
    strat = characters(
        min_codepoint=ord("0"),
        max_codepoint=ord("9"),
        exclude_characters=excluded,
        include_characters=included,
    )
    assert_no_examples(strat, lambda c: c in excluded)
| HypothesisWorks/hypothesis | hypothesis-python/tests/cover/test_simple_characters.py | test_simple_characters.py | py | 4,132 | python | en | code | 7,035 | github-code | 13 |
def solution(numbers):
    """For each element, find the first strictly larger value to its right.

    Returns a list where position i holds that value, or -1 when no larger
    value follows - the classic "next greater element" monotonic-stack scan.
    """
    result = [-1] * len(numbers)
    pending = []  # stack of indices still waiting for a larger value
    for idx, value in enumerate(numbers):
        while pending and numbers[pending[-1]] < value:
            result[pending.pop()] = value
        pending.append(idx)
    return result
41977764862 |
import heapq
import sys
heap=[]
input=sys.stdin.readline  # NOTE(review): shadows builtin input() for faster reads
N=int(input())
for _ in range(N):
    a=int(input().strip())
    if a>0:
        # Push (-a, a): heapq is a min-heap, so negating the key makes the
        # largest value surface first (max-heap emulation).
        heapq.heappush(heap,(-a,a)) # how to use maxheap using heapq module
    else:
        # a == 0 requests the current maximum: print and remove it, or 0 if empty.
        if len(heap):
            print(heapq.heappop(heap)[1])
        else:
            print(0)
| honghyeong/python-problem-solving | BOJ/step21_priority_queue/11279.py | 11279.py | py | 317 | python | en | code | 0 | github-code | 13 |
5379494501 | from urllib import request # 引用urllib中的request
from bs4 import BeautifulSoup
import re
import pandas as pd
from pandas import DataFrame
if __name__ == "__main__":
    response = request.urlopen("http://58921.com") # fetch the page for the request URL
    html =str(response.read(), encoding='utf-8') # decode the returned bytes as utf-8
    soup = BeautifulSoup(html, 'lxml')
    # Rows of the daily box-office table. NOTE(review): `all` shadows the builtin.
    all=soup.find_all(attrs={'id': 'front_block_top_day'})[0].find('tbody').find_all('tr')
    # for film in all:
    #     print(film.a.attrs['title'])
    all_film=[]
    for film in all:
        args = film.find_all('td')
        # [name, daily box office number, its unit char, total box office number, its unit char]
        film_info=[args[0].get_text(),
                   re.findall(r'\d+\.?\d+',args[1].get_text())[0],
                   re.findall(r'\D',args[1].get_text())[-1],
                   re.findall(r'\d+\.?\d+',args[2].get_text())[0],
                   re.findall(r'\D',args[2].get_text())[-1]]
        all_film.append(film_info)
    df=DataFrame(all_film)
    df.columns = ['film_name', 'y_box_office', 'unit1', 'a_box_office', 'unit2']
df.to_csv("d:/test.csv",index=False) | luzhonghe999/WebCrawler | 03_爬去结果存储_csv.py | 03_爬去结果存储_csv.py | py | 1,024 | python | en | code | 0 | github-code | 13 |
4882858447 | import os
import logging
from dotenv import load_dotenv
from dune_client.types import QueryParameter
from dune_client.query import QueryBase
from dune_client.api.execution import ExecutionAPI, BaseRouter
from dune_client.models import ExecutionResponse
import time
load_dotenv()
api_key = os.getenv("DUNE_API_KEY_TEAM")
# Configure logging
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=logging.INFO)
logger = logging.getLogger()
class CustomExecutionAPI(ExecutionAPI):
    """ExecutionAPI constructed from an existing BaseRouter's connection settings."""
    def __init__(self, base_router: BaseRouter):
        # Re-use the router's credentials/configuration for this API client.
        super().__init__(
            base_router.token,
            base_router.base_url,
            base_router.request_timeout,
            base_router.client_version,
            base_router.performance,
        )
    def execute_query(self, query: QueryBase) -> ExecutionResponse:
        # NOTE(review): this override only delegates to the parent - it adds no
        # behavior; confirm it isn't a stub before removing.
        return super().execute_query(query)
def main():
    """Execute a parameterized Dune query, polling while it is pending."""
    # Define the query with parameters
    query = QueryBase(
        name="sample_fact_tokenization_summary_mv_execapi",
        query_id=3125259,
        params=[
            QueryParameter.number_type(name="offset", value=1000),
            QueryParameter.number_type(name="limit", value=10000),
        ],
    )
    print("Results available at", query.url())
    # Create an instance of the BaseRouter class with your API key
    dune = BaseRouter(api_key=api_key)
    # Create an instance of the CustomExecutionAPI class using the BaseRouter instance
    execution_api = CustomExecutionAPI(dune)
    # Execute the query
    execution_response = execution_api.execute_query(query)
    if execution_response.state == "QUERY_STATE_COMPLETE":
        job_id = execution_response.job_id
        print("Execution successful. Job ID:", job_id)
        # Use the ExecutionAPI to get execution status and results
        execution_status = execution_api.get_execution_status(job_id)
        execution_results = execution_api.get_execution_results(job_id)
        # Access data from execution_status and execution_results as needed
        print("Execution Status:", execution_status)
        print("Execution Results:", execution_results)
    elif execution_response.state == "QUERY_STATE_PENDING":
        # Execution is pending, wait and check again until it's complete.
        # NOTE(review): the loop reassigns execution_response from
        # get_execution_status() - confirm that object exposes the same
        # .state/.job_id fields as the original ExecutionResponse.
        max_retries = 10  # Set the maximum number of retries
        retries = 0
        while (
            execution_response.state == "QUERY_STATE_PENDING" and retries < max_retries
        ):
            time.sleep(60)  # Wait for 60 seconds (adjust this as needed)
            execution_response = execution_api.get_execution_status(
                execution_response.job_id
            )
            retries += 1
        if execution_response.state == "QUERY_STATE_COMPLETE":
            job_id = execution_response.job_id
            print("Execution successful. Job ID:", job_id)
            # Use the ExecutionAPI to get execution status and results
            # (duplicated from the branch above - candidate for a helper).
            execution_status = execution_api.get_execution_status(job_id)
            execution_results = execution_api.get_execution_results(job_id)
            # Access data from execution_status and execution_results as needed
            print("Execution Status:", execution_status)
            print("Execution Results:", execution_results)
        else:
            print("Execution failed after retries.")
    else:
        print("Execution failed.")
if __name__ == "__main__":
    # Entry point when run as a script.
    main()
| PaulApivat/data_engineer | practice/celery-worker-etl/etl/client/sample_fact_tokenization_summary_mv_execapi.py | sample_fact_tokenization_summary_mv_execapi.py | py | 3,434 | python | en | code | 0 | github-code | 13 |
39715883832 | import Libnumcomplex as lb
import math
# Experimento de las canicas con coeficientes booleanos, se tomará el 1 como True y el 0 como False.
# m: matriz doblemente estocastica unicamente con valores 1 y 0, v: vector con posiciones iniciales
# c: número de "clicks" que realiza, debe ser un número real
def expcanicas(m, v, c):
for mult in range(c):
v = lb.multmatrices(m, v)
return v
# Experimento de las múltiples rendijas clásico probabilístico, con más de dos rendijas
# m: matriz doblemente estocastica con fraccionarios, v: vector con posiciones iniciales
# c: número de "clicks" que realiza, debe ser un número real
def expprobabilistico(m, v, c):
for mult in range(c):
v = lb.multmatrices(m, v)
return v
# Experimento de las múltiples rendijas cuántico.
# m: matriz unitaria con números complejos, v: vector con posiciones iniciales
# c: número de "clicks" que realiza, debe ser un número real
def expcuantico(m, v, c):
for mult in range(c):
v = lb.multmatrices(m, v)
return v
m1 = [
[[0, 0], [1 / 6, 0], [5 / 6, 0]],
[[1 / 3, 0], [1 / 2, 0], [1 / 6, 0]],
[[2 / 3, 0], [1 / 3, 0], [0, 0]]
]
m2 = [
[[0, 0], [1, 0], [0, 0]],
[[1, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [1, 0]]
]
m3 = [
[[1 / math.sqrt(2), 0], [1 / math.sqrt(2), 0], [0, 0]],
[[0, -1 / math.sqrt(2)], [0, 1 / math.sqrt(2)], [0, 0]],
[[0, 0], [0, 0], [0, 1]]
]
v1 = [
[[1 / 6, 0]],
[[1 / 6, 0]],
[[2 / 3, 0]]
]
v2 = [
[[2, 0]],
[[4, 0]],
[[4, 0]]
]
v3 = [
[[1 / math.sqrt(3), 0]],
[[0, 2 / math.sqrt(15)]],
[[math.sqrt(2 / 5), 0]]
]
# print(expcanicas(m2, v2, 3))
# print(expprobabilistico(m1, v1, 1))
print(expcuantico(m3, v3, 1))
| JuanMedina-R/CalculadoraNumerosComplejos | DeClasicoACuantico/Capitulo3.py | Capitulo3.py | py | 1,743 | python | es | code | 0 | github-code | 13 |
16929181572 | from flask import Flask, render_template, redirect, url_for, render_template, request, session, flash
from binance.client import Client
from binance.enums import *
app = Flask(__name__)
# NOTE(security): hard-coded session secret checked into source - move to an env var.
app.secret_key = "jsad;fk039u2401u90n3k;alkm092uqio234n92837498hwhofiuahsdf"
# client = Client(config.api_key, config.api_secret, tld = 'us')
# NOTE(review): this run-guard appears BEFORE the route definitions below; when
# executed directly, app.run() blocks here and no routes get registered - confirm
# this file is only ever launched via `flask run`.
if __name__ == "__main__":
    app.run(debug=True) #This will allows us to not have to rerun flask each time we make a change, it will automatically update our code. We can just hit refresh on the page to see new changes.
    # Putting a parameter of debug=True in app.run() will automatically put our flask in debug mode and automatically update any changes we make to flask
# The Post and GET method will allow us to retrieve information from the page
@app.route('/', methods=["POST", "GET"])
def login():
    """Validate submitted Binance API keys and render the dashboard on success."""
    # Grabbing user input from login page to validate client keys
    if request.method == "POST":
        key = request.form["key"]
        session["key"] = key
        secret = request.form["secret"]
        session["secret"] = secret
        # Passing user input into binance validation
        # NOTE(review): no try/except here - invalid keys will raise instead of
        # re-rendering the login page; confirm desired behavior.
        client = Client(key, secret, tld = 'us')
        # Testing whether the key and secret were retrieved
        # print(client, flush=True)
        # print(key, secret, flush=True)
        # print(session["key"], session["secret"])
        # Retrieving client information from binance and parsing information to retrieve available balance
        exchange_info = client.get_exchange_info()
        info = client.get_account()
        balances = info['balances']
        # Index 2 is assumed to be the tradable fiat/asset slot - TODO confirm.
        available_to_trade = info['balances'][2]['free']
        # Parsing symbols from exchange info to retrieve only crypto currency symbols
        symbols = exchange_info['symbols']
        prices = client.get_all_tickers()[ :len(balances)]
        # NOTE(review): current_prices is computed but never used.
        current_prices = prices[0]['price']
        session["available_to_trade"] = available_to_trade
        session["balances"] = balances
        session["prices"] = prices
        session["symbols"] = symbols
        # Testing that my key is validated to return my available balance to trade
        # print(session["balances"])
        # print(current_prices)
        # Once the keys are validated user is allowed into home page
        return render_template('index.html', my_balances=balances, available_to_trade=available_to_trade, prices=prices, symbols=symbols)
    # If the incorrect keys and secret were entered into the form, the user will not be allowed into the home page
    else:
        return render_template("login.html")
# Route where our main page is located
@app.route('/index', methods=["POST", "GET"])
def index():
    # Bare dashboard render; template context is populated by login/buy/sell.
    return render_template('index.html')
# Under construction
@app.route("/logout")
def logout():
    # Drop the stored API credentials and send the user back to the login page.
    session.pop("key", None)
    session.pop("secret", None)
    return redirect(url_for("login"))
# Route where our Binance buy method is housed
@app.route('/buy', methods=["POST","GET"])
def buy():
    """Place a fixed market buy of 0.01 ETHUSD and re-render the dashboard.

    Fix: the except branch used `e.message`, which does not exist on Python 3
    exceptions (BaseException.message was removed) and would itself raise an
    AttributeError; it now flashes str(e).
    """
    # If a Post request was submitted through the buy form the following will code the Buy operation
    if request.method == "POST":
        # Utilizing session to make key accessible to the buy route, to enable user to make a trade via binance
        # key = session["api_key"]
        # secret = session["api_secret"]
        # client = Client(session["api_key"], session["api_secret"], tld = 'us')
        # print(session["api_key"],session["api_secret"])
        # NOTE(security): hard-coded API credentials in source - revoke and
        # replace with the session-stored keys commented out above.
        api_key = "nSlr9x4X9DuvXOmdTKKcfMc4CPjeaTa0expkYAY7ZwSVno5GC31CFtbOMN"
        api_secret = "jPgsSS1L6McMrvbHHzOc7gifTzT7VS1jLDJ0PNWY2xVDsGmNi4FzMz6SsYb"
        client = Client(api_key, api_secret, tld = "us")
        try:
            # print(session["api_key"],session["api_secret"])
            # Function to make a trade. NOTE(review): symbol and quantity are
            # hard-coded; the sell route reads them from the form instead.
            order = client.create_order(symbol="ETHUSD",
                            side=SIDE_BUY,
                            type=ORDER_TYPE_MARKET,
                            quantity= float(.01))
        except Exception as e:
            flash(str(e), "error")
        #Connecting to user's binance account to retrieve information about account balances
        exchange_info = client.get_exchange_info()
        info = client.get_account()
        balances = info['balances']
        available_to_trade = info['balances'][2]['free']
        # Retrieve which symbol is selected for buying
        symbols = exchange_info['symbols']
        # Displays the current prices of Crypto currencies
        prices = client.get_all_tickers()
        # Once the order executes, the page refreshes and updates the available balance to trade, current holdings, and prices of crypto currencies
        return render_template('index.html', my_balances=balances, available_to_trade=available_to_trade,
                           prices=prices, symbols=symbols)
    else:
        # If a trade has not been successfully executed the user is returned the same page
        return render_template("login.html")
# Route where our Binance sell method is housed
@app.route('/sell', methods=["POST","GET"])
def sell():
    """Place a market sell for the form's symbol/quantity and re-render the dashboard."""
    # If a Post request was submitted through the sell form the following will code will execute
    if request.method == "POST":
        # NOTE(security): hard-coded API credentials in source - revoke and
        # replace with session-stored keys.
        api_key = "nSlr9x4X9DuvXOmdTKKcfMc4CPjeaTa0expkYAY7ZwSVno5GC31CFtbOMNX"
        api_secret = "jPgsSS1L6McMrvbHHzOc7gifTzT7VS1jLDJ0PNWY2xVDsGmNi4FzMz6SsYba"
        client = Client(api_key, api_secret, tld = "us")
        # Function to make a sell. NOTE(review): unlike buy(), there is no
        # try/except around create_order - a failed order raises a 500.
        order = client.create_order(symbol=request.form['symbol'],
                            side=SIDE_SELL,
                            type=ORDER_TYPE_MARKET,
                            quantity= float(request.form['quantity']))
        # Once a sell order has been successfully executed, the users available balance is retrieved to be displayed on the page
        exchange_info = client.get_exchange_info()
        info = client.get_account()
        sell_balances = info['balances']
        available_to_trade = info['balances'][2]['free']
        symbols = exchange_info['symbols']
        # Displays the current prices of the Crypto currencys
        prices = client.get_all_tickers()
        # Test to see if this route is functioning properly
        # print(request.form)
        # Once the order executes, the page refreshes and updates the available balance to trade, current holdings, and prices of crypto currencies
        return render_template('index.html', my_balances=sell_balances, available_to_trade=available_to_trade, prices=prices, symbols=symbols)
    else:
        # If a trade is unsuccessful it refreshes back to the same page
        return render_template("index.html")
# Under construction, this will house the function that will allow a user to set parameters for the bot to trade
@app.route('/settings')
def settings():
    # Placeholder response until the settings page is implemented.
    return 'settings'
@app.route('/history', methods=["POST", "GET"])
def history():
    # Render the trade-history page (template currently takes no context).
    return render_template('history.html')
| JohnLam916/Wow_Project | app.py | app.py | py | 7,367 | python | en | code | 1 | github-code | 13 |
34247961098 | import urllib.request,json
from .models import NewsArticle
# Getting api key
apikey = None
# Getting the NEWS base url
base_url = None
def configure_request(app):
    """Copy the news API base URL and key from the Flask app config into module globals."""
    global apikey,base_url
    base_url = app.config['NEWS_API_BASE_URL']
    apikey = app.config['NEWS_API_KEY']
def get_news(categories):
    '''
    Fetch news for the given category from the news API.

    Args:
        categories: category value substituted into the base URL.
    Returns:
        A list of NewsArticle objects, or None when the response has no articles.
    '''
    get_news_url = base_url.format(categories,apikey)
    with urllib.request.urlopen(get_news_url) as url:
        get_news_data = url.read()
        get_news_response = json.loads(get_news_data)
        news_results = None
        if get_news_response['articles']:
            news_results_list = get_news_response['articles']
            news_results = process_results(news_results_list)
    return news_results
def process_results(newz_list):
    '''
    Function that processes the news result and transforms it to a list of Objects
    Args:
        newz_list: A list of dictionaries that contain news details
    Returns :
        newz_results: A list of NewsArticle objects (entries without a source id are skipped)
    '''
    newz_results = []
    for source_info in newz_list:
        #source parses info from the nested source{} dict
        source = source_info.get('source')
        # NOTE(review): `id` shadows the builtin id() within this loop.
        id = source.get('id')
        broadcaster = source.get('name')
        title = source_info.get('title')
        description = source_info.get('description')
        image = source_info.get('urlToImage')
        URLsource = source_info.get('url')
        date = source_info.get('publishedAt')
        if id:
            news_object = NewsArticle(id,broadcaster,title,description,image,URLsource,date)
            # news_source_object = News1(id,broadcaster)
            newz_results.append(news_object)
            # newz_results.append(news_object)
    return newz_results
def get_broadcaster_news(id):
    '''
    Fetch articles for a single broadcaster/source id from the news API.

    Args:
        id: broadcaster/source identifier substituted into the base URL.
    Returns:
        A list of NewsArticle objects, or None when the response has no articles.
    '''
    get_broadcaster_news_url = base_url.format(id,apikey)
    with urllib.request.urlopen(get_broadcaster_news_url) as url:
        broadcaster_news_details_data = url.read()
        broadcaster_news_details_data_response = json.loads(broadcaster_news_details_data)
        bdnd_object = None
        if broadcaster_news_details_data_response['articles']:
            bdnd_object_list = broadcaster_news_details_data_response['articles']
            bdnd_object = process_results(bdnd_object_list)
    return bdnd_object
| AbugaAroni/FlaskIP2 | app/requests.py | requests.py | py | 2,441 | python | en | code | 0 | github-code | 13 |
72755156818 | from selenium import webdriver
def run_scratch_production(productionID, opCode, mess):
    """Open the Scratch production page in headless Chrome and set match cookies.

    :param productionID: id query parameter for the production page.
    :param opCode: operation code; also used as a cookie name below.
    :param mess: message appended to the opCode in the query string.
    """
    production_url = 'http://127.0.0.1:8000/team_match/production/scratch/build/index.html' \
                     '?id=' + str(productionID) + \
                     '&opCodeAndMess=' + str(opCode) + str(mess)
    # NOTE(review): hard-coded Windows path to chromedriver - only works on this machine.
    chrome_driver = 'C:\\Users\\413knight\\Desktop\\API\\team_match\\chromedriver.exe'
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    driver = webdriver.Chrome(executable_path=chrome_driver, chrome_options=options)
    driver.get(production_url)
    # Mark this opCode as a non-AI (human) match via cookies.
    driver.add_cookie({'name': opCode, 'value': 'notAI'})
    driver.add_cookie({'name': 'username', 'value': 'admin'})
    # driver.close()
| liqiniuniu/- | team_match/run_production.py | run_production.py | py | 743 | python | en | code | 0 | github-code | 13 |
44833280461 | #!/usr/bin/python3
import unittest
from models.base import Base
from models.rectangle import Rectangle
from models.square import Square
from io import StringIO
import sys
import json
class TestBase(unittest.TestCase):
    """Unit tests for Base and its Rectangle/Square JSON (de)serialization."""
    def setUp(self):
        """
        Redirect stdout so output of functions
        relying on print can be checked.
        """
        sys.stdout = StringIO()
    def tearDown(self):
        """
        Restore stdout after each test
        run by setUp.
        """
        sys.stdout = sys.__stdout__
    def test_0_id(self):
        """
        Test to check for id method
        """
        # Reset the name-mangled instance counter so ids start at 1.
        Base._Base__nb_objects = 0
        b1 = Base()
        b2 = Base()
        b3 = Base()
        b4 = Base(12)
        b5 = Base()
        self.assertEqual(b1.id, 1)
        self.assertEqual(b2.id, 2)
        self.assertEqual(b3.id, 3)
        self.assertEqual(b4.id, 12)
        self.assertEqual(b5.id, 4)
    def test_1_id(self):
        """
        Random arguments passed to check
        """
        Base._Base__nb_objects = 0
        t1 = Base(22)
        self.assertEqual(t1.id, 22)
        t2 = Base(-33)
        self.assertEqual(t2.id, -33)
        t3 = Base()
        self.assertEqual(t3.id, 1)
    def test_2_dict(self):
        """
        Test to check if dictionary
        is working
        """
        r1 = Rectangle(10, 7, 2, 8, 1)
        d1 = r1.to_dictionary()
        j = {'x': 2, 'id': 1, 'y': 8, 'height': 7, 'width': 10}
        jd = Base.to_json_string([d1])
        self.assertEqual(d1, j)
        self.assertEqual(type(d1), dict)
        self.assertEqual(type(jd), str)
    def test_3_to_json_string(self):
        """
        Test to check for string to
        json conversion
        """
        # NOTE(review): inside this class `Base.__nb_objects` mangles to
        # `Base._TestBase__nb_objects`, so this line does NOT reset the real
        # counter (compare test_0_id) - it only creates a new attribute.
        Base.__nb_objects = 0
        self.assertEqual(Base.to_json_string(None), "[]")
        self.assertTrue(type(Base.to_json_string(None)) is str)
        self.assertEqual(Base.to_json_string([]), "[]")
        self.assertTrue(type(Base.to_json_string("[]")) is str)
        dt1 = {"id": 7, "width": 10, "height": 22, "x": 4, "y": 5}
        dt2 = {"id": 8, "width": 4, "height": 2, "x": 14, "y": 3}
        conv = Base.to_json_string([dt1, dt2])
        self.assertTrue(type(conv) is str)
        d = json.loads(conv)
        self.assertEqual(d, [dt1, dt2])
    def test_4_from_json_string_empty(self):
        """
        Test to check if it works with
        empty string or none
        """
        self.assertEqual(Base.from_json_string(""), [])
        self.assertEqual(Base.from_json_string(None), [])
    def test_5_jfile_empty(self):
        """Test that saving an empty list writes an empty JSON array."""
        Rectangle.save_to_file([])
        with open("Rectangle.json", mode="r") as myFile:
            self.assertEqual([], json.load(myFile))
    def test_6_sq(self):
        """
        Test to check for square creation
        """
        S1 = Square(44, 55, 66, 77)
        S1_dict = S1.to_dictionary()
        S2 = Rectangle.create(**S1_dict)
        # Distinct instances compare unequal (default identity-based equality).
        self.assertNotEqual(S1, S2)
    def test_7_file_rect(self):
        """
        Test to check if file loads from rect
        """
        R1 = Rectangle(33, 34, 35, 26)
        R2 = Rectangle(202, 2)
        lR = [R1, R2]
        Rectangle.save_to_file(lR)
        lR2 = Rectangle.load_from_file()
        # Loaded objects are new instances, hence unequal to the originals.
        self.assertNotEqual(lR, lR2)
    def test_8_file_square(self):
        """
        Test to check if file loads from square
        """
        S1 = Square(22)
        S2 = Square(44, 44, 55, 66)
        lS = [S1, S2]
        Square.save_to_file(lS)
        lS2 = Square.load_from_file()
        self.assertNotEqual(lS, lS2)
| Hanifa-10/alx-higher_level_programming | 0x0C-python-almost_a_circle/tests/test_models/test_base.py | test_base.py | py | 3,751 | python | en | code | 0 | github-code | 13 |
33015682856 | import os
def directory_parser(path):
    """
    parses the input the program has got into an array of absolute paths of files to work with
    :param path: the input path (a directory or a single file path)
    :return: a list of absolute paths of .asm files to work with
    """
    if not os.path.isdir(path):
        # a single file: hand it back untouched
        return [path]
    valid_paths = []
    for entry in os.listdir(path):
        # Build the absolute path first so the directory check inspects the
        # real entry; the previous code ran isdir() on the bare entry name,
        # which was resolved against the current working directory.
        full_path = os.path.abspath(os.path.join(path, entry))
        if full_path.endswith(".asm") and not os.path.isdir(full_path):
            valid_paths.append(full_path)
    return valid_paths
def clean_empty(lines_array):
    """
    cleans empty lines from the .asm files
    :param lines_array: the array of lines to clean
    :return: a new list without the empty lines
    """
    # A comprehension avoids the remove-while-iterating bug of the previous
    # implementation, which skipped one of every two consecutive empty lines.
    return [line for line in lines_array if line != '']
def file_reader(asm_path):
    """
    reads a file, returns a list of the file's lines
    :param asm_path: the path of the .asm file to read
    :return: the file's lines with trailing newlines stripped, minus empty lines
    """
    with open(asm_path) as asm_file:
        stripped_lines = [raw_line.strip('\n') for raw_line in asm_file.readlines()]
    return clean_empty(stripped_lines)
def file_writer(command_array, asm_path):
    """
    writes the array of binary commands to a new .hack file with the same name
    :param command_array: the array of binary commands
    :param asm_path: the .asm file path whose .hack equivalent is written
    """
    hack_path = asm_path.replace('.asm', '.hack')
    with open(hack_path, "w") as hack_file:
        for binary_line in command_array:
            hack_file.write(binary_line)
            hack_file.write('\n')
if __name__ == '__main__':
path = "C:\\Users\\user\\Documents\\2nd\\nand2tetris\\projects\\06\\NAND-ex6\\test"
arr = directory_parser(path)
print("path array: ", arr)
com_arr = ["1st binary\n", "2nd binary\n", "1010010"]
for file_path in arr:
print("lines from path: ", file_path, " are: ", file_reader(file_path))
file_writer(com_arr, file_path)
print("binary lines from path: ", file_path, " are: ", file_reader(file_path))
print("binary lines from path: ", file_path, " are: ", file_reader(file_path))
for item in os.listdir(path):
if item.endswith(".hack"):
os.remove(os.path.join(path, item))
| damebrown/NAND_ex6 | NAND-ex6/file_parser.py | file_parser.py | py | 2,535 | python | en | code | 0 | github-code | 13 |
20448747985 | from flask import redirect, render_template, url_for, Blueprint,flash
from flask_login import login_required
from app.config import UPLOADS_FOLDER, TEMPLATE_FOLDER
from app.views.main.models import AboutPage, File
import os
# Create blueprint: all routes below hang off the site root ('/')
main_blueprint = Blueprint('main_blueprint', __name__, template_folder=TEMPLATE_FOLDER, url_prefix='/')
# Route decorator must be outermost so the blueprint registers the
# login-protected wrapper: with @login_required on top (as before), Flask
# stored the unwrapped view and the login check never ran for '/'.
@main_blueprint.route('/')
@login_required
def index():
    """Render the landing page with the (single) AboutPage record."""
    about = AboutPage.get_by_id(1)
    return render_template('main/index.html', about=about)
@main_blueprint.route('/child/<string:child_name>')
@login_required
def child(child_name):
    """Redirect to the first file recorded for *child_name*.

    If no File row contains the child's name, flash an error message
    (Georgian: "no files exist for this child") and return to the index.
    """
    # get all object from Files model where 'MAT' is in file name
    files = File.query.filter(File.file_name.like(f'%{child_name}%')).all()
    if not files:
        flash('არ არსებობს ამ ბავშვის ფაილები')
        return redirect(url_for('main_blueprint.index'))
    return redirect(url_for('main_blueprint.child_files', file=files[0].file_name, child_name=child_name))
@main_blueprint.route('/child/<string:child_name>/<string:file>')
@login_required
def child_files(child_name, file):
    """Render one CHA transcript file for a child plus a short file list.

    Parses the on-disk CHA file into header rows (lines starting with '@')
    and main/utterance rows (lines starting with '*' or '%'), and picks up
    to five "next" files of the same child for the sidebar.
    """
    child_all_files = File.query.filter(File.file_name.like(f'%{child_name}%')).all()
    cha_filename = File.query.filter(File.file_name.like(f'%{file}%')).first()
    if not cha_filename:
        flash('არ არსებობს ეს ფაილი')
        return redirect(url_for('main_blueprint.index'))
    # the five files following the current one (by row id); top up from the
    # start of the child's list if fewer than five remain
    next_five_files = File.query.filter(File.file_name.like(f'%{child_name}%')).offset(cha_filename.id).limit(5).all()
    if len(next_five_files) < 5:
        next_five_files += File.query.filter(File.file_name.like(f'%{child_name}%'), File.file_name != file).limit(5 - len(next_five_files)).all()
    print(next_five_files)
    next_five_files = set(next_five_files)
    if not child_all_files or not cha_filename:
        flash('არასწორი მოთხოვნა, სცადეთ თავიდან')
        return redirect(url_for('main_blueprint.index'))
    # CHA files live under <uploads>/cha/<CHILD_NAME_UPPERCASE>/
    cha_file_dir = os.path.join(UPLOADS_FOLDER, 'cha', child_name.upper())
    cha_file_path = os.path.join(cha_file_dir, file)
    print(cha_file_path, cha_file_dir)
    if not os.path.exists(cha_file_path):
        flash('არასწორი მოთხოვნა, სცადეთ თავიდან!')
        return redirect(url_for('main_blueprint.index'))
    with open(cha_file_path, 'r') as f:
        lines = f.readlines()
    file_head_data ={'head': [], 'ID': []}
    file_main_data = {'main': []}
    date = ''
    for line in lines:
        # header lines look like '@Key:\tvalue|value|...'
        parsed_line = line.strip('@').strip('\n').split('\t')
        if 'ID' in parsed_line[0]:
            file_head_data['ID'].append(parsed_line[1].split('|'))
        if 'Date' in parsed_line[0]:
            date = parsed_line[1].split('|')[0]
        if '*' not in parsed_line[0] and '' != parsed_line[0] and '%' not in parsed_line[0] and 'End' != parsed_line[0]:
            file_head_data['head'].append(parsed_line)
        else:
            # utterance/tier line; a leading empty field means this is a
            # continuation of the previous utterance, so append its text
            parsed_line = line.strip('*').strip('%').strip('@').strip('\n').split('\t')
            if '' == parsed_line[0] and len(file_main_data['main']) > 0:
                file_main_data['main'][-1][1] += parsed_line[1]
            if parsed_line[0] != '':
                file_main_data['main'].append(line.strip('*').strip('%').strip('@').strip('\n').split('\t'))
    return render_template('main/chafile_view.html',
                           child_name=child_name,
                           one_file=cha_filename,
                           file_head_data=file_head_data,
                           file_main_data=file_main_data,
                           date=date,
                           child_files=child_all_files,
                           first_five_file=next_five_files)
@main_blueprint.route('/cha/<string:file_name>')
@login_required
def file(file_name):
    """Serve the raw text of a CHA file, line by line, as simple HTML.

    NOTE(review): the child folder is derived from the first three characters
    of the file name — presumably a fixed-width child code; confirm naming.
    """
    file_n = file_name
    file = File.query.filter_by(file_name=file_n).first()
    if not file:
        flash('არ არსებობს ეს ფაილი')
        return redirect(url_for('main_blueprint.index'))
    # stripping file name for having only child name
    child_file_name = file_name[0:3]
    cha_file_in_mat_folder = os.path.join(UPLOADS_FOLDER, 'cha', f'{child_file_name}')
    file = os.path.join(cha_file_in_mat_folder, file_n)
    # read with line by line
    file_data = ''
    if not os.path.exists(file):
        flash('მოხდა შეცდომა ფაილის გახსნასთან დაკავშირებით!')
        return redirect(url_for('main_blueprint.index'))
    with open(file, 'r') as f:
        lines = f.readlines()
        # join lines with <br> so the browser keeps the line breaks
        for line in lines:
            file_data += line + '<br>'
    return file_data
| UnilabEdu/Childes | app/views/main/views.py | views.py | py | 4,907 | python | en | code | 0 | github-code | 13 |
37260142483 | from typing import (
cast,
List,
Tuple,
)
import pytest
from galaxyls.services.xml.nodes import XmlElement
from galaxyls.tests.unit.utils import TestUtils
class TestXmlElementClass:
@pytest.mark.parametrize(
"source, expected_offsets",
[
("<test", (5, 5)),
("<test>", (5, 5)),
("<test ", (5, 5)),
('<test attr="val">', (6, 16)),
('<test attr="val" attr2="value" >', (6, 32)),
],
)
def test_get_attributes_offsets_returns_expected(self, source: str, expected_offsets: Tuple[int, int]) -> None:
xml_document = TestUtils.from_source_to_xml_document(source)
node = xml_document.get_node_at(1)
assert node
assert node.is_element
element = cast(XmlElement, node)
actual_offsets = element.get_attributes_offsets()
assert actual_offsets == expected_offsets
@pytest.mark.parametrize(
"source, expected_contents",
[
('<test attr="val">', ['"val"']),
('<test attr="val" attr2="value" >', ['"val"', '"value"']),
],
)
def test_get_attribute_content_returns_expected(self, source: str, expected_contents: List[str]) -> None:
xml_document = TestUtils.from_source_to_xml_document(source)
node = xml_document.get_node_at(1)
assert node
assert node.is_element
element = cast(XmlElement, node)
actual_contents = [attr.value.get_content(source) for attr in element.attributes.values() if attr.value is not None]
assert actual_contents == expected_contents
| galaxyproject/galaxy-language-server | server/galaxyls/tests/integration/xml/test_element_node.py | test_element_node.py | py | 1,627 | python | en | code | 22 | github-code | 13 |
# Command Status - SMPP v5.0, section 4.7.6, table 4-45, page 116-122
# The command_status represents the means by which an ESME or MC sends an error code to its peer.
# This field is only relevant in response PDUs.
# NOTE: hex literals are already ints; the previous int(0x...) wrappers were redundant.
ESME_ROK = 0x00000000                    # No error
ESME_RINVMSGLEN = 0x00000001             # Message Length is invalid
ESME_RINVCMDLEN = 0x00000002             # Command Length is invalid
ESME_RINVCMDID = 0x00000003              # Invalid Command ID
ESME_RINVBNDSTS = 0x00000004             # Incorrect BIND Status for given command
ESME_RALYBND = 0x00000005                # ESME Already in bound state
ESME_RINVPRTFLG = 0x00000006             # Invalid priority flag
ESME_RINVREGDLVFLG = 0x00000007          # Invalid registered delivery flag
ESME_RSYSERR = 0x00000008                # System Error
ESME_RINVSRCADR = 0x0000000A             # Invalid source address
ESME_RINVDSTADR = 0x0000000B             # Invalid destination address
ESME_RINVMSGID = 0x0000000C              # Message ID is invalid
ESME_RBINDFAIL = 0x0000000D              # Bind failed
ESME_RINVPASWD = 0x0000000E              # Invalid password
ESME_RINVSYSID = 0x0000000F              # Invalid System ID
ESME_RCANCELFAIL = 0x00000011            # Cancel SM Failed
ESME_RREPLACEFAIL = 0x00000013           # Replace SM Failed
ESME_RMSGQFUL = 0x00000014               # Message queue full
ESME_RINVSERTYP = 0x00000015             # Invalid service type
ESME_RINVNUMDESTS = 0x00000033           # Invalid number of destinations
ESME_RINVDLNAME = 0x00000034             # Invalid distribution list name
ESME_RINVDESTFLAG = 0x00000040           # Destination flag is invalid (submit_multi)
ESME_RINVSUBREP = 0x00000042             # Invalid submit with replace request (i.e. submit_sm with replace_if_present_flag set)
ESME_RINVESMCLASS = 0x00000043           # Invalid esm_class field data
ESME_RCNTSUBDL = 0x00000044              # Cannot submit to distribution list
ESME_RSUBMITFAIL = 0x00000045            # submit_sm or submit_multi failed
ESME_RINVSRCTON = 0x00000048             # Invalid source address TON
ESME_RINVSRCNPI = 0x00000049             # Invalid source address NPI
ESME_RINVDSTTON = 0x00000050             # Invalid destination address TON
ESME_RINVDSTNPI = 0x00000051             # Invalid destination address NPI
ESME_RINVSYSTYP = 0x00000053             # Invalid system_type field
ESME_RINVREPFLAG = 0x00000054            # Invalid replace_if_present flag
ESME_RINVNUMMSGS = 0x00000055            # Invalid number of messages
ESME_RTHROTTLED = 0x00000058             # Throttling error (ESME has exceeded allowed message limits)
ESME_RINVSCHED = 0x00000061              # Invalid scheduled delivery time
ESME_RINVEXPIRY = 0x00000062             # Invalid message validity period (expiry time)
ESME_RINVDFTMSGID = 0x00000063           # Predefined message invalid or not found
ESME_RX_T_APPN = 0x00000064              # ESME Receiver Temporary App Error Code
ESME_RX_P_APPN = 0x00000065              # ESME Receiver Permanent App Error Code
ESME_RX_R_APPN = 0x00000066              # ESME Receiver Reject Message Error Code
ESME_RQUERYFAIL = 0x00000067             # query_sm request failed
ESME_RINVOPTPARSTREAM = 0x000000C0       # Error in the optional part of the PDU Body
ESME_ROPTPARNOTALLWD = 0x000000C1        # TLV not allowed
ESME_RINVPARLEN = 0x000000C2             # Invalid parameter length
ESME_RMISSINGOPTPARAM = 0x000000C3       # Expected TLV missing
ESME_RINVOPTPARAMVAL = 0x000000C4        # Invalid TLV Value
ESME_RDELIVERYFAILURE = 0x000000FE       # Transaction Delivery Failure (used for data_sm_resp)
ESME_RUNKNOWNERR = 0x000000FF            # Unknown error
ESME_RSERTYPUNAUTH = 0x00000100          # ESME Not authorised to use specified service_type
ESME_RPROHIBITED = 0x00000101            # ESME prohibited from using specified operation
ESME_RSERTYPUNAVAIL = 0x00000102         # Specified service_type is unavailable.
ESME_RSERTYPDENIED = 0x00000103          # Specified service_type is denied.
ESME_RINVDCS = 0x00000104                # Invalid Data Coding Scheme.
ESME_RINVSRCADDRSUBUNIT = 0x00000105     # Source Address Sub unit is Invalid.
ESME_RINVDSTADDRSUBUNIT = 0x00000106     # Destination Address Sub unit is Invalid
ESME_RINVBCASTFREQINT = 0x00000107       # Broadcast Frequency Interval is invalid.
ESME_RINVBCASTALIAS_NAME = 0x00000108    # Broadcast Alias Name is invalid.
ESME_RINVBCASTAREAFMT = 0x00000109       # Broadcast Area Format is invalid.
ESME_RINVNUMBCAST_AREAS = 0x0000010A     # Number of Broadcast Areas is invalid.
ESME_RINVBCASTCNTTYPE = 0x0000010B       # Broadcast Content Type is invalid.
ESME_RINVBCASTMSGCLASS = 0x0000010C      # Broadcast Message Class is invalid.
ESME_RBCASTFAIL = 0x0000010D             # broadcast_sm operation failed.
ESME_RBCASTQUERYFAIL = 0x0000010E        # query_broadcast_sm operation failed.
ESME_RBCASTCANCELFAIL = 0x0000010F       # cancel_broadcast_sm operation failed.
ESME_RINVBCAST_REP = 0x00000110          # Number of Repeated Broadcasts is invalid.
ESME_RINVBCASTSRVGRP = 0x00000111        # Broadcast Service Group is invalid.
ESME_RINVBCASTCHANIND = 0x00000112       # Broadcast Channel Indicator is invalid.
18274821796 | import json
import os
from assemblyline.al.service.base import ServiceBase
from assemblyline.al.common.result import Result, ResultSection, SCORE, TAG_TYPE, TAG_WEIGHT, TEXT_FORMAT
import hashlib
import time
class TorrentSlicer(ServiceBase):
    # AssemblyLine static-analysis service that bdecodes .torrent files and
    # reports their metadata, calculated sizes and per-file details.
    # NOTE(review): this is Python 2 code (iteritems/unicode below) — do not
    # run under Python 3 without porting.
    SERVICE_CATEGORY = 'Static Analysis'
    SERVICE_ACCEPTS = 'meta/torrent'
    SERVICE_DESCRIPTION = "Extracts information from torrent files"
    SERVICE_REVISION = ServiceBase.parse_revision('$Id: ebb685f586dd7a9b652ba105558bdb9dc822f287 $')
    SERVICE_VERSION = '1'
    SERVICE_ENABLED = True
    SERVICE_STAGE = 'CORE'
    SERVICE_CPU_CORES = 1
    SERVICE_RAM_MB = 256
    def __init__(self, cfg=None):
        """Forward the service configuration to the ServiceBase constructor."""
        super(TorrentSlicer, self).__init__(cfg)
    def start(self):
        """Service start hook; only logs that the service came up."""
        self.log.debug("TorrentSlicer service started")
    # noinspection PyUnresolvedReferences,PyGlobalUndefined
    def import_service_deps(self):
        """Import third-party dependencies lazily and expose them as globals."""
        global bencode, binascii, humanfriendly, size, si
        from hurry.filesize import size, si
        import bencode
        import binascii
        import humanfriendly
    # noinspection PyUnusedLocal
    @staticmethod
    def create_tables(infohash,
                      announce,
                      announce_list,
                      creation_date,
                      comment,
                      created_by,
                      encoding,
                      piece_length,
                      private,
                      name,
                      sflength,
                      sfmd5sum,
                      files,
                      piecehashes,
                      last_piece_size,
                      torrent_size,
                      torrent_type):
        """Format the parsed torrent data into three text tables.

        Returns (meta, cal, des): metadata lines, calculated-data lines and
        per-file description lines, each as a list of fixed-width strings.
        NOTE: sflength and sfmd5sum are currently unused (single-file info).
        """
        # flatten the nested announce list into one space-separated string
        announce_str = ""
        for x in announce_list:
            for y in x:
                announce_str += "{} " .format(y)
        # keys suffixed with '*' are optional fields of the torrent format
        meta_dict = {
            'InfoHash:': infohash,
            'Announce:': announce,
            'Announce List*:': announce_str,
            'Creation Date*:': creation_date,
            'Comment*:': comment,
            'Created By*:': created_by,
            'Encoding*:': encoding,
            'Piece Length:': "%s (%s)" % (str(piece_length), size(piece_length, system=si)),
            'Private*:': private,
            'Name*:': name,
        }
        meta = []
        for k, i in sorted(meta_dict.iteritems()):
            meta.append('{0:20s} {1}' .format(k, i))
        cal_dict = {
            'Type of Torrent:': torrent_type,
            'Number of Pieces:': str(len(piecehashes)),
            'Last Piece Size:': "%s (%s)" % (str(last_piece_size), size(last_piece_size, system=si)),
            'Size of Torrent:': "%s (%s)" % (str(torrent_size), size(torrent_size, system=si)),
        }
        cal = []
        for k, i in sorted(cal_dict.iteritems()):
            cal.append('{0:18s} {1}' .format(k, i))
        des = []
        if len(files) > 0:
            # header row then one fixed-width row per file in the torrent
            des.append('{:100s} {:10s} {:32s}' .format('File Path', 'Length', 'MD5Sum*'))
            des.append('{:100s} {:10s} {:32s}' .format('-' * 9, '-' * 6, '-' * 7))
            for f in files:
                fmd5 = ""
                path = ""
                for k, i in f.iteritems():
                    if k == "hash":
                        fmd5 = i
                    if k == "path":
                        # NOTE(review): keeps only the last path component —
                        # presumably intentional, but verify for nested paths
                        for x in i:
                            path = str(x)
                des.append('{:100s} {:10s} {:32s}' .format(path, size(f['length'], system=si), fmd5))
        return meta, cal, des
    def run_tosl(self, filename, request):
        """Bdecode the torrent at *filename* and attach results to *request*.

        Builds result sections (metadata, calculated data, file paths, URLs),
        saves the per-piece SHA1 list as a supplementary JSON file and tags
        URIs/file names/MD5s found in the torrent.
        """
        file_res = request.result
        torrent_file = open(filename, "rb").read()
        # noinspection PyBroadException
        try:
            metainfo = bencode.bdecode(torrent_file)
        except:
            # not bencoded data: report and bail out without raising
            res = (ResultSection(SCORE.NULL, "This is not a valid *.torrent file"))
            file_res.add_result(res)
            return
        # Grab specific data from file
        announce = metainfo['announce']
        if 'announce-list' in metainfo:
            announce_list = metainfo['announce-list']
        else:
            announce_list = ""
        if 'creation date' in metainfo:
            creation_date = metainfo['creation date']
        else:
            creation_date = ""
        if 'comment' in metainfo:
            comment = metainfo['comment']
        else:
            comment = ""
        if 'created by' in metainfo:
            created_by = metainfo['created by']
        else:
            created_by = ""
        if 'encoding' in metainfo:
            encoding = metainfo['encoding']
        else:
            encoding = ""
        if 'url-list' in metainfo:
            url_list = metainfo['url-list']
        else:
            url_list = []
        info = metainfo['info']
        piece_length = info['piece length']
        pieces = info['pieces']
        if 'private' in info:
            private = info['private']
        else:
            private = ""
        if 'name' in info:
            name = info['name']
        else:
            name = ""
        if 'length' in info:
            sflength = info['length']
        else:
            sflength = ""
        if 'md5sum' in info:
            sfmd5sum = info['md5sum']
        else:
            sfmd5sum = ""
        if 'files' in info:
            files = info['files']
        else:
            files = []
        # the infohash is the SHA1 of the re-bencoded 'info' dictionary
        infohash = hashlib.sha1(bencode.bencode(info)).hexdigest()
        # 'pieces' is a flat blob of 20-byte SHA1 digests; hex-encode each
        piecehashes = [binascii.hexlify(pieces[i:i+20]) for i in range(0, len(pieces), 20)]
        torrent_size = 0
        for i in files:
            torrent_size += i['length']
            i['length'] = i['length']
            for j in range(len(i['path'])):
                i['path'][j] = unicode(i['path'][j], "utf8")
        if torrent_size == 0:
            # no 'files' entries: single-file torrent, size comes from 'length'
            torrent_type = 'single file torrent'
            torrent_size = sflength
        else:
            torrent_type = 'multiple file torrent'
        last_piece_size = min(torrent_size, (len(piecehashes) * int(piece_length)) - torrent_size)
        # sanity checks on the declared piece geometry
        errmsg = []
        if last_piece_size > piece_length:
            errmsg.append("WARNING: The calculated length of the last piece is greater than the stated piece length")
        if (piece_length > torrent_size) and (torrent_type == 'multiple file torrent'):
            errmsg.append("WARNING: The stated length of an individual piece is greater "
                          "than the calculated torrent size")
        if creation_date != "":
            creation_date_conv = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(creation_date))
            creation_date_str = "{0} ({1})" .format(str(creation_date), creation_date_conv)
        else:
            creation_date_str = creation_date
        # Generate result output
        meta, cal, des = self.create_tables(
            infohash,
            announce,
            announce_list,
            creation_date_str,
            comment,
            created_by,
            encoding,
            piece_length,
            private,
            name,
            sflength,
            sfmd5sum,
            files,
            piecehashes,
            last_piece_size,
            torrent_size,
            torrent_type
        )
        tosl_res = (ResultSection(SCORE.NULL, "Torrent File Details"))
        comment = "NOTE: '*' Denotes an optional field in the Torrent Descriptor File. As a result it may be blank. " \
                  "Refer to the BitTorrent Specification.\n"
        tosl_res.add_line(comment)
        if len(errmsg) > 0:
            error_res = (ResultSection(SCORE.NULL, "Errors Detected:", body_format=TEXT_FORMAT.MEMORY_DUMP,
                                       parent=tosl_res))
            for line in errmsg:
                error_res.add_line(line)
        meta_res = (ResultSection(SCORE.NULL, "Meta Data:", body_format=TEXT_FORMAT.MEMORY_DUMP,
                                  parent=tosl_res))
        for line in meta:
            meta_res.add_line(line)
        cal_res = (ResultSection(SCORE.NULL, "Calculated Data:", body_format=TEXT_FORMAT.MEMORY_DUMP,
                                 parent=tosl_res))
        comment = "NOTE: the length of last piece is calculated as:" \
                  "(number of pieces X piece length) - size of torrent\n"
        cal_res.add_line(comment)
        for line in cal:
            cal_res.add_line(line)
        if len(des) > 0:
            des_res = (ResultSection(SCORE.NULL, "File paths:",
                                     body_format=TEXT_FORMAT.MEMORY_DUMP, parent=tosl_res))
            for line in des:
                des_res.add_line(line)
        if url_list:
            url_res = (ResultSection(SCORE.NULL, "Urls found in metadata:", body_format=TEXT_FORMAT.MEMORY_DUMP,
                                     parent=tosl_res))
            for url in url_list:
                url_res.add_line(url)
                url_res.add_tag(TAG_TYPE['NET_FULL_URI'], url, TAG_WEIGHT.LOW)
        # keep the full per-piece hash list as a supplementary artifact
        sha1_hashes = os.path.join(self.working_directory, "hash_of_pieces.json")
        with open(sha1_hashes, "wb") as sha1_file:
            sha1_file.write(json.dumps(piecehashes))
        request.add_supplementary(sha1_hashes, "List of hashes in order of the different pieces of the torrent (json)")
        # Tags
        if len(announce) > 0:
            tosl_res.add_tag(TAG_TYPE['NET_FULL_URI'], announce, TAG_WEIGHT.LOW)
        for it in announce_list:
            for uri in it:
                tosl_res.add_tag(TAG_TYPE['NET_FULL_URI'], uri, TAG_WEIGHT.LOW)
        if name != "":
            tosl_res.add_tag(TAG_TYPE['FILE_NAME'], name, TAG_WEIGHT.LOW)
        for f in files:
            for k, i in f.iteritems():
                if k == "hash" and len(k) > 0:
                    tosl_res.add_tag(TAG_TYPE['FILE_MD5'], i, TAG_WEIGHT.LOW)
                if k == "path" and len(k) > 0:
                    for x in i:
                        tosl_res.add_tag(TAG_TYPE['FILE_NAME'], str(x), TAG_WEIGHT.LOW)
        file_res.add_result(tosl_res)
    def execute(self, request):
        """Service entry point: download the sample and analyse it."""
        request.result = Result()
        local_path = request.download()
        self.run_tosl(local_path, request)
| deeptechlabs/cyberweapons | assemblyline/alsvc_torrentslicer/torrentslicer.py | torrentslicer.py | py | 10,224 | python | en | code | 78 | github-code | 13 |
74282754899 | from flask import Flask, url_for
from flask import request,json
import uuid
app = Flask(__name__)
# debug mode enabled — development use only
app.debug = True
@app.route("/",methods=["POST"])
def hello():
if request.method =="POST":
f=request.files["file"]
uid = str(uuid.uuid4())
f.save("D://temp//"+uid+".jpg")
return "ok"
@app.route('/articles')
def api_articles():
    """Demo endpoint: echo the URL built by url_for for this view."""
    return 'List of ' + url_for('api_articles')
@app.route('/articles/<articleid>')
def api_article(articleid):
    """Demo endpoint: echo the article id captured from the URL."""
    return 'You are reading ' + articleid
@app.route('/messages', methods = ['POST'])
def api_message():
    """Echo the posted message, branching on the request Content-Type.

    NOTE(review): the final branch returns the text "415 ..." with an HTTP
    200 status; returning ("...", 415) would set a real 415 — confirm intent.
    """
    if request.headers['Content-Type'] == 'text/plain':
        return "Text Message: " + str(request.data)
    elif request.headers['Content-Type'] == 'application/json':
        return "JSON Message: " + json.dumps(request.json)
    elif request.headers['Content-Type'] == 'application/octet-stream':
        #f = open('./binary', 'wb')
        # f.write(request.data)
        # f.close()
        return "Binary message written!"
    else:
        return "415 Unsupported Media Type ;)"
if __name__ == '__main__':
    # ("0.0.0.0") was just a parenthesized string, not a tuple; pass the
    # host explicitly so intent is clear. Listens on all interfaces.
    app.run(host="0.0.0.0")
14648841675 | """
Test for the `cdd.emit` module
"""
from os import path
from unittest import TestCase
from cdd.shared.emit import EMITTERS
from cdd.shared.pure_utils import all_dunder_for_module
from cdd.tests.utils_for_tests import unittest_main
class TestEmitters(TestCase):
    """
    Tests the `cdd.emit` module magic `__all__`
    """
    def test_emitters_root(self) -> None:
        """Confirm that emitter names are up-to-date"""
        # EMITTERS must match the dunder-exported names discovered under the
        # package root, excluding the two SQLAlchemy variants listed below.
        self.assertListEqual(
            EMITTERS,
            all_dunder_for_module(
                path.dirname(path.dirname(path.dirname(__file__))),
                (
                    "sqlalchemy_hybrid",
                    "sqlalchemy_table",
                ),
            ),
        )
| offscale/cdd-python | cdd/tests/test_emit/test_emitters.py | test_emitters.py | py | 747 | python | en | code | 10 | github-code | 13 |
31711726774 | import csv
import os
import time
from gl import *
class Controller():
    """Samples the CPU usage of GL_PACKAGE_NAME via ``adb shell dumpsys
    cpuinfo`` once per second and can dump the samples to cpuinfo.csv."""

    def __init__(self, count):
        # number of one-second samples still to take
        self.counter = count
        self.cpu_value = ""
        # collected (timestamp, cpu%) rows, starting with the CSV header
        self.all_data = [('timestamp', 'cpustatus')]

    def testprocess(self):
        """Take one sample by grepping the package line from dumpsys cpuinfo."""
        # `with` closes the pipe deterministically (the old code leaked it)
        with os.popen("adb shell dumpsys cpuinfo | grep %s" % GL_PACKAGE_NAME) as result:
            for line in result.readlines():
                # lines look like "  12% 1234/pkg: ..." — keep the number part
                self.cpu_value = line.split('%')[0]
                current_time = self.getCurrentTime()
                self.all_data.append((current_time, self.cpu_value))

    def run(self):
        """Collect ``counter`` samples, one second apart."""
        while self.counter > 0:
            self.testprocess()
            self.counter = self.counter - 1
            time.sleep(1)

    def getCurrentTime(self):
        """Return the current local time as 'YYYY-MM-DD HH:MM:SS'."""
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

    def saveToCSV(self):
        """Write all collected rows to cpuinfo.csv in the working directory."""
        # `with` guarantees the file is closed even if writerows raises
        with open('cpuinfo.csv', mode='w') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerows(csv_file and self.all_data)
if "__main__" == __name__:
con = Controller(20)
con.run()
con.saveToCSV()
| gsy13213009/py_auto | launchTime/cpuinfo.py | cpuinfo.py | py | 1,038 | python | en | code | 0 | github-code | 13 |
35605622925 | import datetime
from typing import List, Tuple
import joblib
from pandas import DataFrame
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
class Model:
    """Random-forest classifier wrapper: trains on construction, records a
    human-readable summary in ``self.info`` and supports save/open via joblib."""
    def __init__(self, df: DataFrame, target: str, features: List[str]):
        # stratified 80/20 split with a fixed seed for reproducibility
        X_train, X_test, y_train, y_test = train_test_split(
            df[features],
            df[target],
            test_size=0.20,
            random_state=831592708,
            stratify=df[target],
        )
        self.model = RandomForestClassifier(
            max_depth=10,
            max_features=3,
            n_estimators=66,
            n_jobs=-1,
            random_state=831592708,
        )
        self.model.fit(X_train, y_train)
        # baseline = accuracy of guessing uniformly among the target classes
        baseline_score = 1 / df[target].unique().shape[0]
        train_score = self.model.score(X_train, y_train)
        test_score = self.model.score(X_test, y_test)
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.info = {
            "Model Name": "Random Forest Classifier",
            "Train/Total Count": f"{X_train.shape[0]}/{df.shape[0]}",
            "Baseline Score": f"{baseline_score:.1%}",
            "Training Score": f"{train_score:.1%}",
            "Testing Score": f"{test_score:.1%}",
            "Timestamp": timestamp,
        }
    def __call__(self, feature_basis: DataFrame) -> List[Tuple]:
        """Return (predicted_class, max_class_probability) per input row."""
        prediction = self.model.predict(feature_basis)
        probability = self.model.predict_proba(feature_basis)
        return list(zip(prediction, map(max, probability)))
    def __repr__(self):
        """One 'key: value' line per entry of the training summary."""
        return "\n".join(f"{k}: {v}" for k, v in self.info.items())
    def __str__(self):
        return repr(self)
    def save(self, filepath):
        """Serialize this wrapper (model included) to *filepath* via joblib."""
        joblib.dump(self, filepath)
    @staticmethod
    def open(filepath: str):
        """Load a previously saved Model from *filepath*."""
        return joblib.load(filepath)
| BloomTech-Labs/DSLabsCurriculum | machine_learning/model.py | model.py | py | 1,871 | python | en | code | 2 | github-code | 13 |
41810454836 | # Импортируем необходимые компоненты
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from settings import TG_TOKEN, TG_API_URL
from handlers import *
# Define the main function, which connects to the Telegram platform
def main():
    """Wire up all bot handlers and start polling Telegram for updates."""
    # create the updater object used to interact with the bot
    my_bot = Updater(TG_TOKEN , TG_API_URL, use_context=True)
    my_bot.dispatcher.add_handler(CommandHandler('start', sms))  # handler for the /start command
    my_bot.dispatcher.add_handler(MessageHandler(Filters.regex('Картинки'), send_meme))
    my_bot.dispatcher.add_handler(MessageHandler(Filters.regex('Начать'), sms))  # handle the button text
    my_bot.dispatcher.add_handler(MessageHandler(Filters.regex('Анекдот'), get_anecdote))  # handle the button text
    my_bot.dispatcher.add_handler(MessageHandler(Filters.contact, get_contact))  # handler for a received contact
    my_bot.dispatcher.add_handler(MessageHandler(Filters.location, get_location))  # handler for a received location
    my_bot.dispatcher.add_handler(MessageHandler(Filters.text, parrot))  # handler for plain text messages
    my_bot.start_polling()  # poll the Telegram platform for new messages
    my_bot.idle()  # keep the bot running until it is stopped
# Run main() when executed as a script
if __name__ == '__main__':
    main()
8141492351 | import pprint, sys
from parse.integrations import super_kloudless as kloudlessDrives, super_google as google, confluence, sifter, zoho_bugtracker, gsites, trello
pp = pprint.PrettyPrinter(indent=4)
# Registry of supported integrations. Entries either carry their own handler
# module ('superService': False + 'module') or delegate to a shared
# super-service named in SuperServices ('superService': '<name>').
Services = {
    'confluence': {
        'title': 'Confluence',
        'superService': False,
        'module': confluence
    },
    'gdrive': {
        'title': 'Google Drive',
        'superService': 'kloudless',
    },
    'gmail': {
        'title': 'Gmail',
        'superService': 'google',
    },
    'dropbox': {
        'title': 'Dropbox',
        'superService': 'kloudless',
    },
    'sifter': {
        'title': 'Sifter',
        'superService': False,
        'module': sifter
    },
    'zoho': {
        'title': 'Zoho',
        'superService': False,
        'module': zoho_bugtracker
    },
    'gsites': {
        'title': 'Google Sites',
        'superService': False,
        'module': gsites
    },
    'trello': {
        'title': 'Trello',
        'superService': False,
        'module': trello
    },
}
# Shared handler modules that several Services delegate to (see Services).
SuperServices = {
    'kloudless': {
        'module': kloudlessDrives,
    },
    'google': {
        'module': google,
    },
}
def getAllServiceData(accountInfo=None, serviceName=None, superServiceName=None, specificCard=None):
  """Resolve the Services/SuperServices entries for a card or account.

  Precedence for the service name: accountInfo overrides specificCard,
  which overrides the explicit arguments. Returns None when the resolved
  service name is unknown.
  """
  pp.pprint(accountInfo)
  allServiceData = {}
  mimeType = None
  if specificCard:
    if 'service' in specificCard:
      serviceName = specificCard['service']
    if 'superService' in specificCard:
      superServiceName = specificCard['superService']
    if 'mimeType' in specificCard:
      # Defer the sub-service lookup until the service entry is resolved;
      # the old code read an undefined `service` name here, so the lookup
      # always raised NameError and was swallowed by the broad except.
      mimeType = specificCard['mimeType']
  if accountInfo:
    if 'service' in accountInfo:
      serviceName = accountInfo['service']
    if 'superService' in accountInfo:
      superServiceName = accountInfo['superService']
  if serviceName and serviceName in ['gdocs', 'gsheets', 'gslides']: # This fixes legacy cards
    allServiceData['subService'] = serviceName
    serviceName = 'gdrive'
  if serviceName and serviceName in Services:
    allServiceData['service'] = Services[serviceName]
    allServiceData['service']['serviceName'] = serviceName
  else:
    return None
  if mimeType and 'subService' not in allServiceData:
    # legacy-name subService (above) keeps precedence over the mime type
    try:
      allServiceData['subService'] = allServiceData['service']['module'].getSubServiceByFileType(mimeType)
    except Exception as e:
      print('No subService')
  if superServiceName and superServiceName in SuperServices:
    allServiceData['superService'] = SuperServices[superServiceName]
    allServiceData['superService']['superServiceName'] = superServiceName
  return allServiceData
def getIntegrationData(accountInfo=None, serviceName=None, superServiceName=None, specificCard=None):
  """Return the superService entry if present, else the service entry, else None."""
  allServiceData = getAllServiceData(accountInfo, serviceName, superServiceName, specificCard)
  print('allServiceData')
  pp.pprint(allServiceData)
  if not allServiceData:
    return None
  if 'superService' in allServiceData:
    return allServiceData['superService']
  if 'service' in allServiceData:
    return allServiceData['service']
  return None
| explaain/savvy-nlp | parse/services.py | services.py | py | 2,780 | python | en | code | 0 | github-code | 13 |
20319905140 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
class environment_functions:
    # Temperature/rainfall response curves for a mosquito-borne-disease model.
    # Each method clamps its input to the curve's valid range and evaluates a
    # Briere- or quadratic-style thermal response scaled by a per-curve constant.
    # NOTE(review): T and R appear to be NumPy arrays (boolean-mask assignment
    # below) and the clamping mutates the caller's array IN PLACE — confirm.
    u_h = None                    # human mortality rate
    u_e = None                    # egg mortality rate
    u_v = None                    # vector (adult mosquito) mortality rate
    f_v = None                    # fraction/feeding parameter
    v_t = None                    # vertical transmission parameter
    k = None                      # carrying-capacity scale
    infected_imigration = None    # infected immigration rate
    gamma = None                  # human recovery rate
    delta = None                  # incubation/transition rate
    def __init__(self, o_multiplier = 1, s_a_multiplier = 1, a_multiplier = 1,
                 p_hm_multiplier = 1, PDR_multiplier = 1, p_mh_multiplier = 1,
                 u_h_multiplier = 1, u_e_multiplier = 1, u_v_multiplier = 1,
                 f_v_multiplier = 1, d_multiplier = 1, v_t_multiplier = 1, k_multiplier = 1,
                 infected_imigration_multiplier = 1, gamma_multiplier = 1,
                 delta_multiplier = 1, incubation_multiplier = 1):
        """Scale every baseline constant by its multiplier (default 1)."""
        self.o_constant = o_multiplier * 0.00856
        self.s_a_constant = s_a_multiplier * -0.00599
        self.a_constant = a_multiplier * 0.000202
        self.p_hm_constant = p_hm_multiplier * 0.000491
        self.PDR_constant = PDR_multiplier * 0.0000665
        self.p_mh_constant = p_mh_multiplier * 0.000849
        self.u_h = u_h_multiplier * 0.00001869013
        self.u_e = u_e_multiplier * 0.01
        self.u_v = u_v_multiplier * 0.048184
        self.f_v = f_v_multiplier * 0.5
        self.v_t = v_t_multiplier * 0.11
        self.k = k_multiplier * 2
        self.gamma = gamma_multiplier * (1./5)
        self.infected_imigration = infected_imigration_multiplier * 0.00001
        self.d_constant = d_multiplier * -0.37017772
        self.delta = delta_multiplier * (1./5.9)
        self.incubation_constant = incubation_multiplier * 0.0000665
    def o(self, T):
        """Briere-type response over T in [14.58, 34.61]; clamps T in place."""
        min_value = 14.58
        max_value = 34.61
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.o_constant * T * (T - min_value) * ((max_value - T) ** (1./2))
    def i(self, T):
        """Incubation-rate response over T in [10.68, 45.90]; clamps T in place."""
        min_value = 10.68
        max_value = 45.90
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.incubation_constant * T * (T - min_value) * ((max_value - T) ** (1./2))
    def s_a(self, T):
        """Quadratic survival response over T in [13.56, 38.29]; clamps T in place."""
        min_value = 13.56
        max_value = 38.29
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.s_a_constant * (T - min_value) * (T - max_value)
    def a(self, T):
        """Biting-rate response over T in [13.35, 40.08]; clamps T in place."""
        min_value = 13.35
        max_value = 40.08
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.a_constant * T * (T - min_value) * ((max_value - T) ** (1./2))
    def p_hm(self, T):
        """Human-to-mosquito transmission probability over [12.22, 37.46]."""
        min_value = 12.22
        max_value = 37.46
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.p_hm_constant * T * (T - min_value) * ((max_value - T) ** (1./2))
    def PDR(self, T):
        """Parasite development rate over T in [10.68, 45.90]; clamps T in place."""
        min_value = 10.68
        max_value = 45.90
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.PDR_constant * T * (T - min_value) * ((max_value - T) ** (1./2))
    def p_mh(self, T):
        """Mosquito-to-human transmission probability over [17.05, 35.83]."""
        min_value = 17.05 #17 para 15
        max_value = 35.83
        T[T < min_value] = min_value
        T[T > max_value] = max_value
        return self.p_mh_constant * T * (T - min_value) * ((max_value - T) ** (1./2))
    def d(self, R):
        """Rainfall response: quadratic in R, floored at 0 below 0.0001.

        NOTE(review): unlike the temperature curves, R is NOT clamped to
        [0, 1] (the clamping code below is commented out) — confirm intent.
        """
        #min_value = 0
        #max_value = 1
        #R[R < min_value] = min_value
        #R[R > max_value] = max_value
        #return variable_peh *(-0.22358793 * R **2 + 0.32969868 * R + 0.46623609)
        rain = self.d_constant * (R - 0 ) * (R - 1)
        if rain < 0.0001:
            return 0
        return rain
27189557008 | # Jakub Worek 06.03.2023-11.03.2023
#
# Dowód poprawności:
# Jak się okazuje nie bez powodu ulubiony palindrom
# cesarzowej Bajtocji jest nieparzystej długości.
# Idąc po kolei po każdej literze napisu traktujemy ją jako środek
# prawdopodobnego palindromu, ponieważ palindrom ten jest na pewno
# nieparzystej długości.
# Następnie puszczamy z tego środka dwa "wskaźniki na litery", które
# poruszają się, jeden w lewo po napisie, drugi w prawo po napisie
# i sprawdzamy czy tworzy się palindrom i zwiększamy sukcesywnie
# długość ostatecznego rezultatu.
#
# Przykład:
# a k o n t n o k n o n a b c d d c b a
# > > > > ^
# _
# < _ >
# < < _ > >
# < < < _ > > >
# koniec, bo a != k, ten palindrom ma długość 7
#
# Złożoność obliczeniowa:
# Badamy po kolei każdą literę i rozchodzące się z niej podciągi
# O(n^2)
from zad1testy import runtests
def ceasar(s):
    """Return the length of the longest odd-length palindromic substring of s.

    Any single character is a palindrome, so the result is at least 1 for a
    non-empty string (and 1 for the empty string, matching the original).
    For each index, two cursors expand outwards while characters match,
    tracking the best length seen.  O(n^2) time, O(1) extra space.
    """
    n = len(s)
    best = 1
    for center in range(n):
        left, right = center - 1, center + 1
        while left >= 0 and right < n and s[left] == s[right]:
            length = right - left + 1
            if length > best:
                best = length
            left -= 1
            right += 1
    return best
# zmien all_tests na True zeby uruchomic wszystkie testy
runtests( ceasar , all_tests = True ) | JakubWorek/algorithms_and_data_structures_course | 2022-2023/OFFLINE/OFFLINE_1/zad1.py | zad1.py | py | 1,628 | python | pl | code | 0 | github-code | 13 |
29763569779 | # -*- coding: utf-8 -*-
"""
Created on Fri May 20 09:35:05 2022
@author: aceso
"""
import pandas as pd
import numpy as np
import os
import datetime
import matplotlib.pyplot as plt
from sklearn.experimental import enable_iterative_imputer # need this module to use below fx
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import MinMaxScaler
import pickle
import seaborn as sns
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn.metrics import mean_absolute_error
# Constant
TRAIN = os.path.join(os.getcwd(), "Data", "cases_malaysia_train.csv")
TEST = os.path.join(os.getcwd(), "Data", "cases_malaysia_test.csv")
SCALER = os.path.join(os.getcwd(), "Saved", "minmax.pkl")
#%% Classes
class EDA():
    """Preprocessing helpers for the covid case-count time series."""
    def data_clean(self,data):
        """Coerce cases_new to numeric (bad entries -> NaN) and impute NaNs
        with sklearn's chained-equation IterativeImputer.

        NOTE(review): fit_transform returns one column per input column;
        assigning it into data["cases_new"] only works when `data` has a
        single column -- confirm callers pass a one-column frame.
        """
        data["cases_new"] = pd.to_numeric(data["cases_new"], errors="coerce")
        chain = IterativeImputer()
        data["cases_new"] = chain.fit_transform(data)
        return data
    def data_scaling(self,data, title):
        """Min-max scale `data` into [0, 1], plot its distribution under
        `title`, and pickle the fitted scaler to SCALER for later reuse.
        """
        minmax = MinMaxScaler() # minmax because in this data there's no -ve value
        data = minmax.fit_transform(np.expand_dims(data, -1))
        sns.distplot(data)
        plt.title(title)
        plt.legend()
        plt.show()
        # NOTE(review): the file handle passed to pickle.dump is never
        # explicitly closed -- consider a `with open(...)` block.
        pickle.dump(minmax, open(SCALER, "wb"))
        return data
    def data_split(self, train, test, window_size):
        """Build sliding-window (X, y) pairs for train and test.

        Each sample is `window_size` consecutive values predicting the next
        value.  Test windows are seeded with the last `window_size` training
        values so the first test target has a full history.
        """
        # Training Data
        X_train = []
        y_train = []
        for i in range(window_size, len(train)): #(window_size, max number or rows)
            X_train.append(train[i-window_size:i, 0])
            y_train.append(train[i,0])
        X_train = np.array(X_train)
        X_train = np.expand_dims(X_train, -1) # dimension expansion
        y_train = np.array(y_train)
        # Testing data
        temp = np.concatenate((train, test)) # using np.concat since both in array
        temp = temp[-(window_size + len(test)):] # last 60(training) + 96(test) == -156
        X_test = []
        y_test = []
        for i in range(window_size, len(temp)):
            X_test.append(temp[i-window_size:i,0])
            y_test.append(temp[i,0])
        X_test = np.array(X_test)
        X_test = np.expand_dims(X_test, -1) # dimension expansion
        y_test = np.array(y_test)
        return X_train, y_train, X_test, y_test
class ModelConfig():
    """Factory for the Keras models used in this analysis."""
    def lstm(self, nodes, X_train):
        """Return a stacked 4-layer LSTM regressor for windows shaped like X_train.

        Every LSTM layer has `nodes` units followed by 20% dropout; the
        single-unit ReLU head keeps predictions non-negative (case counts
        cannot be negative).
        """
        model = Sequential()
        model.add(LSTM(nodes, return_sequences=True,
                       input_shape=(X_train.shape[1], 1)))
        model.add(Dropout(0.2))
        # Two more sequence-returning LSTM/Dropout pairs.
        for _ in range(2):
            model.add(LSTM(nodes, return_sequences=True))
            model.add(Dropout(0.2))
        # Final LSTM collapses the sequence before the dense head.
        model.add(LSTM(nodes))
        model.add(Dropout(0.2))
        model.add(Dense(1, activation="relu"))
        model.summary()
        return model
class Performance():
    """Model-evaluation helpers."""
    def mape(self, y_true, y_pred):
        """Print the mean absolute percentage error between y_true and y_pred.

        MAPE = mean(|y_true - y_pred| / |y_true|) * 100.  The previous
        implementation divided the MAE by sum(|y_true|), which understates
        the error by a factor of len(y_true) and is not MAPE.  `y_true`
        must not contain zeros.
        """
        y_true = np.asarray(y_true, dtype=float)
        y_pred = np.asarray(y_pred, dtype=float)
        mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
        print(f"MAPE prediction is: {mape}%")
| AceSongip/Covid_Cases_Analysis | covid_classes.py | covid_classes.py | py | 3,226 | python | en | code | 0 | github-code | 13 |
11597921772 | from sqlalchemy import Column, Integer, String, DateTime, Float, Boolean, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
    """ORM mirror of a Galaxy user account, keyed locally by `uid`."""
    __tablename__ = "user_table"
    uid = Column(Integer, primary_key=True, nullable=False)
    nice_total_disk_usage = Column(String(256), nullable=False)
    username = Column(String(256), nullable=False)
    is_admin = Column(Boolean, nullable = False)
    quota_percent = Column(Float, nullable=False)
    total_disk_usage = Column(Float, nullable=False)
    purged = Column(Boolean, nullable = False)
    quota = Column(String(256), nullable=False)
    email = Column(String(256), nullable=False)
    id = Column(String(256), nullable=False)
    deleted = Column(Boolean, nullable = False)
    def __init__(self, dictionary):
        # Populate the mapped attributes straight from the Galaxy API payload.
        self.__dict__.update(dictionary)
    def update(self, dictionary):
        """Refresh mapped attributes from `dictionary` without touching uid.

        Works on a filtered copy: the previous implementation popped 'uid'
        out of the caller's dict, silently mutating the argument.
        """
        data = {key: value for key, value in dictionary.items() if key != 'uid'}
        self.__dict__.update(data)
    def __repr__(self):
        return '<Galaxy User {}>'.format(self.id)
class HistoryNotification(Base):
    """Association row linking a history to a notification sent about it."""
    __tablename__ = 'history_notification_table'
    id = Column(Integer, primary_key=True, nullable=False)
    # Galaxy history this notification refers to.
    h_id = Column('history_id', String(256), ForeignKey('history_table.id'), nullable=False)
    # Timestamp recorded with the association -- presumably the history's
    # update time when the notification was generated; confirm with callers.
    h_date = Column(DateTime, nullable=False)
    n_id = Column('notification_id', Integer, ForeignKey('notification_table.id'), nullable=False)
class History(Base):
    """ORM mirror of a Galaxy history, keyed locally by `hid`."""
    __tablename__ = "history_table"
    hid = Column(Integer, primary_key=True, nullable=False)
    update_time = Column(DateTime, nullable=False)
    size = Column(Float, nullable=False)
    name = Column(String(256), nullable=False)
    id = Column(String(256), nullable=False)
    user_id = Column(String(256), ForeignKey('user_table.id'), nullable=True)
    status = Column(String(256))
    def __init__(self, dictionary):
        # Populate the mapped attributes straight from the Galaxy API payload.
        self.__dict__.update(dictionary)
    def update(self, dictionary):
        """Refresh mapped attributes from `dictionary` without touching hid.

        Works on a filtered copy: the previous implementation popped 'hid'
        out of the caller's dict, silently mutating the argument.
        """
        data = {key: value for key, value in dictionary.items() if key != 'hid'}
        self.__dict__.update(data)
    def __repr__(self):
        return '<Galaxy History {}>'.format(self.id)
class Notification(Base):
    """Record of a notification sent (or queued) to a Galaxy user."""
    __tablename__ = "notification_table"
    id = Column(Integer, primary_key=True, nullable=False)
    user_id = Column(String(256), ForeignKey('user_table.id'), nullable=False)
    # NOTE(review): this is String(256) but Message.message_id is an
    # Integer, and the FK target column is not declared unique -- confirm
    # the type mismatch and FK target are intentional.
    message_id = Column(String(256), ForeignKey('message_table.message_id'))
    sent = Column(DateTime, nullable=False)
    # Delivery status and notification category (free-form strings).
    status = Column(String(256), nullable=False)
    type = Column(String(64), nullable=False)
class Message(Base):
    """Message referenced by notifications via `message_id`.

    NOTE(review): `message_id` is an Integer here while
    Notification.message_id (a foreign key to this column) is String(256),
    and this column is neither unique nor a primary key -- verify the
    schema is what the application expects.
    """
    __tablename__ = "message_table"
    id = Column(Integer, primary_key=True, nullable=False)
    message_id = Column(Integer, nullable=False)
    status = Column(String(256), nullable=False)
38733638402 | """
student URL Configuration
"""
from django.urls import path
from . import views
urlpatterns = [
    # Landing page for the student app.
    path('', views.index, name='index'),
    # Student registration form.
    path('register/', views.register, name='register'),
    # Detail page for one student, keyed by integer primary key.
    path('info/<int:student_id>/', views.info, name='info'),
]
17484526595 | import itertools
import numpy as np
def print_one_transformation(imgs):
    """Show one (color, gray, covered, recovered) quadruple in a 2x2 grid.

    The grayscale renderings occupy the right-hand column.
    """
    import matplotlib.pyplot as plt
    assert len(imgs) == 4, "4 images needed"
    _, axes = plt.subplots(2, 2)
    color, gray, covered, recovered = (np.squeeze(img) for img in imgs)
    axes[0, 0].imshow(color)
    axes[0, 1].imshow(gray, cmap='gray')
    axes[1, 0].imshow(covered)
    axes[1, 1].imshow(recovered, cmap='gray')
def print_batch(color, gray, covered, recovered, n=10, skip=0):
    """Plot `n` quadruples from the four parallel sequences, after
    skipping the first `skip` of them."""
    quadruples = zip(color, gray, covered, recovered)
    for quadruple in itertools.islice(quadruples, skip, skip + n):
        print_one_transformation(quadruple)
def print_from_set(m, S, n=10, skip=0):
    # Run the model's encoder/decoder over a batch drawn from dataset S and
    # plot the (color, gray, covered, recovered) quadruples.
    # NOTE(review): `skip` is applied *after* taking only n samples, so with
    # skip > 0 fewer than n quadruples are shown -- confirm that is intended.
    subset = S.take(n)
    covered = m.encoder(subset)
    recovered = m.decoder(covered)
    color, gray = subset['color'], subset['gray']
    print_batch(color, gray, covered, recovered, n=n, skip=skip)
34467618427 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 18 21:03:40 2021
@author: Anthony
This group of function definitions can be used to apply low-pass Butterworth
filters to experimental data.
"""
import numpy as np
import scipy.signal as sg
import pandas as pd
# %% butterworth trial
def apply_butterworth_filter(trial, filter_frequencies, sampling_frequency=1000):
    """
    Apply a low-pass Butterworth filter to select signals within a data trial.

    This function accepts a Pandas DataFrame containing prosthesis signals and
    a dictionary with signal names and filter cut-off frequencies, and returns
    a new Pandas DataFrame with low-pass filtered signals.  A second-order
    Butterworth filter is applied forward and backward (scipy.signal.filtfilt),
    so the output has zero phase lag.

    Parameters
    ----------
    trial : Pandas DataFrame
        DataFrame of raw signals collected on the COBRA ankle prosthesis.
        Each column is a signal name.
    filter_frequencies : Dictionary
        Keys are signal names matching columns of `trial`; values are the
        desired cut-off frequencies in Hz.
    sampling_frequency : int, optional
        Sampling rate of the signals in Hz (default 1000).

    Returns
    -------
    trial_filtered_signals : Pandas DataFrame
        The newly filtered signals, one column per entry in
        `filter_frequencies`, with the same number of rows as `trial`.
    """
    # Size the output from the data itself instead of assuming a fixed
    # 10-second recording (the old code hard-coded 10 s * fs rows, which
    # broke for trials of any other length; for 10 s @ 1 kHz the result
    # is identical).
    num_signals = len(filter_frequencies)
    num_datapoints = len(trial)
    trial_filtered_signals = pd.DataFrame(
        data=np.zeros((num_datapoints, num_signals)),
        columns=filter_frequencies.keys())
    order = 2  # same filter order for every signal
    for signal_name, cutoff in filter_frequencies.items():
        signal = trial[signal_name]
        # Wn is the cutoff as a fraction of the Nyquist frequency.
        b, a = sg.butter(N=order, Wn=cutoff / (0.5 * sampling_frequency))
        trial_filtered_signals[signal_name] = sg.filtfilt(b, a, signal)
    return trial_filtered_signals
# %%
def filter_participant_trials(trials, filter_frequencies):
    """
    Low-pass filter all trials for a single participant.

    Parameters
    ----------
    trials : Dictionary
        Maps each file name to a Pandas DataFrame of raw prosthesis signals.
    filter_frequencies : Dictionary
        Keys are signal names to filter; values are the cut-off frequencies
        in Hz.

    Returns
    -------
    filtered_trials : Dictionary
        Maps each file name to a DataFrame of the newly filtered signals.
    """
    return {
        name: apply_butterworth_filter(trial, filter_frequencies)
        for name, trial in trials.items()
    }
# %% filter signals
def filter_signals(data, filter_frequencies):
    """
    Zero-lag low-pass filter different signals at unique cutoff frequencies
    for all participants and all trials.

    Uses a bi-directional low-pass Butterworth filter.  The input structure
    is modified in place: each participant entry gains a 'filtered signals'
    key built from its 'raw signals'.

    Parameters
    ----------
    filter_frequencies : dict
        Each key is a signal present in the real-time data; each value is
        the cutoff frequency at which to low-pass filter that signal.

    Returns
    -------
    None.
    """
    for participant_data in data.values():
        participant_data['filtered signals'] = filter_participant_trials(
            participant_data['raw signals'], filter_frequencies)
    return None
37941148068 |
# ATLAS Athena job options: Geant4 simulation of a single-particle gun
# aimed at the forward region (ZDC), writing HITS output for 3 events.
from AthenaCommon.AlgSequence import AlgSequence
topSeq = AlgSequence()
ServiceMgr.MessageSvc.OutputLevel = DEBUG
# Enable only the beam pipe, the ZDC and the truth record.
from AthenaCommon.DetFlags import DetFlags
DetFlags.bpipe_setOn()
DetFlags.ZDC_setOn()
DetFlags.Truth_setOn()
from AthenaServices.AthenaServicesConf import AtRndmGenSvc
ServiceMgr += AtRndmGenSvc()
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.ConditionsTag = "OFLCOND-SDR-BS7T-05-14"
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
athenaCommonFlags.PoolHitsOutput = "atlasG4.hits.pool.root"
athenaCommonFlags.EvtMax = 3
# Simulation flags: GEO-18 layout, event filter and magnetic field off,
# forward detectors enabled in mode 2.
from G4AtlasApps.SimFlags import simFlags
simFlags.load_atlas_flags()
simFlags.SimLayout='ATLAS-GEO-18-01-00'
simFlags.EventFilter.set_Off()
simFlags.MagneticField.set_Off()
simFlags.ForwardDetectors.set_On()
simFlags.ForwardDetectors=2
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
athenaCommonFlags.PoolEvgenInput.set_Off()
import AthenaCommon.AtlasUnixGeneratorJob
# Single-particle gun orders: PDG 2112 (neutron), fixed z vertex at
# -139500.0 and eta fixed at -100 (far forward), energy 1360000
# (units per ParticleGenerator convention -- presumably MeV; confirm).
spgorders = ['pdgcode: constant 2112',
             'vertX: flat -1.0 1.0',
             'vertY: flat -1.0 1.0',
             'vertZ: constant -139500.0',
             't: constant 0.0',
             'eta: constant -100.0',
             'phi: flat 0 6.28318',
             'e: constant 1360000'
             ]
from ParticleGenerator.ParticleGeneratorConf import ParticleGenerator
topSeq += ParticleGenerator()
topSeq.ParticleGenerator.orders = sorted(spgorders)
include("G4AtlasApps/G4Atlas.flat.configuration.py")
from AthenaCommon.CfgGetter import getAlgorithm
topSeq += getAlgorithm("G4AtlasAlg",tryDefaultConfigurable=True)
| rushioda/PIXELVALID_athena | athena/ForwardDetectors/ZDC/ZDC_SimuDigitization/share/jobOptions.G4Atlas.zdc.pgun.py | jobOptions.G4Atlas.zdc.pgun.py | py | 1,626 | python | en | code | 1 | github-code | 13 |
74907154257 | import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
img = cv.imread("../../resource/chapter6/1.jpg")
def showing(img, isgray=False):
    """Display `img` with axes hidden, using a gray colormap when `isgray`."""
    plt.axis("off")
    kwargs = {"cmap": "gray"} if isgray else {}
    plt.imshow(img, **kwargs)
    plt.show()
# Re-read the source image and build an integer-weighted grayscale rendition.
img = cv.imread("../../resource/chapter6/1.jpg")
img = np.array(img, dtype=np.int32)
# Integer channel weights summing to 256 (28 + 151 + 77), so the >> 8
# below divides the weighted sum by 256.
img[..., 0] = img[..., 0] * 28.0
img[..., 1] = img[..., 1] * 151.0
img[..., 2] = img[..., 2] * 77.0
img = np.sum(img, axis=2)
# Vectorised replacement for the per-pixel Python comprehension that
# right-shifted every element by 8: same values, no Python-level loop.
arr = np.right_shift(img, 8)
showing(Image.fromarray(arr), True)
| codezzzsleep/records2.1 | robot-and-vision/test/chapter6/demo04.py | demo04.py | py | 647 | python | en | code | 0 | github-code | 13 |
3579935528 | # -*- coding: utf-8 -*-
from io import StringIO
import sys
import unittest
# Import urlencode from its Python 3 location, falling back to Python 2,
# and define convert_str to paper over the bytes/str difference.
try:
    # Python 3
    from urllib.parse import urlencode

    # Convert bytes to str, if required
    def convert_str(s):
        return s.decode('utf-8') if isinstance(s, bytes) else s
except ImportError:
    # Python 2.  Narrowed from a bare `except:` so that unrelated errors
    # (e.g. KeyboardInterrupt, typos inside the try block) propagate.
    from urllib import urlencode

    # No conversion required
    def convert_str(s):
        return s
import awsgi
class TestAwsgi(unittest.TestCase):
def compareStringIOContents(self, a, b, msg=None):
a_loc = a.tell()
b_loc = b.tell()
a.seek(0)
b.seek(0)
if a.read() != b.read():
raise self.failureException(msg)
a.seek(a_loc)
b.seek(b_loc)
def test_environ(self):
event = {
'httpMethod': 'TEST',
'path': '/test',
'queryStringParameters': {
'test': '✓',
},
'body': u'test',
'headers': {
'X-test-suite': 'testing',
'Content-type': 'text/plain',
'Host': 'test',
'X-forwarded-for': 'first, second',
'X-forwarded-proto': 'https',
'X-forwarded-port': '12345',
},
}
expected = {
'REQUEST_METHOD': event['httpMethod'],
'SCRIPT_NAME': '',
'PATH_INFO': event['path'],
'QUERY_STRING': urlencode(event['queryStringParameters']),
'CONTENT_LENGTH': str(len(event['body'])),
'HTTP': 'on',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.input': StringIO(event['body']),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'CONTENT_TYPE': event['headers']['Content-type'],
'HTTP_CONTENT_TYPE': event['headers']['Content-type'],
'SERVER_NAME': event['headers']['Host'],
'HTTP_HOST': event['headers']['Host'],
'REMOTE_ADDR': event['headers']['X-forwarded-for'].split(', ')[0],
'HTTP_X_FORWARDED_FOR': event['headers']['X-forwarded-for'],
'wsgi.url_scheme': event['headers']['X-forwarded-proto'],
'HTTP_X_FORWARDED_PROTO': event['headers']['X-forwarded-proto'],
'SERVER_PORT': event['headers']['X-forwarded-port'],
'HTTP_X_FORWARDED_PORT': event['headers']['X-forwarded-port'],
'HTTP_X_TEST_SUITE': event['headers']['X-test-suite'],
}
result = awsgi.environ(event, object())
self.addTypeEqualityFunc(StringIO, self.compareStringIOContents)
for k, v in result.items():
self.assertEqual(v, expected[k])
| vecchp/awsgi | test_awsgi.py | test_awsgi.py | py | 2,772 | python | en | code | null | github-code | 13 |
36754330938 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
def to_grayscale(image):
    """Return a 2-D luminance array from the first three channels of `image`,
    using weights 0.2126, 0.7152, 0.0722 (BT.709 luma coefficients)."""
    weights = [0.2126, 0.7152, 0.0722]
    return np.dot(image[..., :3], weights)
def to_red(image):
    """Return a copy of `image` with the green and blue channels zeroed.

    The input array is left untouched; any extra channels (e.g. alpha)
    are preserved.
    """
    red_only = image.copy()
    red_only[:, :, 1:3] = 0
    return red_only
def to_green(image):
    """Return a copy of `image` with the red and blue channels zeroed.

    The input array is left untouched; any extra channels (e.g. alpha)
    are preserved.
    """
    green_only = image.copy()
    for channel in (0, 2):
        green_only[:, :, channel] = 0
    return green_only
def to_blue(image):
    """Return a copy of `image` with the red and green channels zeroed.

    The input array is left untouched; any extra channels (e.g. alpha)
    are preserved.
    """
    blue_only = image.copy()
    blue_only[:, :, 0:2] = 0
    return blue_only
def main():
    # Load the demo painting and show each colour channel in its own row,
    # then draw the grayscale rendition.
    image = "src/painting.png"
    painting=plt.imread(image)
    plt.subplot(3,1,1)
    plt.imshow(to_red(painting))
    plt.subplot(3,1,2)
    plt.imshow(to_green(painting))
    plt.subplot(3,1,3)
    plt.imshow(to_blue(painting))
    # NOTE(review): this draws into the third subplot's axes, covering the
    # blue-channel image that was just drawn there -- confirm intended.
    plt.imshow(to_grayscale(painting), cmap=plt.get_cmap('gray'), vmin=0, vmax=1)
    plt.show()

if __name__ == "__main__":
    main()
| tugee/dap2020 | part03-e11_to_grayscale/src/to_grayscale.py | to_grayscale.py | py | 938 | python | en | code | 0 | github-code | 13 |
def ucs(goal, start):
    """Uniform-cost search over the module-level `graph`/`cost` globals.

    Returns a list with the minimum path cost from `start` to each node in
    `goal`; 9999 acts as an "unreached" sentinel.  The frontier is a plain
    list re-sorted on every iteration, with priorities stored negated so
    the cheapest entry sorts last.
    """
    global graph, cost
    ans = []
    queue = []
    for i in range(len(goal)):
        ans.append(9999)  # ans = big value (unreached sentinel)
    queue.append([0, start])
    # dict for visited
    visited = {}
    count = 0
    while (len(queue) > 0):  # while not empty
        queue = sorted(queue)
        p = queue[-1]  # last element in queue (cheapest, since negated)
        del queue[-1]
        p[0] *= -1  # undo the negation to recover the true path cost
        if (p[1] in goal):
            # get the position
            index = goal.index(p[1])
            # if a new goal is reached
            if (ans[index] == 9999):
                count += 1
            # if the cost is less
            if (ans[index] > p[0]):
                ans[index] = p[0]
            # pop the element
            # NOTE(review): `p` was already removed above, so this deletes a
            # *second*, unrelated frontier entry (and would raise IndexError
            # on an empty queue).  Looks inherited from the reference
            # implementation -- confirm before relying on results.
            del queue[-1]
            queue = sorted(queue)
            if (count == len(goal)):
                return ans
        # check for the non visited nodes
        if (p[1] not in visited):
            for i in range(len(graph[p[1]])):
                # value is multiplied by -1 so that
                # least priority is at the top
                queue.append([(p[0] + cost[(p[1], graph[p[1]][i])]) * -1, graph[p[1]][i]])
            # mark as visited
            visited[p[1]] = 1
    return ans
if __name__ == '__main__':
    # create the graph
    # Directed adjacency lists indexed by node id (8 slots; node 7 unused),
    # with edge weights kept separately in `cost`, keyed by (src, dst).
    graph = [[] for i in range(8)]
    cost = {}
    graph[0].append(1)
    graph[0].append(3)
    graph[3].append(1)
    graph[3].append(6)
    graph[3].append(4)
    graph[1].append(6)
    graph[4].append(2)
    graph[4].append(5)
    graph[2].append(1)
    graph[5].append(2)
    graph[5].append(6)
    graph[6].append(4)
    cost[(0, 1)] = 2
    cost[(0, 3)] = 5
    cost[(1, 6)] = 1
    cost[(3, 1)] = 5
    cost[(3, 6)] = 6
    cost[(3, 4)] = 2
    cost[(2, 1)] = 4
    cost[(4, 2)] = 4
    cost[(4, 5)] = 3
    cost[(5, 2)] = 6
    cost[(5, 6)] = 3
    cost[(6, 4)] = 7
    # goal state
    # Print every edge weight, then search for the cheapest path 0 -> 6.
    for i in cost:
        print(i, " = ", cost[i])
    goal = []
    goal.append(6)
    answer = ucs(goal, 0)
    print("Minimum cost from 0 to G is = ", answer[0])
    print(graph)
| xBodda/SearchingAlgorithms | In Python/UCS.py | UCS.py | py | 2,083 | python | en | code | 0 | github-code | 13 |
17460418814 | from PyQt5.QtWidgets import (QSizePolicy, QWidget, QSlider, QVBoxLayout, QLabel, QPushButton, QSpacerItem)
from Widget.Slider import Slider
class BallControl(QWidget):
    """Panel of HSV threshold sliders driving the video thread's detector.

    Each slider pushes its value into the image processor's colour
    threshold and asks the video thread to re-process the current frame.
    """

    def __init__(self, videoThread):
        super().__init__()
        self.videoThread = videoThread
        colorThreshold = self.videoThread.imageProcessor.colorThreshold
        layout = QVBoxLayout()
        # (label, max, initial value, handler, attribute) for each bound;
        # ranges match the original sliders: 0-179 for hue, 0-255 for S/V.
        specs = [
            ("H Min", 179, colorThreshold.h_min, self.hMinSliderChange, "hMinSlider"),
            ("H Max", 179, colorThreshold.h_max, self.hMaxSliderChange, "hMaxSlider"),
            ("S Min", 255, colorThreshold.s_min, self.sMinSliderChange, "sMinSlider"),
            ("S Max", 255, colorThreshold.s_max, self.sMaxSliderChange, "sMaxSlider"),
            ("V Min", 255, colorThreshold.v_min, self.vMinSliderChange, "vMinSlider"),
            ("V Max", 255, colorThreshold.v_max, self.vMaxSliderChange, "vMaxSlider"),
        ]
        for label, maximum, initial, handler, attr in specs:
            slider = Slider(label, 0, maximum, initial)
            slider.change_value_signal.connect(handler)
            setattr(self, attr, slider)
            layout.addWidget(slider)
        layout.addItem(QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
        self.setLayout(layout)

    def _applyThreshold(self, **kwargs):
        # Shared tail of every slider handler: update one threshold bound
        # and re-run detection on the current frame.
        self.videoThread.imageProcessor.setColorThreshold(**kwargs)
        self.videoThread.reprocessFrame()

    def hMinSliderChange(self, v):
        self._applyThreshold(h_min=v)

    def hMaxSliderChange(self, v):
        self._applyThreshold(h_max=v)

    def sMinSliderChange(self, v):
        self._applyThreshold(s_min=v)

    def sMaxSliderChange(self, v):
        self._applyThreshold(s_max=v)

    def vMinSliderChange(self, v):
        self._applyThreshold(v_min=v)

    def vMaxSliderChange(self, v):
        self._applyThreshold(v_max=v)
71995549459 | from urllib.parse import parse_qs, urlparse
import scrapy
from scrapy.loader import ItemLoader
from boatrace_crawler.items import OddsItem, RaceIndexItem, RaceProgramBracketItem, RaceProgramBracketResultsItem, RaceProgramItem, RaceResultPayoffItem, RaceResultStartTimeItem, RaceResultTimeItem, RacerItem
class BoatraceSpider(scrapy.Spider):
name = "boatrace_spider"
allowed_domains = ["www.boatrace.jp"]
start_urls = ["https://www.boatrace.jp"]
def __init__(self, start_url="https://www.boatrace.jp/owpc/pc/race/monthlyschedule?ym=202308", *args, **kwargs):
super(BoatraceSpider, self).__init__(*args, **kwargs)
self.start_urls = [start_url]
def start_requests(self):
for url in self.start_urls:
yield self._follow(url)
    def parse(self, response):
        # Default callback: log the fetched page and re-enter the crawl at
        # the configured start URL (whose routed callback takes over).
        self.logger.info(f"#parse: start: response={response.url}")
        yield self._follow(self.start_urls[0])
def _follow(self, url):
self.logger.debug(f"#_follow: start: url={url}")
# Setting http proxy
meta = {}
if self.settings["CRAWL_HTTP_PROXY"]:
meta["proxy"] = self.settings["CRAWL_HTTP_PROXY"]
self.logger.debug(f"#_follow: start: meta={meta}")
# Build request
if url.startswith("https://www.boatrace.jp/owpc/pc/race/monthlyschedule?"):
self.logger.debug("#_follow: follow calendar page")
return scrapy.Request(url, callback=self.parse_calendar, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/index?"):
self.logger.debug("#_follow: follow oneday race page")
return scrapy.Request(url, callback=self.parse_oneday_race, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/raceindex?"):
self.logger.debug("#_follow: follow race index page")
return scrapy.Request(url, callback=self.parse_race_index, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/racelist?"):
self.logger.debug("#_follow: follow race program page")
return scrapy.Request(url, callback=self.parse_race_program, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/odds3t?"):
self.logger.debug("#_follow: follow odds 3t page")
return scrapy.Request(url, callback=self.parse_odds_3t, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/odds3f?"):
self.logger.debug("#_follow: follow odds 3f page")
return scrapy.Request(url, callback=self.parse_odds_3f, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/odds2tf?"):
self.logger.debug("#_follow: follow odds 2tf page")
return scrapy.Request(url, callback=self.parse_odds_2tf, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/oddsk?"):
self.logger.debug("#_follow: follow odds k page")
return scrapy.Request(url, callback=self.parse_odds_k, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/oddstf?"):
self.logger.debug("#_follow: follow odds tf page")
return scrapy.Request(url, callback=self.parse_odds_tf, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/race/raceresult?"):
self.logger.debug("#_follow: follow race result page")
return scrapy.Request(url, callback=self.parse_race_result, meta=meta)
elif url.startswith("https://www.boatrace.jp/owpc/pc/data/racersearch/profile?"):
self.logger.debug("#_follow: follow racer profile page")
return scrapy.Request(url, callback=self.parse_racer_profile, meta=meta)
else:
raise Exception("Unknown url")
    def parse_calendar(self, response):
        """Parse calendar page.
        @url https://www.boatrace.jp/owpc/pc/race/monthlyschedule?ym=202307
        @returns items 0 0
        @returns requests 81 81
        @calendar_contract
        """
        self.logger.info(f"#parse_calendar: start: response={response.url}")
        # Parse link
        # Scan every anchor on the monthly calendar and follow those that
        # point at a race-index page (path /owpc/pc/race/raceindex carrying
        # both `jcd` and `hd` -- presumably venue code and date), rebuilding
        # a canonical URL from just those two parameters.
        for a in response.xpath("//a"):
            race_index_url = urlparse(response.urljoin(a.xpath("@href").get()))
            race_index_qs = parse_qs(race_index_url.query)
            if race_index_url.path == "/owpc/pc/race/raceindex" and "jcd" in race_index_qs and "hd" in race_index_qs:
                self.logger.debug(f"#parse_calendar: a={race_index_url.geturl()}")
                race_index_url = f"https://www.boatrace.jp/owpc/pc/race/raceindex?jcd={race_index_qs['jcd'][0]}&hd={race_index_qs['hd'][0]}"
                yield self._follow(race_index_url)
    def parse_oneday_race(self, response):
        """Parse oneday race page.
        @url https://www.boatrace.jp/owpc/pc/race/index?hd=20230909
        @returns items 0 0
        @returns requests 144
        @oneday_race_contract
        """
        self.logger.info(f"#parse_oneday_race: start: response={response.url}")
        # For every table cell linking to a race index (i.e. each venue
        # running on this date), queue the race-program page for all 12
        # rounds at that venue/date.
        for a in response.xpath("//div[@class='table1']/table/tbody/tr/td/a"):
            url = urlparse(response.urljoin(a.xpath("@href").get()))
            qs = parse_qs(url.query)
            if url.path == "/owpc/pc/race/raceindex" and "hd" in qs and "jcd" in qs:
                self.logger.debug(f"#parse_oneday_race: a={url.geturl()}")
                # Race rounds are numbered 1..12.
                for rno in range(12):
                    racelist_url = f"https://www.boatrace.jp/owpc/pc/race/racelist?rno={rno+1}&jcd={qs['jcd'][0]}&hd={qs['hd'][0]}"
                    yield self._follow(racelist_url)
    def parse_race_index(self, response):
        """Parse race index page.
        @url https://www.boatrace.jp/owpc/pc/race/raceindex?jcd=01&hd=20230731
        @returns items 1 1
        @returns requests 17 17
        @race_index_contract
        """
        self.logger.info(f"#parse_race_index: start: response={response.url}")
        # Build the race-index item for this venue/date page.
        loader = ItemLoader(item=RaceIndexItem(), response=response)
        loader.add_value("url", response.url)
        race_index_url = urlparse(response.url)
        race_index_qs = parse_qs(race_index_url.query)
        loader.add_value("place_id", race_index_qs["jcd"][0])
        loader.add_xpath("place_name", "//div[@class='heading2_area']/img/@alt")
        loader.add_xpath("race_grade", "//div[@class='heading2_head']/div[2]/@class")
        loader.add_xpath("race_name", "//h2[@class='heading2_titleName']/text()")
        # Collect the day-tab links (other days of this meeting), plus the
        # current page itself.
        for a in response.xpath("//a[@class='tab2_inner']"):
            loader.add_value("race_index_urls", response.urljoin(a.xpath("@href").get()))
        loader.add_value("race_index_urls", response.url)
        item = loader.load_item()
        self.logger.debug(f"#parse_race_index: race_index={item}")
        yield item
        # Follow the other days of this meeting.
        for a in response.xpath("//a[@class='tab2_inner']"):
            race_index_url = response.urljoin(a.xpath("@href").get())
            self.logger.debug(f"#parse_race_index: a={race_index_url}")
            yield self._follow(race_index_url)
        # Follow each race round's program (racelist) page, rebuilding a
        # canonical URL from the rno/jcd/hd query parameters.
        for a in response.xpath("//div[@class='table1']/table/tbody/tr/td[1]/a"):
            race_program_url = urlparse(response.urljoin(a.xpath("@href").get()))
            race_program_qs = parse_qs(race_program_url.query)
            if race_program_url.path == "/owpc/pc/race/racelist" and "rno" in race_program_qs and "jcd" in race_program_qs and "hd" in race_program_qs:
                self.logger.debug(f"#parse_race_index: a={race_program_url.geturl()}")
                race_program_url = f"https://www.boatrace.jp/owpc/pc/race/racelist?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}"
                yield self._follow(race_program_url)
def parse_race_program(self, response):
    """Parse race program page.

    Emits one RaceProgramItem for the race, one RaceProgramBracketItem per
    racer, and the racer's recent results in the current series, then
    follows the odds, result and racer-profile links.

    @url https://www.boatrace.jp/owpc/pc/race/racelist?rno=5&jcd=01&hd=20230817
    @returns items 49 49
    @returns requests 12 12
    @race_program_contract
    """
    self.logger.info(f"#parse_race_program: start: response={response.url}")
    race_program_url = urlparse(response.url)
    race_program_qs = parse_qs(race_program_url.query)
    #
    # Build the race program item
    #
    loader = ItemLoader(item=RaceProgramItem(), response=response)
    loader.add_value("url", response.url + "#info")
    loader.add_xpath("course_length", "translate(normalize-space(//h3[@class='title16_titleDetail__add2020']), ' ', '')")
    # Extract the start time: the <td> aligned with the header cell that
    # has no class attribute holds this race's start time.
    race_round_classes = []
    for th in response.xpath("//div[@class='table1 h-mt10']/table/thead/tr/th"):
        race_round_classes.append(th.xpath("@class").get())
    start_times = []
    for td in response.xpath("//div[@class='table1 h-mt10']/table/tbody/tr/td"):
        start_times.append(td.xpath("text()").get())
    for race_round_class, start_time in zip(race_round_classes, start_times):
        if race_round_class is None:
            loader.add_value("start_time", start_time)
    item = loader.load_item()
    self.logger.debug(f"#parse_race_program: race_program={item}")
    yield item
    #
    # Build one bracket item per boat racer
    #
    for tbody in response.xpath("//div[@class='table1 is-tableFixed__3rdadd']/table/tbody"):
        loader = ItemLoader(item=RaceProgramBracketItem(), selector=tbody)
        loader.add_value("url", response.url + "#bracket")
        loader.add_xpath("bracket_number", "tr[1]/td[1]/text()")
        loader.add_xpath("racer_data1", "translate(normalize-space(tr[1]/td[3]/div[1]), ' ', '')")
        loader.add_xpath("racer_data2", "translate(normalize-space(tr[1]/td[3]/div[3]), ' ', '/')")
        loader.add_xpath("racer_data3", "translate(normalize-space(tr[1]/td[4]), ' ', '/')")
        loader.add_xpath("racer_rate_all_place", "translate(normalize-space(tr[1]/td[5]), ' ', '/')")
        loader.add_xpath("racer_rate_current_place", "translate(normalize-space(tr[1]/td[6]), ' ', '/')")
        loader.add_xpath("motor_rate", "translate(normalize-space(tr[1]/td[7]), ' ', '/')")
        loader.add_xpath("boat_rate", "translate(normalize-space(tr[1]/td[8]), ' ', '/')")
        item = loader.load_item()
        self.logger.debug(f"#parse_race_program: race_program_bracket={item}")
        yield item
        #
        # Build the racer's results in the current series
        # (up to 14 runs recorded as extra columns in the same tbody)
        #
        for i in range(14):
            loader = ItemLoader(item=RaceProgramBracketResultsItem(), selector=tbody)
            loader.add_value("url", response.url + "#bracket_result")
            loader.add_xpath("bracket_number", "tr[1]/td[1]/text()")
            loader.add_value("run_number", i)
            loader.add_xpath("bracket_color", f"tr[1]/td[{i+10}]/@class")
            loader.add_xpath("race_round", f"tr[1]/td[{i+10}]/text()")
            loader.add_xpath("approach_course", f"tr[2]/td[{i+1}]/text()")
            loader.add_xpath("start_timing", f"tr[3]/td[{i+1}]/text()")
            loader.add_xpath("result", f"tr[4]/td[{i+1}]/a/text()")
            item = loader.load_item()
            if item["race_round"][0] == "\xa0":
                # Skip empty (non-breaking-space placeholder) cells
                continue
            self.logger.debug(f"#parse_race_program: race_program_bracket_result={item}")
            yield item
    #
    # Follow links
    #
    # Odds pages
    odds_url = response.urljoin(f"/owpc/pc/race/odds3t?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}")
    self.logger.debug(f"#parse_race_program: a={odds_url}")
    yield self._follow(odds_url)
    odds_url = response.urljoin(f"/owpc/pc/race/odds3f?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}")
    self.logger.debug(f"#parse_race_program: a={odds_url}")
    yield self._follow(odds_url)
    odds_url = response.urljoin(f"/owpc/pc/race/odds2tf?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}")
    self.logger.debug(f"#parse_race_program: a={odds_url}")
    yield self._follow(odds_url)
    odds_url = response.urljoin(f"/owpc/pc/race/oddsk?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}")
    self.logger.debug(f"#parse_race_program: a={odds_url}")
    yield self._follow(odds_url)
    odds_url = response.urljoin(f"/owpc/pc/race/oddstf?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}")
    self.logger.debug(f"#parse_race_program: a={odds_url}")
    yield self._follow(odds_url)
    # Race result page
    result_url = response.urljoin(f"/owpc/pc/race/raceresult?rno={race_program_qs['rno'][0]}&jcd={race_program_qs['jcd'][0]}&hd={race_program_qs['hd'][0]}")
    self.logger.debug(f"#parse_race_program: a={result_url}")
    yield self._follow(result_url)
    # Racer profile pages
    for a in response.xpath("//div[@class='table1 is-tableFixed__3rdadd']/table/tbody/tr/td[3]//a"):
        profile_url = urlparse(response.urljoin(a.xpath("@href").get()))
        if profile_url.path == "/owpc/pc/data/racersearch/profile":
            self.logger.debug(f"#parse_race_program: a={profile_url.geturl()}")
            yield self._follow((profile_url.geturl()))
def parse_odds_3t(self, response):
    """Parse odds 3t page.

    Yields one OddsItem for each of the 120 three-number combinations.

    @url https://www.boatrace.jp/owpc/pc/race/odds3t?rno=5&jcd=01&hd=20230817
    NOTE: cancelled race @url https://www.boatrace.jp/owpc/pc/race/oddsk?rno=12&jcd=24&hd=20221223
    @returns items 120 120
    @returns requests 0 0
    @odds_3t_contract
    """
    self.logger.info(f"#parse_odds_3t: start: response={response.url}")
    table = response.xpath("//div[@class='table1']/table")
    if len(table) == 0:
        # No data (e.g. the race was cancelled); return without yielding.
        self.logger.debug("#parse_odds_3t: no data")
        return
    # The grid is 6 column groups (first number) x 5 row groups (second
    # number) x 4 rows (third number).
    for i in range(6):
        bracket_number_1 = table.xpath(f"thead/tr/th[{i*2+1}]/text()").get()
        for j in range(5):
            bracket_number_2 = table.xpath(f"tbody/tr[{j*4+1}]/td[{i*3+1}]/text()").get()
            for k in range(4):
                if k == 0:
                    # NOTE(review): k == 0 rows use a different td offset
                    # (i*3 vs i*2), presumably because the first row of each
                    # group carries an extra rowspan cell for the second
                    # number - confirm against the page layout.
                    bracket_number_3 = table.xpath(f"tbody/tr[{j*4+k+1}]/td[{i*3+2}]/text()").get()
                    odds = table.xpath(f"tbody/tr[{j*4+k+1}]/td[{i*3+3}]/text()").get()
                else:
                    bracket_number_3 = table.xpath(f"tbody/tr[{j*4+k+1}]/td[{i*2+1}]/text()").get()
                    odds = table.xpath(f"tbody/tr[{j*4+k+1}]/td[{i*2+2}]/text()").get()
                loader = ItemLoader(item=OddsItem(), selector=None)
                loader.add_value("url", response.url)
                loader.add_value("bracket_number_1", bracket_number_1)
                loader.add_value("bracket_number_2", bracket_number_2)
                loader.add_value("bracket_number_3", bracket_number_3)
                loader.add_value("odds", odds)
                item = loader.load_item()
                self.logger.debug(f"#parse_odds_3t: odds={item}")
                yield item
def parse_odds_3f(self, response):
    """Parse odds 3f page.

    Yields one OddsItem per three-number unordered combination (20 items).

    @url https://www.boatrace.jp/owpc/pc/race/odds3f?rno=5&jcd=01&hd=20230817
    NOTE: cancelled race @url https://www.boatrace.jp/owpc/pc/race/odds3f?rno=12&jcd=24&hd=20221223
    @returns items 20 20
    @returns requests 0 0
    @odds_3f_contract
    """
    self.logger.info(f"#parse_odds_3f: start: response={response.url}")
    table = response.xpath("//div[@class='table1']/table")
    if len(table) == 0:
        # No data (e.g. the race was cancelled); return without yielding.
        self.logger.debug("#parse_odds_3f: no data")
        return
    def load_odds_item(bracket_number_1, bracket_number_2, bracket_number_3, table_row, table_col):
        # Build one OddsItem for the given combination, reading its odds
        # from the body cell at (table_row, table_col).
        odds = table.xpath(f"tbody/tr[{table_row}]/td[{table_col}]/text()").get()
        loader = ItemLoader(item=OddsItem(), selector=None)
        loader.add_value("url", response.url)
        loader.add_value("bracket_number_1", bracket_number_1)
        loader.add_value("bracket_number_2", bracket_number_2)
        loader.add_value("bracket_number_3", bracket_number_3)
        loader.add_value("odds", odds)
        item = loader.load_item()
        self.logger.debug(f"#parse_odds_3f: odds={item}")
        return item
    # NOTE(review): combinations with different first numbers reuse the
    # same (table_row, table_col) cell - e.g. 1-3-4 and 2-3-4 both read
    # tr[5]/td[3]. Verify this matches the page's column layout; otherwise
    # those combinations are loaded with duplicate odds values.
    yield load_odds_item(1, 2, 3, 1, 3)
    yield load_odds_item(1, 2, 4, 2, 2)
    yield load_odds_item(1, 2, 5, 3, 2)
    yield load_odds_item(1, 2, 6, 4, 2)
    yield load_odds_item(1, 3, 4, 5, 3)
    yield load_odds_item(1, 3, 5, 6, 2)
    yield load_odds_item(1, 3, 6, 7, 2)
    yield load_odds_item(1, 4, 5, 8, 3)
    yield load_odds_item(1, 4, 6, 9, 2)
    yield load_odds_item(1, 5, 6, 10, 3)
    yield load_odds_item(2, 3, 4, 5, 3)
    yield load_odds_item(2, 3, 5, 6, 2)
    yield load_odds_item(2, 3, 6, 7, 2)
    yield load_odds_item(2, 4, 5, 8, 3)
    yield load_odds_item(2, 4, 6, 9, 2)
    yield load_odds_item(2, 5, 6, 10, 3)
    yield load_odds_item(3, 4, 5, 8, 3)
    yield load_odds_item(3, 4, 6, 9, 2)
    yield load_odds_item(3, 5, 6, 10, 3)
    yield load_odds_item(4, 5, 6, 10, 3)
def parse_odds_2tf(self, response):
    """Parse odds 2tf page.

    Yields OddsItems from the two tables on the page (ordered and
    unordered two-number combinations).

    @url https://www.boatrace.jp/owpc/pc/race/odds2tf?rno=5&jcd=01&hd=20230817
    NOTE: cancelled race @url https://www.boatrace.jp/owpc/pc/race/odds2tf?rno=12&jcd=24&hd=20221223
    @returns items 45 45
    @returns requests 0 0
    @odds_2tf_contract
    """
    self.logger.info(f"#parse_odds_2tf: start: response={response.url}")
    # First table (2連単 odds)
    table = response.xpath("//div[@class='table1'][1]/table")
    if len(table) == 0:
        # No data (e.g. the race was cancelled); return without yielding.
        self.logger.debug("#parse_odds_2tf: no data")
        return
    for col in range(6):
        for row in range(5):
            loader = ItemLoader(item=OddsItem(), selector=table)
            loader.add_value("url", response.url + "#odds2t")
            loader.add_xpath("bracket_number_1", f"thead/tr/th[{col*2+1}]/text()")
            loader.add_xpath("bracket_number_2", f"tbody/tr[{row+1}]/td[{col*2+1}]/text()")
            loader.add_xpath("odds", f"tbody/tr[{row+1}]/td[{col*2+2}]/text()")
            item = loader.load_item()
            self.logger.debug(f"#parse_odds_2tf: odds={item}")
            yield item
    # Second table (2連複 odds)
    table = response.xpath("//div[@class='table1'][2]/table")
    for col in range(6):
        for row in range(5):
            loader = ItemLoader(item=OddsItem(), selector=table)
            loader.add_value("url", response.url + "#odds2f")
            loader.add_xpath("bracket_number_1", f"thead/tr/th[{col*2+1}]/text()")
            loader.add_xpath("bracket_number_2", f"tbody/tr[{row+1}]/td[{col*2+1}]/text()")
            loader.add_xpath("odds", f"tbody/tr[{row+1}]/td[{col*2+2}]/text()")
            item = loader.load_item()
            if not item["bracket_number_2"][0].strip():
                # Blank cell: skip the empty entry
                continue
            self.logger.debug(f"#parse_odds_2tf: odds={item}")
            yield item
def parse_odds_k(self, response):
    """Parse odds k page.

    Yields one OddsItem per non-empty two-number cell in the grid.

    @url https://www.boatrace.jp/owpc/pc/race/oddsk?rno=5&jcd=01&hd=20230817
    NOTE: cancelled race @url https://www.boatrace.jp/owpc/pc/race/oddsk?rno=12&jcd=24&hd=20221223
    @returns items 15 15
    @returns requests 0 0
    @odds_k_contract
    """
    self.logger.info(f"#parse_odds_k: start: response={response.url}")
    table = response.xpath("//div[@class='table1'][1]/table")
    if not len(table):
        # No data (e.g. the race was cancelled); return without yielding.
        self.logger.debug("#parse_odds_k: no data")
        return
    for col in range(6):
        header_xpath = f"thead/tr/th[{col*2+1}]/text()"
        for row in range(5):
            loader = ItemLoader(item=OddsItem(), selector=table)
            loader.add_value("url", response.url)
            loader.add_xpath("bracket_number_1", header_xpath)
            loader.add_xpath("bracket_number_2", f"tbody/tr[{row+1}]/td[{col*2+1}]/text()")
            loader.add_xpath("odds", f"tbody/tr[{row+1}]/td[{col*2+2}]/text()")
            item = loader.load_item()
            if not item["bracket_number_2"][0].strip():
                # Blank cell: skip the empty entry
                continue
            self.logger.debug(f"#parse_odds_k: odds={item}")
            yield item
def parse_odds_tf(self, response):
    """Parse odds tf page.

    Yields one OddsItem per bracket from each of the page's two tables.

    @url https://www.boatrace.jp/owpc/pc/race/oddstf?rno=5&jcd=01&hd=20230817
    NOTE: cancelled race @url https://www.boatrace.jp/owpc/pc/race/oddstf?rno=12&jcd=24&hd=20221223
    @returns items 12 12
    @returns requests 0 0
    @odds_tf_contract
    """
    self.logger.info(f"#parse_odds_tf: start: response={response.url}")
    # First table (単勝 odds)
    table = response.xpath("//div[@class='grid_unit'][1]/div[@class='table1']/table")
    if len(table) == 0:
        # No data (e.g. the race was cancelled); return without yielding.
        self.logger.debug("#parse_odds_tf: no data")
        return
    for bracket in range(1, 7):
        loader = ItemLoader(item=OddsItem(), selector=table)
        loader.add_value("url", response.url + "#oddst")
        loader.add_xpath("bracket_number_1", f"tbody[{bracket}]/tr/td[1]/text()")
        loader.add_xpath("odds", f"tbody[{bracket}]/tr/td[3]/text()")
        item = loader.load_item()
        self.logger.debug(f"#parse_odds_tf: odds={item}")
        yield item
    # Second table (複勝 odds)
    table = response.xpath("//div[@class='grid_unit'][2]/div[@class='table1']/table")
    for bracket in range(1, 7):
        loader = ItemLoader(item=OddsItem(), selector=table)
        loader.add_value("url", response.url + "#oddsf")
        loader.add_xpath("bracket_number_1", f"tbody[{bracket}]/tr/td[1]/text()")
        loader.add_xpath("odds", f"tbody[{bracket}]/tr/td[3]/text()")
        item = loader.load_item()
        self.logger.debug(f"#parse_odds_tf: odds={item}")
        yield item
def parse_race_result(self, response):
    """Parse race result page.

    Yields finishing order/time items, start-timing items per bracket,
    and payoff items for each bet type.

    @url https://www.boatrace.jp/owpc/pc/race/raceresult?rno=5&jcd=01&hd=20230817
    NOTE: cancelled race @url https://www.boatrace.jp/owpc/pc/race/raceresult?rno=12&jcd=24&hd=20221223
    @returns items 22 22
    @returns requests 0 0
    @race_result_contract
    """
    self.logger.info(f"#parse_race_result: start: response={response.url}")
    tables = response.xpath("//div[@class='table1']/table")
    if len(tables) == 0:
        # Race was cancelled; nothing to parse.
        self.logger.debug("#parse_race_result: no data")
        return
    # Finishing order and race times (first table)
    for tbody in tables[0].xpath("tbody"):
        loader = ItemLoader(item=RaceResultTimeItem(), selector=tbody)
        loader.add_value("url", response.url + "#result")
        loader.add_xpath("result", "tr/td[1]/text()")
        loader.add_xpath("bracket_number", "tr/td[2]/text()")
        loader.add_xpath("result_time", "tr/td[4]/text()")
        item = loader.load_item()
        self.logger.debug(f"#parse_race_result: result={item}")
        yield item
    # Start timing (second table)
    for tr in tables[1].xpath("tbody/tr"):
        loader = ItemLoader(item=RaceResultStartTimeItem(), selector=tr)
        loader.add_value("url", response.url + "#start")
        loader.add_xpath("bracket_number", "td/div/span[1]/text()")
        loader.add_xpath("start_time", "translate(normalize-space(td/div/span[3]/span), ' ', '')")
        item = loader.load_item()
        self.logger.debug(f"#parse_race_result: start_time={item}")
        yield item
    # Payoff information (third table). A 4-cell row starts a new bet
    # type; 3-cell rows inherit the last seen bet_type (the bet-type cell
    # presumably spans the following rows - confirm against the markup).
    bet_type = ""
    for tr in tables[2].xpath("tbody/tr"):
        loader = ItemLoader(item=RaceResultPayoffItem(), selector=tr)
        loader.add_value("url", response.url + "#payoff")
        if len(tr.xpath("td")) == 4:
            bet_type = tr.xpath("td[1]/text()").get()
            loader.add_value("bet_type", bet_type)
            loader.add_xpath("bracket_number", "translate(normalize-space(td[2]), ' ', '')")
            loader.add_xpath("payoff", "string(td[3])")
            loader.add_xpath("favorite", "string(td[4])")
        else:
            loader.add_value("bet_type", bet_type)
            loader.add_xpath("bracket_number", "translate(normalize-space(td[1]), ' ', '')")
            loader.add_xpath("payoff", "string(td[2])")
            loader.add_xpath("favorite", "string(td[3])")
        item = loader.load_item()
        if len(item["bracket_number"][0].strip()) == 0:
            # Skip empty rows
            continue
        self.logger.debug(f"#parse_race_result: payoff={item}")
        yield item
def parse_racer_profile(self, response):
    """Parse racer profile page.

    Yields a single RacerItem populated from the profile header and the
    fixed-order dd fields of the list3 block.

    @url https://www.boatrace.jp/owpc/pc/data/racersearch/profile?toban=4463
    @returns items 1 1
    @returns requests 0 0
    @racer_profile_contract
    """
    self.logger.info(f"#parse_racer_profile: start: response={response.url}")
    loader = ItemLoader(item=RacerItem(), selector=response)
    loader.add_value("url", response.url)
    loader.add_xpath("name", "//p[@class='racer1_bodyName']/text()")
    loader.add_xpath("name_kana", "//p[@class='racer1_bodyKana']/text()")
    # The dd elements of the list3 block hold the profile fields in this
    # fixed order.
    profile_fields = [
        "racer_id", "birth_day", "height", "weight", "blood_type",
        "belong_to", "birth_place", "debut_period", "racer_class",
    ]
    for position, field in enumerate(profile_fields, start=1):
        loader.add_xpath(field, f"//dl[@class='list3']/dd[{position}]/text()")
    item = loader.load_item()
    self.logger.debug(f"#parse_racer_profile: racer={item}")
    yield item
| u6k/boatrace-crawler | boatrace_crawler/spiders/boatrace_spider.py | boatrace_spider.py | py | 27,741 | python | en | code | 0 | github-code | 13 |
16169436802 | import json
from queue import Queue, Empty
event_queues = {}
class Event:
    """A server-sent event with a type and a payload.

    str(event) renders the SSE wire format; non-string payloads are
    JSON-encoded, strings pass through unchanged.
    """

    def __init__(self, event_type, data):
        self.type = event_type
        self.data = data

    def __str__(self):
        if isinstance(self.data, str):
            payload = self.data
        else:
            payload = json.dumps(self.data)
        return f'event:{self.type}\ndata:{payload}\n\n'
def get_queue(sessid):
    """Return the event queue for *sessid*, creating it on first use."""
    queue = event_queues.get(sessid)
    if queue is None:
        queue = Queue()
        event_queues[sessid] = queue
    return queue
def get_event_source(sessid):
    """Yield SSE-formatted strings for the session's event queue.

    Sends an initial ping, then relays queued events forever; emits a
    keepalive ping whenever the queue stays empty for five seconds.
    """
    ping = 'event:ping\ndata:1\n\n'
    queue = get_queue(sessid)
    yield ping
    while True:
        try:
            event = queue.get(timeout=5)
        except Empty:
            yield ping
        else:
            assert isinstance(event, Event)
            yield str(event)
| frankli0324/BiliDownload | app/event.py | event.py | py | 802 | python | en | code | 0 | github-code | 13 |
7044496818 | from __future__ import absolute_import
from __future__ import print_function
import os
import re
import time
import argparse
from hid_test import test_hid
from serial_test import test_serial
from msd_test import test_mass_storage
from daplink_board import get_all_attached_daplink_boards
from project_generator.generate import Generator
from test_info import TestInfo
TEST_REPO = 'https://developer.mbed.org/users/c1728p9/code/daplink-validation/'
def test_endpoints(board, parent_test):
    """Run tests to validate DAPLink firmware.

    Builds the target test firmware, then exercises the HID, serial and
    mass-storage endpoints. Results are recorded under a 'test_endpoints'
    subtest of *parent_test*.
    """
    test_info = parent_test.create_subtest('test_endpoints')
    board.build_target_firmware(test_info)
    # Pass the subtest to every endpoint test so results are grouped
    # together (previously test_hid received parent_test, scattering HID
    # results outside the 'test_endpoints' subtest).
    test_hid(board, test_info)
    test_serial(board, test_info)
    test_mass_storage(board, test_info)
# Determine interface or bootloader - also check if name is valid
# Handle building project when requested
# Handle testing project when requested
class ProjectTester(object):
    """Wraps one project-generator project: builds it with uvision and
    runs the DAPLink validation tests against attached boards."""

    # Project names look like "<hic>_<target>_if" (interface firmware)
    # or "<hic>_bl" (bootloader).
    _if_exp = re.compile("^([a-z0-9]+)_([a-z0-9_]+)_if$")
    _bl_exp = re.compile("^([a-z0-9]+)_bl$")
    _tool = 'uvision'
    # Interface project name -> board ID of the matching hardware.
    _name_to_board_id = {
        'k20dx_k22f_if': 0x0231,
        'k20dx_k64f_if': 0x0240,
        'kl26z_microbit_if': 0x9900,
        'kl26z_nrf51822_if': 0x9900,
        'lpc11u35_lpc812_if': 0x1050,
        'lpc11u35_lpc1114_if': 0x1114,
        'lpc11u35_efm32gg_stk_if': 0x2015,
        'sam3u2c_nrf51822_if': 0x1100,
    }

    def __init__(self, yaml_prj, path='.'):
        """Wrap *yaml_prj* (a project-generator project). *path* is the
        repository root used to locate the generated build outputs.

        Raises Exception if the project name matches neither the
        interface nor the bootloader naming pattern.
        """
        self.prj = yaml_prj
        self._path = path
        self.name = yaml_prj.name
        # Determine interface or bootloader from the name; also validates it.
        if_match = self._if_exp.match(self.name)
        if if_match is not None:
            self._bl_name = if_match.group(1) + '_bl'
            self._is_bl = False
            self._board_id = self._name_to_board_id[self.name]
        bl_match = self._bl_exp.match(self.name)
        if bl_match is not None:
            self._is_bl = True
        if if_match is None and bl_match is None:
            raise Exception("Invalid project name %s" % self.name)
        self._built = False
        self._boards = None
        self._bl = None
        self._test_info = TestInfo(self.get_name())
        # Expected location of the uvision build outputs.
        build_path = os.path.normpath(path + os.sep + 'projectfiles' + os.sep +
                                      self._tool + os.sep + self.name +
                                      os.sep + 'build')
        self._hex_file = os.path.normpath(build_path + os.sep +
                                          self.name + '.hex')
        self._bin_file = os.path.normpath(build_path + os.sep +
                                          self.name + '.bin')
        # By default test all configurations and boards
        self._only_test_first = False
        self._load_if = True
        self._load_bl = True
        self._test_if_bl = True
        self._test_ep = True

    def is_bl(self):
        """Return True if this project is a bootloader."""
        return self._is_bl

    def get_name(self):
        """Return the project's name."""
        return self.name

    def get_built(self):
        """
        Return true if the project has been built in the current session
        """
        return self._built

    def get_bl_name(self):
        """
        Get the name of the bootloader

        This function should only be called if the project
        is not a bootloader
        """
        assert not self.is_bl()
        return self._bl_name

    def set_bl(self, bl_prj):
        """
        Set the boot loader for this interface.

        Note - this function should only be called on
        an interface project.
        """
        assert isinstance(bl_prj, ProjectTester)
        assert not self._is_bl
        self._bl = bl_prj

    def get_binary(self):
        """
        Get the binary file associated with this project

        Returns None if the bin file has not been created yet.
        """
        return self._bin_file if os.path.isfile(self._bin_file) else None

    def get_hex(self):
        """
        Get the hex file associated with this project

        Returns None if the hex file has not been created yet.
        """
        return self._hex_file if os.path.isfile(self._hex_file) else None

    def get_board_id(self):
        """
        Get the board ID for this project

        Board ID is only valid if the target is not a bootloader
        """
        return self._board_id

    def set_test_boards(self, boards):
        """Attach the list of physical boards this project is tested on."""
        assert type(boards) is list
        self._boards = boards

    def get_test_boards(self):
        """Return the attached test boards, or None if none were set."""
        return self._boards

    def get_test_info(self):
        """Return the TestInfo collecting this project's results."""
        return self._test_info

    def build(self, clean=True):
        """Export and build the project, then record the output files.

        Raises Exception on a non-zero export or build return value.
        NOTE(review): the 'clean' parameter is currently unused.
        """
        self._test_info.info("Building %s" % self.name)
        start = time.time()
        #TODO - build bootloader if it isn't built yet
        #TODO - print info on bootloader
        ret = self.prj.generate(self._tool, False)
        self._test_info.info('Export return value %s' % ret)
        if ret != 0:
            raise Exception('Export return value %s' % ret)
        ret = self.prj.build(self._tool)
        self._test_info.info('Build return value %s' % ret)
        if ret != 0:
            raise Exception('Build return value %s' % ret)
        # Sanity-check that the generated outputs land where __init__
        # predicted, then record them.
        files = self.prj.get_generated_project_files(self._tool)
        export_path = files['path']
        base_file = os.path.normpath(export_path + os.sep + 'build' +
                                     os.sep + self.name)
        built_hex_file = base_file + '.hex'
        built_bin_file = base_file + '.bin'
        assert (os.path.abspath(self._hex_file) ==
                os.path.abspath(built_hex_file))
        assert (os.path.abspath(self._bin_file) ==
                os.path.abspath(built_bin_file))
        self._hex_file = built_hex_file
        self._bin_file = built_bin_file
        stop = time.time()
        self._test_info.info('Build took %s seconds' % (stop - start))
        self._built = True

    def test_set_first_board_only(self, first):
        """If True, test only the first attached board of this type."""
        assert type(first) is bool
        self._only_test_first = first

    def test_set_load_if(self, load):
        """Enable/disable loading the interface firmware during test()."""
        assert type(load) is bool
        self._load_if = load

    def test_set_load_bl(self, load):
        """Enable/disable loading the bootloader during test()."""
        assert type(load) is bool
        self._load_bl = load

    def test_set_test_if_bl(self, run_test):
        """Enable/disable the interface/bootloader filesystem tests."""
        assert type(run_test) is bool
        self._test_if_bl = run_test

    def test_set_test_ep(self, run_test):
        """Enable/disable the USB endpoint tests."""
        assert type(run_test) is bool
        self._test_ep = run_test

    def test(self):
        """Load and test this project on each assigned board.

        Returns True if no failures were recorded in the test info.
        """
        boards_to_test = self._boards
        if self._only_test_first:
            boards_to_test = self._boards[0:1]
        for board in boards_to_test:
            # Load interface & bootloader
            if self._load_if:
                print("Loading interface")
                board.load_interface(self.get_hex(), self._test_info)
                #TODO - check CRC
            if self._load_bl:
                pass
                #TODO - load bootloader
                #TODO - check CRC
            if self._test_if_bl:
                # Test bootloader
                # Test interface
                board.test_fs(self._test_info)
                board.test_fs_contents(self._test_info)
            # Test endpoint
            if self._test_ep:
                test_endpoints(board, self._test_info)
        return not self._test_info.get_failed()
def main():
    """Entry point: build the requested DAPLink projects and run the
    validation tests against all attached boards."""
    # Save current directory
    cur_dir = os.getcwd()
    parent_dir = os.path.dirname(cur_dir)
    os.chdir(parent_dir)
    # Wrap every project in a ProjectTester object
    # Tie every bootloader to one or more interface projects
    projects = list(Generator('projects.yaml').generate())
    yaml_dir = os.getcwd()
    all_projects = [ProjectTester(project, yaml_dir) for project in projects]
    if_project_list = [project for project in all_projects
                       if not project.is_bl()]
    bl_project_list = [project for project in all_projects
                       if project.is_bl()]
    bl_name_to_proj = {project.name: project for
                       project in bl_project_list}
    for project in if_project_list:
        bl_name = project.get_bl_name()
        if bl_name in bl_name_to_proj:
            project.set_bl(bl_name_to_proj[bl_name])
    #TODO - make sure all bootloaders are tied to an interface, make sure all
    # objects are accounted for
    # Create list of projects to show user
    prj_names = [prj.get_name() for prj in if_project_list]
    name_to_prj = {prj.get_name(): prj for prj in if_project_list}
    # Output verbosity levels
    VERB_MINIMAL = 'Minimal'  # Just top level errors
    VERB_NORMAL = 'Normal'  # Top level errors and warnings
    VERB_VERBOSE = 'Verbose'  # All errors and warnings
    VERB_ALL = 'All'  # All errors
    VERB_LEVELS = [VERB_MINIMAL, VERB_NORMAL, VERB_VERBOSE, VERB_ALL]
    description = 'DAPLink validation and testing tool'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--targetdir',
                        help='Directory with pre-build target test images.',
                        default=None)
    parser.add_argument('--user', type=str, default=None,
                        help='MBED username (required for compile-api)')
    parser.add_argument('--password', type=str, default=None,
                        help='MBED password (required for compile-api)')
    parser.add_argument('--project', help='Project to test', action='append',
                        choices=prj_names, default=[], required=False)
    parser.add_argument('--nobuild', help='Skip build step. Binaries must have been built already.', default=False,
                        action='store_true')
    parser.add_argument('--noloadif', help='Skip load step for interface.',
                        default=False, action='store_true')
    parser.add_argument('--noloadbl', help='Skip load step for bootloader.',
                        default=False, action='store_true')
    parser.add_argument('--notestendpt', help='Dont test the interface USB endpoints.',
                        default=False, action='store_true')
    parser.add_argument('--testfirst', help='If multiple boards of the same type are found only test the first one.',
                        default=False, action='store_true')
    parser.add_argument('--verbose', help='Verbose output',
                        choices=VERB_LEVELS, default=VERB_NORMAL)
    args = parser.parse_args()
    use_prebuilt = args.targetdir is not None
    use_compile_api = args.user is not None and args.password is not None
    # Validate args: endpoint testing needs target images from either a
    # prebuilt directory or the mbed compile API.
    if not args.notestendpt:
        if not use_prebuilt and not use_compile_api:
            print("Endpoint test requires target test images.")
            print(" Directory with pre-build target test images")
            print(" must be specified with '--targetdir'")
            print("OR")
            print(" Mbed login credentials '--user' and '--password' must")
            print(" be specified so test images can be built with")
            print(" the compile API.")
            exit(-1)
    boards_explicitly_specified = len(args.project) != 0
    # Put together the list of projects to build
    if boards_explicitly_specified:
        projects_to_test = [name_to_prj[name] for name in args.project]
    else:
        projects_to_test = if_project_list
    # Collect attached mbed devices
    all_boards = get_all_attached_daplink_boards()
    # Attach firmware build credentials
    if not args.notestendpt:
        for board in all_boards:
            if args.targetdir is None:
                board.set_build_login(args.user, args.password)
            else:
                board.set_build_prebuilt_dir(args.targetdir)
    # Create table mapping each board id to boards
    board_id_to_board_list = {}
    for board in all_boards:
        board_id = board.get_board_id()
        if board_id not in board_id_to_board_list:
            board_id_to_board_list[board_id] = []
        board_id_to_board_list[board_id].append(board)
    # Attach each test board to a project
    for project in projects_to_test:
        board_id = project.get_board_id()
        if board_id in board_id_to_board_list:
            project.set_test_boards(board_id_to_board_list[board_id])
    # Fail if a board for the requested project is not attached
    if boards_explicitly_specified:
        for project in projects_to_test:
            if project.get_test_boards() is None:
                print("No test board(s) for project %s" % project.get_name())
                exit(-1)
    # Build all projects
    if not args.nobuild:
        for project in projects_to_test:
            project.build()
    # Test all projects with boards that are attached
    test_passed = True
    tested_projects = []
    untested_projects = []
    for project in projects_to_test:
        if project.get_test_boards() is not None:
            project.test_set_first_board_only(args.testfirst)
            project.test_set_load_if(not args.noloadif)
            project.test_set_load_bl(not args.noloadbl)
            project.test_set_test_ep(not args.notestendpt)
            test_passed &= project.test()
            tested_projects.append(project)
        else:
            # Cannot test board
            untested_projects.append(project)
    assert (len(tested_projects) + len(untested_projects) ==
            len(projects_to_test))
    if len(tested_projects) == 0:
        print("Test Failed - no connected boards to test")
        exit(-1)
    if boards_explicitly_specified:
        # Error should have been triggered before this
        # point if there were boards that couldn't be tested
        assert len(untested_projects) == 0
    # Print info for boards tested
    for project in tested_projects:
        print('')
        if args.verbose == VERB_MINIMAL:
            project.get_test_info().print_msg(TestInfo.FAILURE, 0)
        elif args.verbose == VERB_NORMAL:
            project.get_test_info().print_msg(TestInfo.WARNING, None)
        elif args.verbose == VERB_VERBOSE:
            project.get_test_info().print_msg(TestInfo.WARNING, None)
        elif args.verbose == VERB_ALL:
            project.get_test_info().print_msg(TestInfo.INFO, None)
        else:
            # This should never happen
            assert False
    # Warn about untested boards
    print('')
    for project in untested_projects:
        print('Warning - project %s is untested' % project.get_name())
    if test_passed:
        print("All boards passed")
        exit(0)
    else:
        print("Test Failed")
        exit(-1)
# Script entry point.
if __name__ == "__main__":
    main()
| c1728p9/DAPLink_old | test/test_all.py | test_all.py | py | 14,516 | python | en | code | 0 | github-code | 13 |
23767957714 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 19:41:54 2019
@author: llavi
"""
#general imports
import os
import glob
from os.path import join
import pandas as pd
import numpy as np
import math
import time
import sys
import datetime
#other scripts that are used
#import raw_data_imports
## DATA MANIPULATION TO CREATE INPUT FILES ##
def create_gens_init(gens):
    """Create the generator initial-conditions input frame.

    Parameters
    ----------
    gens : pd.DataFrame
        Generator table with at least 'UNITNAME' and 'X' columns.

    Returns
    -------
    pd.DataFrame
        One row per generator, ordered by 'X', with default initial
        conditions: committed (1), 200 periods up, 0 periods down.
    """
    # Order generators by 'X' and take their names; build the constant
    # columns directly instead of appending in a loop.
    ordered_units = list(gens.sort_values('X').set_index('UNITNAME').index)
    n = len(ordered_units)
    return pd.DataFrame(
        {'Gen_Index': ordered_units,
         'commit_init': [1] * n,
         'time_up_init': [200] * n,
         'time_down_init': [0] * n
        })
def create_generator_segments(segments):
    """Create equal-length generator cost-curve segments.

    Parameters
    ----------
    segments : int
        Number of piecewise segments per generator.

    Returns
    -------
    pd.DataFrame
        Columns 'generator_segment' (1..segments) and 'length', where each
        segment spans 1/segments of the curve.
    """
    segment_length = 1. / float(segments)
    # Build the columns directly instead of appending in a loop.
    return pd.DataFrame(
        {'generator_segment': list(range(1, segments + 1)),
         'length': [segment_length] * segments
        })
def create_generator_segment_mc(segments, gens):
    """Build the per-segment marginal-cost table for every generator.

    For each segment, the marginal cost is the generator's fuel cost times
    its heat rate scaled by the segment's piecewise heat-rate multiplier,
    plus variable O&M. Rows are ordered segment-major, generators by 'X'.
    """
    ordered = gens.sort_values('X').set_index('UNITNAME')
    gen_col = []
    seg_col = []
    mc_col = []
    for seg in range(1, segments + 1):
        heatrate_col = "segment" + str(seg)  # MUST ADD THESE NAMES INTO FILE!!
        for unit in ordered.index:
            unit_row = ordered.loc[unit]
            heatrate_scale = unit_row[heatrate_col]
            mc = (unit_row['FuelCost'] * unit_row['GEN_HEATRATE'] * heatrate_scale
                  + unit_row['NREL_V_OM'])
            gen_col.append(unit)
            seg_col.append(seg)
            mc_col.append(mc)
    return pd.DataFrame(
        {'Gen_Index': gen_col,
         'generator_segment': seg_col,
         'marginal_cost': mc_col
        })
def create_zonal_timepoints(zone_df, zone_list, load_df, wind_shape, solar_shape, lda_load_df, hydro_df, hydro_fracs):
    """Build the per-zone, per-timepoint input frame: zonal load, wind and
    solar capacity factors, and hydro limits scaled by zonal fractions.

    NOTE(review): assumed schemas - lda_load_df has 'ISO Zone'/'Load MW',
    zone_df has 'ZONE'/'Assigned_Zone', hydro_df has 'Max'/'Min'/'RampLim',
    hydro_fracs has 'zone'/'hydro_frac'; confirm against callers.
    NOTE(review): adds columns to the caller's lda_load_df and hydro_df
    frames in place (side effect).
    """
    # get the lda load df ready
    lda_load_df['ZONE'] = lda_load_df['ISO Zone']
    lda_load_df_wzone = pd.merge(lda_load_df, zone_df, on='ZONE')
    # want to re-index by timepoint
    tmps = lda_load_df_wzone.shape[0]/len(lda_load_df_wzone.ZONE.unique())
    tmp_indices = len(lda_load_df_wzone.ZONE.unique())*list(range(1,int(tmps)+1))
    lda_load_df_wzone['new_index'] = tmp_indices
    lda_load_df_wzone = lda_load_df_wzone.set_index('new_index')
    lda_load_df_wzone['loadmw'] = lda_load_df_wzone['Load MW']
    lda_load_df_wzone['isozone'] = lda_load_df_wzone['ISO Zone']
    # index hydro data by timepoint and hydro fractions by zone
    hydro_df['indices'] = list(range(1,load_df.shape[0]+1))
    hydro_df = hydro_df.set_index('indices')
    hydro_fracs = hydro_fracs.set_index('zone')
    zone_index = []
    time_index = []
    assigned_load = []
    wind_cf = []
    solar_cf = []
    max_hydro = []
    min_hydro = []
    ramp_hydro = []
    # One row per (zone, timepoint); timepoints are 1-based.
    for z in zone_list:
        for t in range(1,load_df.shape[0]+1):
            #print(load_df.iloc[t-1])
            #print(lda_load_df_wzone.isozone[(lda_load_df_wzone.index==t) & (lda_load_df_wzone.Assigned_Zone==z)])
            #print(sum(lda_load_df_wzone.loadmw[(lda_load_df_wzone.index==t) & (lda_load_df_wzone.Assigned_Zone==z)]))
            time_index.append(t)
            zone_index.append(z)
            #assigned_load.append(load_df.iloc[t-1]*sum(zone_df.Frac_Load[zone_df.Assigned_Zone==z])) ##OLDMETHOD
            # Zonal load: sum of LDA loads assigned to this zone at t.
            assigned_load.append(sum(lda_load_df_wzone.loadmw[(lda_load_df_wzone.index==t) & (lda_load_df_wzone.Assigned_Zone==z)]))
            wind_cf.append(wind_shape.iloc[t-1])
            solar_cf.append(solar_shape.iloc[t-1])
            # Hydro limits scaled by the zone's share of system hydro.
            max_hydro.append(hydro_df.iloc[t-1]['Max']*hydro_fracs.loc[z]['hydro_frac'])
            min_hydro.append(hydro_df.iloc[t-1]['Min']*hydro_fracs.loc[z]['hydro_frac'])
            ramp_hydro.append(hydro_df.iloc[t-1]['RampLim']*hydro_fracs.loc[z]['hydro_frac'])
    df = pd.DataFrame(
        {'timepoint': time_index,
         'zone': zone_index,
         'gross_load': assigned_load,
         'wind_cf': wind_cf,
         'solar_cf': solar_cf,
         'max_hydro': max_hydro,
         'min_hydro': min_hydro,
         'ramp_hydro': ramp_hydro
        })
    return df
# NOTE(review): dead code — a superseded create_lines variant parked in a
# module-level string literal. It is never executed; kept only for reference.
'''
def create_lines(lines, zone_list):
    line_names = []
    from_zone = []
    to_zone = []
    min_flow = []
    max_flow = []
    losses_frac = []
    count_z = 0
    for z in zone_list:
        count_l = 0
        for l in zone_list:
            if len(lines[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)]) == 1:
                index_val = lines[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)].index[0]
                from_str = str(lines.tx_from_zone[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                to_str = str(lines.tx_to_zone[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                line_names.append((str(from_str)+"_to_"+str(to_str)))
                from_zone.append(from_str)
                to_zone.append(to_str)
                min_flow.append(lines.min_flow[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                max_flow.append(lines.max_flow[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                losses_frac.append(.02)
            elif count_l > count_z:
                line_names.append(z+"_to_"+l)
                from_zone.append(z)
                to_zone.append(l)
                min_flow.append(0)
                max_flow.append(0)
                losses_frac.append(.02)
            count_l+=1
        count_z+=1
    df = pd.DataFrame({'transmission_line': line_names,
                'transmission_from': from_zone,
                'transmission_to': to_zone,
                'min_flow': min_flow,
                'max_flow': max_flow,
                'line_losses_frac': losses_frac
                })
    return df
'''
def create_lines(lines, zone_list):
    """Build the transmission-line index frame for an ordered zone list.

    For each ordered zone pair (z, l) with exactly one matching row in
    *lines*, emits a line named "<from>_to_<to>" with 'old' = 0.1; for the
    remaining upper-triangle pairs (l after z) with no matching line, emits
    a placeholder "<z>_to_<l>" with 'old' = 0.

    :param lines: DataFrame with 'tx_from_zone' and 'tx_to_zone' columns.
    :param zone_list: ordered list of zone names.
    :return: DataFrame with columns ['transmission_line', 'old'].
    """
    line_names = []
    old = []
    for count_z, z in enumerate(zone_list):
        for count_l, l in enumerate(zone_list):
            # Hoist the pair filter: the original recomputed this boolean
            # mask four times per zone pair.
            pair = lines[(lines.tx_from_zone == z) & (lines.tx_to_zone == l)]
            if len(pair) == 1:
                row = pair.iloc[0]
                # name from the stored row values (equal to z/l by construction)
                line_names.append(str(row.tx_from_zone) + "_to_" + str(row.tx_to_zone))
                old.append(0.1)
            elif count_l > count_z:
                # upper-triangle pair with no physical line: zero placeholder
                line_names.append(z + "_to_" + l)
                old.append(0)
    df = pd.DataFrame({'transmission_line': line_names,
                       'old': old
                       })
    return df
def create_hourly_lines(lines, zone_list, load_df_for_timepoints):
    """Build the per-timepoint transmission-line limits frame.

    For each ordered zone pair with matching rows in *lines*, emits one row
    per timepoint carrying +/- the interface limit and a hurdle rate of 2;
    unmatched upper-triangle pairs get zero-capacity placeholder lines with
    a 0.25 hurdle rate. *load_df_for_timepoints* only supplies the number of
    timepoints (its row count).

    NOTE(review): `.index[t-1]` assumes *lines* holds exactly one row per
    timepoint for each zone pair (hourly merged interface limits), in
    timepoint order — TODO confirm against the caller's merge.
    """
    time_index = []
    line_names = []
    from_zone = []
    to_zone = []
    min_flow = []
    max_flow = []
    hurdle_rate = []
    count_z = 0
    #print(lines.columns)
    for z in zone_list:
        count_l = 0
        for l in zone_list:
            if len(lines[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)]) >= 1:
                #print(lines[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)])
                for t in range(1,load_df_for_timepoints.shape[0]+1):
                    time_index.append(t)
                    # pick the t-th matching row: the hourly limit for timepoint t
                    index_val = lines[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)].index[t-1]
                    from_str = str(lines.tx_from_zone[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                    to_str = str(lines.tx_to_zone[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                    line_names.append((str(from_str)+"_to_"+str(to_str)))
                    from_zone.append(from_str)
                    to_zone.append(to_str)
                    # symmetric limit: flow bounded by +/- the interface rating
                    min_flow.append((-1.)*lines.limit_mw[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                    max_flow.append(lines.limit_mw[(lines.tx_from_zone==z) & (lines.tx_to_zone==l)][index_val])
                    hurdle_rate.append(2)
            elif count_l > count_z:
                # upper-triangle pair with no physical interface: zero-flow placeholder
                for t in range(1,load_df_for_timepoints.shape[0]+1):
                    time_index.append(t)
                    line_names.append(z+"_to_"+l)
                    from_zone.append(z)
                    to_zone.append(l)
                    min_flow.append(0)
                    max_flow.append(0)
                    hurdle_rate.append(.25) #hurdle rate hopefully just reflects incremental loss cost
            count_l+=1
        count_z+=1
    df = pd.DataFrame({'timepoint': time_index,
                       'transmission_line': line_names,
                       'transmission_from': from_zone,
                       'transmission_to': to_zone,
                       'min_flow': min_flow,
                       'max_flow': max_flow,
                       'hurdle_rate': hurdle_rate
                       })
    return df
def create_zones(zone_df, zone_list, subzone_status, wind, solar, hydro_df, hydro_fracs):
    """Assemble the per-zone summary frame.

    :param zone_df: unused (kept for interface compatibility).
    :param zone_list: ordered zone names.
    :param subzone_status: per-zone 0/1 sub-zone membership flags, aligned
        with *zone_list*.
    :param wind, solar: single-row frames of installed MW keyed by zone name.
    :param hydro_df: frame with an 'Avg' column (monthly hydro energy).
    :param hydro_fracs: frame with 'zone' and 'hydro_frac' columns.
    :return: DataFrame with columns ['zone', 'wind_cap', 'solar_cap',
        'total_hydro', 'in_sub_zone'].
    """
    fracs = hydro_fracs.set_index('zone')
    # total monthly hydro energy, split across zones by their hydro fraction
    hydro_month_total = sum(hydro_df.Avg)
    wind_row = wind.iloc[0]
    solar_row = solar.iloc[0]
    return pd.DataFrame(
        {'zone': [z for z in zone_list],
         'wind_cap': [wind_row[str(z)] for z in zone_list],
         'solar_cap': [solar_row[str(z)] for z in zone_list],
         'total_hydro': [hydro_month_total * fracs.loc[z]['hydro_frac'] for z in zone_list],
         'in_sub_zone': subzone_status
         })
def knit_generator_zone(gens, zones, subzone_status, hydro_df):
    '''
    takes list of zones, and df of generators, and knits together to get the capacity of
    each generator in a zone
    modifies capacity of hydro resources according to their monthly CF (a bit clunky for now)
    for now, format is for capacity to be 0 in zones where generator doesn't exist
    also emits hourly ramp limits and start/shutdown ramp allowances per generator,
    plus a 0/1 flag for membership in the study sub-zone
    '''
    gens = gens.sort_values('X')
    minutes_in_tmp = 60 #number of minutes in timepoint in run, for scaling ramp rates
    hours_in_month = 744 #assumes 744 hours in month for calculating CF for hydro
    hydro_df['UTILUNIT_y']=hydro_df['UTILUNIT']  # alias so the merge key matches gens
    gens_w_hydro = pd.merge(gens,hydro_df,on="UTILUNIT_y")
    gens_w_hydro = gens_w_hydro.set_index('UNITNAME')
    # monthly capacity factor = net generation / (rating * hours in month)
    cap_factor = gens_w_hydro.NETACTGEN/(gens_w_hydro.RATINGMW_y*hours_in_month)
    gens = gens.set_index('UNITNAME')
    gen_index = []
    zone_index = []
    gen_zone_cap = []
    ramp_rate = []
    ramp_start = []
    ramp_shut = []
    in_subzone = []
    for z in range(len(zones)):
        for g in list(gens.index):
            try:
                cf = cap_factor[g]
            except KeyError:
                cf = 1  # no hydro record for this unit: treat as full capacity
            gen_index.append(g)
            zone_index.append(zones[z])
            cf=1 #overwrites to full capacity
            # NOTE(review): the overwrite above makes the derived cap_factor unused;
            # remove it to restore hydro CF derating.
            if zones[z] == gens.Assigned_Zone[g]:
                gen_zone_cap.append(max(0,gens.RATINGMW_y[g]*cf))
            else:
                gen_zone_cap.append(0)  # zero capacity outside the generator's home zone
            if zones[z] == gens.Assigned_Zone[g] and subzone_status[z]==1:
                in_subzone.append(1)
            else:
                in_subzone.append(0)
            ramp_rate.append(gens.RATINGMW_y[g]*gens.ramp_rate[g]*minutes_in_tmp) #gets the hourly ramp rate
            ramp_start.append(gens.RATINGMW_y[g]*gens.Pmin[g]*1.001) #I think ok
            ramp_shut.append(gens.RATINGMW_y[g]*gens.Pmin[g]*.1) #derate this bc of shutdown concerns
    df = pd.DataFrame(
        {'Gen_Index': gen_index,
         'zone': zone_index,
         'capacity': gen_zone_cap,
         'Ramp_Rate': ramp_rate,
         'Ramp_Start': ramp_start,
         'Ramp_Shutdown': ramp_shut,
         'In_Sub_Zone': in_subzone
        })
    return df
def create_scheduled_outage_file(n_timepoints, list_gens, unitmatch_ID, outage_schedule):
    '''
    builds the per-timepoint generator availability table from scheduled outages.
    *outage_schedule* is daily (as known around the previous day's 6PM, i.e.
    roughly when the Day Ahead Market would have cleared), so each timepoint is
    mapped to its day before lookup. Units whose ID cannot be matched are
    assumed fully available.
    '''
    length_day = 24  # daily resolution of the schedule; assumes 24 timepoints/day
    time_list = []
    gens_list = []
    scheduled_list = []
    for t in range(1, n_timepoints + 1):
        day = int((t - 1) / length_day)  # timepoint -> day row in the schedule
        for g in list_gens:
            match_ID = unitmatch_ID[g]
            try:
                scheduled_out = outage_schedule.iloc[day][match_ID]
            except (KeyError, TypeError):
                # unmatched ID: assume the generator is fully available
                scheduled_out = 0
            time_list.append(t)
            gens_list.append(g)
            scheduled_list.append(1 - scheduled_out)
    return pd.DataFrame(
        {'timepoint': time_list,
         'Gen_Index': gens_list,
         'available': scheduled_list
         })
def create_operating_reserve_curve(n_segments, price_cap):
    '''
    creates the operating reserve demand curve as *n_segments* price/quantity
    steps. A single static curve for now (should eventually be hourly);
    segment quantities are dummy placeholders and prices decay as
    price_cap / s**2.
    '''
    segment_dummy = 100  # placeholder MW width per segment (made up for now)
    segments = list(range(1, n_segments + 1))
    return pd.DataFrame(
        {'segments': segments,
         'SynchMW': [segment_dummy for _ in segments],
         'NonSynchMW': [1.5 * segment_dummy for _ in segments],
         'SecondaryMW': [3. * segment_dummy for _ in segments],
         'Price': [price_cap / (s ** 2) for s in segments]  # made up for now
         })
def create_zone_assign_col(row):
    """Row-wise helper: 0 for the WEST zone, 1 for anything else (not used right now)."""
    return 0 if row['Zone'] == "WEST" else 1
def modify_min_up_down(orig_df, modifying_df):
    '''
    takes (1) dataframe of generator characteristics, and
    (2) dataframe of empirical min up/down times calculated from 2014 CEMS data provided by N. Muller
    returns modified df where matched ORIS code generators have their min up and down modified
    (the smaller of the existing and empirical value is kept; unmatched generators
    keep their original values)
    '''
    #match with oris codes, get unit type
    orig_df['index'] = orig_df.index #add index as col (survives the merge so rows can be mapped back)
    merged_up_down = orig_df.merge(modifying_df, left_on='ORISPL_x', right_on='ORISPL')
    merged_up_down.set_index('index',inplace=True)
    #print(merged_up_down.index)
    new_min_up = []
    new_min_down = []
    #do I also want to aggregate on unit type and assign that when unk?
    for i in orig_df.index:
        if i not in list(merged_up_down.index):
            #print(i)
            # no CEMS match: keep the original values
            new_min_up.append(orig_df.loc[i]['minup'])
            new_min_down.append(orig_df.loc[i]['mindown'])
        else:
            # take the lesser of the existing and CEMS-derived values
            # NOTE(review): assumes each ORIS code matches a single CEMS row;
            # with duplicates .loc[i] returns multiple rows here — TODO confirm
            up_val = min(merged_up_down.loc[i]['minup'],merged_up_down.loc[i]['MinUp'])
            new_min_up.append(up_val)
            down_val = min(merged_up_down.loc[i]['mindown'],merged_up_down.loc[i]['MinDown'])
            new_min_down.append(down_val)
    out_df = orig_df
    out_df['final_minup'] = new_min_up
    out_df['final_mindown'] = new_min_down
    #print(out_df.head())
    return out_df
def reassign_fuel_costs(df,fuelcostcol,eia923df,gasprice_df):
    """Rebuild per-generator fuel costs from EIA-923 fuel-receipt data.

    Coal steam units (new_ID ST1/ST2) and gas units (CC/CT) get a
    quantity-weighted average delivered fuel price, preferring (1) their own
    ORIS plant's purchases, then (2) their state's average, then (3) a
    fallback (VA state average for coal; the unit's assigned gas hub
    delivery price for gas). All other units keep their existing FuelCost.

    :param df: generator frame (uses ID6_y, new_ID, ORISPL_x, ORISPL_y,
        STATE, FuelCost, assigned_gashub columns).
    :param fuelcostcol: unused (kept for interface compatibility).
    :param eia923df: EIA-923 fuel receipts (Plant Id, FUEL_GROUP, FUEL_COST,
        QUANTITY, ...); FUEL_COST appears to be cents/MMBtu per the *.01
        conversion below.
    :param gasprice_df: hub-indexed frame with a 'DeliveryPrice' column.
    :return: list of fuel costs aligned with df.index.
    """
    new_fuel_costs = []
    df_noDR = df[df['ID6_y']!='NA'] #get rid of DR
    merged_df = pd.merge(df_noDR,eia923df,how='left',left_on='ORISPL_x',right_on='Plant Id')
    #print(merged_df.columns)
    ## coal grouper ##
    st1_ids = merged_df.index[(merged_df['FUEL_GROUP']=='Coal') & (merged_df['ID6_y']=='ST1')]
    st2_ids = merged_df.index[(merged_df['FUEL_GROUP']=='Coal') & (merged_df['ID6_y']=='ST2')]
    coal_ids = list(st1_ids)+list(st2_ids)
    coal_df = merged_df.loc[coal_ids,:]
    coal_df_oris_group = coal_df.groupby(['ORISPL_y','STATE','Plant Name','Purchase Type','FUEL_GROUP','FUEL_COST','Regulated'],as_index=False).mean()
    coal_df_oris_group['$TOTAL_PAYMENT'] = coal_df_oris_group['FUEL_COST']*coal_df_oris_group['QUANTITY']*.01 #total pmt
    coal_df_oris_only = coal_df_oris_group.groupby(['ORISPL_y']).sum() #group on ORISPL ONLY
    #calculate avg price at oris level (quantity-weighted)
    coal_df_oris_only['coal_price_per_mmbtu'] = coal_df_oris_only['$TOTAL_PAYMENT']/coal_df_oris_only['QUANTITY']
    #also create a frame that groups on state for later use
    coal_df_state = coal_df_oris_group.groupby(['STATE']).sum()
    coal_df_state['state_avg_$mmbtu'] = coal_df_state['$TOTAL_PAYMENT']/coal_df_state['QUANTITY']
    ## gas grouper ##
    cc_ids = merged_df.index[(merged_df['FUEL_GROUP']=='Natural Gas') & (merged_df['ID6_y']=='CC')]
    ct_ids = merged_df.index[(merged_df['FUEL_GROUP']=='Natural Gas') & (merged_df['ID6_y']=='CT')]
    gas_ids = list(cc_ids)+list(ct_ids)
    gas_df = merged_df.loc[gas_ids,:]
    # EIA-923 renamed this column across vintages; accept either name
    if 'Natural Gas Transportation Service' in gas_df.columns:
        gas_transport_string = 'Natural Gas Transportation Service'
    # BUG FIX: the original wrote `elif 'Natural Gas Delivery Contract Type':`,
    # a constant-true string literal, so the column-membership check was never
    # actually performed and the else branch was unreachable.
    elif 'Natural Gas Delivery Contract Type' in gas_df.columns:
        gas_transport_string = 'Natural Gas Delivery Contract Type'
    else:
        # neither column variant present; the groupby below will raise
        # NameError on gas_transport_string (unchanged legacy behavior)
        print('ugh,again!')
    gas_df_oris_group = gas_df.groupby(['ORISPL_y','STATE','Plant Name','Purchase Type','FUEL_GROUP','FUEL_COST','Regulated',gas_transport_string],as_index=False).mean()
    gas_df_oris_group['$TOTAL_PAYMENT'] = gas_df_oris_group['FUEL_COST']*gas_df_oris_group['QUANTITY']*.01 #total pmt
    gas_df_oris_only = gas_df_oris_group.groupby(['ORISPL_y']).sum() #group on ORISPL ONLY
    #calculate avg price at oris level
    gas_df_oris_only['gas_price_per_mmbtu'] = gas_df_oris_only['$TOTAL_PAYMENT']/gas_df_oris_only['QUANTITY']
    #also create a frame that groups on state for later use
    gas_df_state = gas_df_oris_group.groupby(['STATE']).sum()
    gas_df_state['state_avg_$mmbtu'] = gas_df_state['$TOTAL_PAYMENT']/gas_df_state['QUANTITY']
    #print(gas_df_state)
    #print(gas_df_oris_only)
    # assign each generator a price: own plant > own state > fallback
    for i in df.index:
        if df.loc[i,'new_ID']=='ST1' or df.loc[i,'new_ID']=='ST2':
            if df.loc[i,'ORISPL_y'] in coal_df_oris_only.index:
                new_price = coal_df_oris_only.loc[df.loc[i,'ORISPL_y'],'coal_price_per_mmbtu']
            elif df.loc[i,'STATE'] in coal_df_state.index:
                new_price = coal_df_state.loc[df.loc[i,'STATE'],'state_avg_$mmbtu']
            else:
                new_price = coal_df_state.loc['VA','state_avg_$mmbtu']
            new_fuel_costs.append(new_price)
        elif df.loc[i,'new_ID']=='CC' or df.loc[i,'new_ID']=='CT':
            if df.loc[i,'ORISPL_y'] in gas_df_oris_only.index:
                new_price = gas_df_oris_only.loc[df.loc[i,'ORISPL_y'],'gas_price_per_mmbtu']
            elif df.loc[i,'STATE'] in gas_df_state.index:
                new_price = gas_df_state.loc[df.loc[i,'STATE'],'state_avg_$mmbtu']
            else: #in the case of gas, prefer to assign prevailing hub price here for unobserved states
                hub = df.loc[i,'assigned_gashub']
                new_price = gasprice_df.loc[hub]['DeliveryPrice']
            new_fuel_costs.append(new_price)
        else:
            new_fuel_costs.append(df.loc[i,'FuelCost'])
    return new_fuel_costs
def redo_capacity_HR(gens_df,epa_df):
    """Overwrite generator capacity and heat rate with EPA NEEDS plant-level data.

    EPA unit rows are aggregated to ORIS plant level (capacity summed, heat
    rate capacity-weighted); each generator that matches a plant then gets an
    equal per-unit share of the plant capacity and the weighted heat rate,
    with start cost and no-load MMBtu scaled proportionally. Generators with
    no usable EPA match (both replacement values < 0.1) are left unchanged.
    Mutates *gens_df* in place and returns it.
    """
    #add a column for capacity-weighted HR contribution in epa_df
    matcher = epa_df.groupby("ORIS Plant Code")["Capacity (MW)"].sum()
    newcapacity = []
    for i in epa_df.index:
        # this unit's share of its plant's total capacity
        origcap = epa_df.loc[i,'Capacity (MW)']
        capsum = matcher[epa_df.loc[i,'ORIS Plant Code']]
        newcapacity.append(origcap/capsum)
    epa_df['capacityfraction'] = newcapacity
    epa_df['HRcontribution'] = epa_df['capacityfraction']*epa_df['Heat Rate (Btu/kWh)']
    # plant-level totals: summed capacity and capacity-weighted heat rate
    epa_oris_group_sum = epa_df.groupby(['ORIS Plant Code']).sum()
    epa_oris_group_sum_relevantcols = epa_oris_group_sum[['Capacity (MW)','HRcontribution']]
    #now replace the relevant columns in gens_df
    for i in gens_df.index:
        oris = gens_df.loc[i,'ORISPL_x']
        # split the plant capacity evenly across this ORIS code's generator rows
        count = (gens_df['ORISPL_x'] == oris).sum()
        try:
            potential_newcap = epa_oris_group_sum_relevantcols.loc[oris,'Capacity (MW)']/count
        except KeyError:
            potential_newcap = 0.0  # no EPA match for this plant
        try:
            potential_newheatrate = epa_oris_group_sum_relevantcols.loc[oris,'HRcontribution']
        except KeyError:
            potential_newheatrate = 0.0
        if potential_newcap < 0.1 and potential_newheatrate < 0.1: #
            # nothing usable from EPA: keep the generator's existing values
            continue
            #finalcapacity.append(merged_df.loc[i,'RATINGMW'])
        elif potential_newheatrate < 0.1:
            # capacity only: rescale start cost with the capacity change
            capscalar = potential_newcap/gens_df.loc[i,'RATINGMW_y']
            gens_df.loc[i,'startcost'] = gens_df.loc[i,'startcost']*capscalar
            gens_df.loc[i,'RATINGMW_y'] = potential_newcap
        elif potential_newcap < 0.1:
            # heat rate only (Btu/kWh -> MMBtu/MWh via /1000); rescale no-load heat
            HRscalar = (potential_newheatrate/1000)/gens_df.loc[i,'GEN_HEATRATE']
            gens_df.loc[i,'NO_LOAD_MMBTU'] = gens_df.loc[i,'NO_LOAD_MMBTU']*HRscalar
            gens_df.loc[i,'GEN_HEATRATE'] = potential_newheatrate/1000
        else:
            # both: rescale no-load heat and start cost, then overwrite
            capscalar = potential_newcap/gens_df.loc[i,'RATINGMW_y']
            HRscalar = (potential_newheatrate/1000)/gens_df.loc[i,'GEN_HEATRATE']
            gens_df.loc[i,'NO_LOAD_MMBTU'] = gens_df.loc[i,'NO_LOAD_MMBTU']*capscalar*HRscalar
            gens_df.loc[i,'GEN_HEATRATE'] = potential_newheatrate/1000
            gens_df.loc[i,'startcost'] = gens_df.loc[i,'startcost']*capscalar
            gens_df.loc[i,'RATINGMW_y'] = potential_newcap
            #gens_df.loc[i,'startcost']
    return gens_df
## DUMP TO OUTPUT FILES ##
def write_data(data, results_directory, init, scenario_inputs_directory, date, inputs_directory,
               input_primary_reserve_scalar, input_secondary_reserve_scalar):
    """Transform the loaded case data and write every model-input CSV.

    *data* is a positional bundle; indices used here, per the assignments
    below: 0=scenario settings (`.value`), 2=generators, 3=load (loadMW),
    4=temperatures, 5/6=wind/solar shapes, 7=input datetimes, 8=hourly
    interface limits, 9/10=installed wind/solar, 11=outage schedule,
    12/15=hydro, 13=gas hub prices, 14=LDA loads, 16=CEMS min up/down,
    17=firm gas contracts, 18=EIA-923, 19=dual fuel, 20=EPA NEEDS.

    Writes generator, zone, timepoint, transmission, outage, and reserve
    CSVs into *results_directory*; returns None.
    """
    print('writing results to output files...')
    loadMW = data[3]
    #write the timepoints of the input case to a file, just to have a record
    #this is IN NO WAY used by the optimization run
    timepoint_match_df = pd.DataFrame(
        {'model_timepoint': list(range(1,len(data[7])+1)),
         'input_datetime': list(data[7])
        })
    timepoint_match_df.to_csv(os.path.join(results_directory,"timepoint_input_record.csv"), index=False)
    #create segmented ORDC based on hourly load and temperature
    #write generators files
    gens = data[2]
    gen_types = pd.read_csv(os.path.join(scenario_inputs_directory,"gentype_inputs.csv"))
    merged_gens = pd.merge(gens, gen_types, on='ID6_y')
    # start cost scales with unit rating via the per-type start_scalar
    merged_gens['startcost'] = merged_gens.start_scalar * merged_gens.RATINGMW_y
    #rewrite using gas price data
    print("re-doing gas prices")
    gasprice_data = data[13]
    is_firm = data[17] #new
    eia_923_data = data[18]
    dual_fuel = data[19]
    #print(gasprice_data)
    #print(gasprice_data.columns)
    #print(is_firm)
    #print(is_firm.columns)
    #print(eia_923_data)
    #print(eia_923_data.columns)
    #print(dual_fuel)
    #print(dual_fuel.columns)
    #join data on whether firm contract is held
    #print(gasprice_data)
    #add delivery charge to the original data
    delivery = 0.4 #in $/mmbtu
    gasprice_data['DeliveryPrice'] = gasprice_data['Wtd Avg Index $'] + delivery
    #gasprice_data.to_csv(os.path.join(inputs_directory,'gasprice.csv'),index=False)
    #eia_923_data.to_csv(os.path.join(inputs_directory,'eia923.csv'),index=False)
    #dual_fuel.to_csv(os.path.join(inputs_directory,'dualfuel.csv'),index=False)
    #initial load of the zone match file, though we'll use only for gas prices here
    zone_file = pd.read_csv(os.path.join(scenario_inputs_directory,"LDA_to_zone.csv"))
    gens_w_zone = pd.merge(merged_gens, zone_file, on='ZONE')
    gens_w_zone = gens_w_zone.sort_values('X')
    #print(gens_w_zone.head())
    gens_w_zone['index'] = gens_w_zone.index #add index as col
    gens_w_zone_and_firm = gens_w_zone.merge(is_firm, left_on='ORISPL_x', right_on='ORISPL')
    gens_w_zone_and_firm.set_index('index',inplace=True)
    # replace gas generators fuel cost with hub price
    #it'll be slow but do as an iterated list for now
    #actually another thing I may use is diesel fuel price, so note that
    diesel_fuel_cost = gens_w_zone['FuelCost'][gens_w_zone['ID6_y']=="DS"].mean()
    #print(diesel_fuel_cost)
    #print('some info on dates!')
    #print(date)
    #print(type(date))
    #print('end info on dates')
    '''
    new_fuel_price = []
    for g in gens_w_zone.index:
        if gens_w_zone.loc[g]['ID6_y'] == 'CC' or gens_w_zone.loc[g]['ID6_y'] == 'CT':
            fuel_flag = False
            if g in list(gens_w_zone_and_firm.index):
                firmbool = gens_w_zone_and_firm.loc[g]['FirmBool']
                if math.isnan(float(firmbool)):
                    fuel_flag = False
                elif float(firmbool)>=0.1: #flag as don't use spot price if firm contract is held for gas plant
                    #fuel_flag=False
                    fuel_flag = True
                else: #known that generator holds interruptible contract
                    #fuel_flag=True
                    fuel_flag = False
            hub = gens_w_zone.loc[g]['assigned_gashub']
            if fuel_flag: #this is the default options
                fuel_price = gasprice_data.loc[hub]['DeliveryPrice']
            else: #for the exception
                #fuel_price = gens_w_zone.loc[g]['FuelCost']
                #take oil price if higher than gas price, otherwise gas price
                #this REALLY should ONLY bind on select days with gas delivery problems
                #known examples are 1.6.2014-1.8.2014
                fuel_price = max(diesel_fuel_cost,gens_w_zone.loc[g]['FuelCost'])
            new_fuel_price.append(fuel_price)
        else:
            new_fuel_price.append(gens_w_zone.loc[g]['FuelCost'])
    '''
    new_lab = []
    for i in gens_w_zone.index:
        new_lab.append(gens_w_zone.loc[i,'ID6_y'])
        #if gens_w_zone.loc[i,'ID6_y']=='ST1':
        #    new_lab.append('CT')
        #else:
        #    new_lab.append(gens_w_zone.loc[i,'ID6_y'])
    gens_w_zone['new_ID'] = new_lab
    new_fuel_price = reassign_fuel_costs(gens_w_zone,'FuelCost',eia_923_data, gasprice_data)
    #line_df.to_csv(os.path.join(results_directory,"transmission_lines.csv"), index=False)
    #gens_w_zone.to_csv(os.path.join(inputs_directory,'gens_before_fuelchange.csv'),index=False)
    gens_w_zone['FuelCost'] = new_fuel_price #replaces old fuel costs with new fuel costs
    #print(len(new_fuel_price))
    #print(len(gens_w_zone.index))
    #gens_w_zone.to_csv(os.path.join(inputs_directory,'gens_after_fuelchange.csv'),index=False)
    #print(gens_w_zone['FuelCost'])
    print("finished re-doing gas prices")
    ###now also want to re-do capacity and heat rates
    print("re-doing capacity and heat rates...")
    epa_needs_data = data[20]
    #print(epa_needs_data.head())
    #print(gens_w_zone.columns)
    #gens_w_zone.to_csv(os.path.join(inputs_directory,'beforechanges.csv'),index=False)
    new_gens_w_zone = redo_capacity_HR(gens_w_zone,epa_needs_data)
    #new_gens_w_zone.to_csv(os.path.join(inputs_directory,'afterchanges.csv'),index=False)
    gens_w_zone = new_gens_w_zone #in case this owrks
    print("...end re-doing capacity and heat rates")
    '''
    Brian's Code
    # overwrite single gas price with henry hub values
    hubPrice = pd.read_excel(os.path.join(inputs_directory,"gas_price_pull.xlsx"))
    # subset to henry hub (for now) and downselect date
    price = hubPrice.loc[(hubPrice['Price Hub'] == "Henry") & (hubPrice['Delivery Date'] == pd.to_datetime(date))]
    price = price['Wtd Avg Index $'].iloc[0]
    # add delivery charge ( $ per mmBtu)
    delivery = 0.4
    price = price + delivery
    # replace gas generators fuel cost with hub price
    merged_gens.loc[merged_gens['ID6_y'].isin(['CC', 'CT']) , 'FuelCost'] = price
    '''
    # using fuel cost and heat rate to calculate marginal costs
    gens_w_zone['marginalcost'] = gens_w_zone.FuelCost * gens_w_zone.GEN_HEATRATE + gens_w_zone.NREL_V_OM
    gens_w_zone['noloadcost'] = gens_w_zone.NO_LOAD_MMBTU * gens_w_zone.FuelCost
    gens_w_zone = gens_w_zone.sort_values('X')
    #pjm_out = gens_w_zone[['UNITNAME','marginalcost','Pmin','startcost','can_spin','can_nonspin','minup','mindown','noloadcost']]
    #pjm_out.columns = ['Gen_Index', 'Fuel_Cost ','Pmin','start_cost','Can_Spin','Can_NonSpin','Min_Up','Min_Down','No_Load_Cost']
    ## ##
    # overlay empirical CEMS min up/down times before exporting
    pjm_out_new = modify_min_up_down(gens_w_zone,data[16])
    #final_minup
    pjm_out = pjm_out_new[['UNITNAME','marginalcost','Pmin','startcost','can_spin','can_nonspin','final_minup','final_mindown','noloadcost']]
    pjm_out.columns = ['Gen_Index', 'Fuel_Cost ','Pmin','start_cost','Can_Spin','Can_NonSpin','Min_Up','Min_Down','No_Load_Cost']
    ## ##
    pjm_out.to_csv(os.path.join(results_directory,"PJM_generators.csv"), index=False)
    if init:
        out_init = create_gens_init(gens_w_zone)
        out_init.to_csv(os.path.join(results_directory,"initialize_generators.csv"), index=False)
    pjm_out_full = gens_w_zone[['X','UNITNAME','ZONE','ID6_y','RATINGMW_y','marginalcost','can_spin','UTILUNIT_y','In_Sub_Zone']]
    pjm_out_full = pjm_out_full.sort_values('X')
    pjm_out_full.columns = ['Gen_Index', 'Name', 'Zone', 'Category', 'Capacity', 'Fuel_Cost', 'Can_Spin', 'UTILUNIT','In_Sub_Zone']
    #pjm_out_full['In_Sub_Zone'] = pjm_out_full.apply(create_zone_assign_col, axis=1)
    pjm_out_full.to_csv(os.path.join(results_directory,"PJM_generators_full.csv"), index=False)
    #create generator segments
    segment_df = create_generator_segments(data[0].value[11])
    segment_df.to_csv(os.path.join(results_directory,"generator_segments.csv"), index=False)
    #create marginal costs on segments
    mc_segment_df = create_generator_segment_mc(data[0].value[11], gens_w_zone)
    mc_segment_df.to_csv(os.path.join(results_directory,"generator_segment_marginalcost.csv"), index=False)
    #knit with zones, write zones file
    #this perhaps should be in the "only if zonal" clause of the script
    #recall we have now loaded the zone file a bit earlier
    zone_list = list(zone_file.Assigned_Zone.unique())
    # NOTE(review): mad_subzone_status is only bound inside this 5-zone branch;
    # any other zone count raises NameError further down — TODO confirm intended.
    if len(zone_list)==5: #my purposeful pjm sorting
        print('re-ordering zones according to my criteria and noting their subzone status')
        new_zone_list = []
        mad_subzone_status = []
        for z in [0,3,4,1,2]:
            new_zone_list.append(zone_list[z])
            if zone_list[z]=="WEST":
                mad_subzone_status.append(0)  # WEST sits outside the study sub-zone
            else:
                mad_subzone_status.append(1)
        zone_list = new_zone_list
    hydro_df = data[12]
    pjm_gens_zones = knit_generator_zone(gens_w_zone, zone_list, mad_subzone_status, hydro_df)
    pjm_gens_zones.to_csv(os.path.join(results_directory,"PJM_generators_zone.csv"), index=False)
    #attempt to re-do the hydro
    # each zone's hydro fraction = its share of total installed hydro capacity
    gens_w_zone_hydro_only = gens_w_zone[gens_w_zone.ID6_y=="HD"] #gets you only the hydro
    hydro_frac_list = []
    count_list = []
    count = 0
    for z in zone_list:
        count += 1
        hydro_frac_list.append(sum(gens_w_zone_hydro_only.RATINGMW_y[gens_w_zone_hydro_only.Assigned_Zone==z])/sum(gens_w_zone_hydro_only.RATINGMW_y))
        count_list.append(count)
    hydro_frac_df = pd.DataFrame(
        {'zone': zone_list,
         'hydro_frac': hydro_frac_list,
         'counter': count_list})
    #write scheduled outage file
    merged_gens_reindex = gens_w_zone.sort_values('X')
    merged_gens_reindex = merged_gens_reindex.set_index('UNITNAME')
    outage_schedule = data[11]
    scheduled_outage_df = create_scheduled_outage_file(loadMW.shape[0],list(merged_gens_reindex.index), merged_gens_reindex.UTILUNIT_y, outage_schedule)
    scheduled_outage_df.to_csv(os.path.join(results_directory,'PJM_generators_scheduled_outage.csv'), index=False)
    #write operating reserve file
    segment_int = int(data[0].value[6])
    cost_int = data[0].value[8]
    operating_reserve_df = create_operating_reserve_curve(segment_int,cost_int)
    operating_reserve_df.to_csv(os.path.join(results_directory,'operating_reserve_segments.csv'), index=False)
    #write timepoints file (just has temperatures for now)
    temperatures = pd.DataFrame(data[4])
    #temperatures = temperatures.rename(columns={temperatures.columns[0]: "temperature" })
    temperatures['timepoint']=list(range(1, temperatures.shape[0]+1))
    temperatures = temperatures.iloc[:, ::-1] #reorder to get timepoint first
    temperatures.to_csv(os.path.join(results_directory,"timepoints_index_allweather.csv"),index=False)
    temperatures_short = temperatures.iloc[:,0:2] #grabs first two columns
    temperatures_short = temperatures_short.rename(columns={temperatures_short.columns[1]: "temperature" })
    #hourly_loads['ones'] = len(hourly_loads.index)*[1]
    # reserve scalars are constant across all timepoints for now
    temperatures_short['primaryreservescalar'] = len(temperatures_short.index)*[input_primary_reserve_scalar]
    temperatures_short['secondaryreservescalar'] = len(temperatures_short.index)*[input_secondary_reserve_scalar]
    temperatures_short['reservescalarratio'] = len(temperatures_short.index)*[input_secondary_reserve_scalar/input_primary_reserve_scalar]
    temperatures_short.to_csv(os.path.join(results_directory,"timepoints_index.csv"),index=False)
    # data[0].value[5] toggles the zonal representation; falsy -> single-zone case
    if not data[0].value[5]:
        loadMW.to_csv(os.path.join(results_directory,"timepoints_zonal.csv"))
        zone_df = pd.DataFrame([["PJM", 15, 14]], columns = ['zone', 'wind_cap','solar_cap'])
        zone_df.to_csv(os.path.join(results_directory,"zones.csv"))
    else:
        #create zones csv
        zone_df = create_zones(zone_file, zone_list, mad_subzone_status, data[9], data[10], data[15], hydro_frac_df) #now adds wind and solar installed data
        zone_df.to_csv(os.path.join(results_directory,"zones.csv"), index=False)
        #create timepoints/zones csv
        timepoints_zonal_df = create_zonal_timepoints(zone_file, zone_list, loadMW, data[5], data[6], data[14], data[15], hydro_frac_df)
        timepoints_zonal_df.to_csv(os.path.join(results_directory,"timepoints_zonal.csv"), index=False)
        #loadMW.to_csv(os.path.join(results_directory,"timepoints_zonal.csv"))
        #create transmission lines csv
        input_lines = pd.read_csv(os.path.join(scenario_inputs_directory,"transmission_lines_inputs.csv"))
        line_df = create_lines(input_lines, zone_list)
        line_df.to_csv(os.path.join(results_directory,"transmission_lines.csv"), index=False)
        #create alternate transmission lines csv
        hourly_lines = pd.merge(data[8], input_lines, on='interface_limit_name')
        hourly_line_output = create_hourly_lines(hourly_lines, zone_list, loadMW)
        hourly_line_output.to_csv(os.path.join(results_directory,"transmission_lines_hourly.csv"), index=False)
    print('...results written')
    return None
74772490898 | from selenium import webdriver
import os
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
from threading import Thread
import threading
class Mail_google:
    """Scrapes Google Admin Toolbox "Messageheader" to extract the delivery
    time and the SPF / DKIM / DMARC verdicts for a raw e-mail header.

    Results are published into the shared CheckMail.value_dict (keys 'time',
    'SPF:', 'DKIM:', 'DMARC:' plus any other label/value pairs the tool
    reports); on any failure every verdict is set to 'fail' and 'time' to -1
    so consumers always find the keys.
    """
    def checkHeader(header):  # invoked unbound as Mail_google.checkHeader(header)
        """Analyze *header* via the Google toolbox; writes CheckMail.value_dict."""
        driver = None  # bound before try: so the except path knows if Chrome ever started
        try:
            # escape the header so it survives embedding in a JS string literal
            header=header.replace("\n","\\n").replace("\r","\\r").replace("'","\\'")
            path = os.path.dirname(os.path.abspath(__file__))
            driverpath = path + '\\chromedriver.exe'  # Windows-style bundled driver
            options = Options()
            options.add_argument('--headless')
            options.add_argument('--disable-gpu')
            driver = webdriver.Chrome(driverpath,chrome_options=options)
            driver.get("https://toolbox.googleapps.com/apps/messageheader/analyzeheader")
            # inject the header into the page's textarea and submit the form
            driver.execute_script("document.getElementsByClassName('mdl-textfield__input')[0].innerHTML='{0}'".format(header))
            driver.find_elements_by_xpath("//input[@type='submit']")[0].click()
            try:
                # delivery time is rendered green on success, red on failure
                time = WebDriverWait(driver, 4).until(EC.presence_of_element_located((By.XPATH, "//span[@style='color:green;']"))).get_attribute("innerHTML").strip()
            except Exception:
                time = WebDriverWait(driver, 4).until(EC.presence_of_element_located((By.XPATH, "//span[@style='color:red;']"))).get_attribute("innerHTML").strip()
            # the result table alternates label / value cells
            elements = driver.find_elements_by_xpath("//tbody")[0].find_elements_by_class_name("mdl-data-table__cell--non-numeric")
            CheckMail.value_dict["time"]=time
            for i in range(0,len(elements),2):
                CheckMail.value_dict[elements[i].get_attribute("innerHTML").strip()]=elements[i+1].get_attribute("innerHTML").strip()
            # any check the tool did not report counts as failed
            if "SPF:" not in CheckMail.value_dict:
                CheckMail.value_dict["SPF:"]="fail"
            if "DKIM:" not in CheckMail.value_dict:
                CheckMail.value_dict["DKIM:"]="fail"
            if "DMARC:" not in CheckMail.value_dict:
                CheckMail.value_dict["DMARC:"]="fail"
            driver.quit()
        except Exception as e:
            # fall back to 'fail' verdicts so callers always find the keys
            CheckMail.value_dict["time"]=-1
            CheckMail.value_dict["SPF:"]="fail"
            CheckMail.value_dict["DKIM:"]="fail"
            CheckMail.value_dict["DMARC:"]="fail"
            # BUG FIX: the original called driver.quit() unconditionally here,
            # raising NameError (and masking the real error) whenever
            # webdriver.Chrome() itself failed to start.
            if driver is not None:
                try:
                    driver.quit()
                except Exception:
                    pass
            print(e)
class Mail_tracker:
    """Scrapes iptrackeronline.com's header analyzer to geolocate the sending
    server of a raw e-mail header.

    Results (IP, Hostname, Organization, Country, City, Longitude, Latitude)
    are published into the shared CheckMail.value_dict; on any failure every
    field is set to 'n/a' so consumers always find the keys.
    """
    def checkHeader(header):  # invoked unbound as Mail_tracker.checkHeader(header)
        """Analyze *header* via iptrackeronline; writes CheckMail.value_dict."""
        driver = None  # bound before try: so the except path knows if Chrome ever started
        try:
            # escape the header so it survives embedding in a JS string literal
            header=header.replace("\n","\\n").replace("\r","\\r").replace("'","\\'")
            path = os.path.dirname(os.path.abspath(__file__))
            driverpath = path + '\\chromedriver.exe'  # Windows-style bundled driver
            options = Options()
            options.add_argument('--headless')
            options.add_argument('--disable-gpu')
            driver = webdriver.Chrome(driverpath, chrome_options=options)
            driver.get("https://www.iptrackeronline.com/email-header-analysis.php")
            # inject the header into the page's textarea and submit the form
            driver.execute_script("document.getElementsByTagName('textarea')[0].innerHTML='{0}'".format(header))
            driver.find_elements_by_xpath("//input[@type='submit']")[0].click()
            element = WebDriverWait(driver, 4).until(EC.presence_of_element_located((By.XPATH, "//div[@class='three-columns']")))
            # result fields are fixed-width inputs in page order; the last four
            # are read from the end of the list
            texts = driver.find_elements_by_xpath("//input[@size='40']")
            CheckMail.value_dict["IP"]=texts[0].get_attribute("value")
            CheckMail.value_dict["Hostname"]=texts[1].get_attribute("value")
            CheckMail.value_dict["Organization"]=texts[2].get_attribute("value")
            CheckMail.value_dict["Country"]=texts[3].get_attribute("value")
            CheckMail.value_dict["City"]=texts[4].get_attribute("value")
            CheckMail.value_dict["Longitude"]=texts[-3].get_attribute("value")
            CheckMail.value_dict["Latitude"]=texts[-4].get_attribute("value")
            driver.quit()
        except Exception as e:
            # BUG FIX: the original called driver.quit() unconditionally here,
            # raising NameError (and masking the real error) whenever
            # webdriver.Chrome() itself failed to start.
            if driver is not None:
                try:
                    driver.quit()
                except Exception:
                    pass
            print(e)
            # fall back to 'n/a' so callers always find the keys
            CheckMail.value_dict["IP"]="n/a"
            CheckMail.value_dict["Hostname"]="n/a"
            CheckMail.value_dict["Organization"]="n/a"
            CheckMail.value_dict["Country"]="n/a"
            CheckMail.value_dict["City"]="n/a"
            CheckMail.value_dict["Longitude"]="n/a"
            CheckMail.value_dict["Latitude"]="n/a"
class CheckMail:
    """Facade that runs both header analyzers concurrently and returns the
    merged results accumulated in the shared value_dict."""
    value_dict = {}  # shared result store written by Mail_google / Mail_tracker
    def run(header):  # invoked unbound as CheckMail.run(header)
        print(header)
        workers = []
        for analyzer in (Mail_google.checkHeader, Mail_tracker.checkHeader):
            worker = threading.Thread(target = analyzer,args=([header]))
            worker.daemon=True
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join()
        return CheckMail.value_dict
| sagarpatel24/Phishing-Detection-ML | Back End/mail.py | mail.py | py | 4,364 | python | en | code | 8 | github-code | 13 |
"""Delete build artifacts under the current working tree: every directory
named 'out' (with its whole contents) and any *.exe / *.pdb / *output files."""
import os
import shutil
from pathlib import Path

for path in Path('.').rglob('*'):
    # entries under an already-deleted 'out' directory may no longer exist
    if not path.exists():
        continue
    if path.is_dir() and path.name == 'out':
        # BUG FIX: the original used os.remove(), which cannot delete
        # directories (IsADirectoryError / PermissionError on Windows) and
        # failed on any subdirectory nested inside 'out'; rmtree removes the
        # whole tree in one call.
        shutil.rmtree(path)
    elif not path.is_dir():
        if path.name.endswith(('.exe', '.pdb', 'output')):
            os.remove(path)
| ENDERZOMBI102/AdventOfCode | clean.py | clean.py | py | 325 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.