seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39761904162 | import json
import os
from time import sleep
from aviso_monitoring import logger
from aviso_monitoring.collector.config import Config
from aviso_monitoring.collector.time_collector import TimeCollector
from aviso_monitoring.udp_server import UdpServer
def take_some_time(seconds=0.1, flag=False, flag2=True):
    """Sleep for *seconds*, echo both flags, and return flag2."""
    sleep(seconds)
    for value in (flag, flag2):
        print(value)
    return flag2
# Telemetry type label attached to every measurement sent by the collector.
telemetry_type = "test_time"
# Collector configuration: the transmitter sends aggregated telemetry to the
# local UDP server below every `frequency` seconds.
collector_config = {
    "transmitter": {
        "monitoring_server_host": "127.0.0.1",
        "monitoring_server_port": 1116,
        "component_name": "test_component",
        "frequency": 2,
    },
    "enabled": True,
}
# UDP server bound to the same host/port the transmitter targets.
upd_server_config = {"host": "127.0.0.1", "port": 1116, "buffer_size": 64 * 1024}
# Set to True by ReceiverMock once a valid message has been received.
received = False
class ReceiverMock:
    """Stand-in receiver that validates one transmitted telemetry payload."""

    def process_message(self, message):
        """Parse *message* as JSON, assert its fields, and flag receipt."""
        logger.debug(f"Message received: {message}")
        payload = json.loads(message)
        assert payload.get("telemetry_type") == telemetry_type
        expected_component = collector_config.get("transmitter").get("component_name")
        assert payload.get("component_name") == expected_component
        # Basic presence checks on the envelope fields.
        for key in ("hostname", "time", "telemetry"):
            assert payload.get(key)
        telemetry = payload.get("telemetry")
        assert telemetry.get(f"{telemetry_type}_avg") > 0
        assert telemetry.get(f"{telemetry_type}_counter") == 10
        # Tell the waiting test that a valid message arrived.
        global received
        received = True
def test_measure_time():
    """End-to-end test: timed calls are aggregated and sent over UDP to a
    local server, where ReceiverMock validates the payload."""
    # Log the name of the currently running pytest test.
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    # create the UDP server with mock ServiceRegister
    udp_server = UdpServer(upd_server_config, ReceiverMock())
    udp_server.start()
    # create the collector
    timer = TimeCollector(Config(**collector_config), tlm_type=telemetry_type)
    # call the function ten times so the "_counter" telemetry equals 10
    for i in range(10):
        timer(take_some_time, args=0.1)
    # wait to receive it (the transmitter fires every 2 seconds)
    sleep(2)
    assert received
    udp_server.stop()
def test_calling_timer():
    """Exercise the supported calling conventions of TimeCollector:
    scalar, list and tuple positional args, plus keyword args."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    # create the collector
    timer = TimeCollector(Config(**collector_config), tlm_type=telemetry_type)
    # no args: flag2 defaults to True, so the wrapped call returns truthy
    assert timer(take_some_time)
    # a scalar or a one-element list both supply the "seconds" argument
    timer(take_some_time, args=0.1)
    timer(take_some_time, args=[0.1])
    # a full tuple sets flag2=False, so the wrapped result is falsy
    assert not timer(take_some_time, args=(0.1, True, False))
    timer(take_some_time, args=[0.1, False])
    # keyword arguments, alone and combined with a positional
    timer(take_some_time, kwargs={"flag": True})
    timer(take_some_time, args=0.2, kwargs={"flag": True})
| ecmwf/aviso | aviso-server/monitoring/tests/test_time_collector.py | test_time_collector.py | py | 2,449 | python | en | code | 9 | github-code | 13 |
37966286118 | import commands
###########################################################################
# Athena job options: dump LAr calibration parameters from the conditions
# database into a ROOT ntuple.  Many names (include, INFO, CfgMgr,
# LArCalibFolderTag, LArInputTag, os, ...) are provided at runtime by the
# included job-option fragments / the Athena bootstrap environment.
###########################################################################
include("LArCalibProcessing/LArCalib_Flags.py")
include("RecExCommission/GetInputFiles.py")

#######################################################
# Run properties
#######################################################
if not 'RunNumber' in dir():
    RunNumber = 88237

# NOTE(review): the original guard tested "' GainList'" (with a leading
# space), which can never be in dir(), so a user-supplied GainList was
# always overwritten; fixed to 'GainList'.
if not 'GainList' in dir():
    GainList = ["HIGH", "MEDIUM", "LOW"]

if not 'ChannelSelection' in dir():
    ChannelSelection = ""

if not 'Partition' in dir():
    Partition = "EB-EMB"

if not 'LArCaliInputKey' in dir():
    LArCaliInputKey = ""

if not 'LArDetInputKey' in dir():
    LArDetInputKey = ""

if not 'LArParamsTag' in dir():
    LArParamsTag = ""

if not 'LArInputKey' in dir():
    LArInputKey = [""]

if not 'AllChannels2Ntuple' in dir():
    AllChannels2Ntuple = False

#######################################################
# Ntuple output name
#######################################################
if not 'WriteNtuple' in dir():
    WriteNtuple = LArCalib_Flags.WriteNtuple

if not 'DBConnectionCOOL' in dir():
    DBConnectionCOOL = "oracle://ATLAS_COOLPROD;schema=ATLAS_COOLONL_LAR;dbname=COMP200"

if not 'DBConnection' in dir():
    DBConnection = DBConnectionCOOL

## Output
if not 'OutputRootFileDir' in dir():
    OutputRootFileDir = commands.getoutput("pwd")

if not 'KeyOutput' in dir():
    KeyOutput = ""  # Key of LArPhysWaveContainer saved in Pool file

if not 'BaseFileName' in dir():
    BaseFileName = "LArParams"

# Encode run number and partition in the output file name.
BaseFileName = BaseFileName+"_"+str(RunNumber)+"_"+Partition.replace("*","")

if not 'OutputRootFileName' in dir():
    OutputRootFileName = BaseFileName+".root"

###########################################################################
#
# Global settings
#
###########################################################################
include("AthenaCommon/Atlas_Gen.UnixStandardJob.py")

#
# Provides ByteStreamInputSvc name of the data file to process in the offline context
#

## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()

## get a handle to the ApplicationManager, to the ServiceManager and to the ToolSvc
from AthenaCommon.AppMgr import (theApp, ServiceMgr as svcMgr, ToolSvc)

include("LArCalibProcessing/LArCalib_MinimalSetup.py")

## define the DB Global Tag :
svcMgr.IOVDbSvc.GlobalTag = LArCalib_Flags.globalFlagDB
svcMgr.IOVDbSvc.DBInstance = ""

from IOVDbSvc.CondDB import conddb
conddb.addFolder("", "/LAR/BadChannels/BadChannels<dbConnection>"+DBConnectionCOOL+"</dbConnection>")
conddb.addFolder("", "/LAR/BadChannels/MissingFEBs<dbConnection>"+DBConnectionCOOL+"</dbConnection>")

svcMgr.PoolSvc.ReadCatalog += ["prfile:poolcond/PoolCat_oflcond.xml",
                               "xmlcatalog_file:/afs/cern.ch/atlas/conditions/poolcond/catalogue/fragments/PoolCat_comcond.000005.lar_conditions.recon.pool.v0000_castor.xml",
                               "xmlcatalog_file:/afs/cern.ch/atlas/conditions/poolcond/catalogue/fragments/PoolCat_comcond.000006.lar_conditions.recon.pool.v0000_castor.xml",
                               "xmlcatalog_file:/afs/cern.ch/atlas/conditions/poolcond/catalogue/fragments/PoolCat_diskbuffer_afs.xml",
                               "xmlcatalog_file:/afs/cern.ch/user/l/larcalib/w0/stableConds/PoolCat_stable.xml",
                               "xmlcatalog_file:/afs/cern.ch/atlas/conditions/poolcond/catalogue/fragments/PoolCat_cond08_data.000001.lar.COND_castor.xml"]

## The reference is the Oracle DB
if 'LArCaliParamsFolder' in dir():
    if not 'InputTagSpecCali' in dir():
        InputTagSpecCali = LArCalibFolderTag(LArCaliParamsFolder, LArInputTag)
    # print 'Input tag: ',InputTagSpecCali," in folder: ",LArCaliParamsFolder
    conddb.addFolder("", LArCaliParamsFolder+"<tag>"+InputTagSpecCali+"</tag><key>"+LArCaliInputKey+"</key><dbConnection>"+DBConnection+"</dbConnection>"+ChannelSelection)

if 'LArDetParamsFolder' in dir():
    if not 'InputTagSpecDet' in dir():
        InputTagSpecDet = LArCalibFolderTag(LArDetParamsFolder, LArInputTag)
    # print 'Input tag: ',InputTagSpecDet," in folder: ",LArDetParamsFolder
    conddb.addFolder("", LArDetParamsFolder+"<tag>"+InputTagSpecDet+"</tag><key>"+LArDetInputKey+"</key><dbConnection>"+DBConnection+"</dbConnection>"+ChannelSelection)

##########################################################################
#                                                                        #
#                                 Output                                 #
#                                                                        #
##########################################################################
if (WriteNtuple):
    from LArCalibTools.LArCalibToolsConf import LArParams2Ntuple
    LArParams2Ntuple = LArParams2Ntuple("LArParams2Ntuple")
    LArParams2Ntuple.NtupleName = "PARAMS"
    LArParams2Ntuple.KeyList = LArInputKey
    LArParams2Ntuple.AllChannels2Ntuple = AllChannels2Ntuple
    topSequence += LArParams2Ntuple

    theApp.HistogramPersistency = "ROOT"
    from GaudiSvc.GaudiSvcConf import NTupleSvc
    # Remove a stale output file, otherwise OPT='NEW' would fail.
    if os.path.exists(OutputRootFileDir+"/"+OutputRootFileName):
        os.remove(OutputRootFileDir+"/"+OutputRootFileName)
    svcMgr += NTupleSvc()
    svcMgr.NTupleSvc.Output = ["FILE1 DATAFILE='"+OutputRootFileDir+"/"+OutputRootFileName+"' OPT='NEW'"]

###########################################################################
###########################################################################
#               Use EventSelector to select IOV                           #
###########################################################################
from McEventSelector.McEventSelectorConf import McEventSelector
svcMgr += McEventSelector("EventSelector")
svcMgr.EventSelector.RunNumber = RunNumber
svcMgr.EventSelector.EventsPerRun = 1
svcMgr.EventSelector.FirstEvent = 1
svcMgr.EventSelector.InitialTimeStamp = 0
svcMgr.EventSelector.TimeStampInterval = 1

##########################################################################
#          don't remove otherwise infinite loop                          #
##########################################################################
theApp.EvtMax = 1

###########################################################################
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.defaultLimit = 10000
svcMgr.MessageSvc.Format = "% F%20W%S%7W%R%T %0W%M"

svcMgr += CfgMgr.AthenaEventLoopMgr(OutputLevel = INFO)

from AthenaCommon.AppMgr import theAuditorSvc
from AthenaCommon.ConfigurableDb import getConfigurable
theAuditorSvc += getConfigurable("MemStatAuditor")(OutputLevel = WARNING)
theAuditorSvc += getConfigurable("ChronoAuditor")()
theAuditorSvc += getConfigurable("NameAuditor")()
| rushioda/PIXELVALID_athena | athena/LArCalorimeter/LArCalibTools/share/LArParamsFromDB2NTuple_jobOptions.py | LArParamsFromDB2NTuple_jobOptions.py | py | 6,957 | python | en | code | 1 | github-code | 13 |
41823447719 | import numpy as np
from PIL import Image
# Model weights: beta couples neighbouring pixels, eta couples a pixel to
# the observed image, h is a global bias on the labelling.
beta, eta, h = 1e-3, 2.1e-3, 0.0


def E(x, y):
    """Total Ising-style energy of labelling *x* given observed image *y*."""
    # Sum the four-neighbourhood of every pixel (each edge is counted from
    # both of its endpoints).
    neighbour_sum = np.zeros_like(x)
    neighbour_sum[:-1, :] = x[1:, :]       # below
    neighbour_sum[1:, :] += x[:-1, :]      # above
    neighbour_sum[:, :-1] += x[:, 1:]      # right
    neighbour_sum[:, 1:] += x[:, :-1]      # left
    pair_term = np.sum(neighbour_sum * x)
    data_term = np.sum(x * y)
    bias_term = np.sum(x)
    return h * bias_term - beta * pair_term - eta * data_term
def is_valid(i, j, shape):
    """Return True when coordinate (i, j) lies inside a grid of *shape*."""
    return 0 <= i < shape[0] and 0 <= j < shape[1]
def Elocal(E0, i, j, x, y):
    """Energy after flipping pixel (i, j) of x, computed incrementally.

    Returns (old_value, new_value, original_energy, flipped_energy).

    NOTE(review): E() counts every neighbour pair twice (once from each
    endpoint), but the pairwise adjustment below is applied only once per
    neighbour, so the effective beta here is half the one used by E().
    Confirm whether that is intentional.
    """
    old = x[i,j]
    new = old * (-1)
    # Swap old's contribution for new's in the bias and data terms.
    Enew=E0-h*old+h*new
    Enew+=eta*y[i,j]*old-eta*y[i,j]*new
    # In-bounds four-neighbourhood of (i, j).
    adjacent = [(0, 1), (0, -1), (1, 0), (-1, 0)]
    neighbors = [x[i + di, j + dj] for di, dj in adjacent
                 if is_valid(i + di, j + dj, x.shape)]
    Enew+=beta * sum(a * old for a in neighbors)
    Enew-= beta * sum(a * new for a in neighbors)
    return old,new,E0,Enew
def ICM(y):
    """One iterated-conditional-modes sweep: greedily flip pixels of *y*
    whenever a flip lowers the energy, and return the denoised array."""
    x = np.array(y)
    best_energy = E(x, y)
    for i, j in np.ndindex(y.shape):
        old, new, _, candidate_energy = Elocal(best_energy, i, j, x, y)
        if candidate_energy < best_energy:
            # Accept the flip and carry the improved energy forward.
            best_energy = candidate_energy
            x[i, j] = new
    return x
def sign(data, translate):
    """Map every element of *data* through the *translate* lookup table."""
    mapper = np.vectorize(translate.__getitem__)
    return mapper(np.array(data))
im = Image.open('flipped.png')
im.show()
data = sign(im.getdata(), {0: -1, 255: 1})
y = data.reshape(im.size[::-1])
result=ICM(y)
result = sign(result, {-1: 0, 1: 255})
output_image = Image.fromarray(np.uint8(result))
output_image=output_image.convert('1', dither=Image.NONE)
output_image.save('1.png')
output_image.show() | imwebson/learn | image/image.py | image.py | py | 1,589 | python | en | code | 0 | github-code | 13 |
17350992302 | '''This application connects to the USGS and OpenCage API's to collect earthquake data and store it in
a csv file that is overwritten whenever the app is ran'''
import requests, xmltodict, json, csv, toTime, creds
response = requests.get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson")
# if connected to USGS API
if response:
    # open the output file for writing (overwrites any previous run)
    with open("earthquakes.csv", "w") as output:
        writer = csv.writer(output, lineterminator="\n")
        # load USGS json data
        data = json.loads(response.text)
        # drill down to earthquakes
        quakes = data["features"]
        # loop through each quake and extract its properties
        for quake in quakes:
            mag = str(quake["properties"]["mag"])
            time = toTime.conv(quake["properties"]["time"])
            # GeoJSON coordinates are [longitude, latitude, depth]
            long = str(quake["geometry"]["coordinates"][0])
            lat = str(quake["geometry"]["coordinates"][1])
            coord = "(" + lat + ", " + long + ")"
            # put together the OpenCage reverse-geocoding API url
            base_url = "https://api.opencagedata.com/geocode/v1/xml?q="
            full_url = base_url + lat + "+" + long + creds.api_key
            response2 = requests.get(full_url)
            # if connected to OpenCage API
            if response2:
                data2 = xmltodict.parse(response2.text)
                category = data2["response"]["results"]["result"]["components"]["_category"]
                # if in ocean
                if category == "natural/water":
                    print("Magnitude " + mag + " earthquake on " + time + " and located at " + coord + " in the ocean.\n")
                    # write to output file (no county/state for water quakes)
                    row = [time, mag, lat, long, "N/A", "N/A"]
                    writer.writerow(row)
                # if on land:
                else:
                    country = data2["response"]["results"]["result"]["components"]["country"]
                    # if in USA (land quakes outside the US are skipped)
                    if country == "United States":
                        # if county attribute exists
                        try:
                            county = data2["response"]["results"]["result"]["components"]["county"]
                            state = data2["response"]["results"]["result"]["components"]["state"]
                            print("Magnitude " + mag + " earthquake on " + time + " and located at " + coord + " in " + county + ", " + state + ".\n")
                            # write to output file
                            row = [time, mag, lat, long, county, state]
                            writer.writerow(row)
                        # if county attribute missing
                        # NOTE(review): this bare except also hides unrelated
                        # errors (e.g. a missing "state" key); KeyError is the
                        # intended case
                        except:
                            print("Magnitude " + mag + " earthquake on " + time + " and located at " + coord + " in the US.\n")
                            # write to output file
                            row = [time, mag, lat, long, "N/A", "N/A"]
                            writer.writerow(row)
            # if couldn't connect to OpenCage API
            else:
                print("Could not connect to OpenCage API")
# if couldn't connect to USGS API
else:
    print("Could not connect to USGS API")
| JosephABB/earthquake_data_pub | main.py | main.py | py | 3,291 | python | en | code | 0 | github-code | 13 |
29753234806 | # 함수 실습 p.2
def personal_info(file, save, search):
    """Extract one field from a personal-info file and write it to *save*.

    The original duplicated the same extract-and-write routine three times
    (name/age/phone); collapsed into one helper driven by a keyword map.

    :param file: path of the input text file, one "Key: value" entry per line
    :param save: path of the output file (overwritten)
    :param search: which field to extract -- "이름" (name), "나이" (age) or
        "전화번호" (phone); any other value writes nothing
    """
    # Korean search term -> keyword that tags the matching lines in the file.
    keywords = {"이름": "Name", "나이": "Age", "전화번호": "Phone"}

    def _read_lines(path):
        # Read the whole input file and split it into lines.
        with open(path, "r") as f:
            return f.read().split("\n")

    def _extract(lines, keyword):
        # Keep only the lines containing *keyword* and overwrite *save*.
        matches = [line for line in lines if keyword in line]
        with open(save, "w") as f:
            f.write("\n".join(matches))

    keyword = keywords.get(search)
    if keyword is not None:
        _extract(_read_lines(file), keyword)
# personal_info("Personal_info.txt", "extract_info.txt", "이름")
# personal_info("Personal_info.txt","extract_info.txt", "나이")
# personal_info("Personal_info.txt","extract_info.txt", "전화번호")
| 1000hyehyang/Advanced-Python-Programming | week15/Ex07_01.py | Ex07_01.py | py | 1,394 | python | en | code | 0 | github-code | 13 |
11628410045 | #!/usr/bin/env python
# ===============================================================================
from . import Logger, __home__, __scratch__
from .campaigns import Campaign
from .databases import Database
from .datasets import Dataset
from .emulators import Emulator
from .evaluators import Evaluator
from .objects import Object
from .planners import (
Planner,
get_cat_planners_list,
get_cont_planners_list,
get_disc_planners_list,
get_planners_list,
)
from .plotter import Plotter
from .scalarizers import Scalarizer
from .surfaces import (
Surface,
list_cat_surfaces,
list_cont_surfaces,
list_surfaces,
)
# ===============================================================================
class Olympus(Object):
    """Master class of the olympus package"""

    def __init__(self, *args, **kwargs):
        """Set up home/scratch paths and a default database connection."""
        Object.__init__(**locals())
        self.home = __home__
        self.scratch = __scratch__
        self.database = Database()

    def _check_planner_param_type(self, planner, param_type):
        """Return True if *planner* appears in the list of planners that
        support *param_type* ('continuous', 'discrete' or 'categorical')."""
        map_ = {
            "continuous": get_cont_planners_list,
            "discrete": get_disc_planners_list,
            "categorical": get_cat_planners_list,
        }
        return planner in map_[param_type]()

    # *** Production ****************************************************************
    def run(
        self,
        planner="Phoenics",
        dataset="alkox",
        model="BayesNeuralNet",
        goal="default",
        campaign=Campaign(),
        database=Database(),
        num_iter=3,
    ):
        """Optimize an emulated dataset with the chosen planner.

        NOTE(review): Campaign()/Database() defaults are created once at
        definition time and shared across calls -- confirm this is intended.
        """
        # check the dataset type
        # TODO: can we check this without creating the object here??
        dataset_obj = Dataset(kind=dataset)
        for param_type in dataset_obj.param_types:
            pass
            # if not self._check_planner_param_type(planner, param_type):
            #     message = f'Planner {planner} cannot handle {param_type} parameters!'
            #     Logger.log(message, 'FATAL')
        if "continuous" in dataset_obj.param_types:
            # we need a NN emulator
            emulator = Emulator(dataset=dataset, model=model)
        else:
            # fully categorical and/or discrete, we use the dataset object as
            # a lookup table in place of the NN emulator, handled in Evaluator
            emulator = dataset_obj
        # "default" goal defers to whatever the dataset declares.
        if goal == "default":
            goal = dataset_obj.goal
        planner_ = Planner(kind=planner, goal=goal)
        # set links
        self.planner = planner_
        self.emulator = emulator
        self.campaign = campaign
        self.database = database
        # define evaluator and optimize
        self.evaluator = Evaluator(
            planner=planner_,
            emulator=emulator,
            campaign=campaign,
            database=database,
        )
        self.evaluator.optimize(num_iter=num_iter)

    def run_analytic(
        self,
        planner="Phoenics",
        surface="Dejong",
        param_dim=2,
        num_opts=None,
        goal="minimize",
        scalarizer=None,
        campaign=Campaign(),
        database=Database(),
        num_iter=3,
    ):
        """Optimize an analytic benchmark surface with the chosen planner."""
        self.planner = Planner(kind=planner, goal=goal)
        # check if surface is categorical, and check planner
        # param type compatibility
        if surface in list_cont_surfaces():
            if not "continuous" in self.planner.PARAM_TYPES:
                message = (
                    f"Planner {planner} does not support continuous parameters"
                )
                Logger.log(message, "FATAL")
        elif surface in list_cat_surfaces():
            if not "categorical" in self.planner.PARAM_TYPES:
                message = f"Planner {planner} does not support categorical parameters"
                Logger.log(message, "FATAL")
        self.surface = Surface(
            kind=surface, param_dim=param_dim, num_opts=num_opts
        )
        # Multi-objective surfaces require an explicit scalarizer.
        if len(self.surface.value_space) > 1 and not scalarizer:
            message = f"You must pass a scalarizer instance for multiobjective optimization in Olympus"
            Logger.log(message, "FATAL")
        elif len(self.surface.value_space) > 1 and scalarizer:
            self.scalarizer = scalarizer
        else:
            self.scalarizer = None
        self.campaign = campaign
        self.database = database
        self.evaluator = Evaluator(
            planner=self.planner,
            emulator=self.surface,
            campaign=self.campaign,
            scalarizer=self.scalarizer,
            database=self.database,
        )
        self.evaluator.optimize(num_iter=num_iter)

    def benchmark(
        self,
        dataset="alkox",
        planners="all",
        database=Database(kind="sqlite"),
        num_ind_runs=5,
        num_iter=3,
    ):
        """
        Run repeated optimizations of a dataset with several planners.

        Args:
            dataset (str): the dataset to use
            planners: "all" or an explicit list of planner names
            database: database collecting every campaign
            num_ind_runs (int): independent repeats per planner
            num_iter (int): optimization iterations per run
        """
        if planners == "all":
            planners = get_planners_list()
        for planner in planners:
            for _ in range(num_ind_runs):
                self.run(
                    planner=planner,
                    dataset=dataset,
                    database=database,
                    campaign=Campaign(),
                    num_iter=num_iter,
                )

    # *** Analysis ******************************************************************
    # def load_database(self, file_name):
    #     ''' connects to a database previously written by olympus
    #
    #     Args:
    #         file_name (str or list): path and name of the database file
    #     '''
    #     if hasattr(self, 'database'):
    #         self.database.from_file(file_name)
    #     else:
    #         self.database = Database().from_file(file_name)
    #
    #
    # def get_campaigns(self, file_names=[]):
    #     if len(file_names) > 0:
    #         return Database().from_file(file_names).get_campaigns()
    #     else:
    #         return self.database.get_campaigns()
# ===============================================================================
| aspuru-guzik-group/olympus | src/olympus/olympus.py | olympus.py | py | 5,968 | python | en | code | 70 | github-code | 13 |
26390923019 | #!/usr/bin/env python
"""
Constructs a PNG graph of workout data
"""
#from pylab import *
from datetime import datetime
class DataReader(object):
    """
    Reads a workout CSV file into a {datetime: rep_workouts} mapping
    """
    def __init__(self):
        self.data = {}

    def read_data(self, file_name):
        """
        Parses the given file and stores one entry per data line
        """
        with open(file_name, 'r') as file_stream:
            contents = file_stream.read()
        # The first line holds headers and the file ends with a newline,
        # so drop both the header and the trailing empty entry.
        for raw_line in contents.split("\n")[1:-1]:
            entry = DataLine()
            entry.set_from_csv_line(raw_line)
            self.data[entry.date_time] = entry.rep_workouts
class DataLine(object):
    """
    One workout record: when it happened and the rep workouts
    """
    def __init__(self):
        self.date_time = None   # datetime of the workout
        self.rep_workouts = []  # list of rep-count workouts

    def set_from_csv_line(self, csv_line):
        """
        Sets data members from a CSV line of the form
        "MM/DD/YY,HH:MM:SS PD,reps,reps,..."
        """
        import ast

        data_from_line = csv_line.split(",")
        # Assumption: date, time, 2 rep workouts
        assert len(data_from_line) == 6
        self.date_time = parse_datetime_from_standard_date_and_time(
            data_from_line[0],
            data_from_line[1] )
        # Assumption: from the 21st char on we should have csv workouts, which
        # look a lot like the inside of a list.
        # literal_eval only accepts Python literals; the original eval()
        # would have executed arbitrary expressions read from the file.
        self.rep_workouts = ast.literal_eval("[" + csv_line[21:] + "]")
def parse_datetime_from_standard_date_and_time(date, time):
    """
    Returns a datetime object built from "MM/DD/YY" and "HH:MM:SS PD"
    strings
    """
    date_parts = parse_standard_date(date)
    time_parts = parse_standard_time(time)
    # Integer division: we want the current millennium (e.g. 2000).  The
    # original used "/", which under Python 3 yields a float and makes
    # datetime() raise TypeError on a non-integer year.
    current_mill = (datetime.now().year // 1000) * 1000
    # Assumption: Date is in the form YY so the current
    # year is actually YY + current millennium
    date_time = datetime( current_mill + date_parts[2],
                          date_parts[0],
                          date_parts[1],
                          time_parts[0],
                          time_parts[1],
                          time_parts[2] )
    return date_time
def parse_standard_date(date):
    """
    Takes a string of the form "MM/DD/YY" and returns the list of ints
    [MM, DD, YY]
    """
    return list(map(int, date.split("/")))
def parse_standard_time(time):
    """
    Takes a string of the form "HH:MM:SS PD" and returns a list of ints
    that looks like [HH, MM, SS] in 24-hour time (<PD> is period AM|PM).
    """
    clock, _, period = time.partition(" ")
    # Assumption: exactly one space separates the clock from the period.
    assert period in ("AM", "PM")
    hour, minute, second = (int(i) for i in clock.split(":"))
    # 12-hour -> 24-hour: 12 AM is midnight (0) and 12 PM stays noon (12).
    # The original added 12 to every PM hour ("12:00:00 PM" became hour 24)
    # and also appended the "AM"/"PM" string to the returned int list.
    if period == "PM" and hour != 12:
        hour += 12
    elif period == "AM" and hour == 12:
        hour = 0
    return [hour, minute, second]
def same_date(date1, date2):
    """
    Checks whether two datetimes fall on the same year, month and day
    """
    first = (date1.year, date1.month, date1.day)
    second = (date2.year, date2.month, date2.day)
    return first == second
| mjgpy3/ExerciseManager | build_progress_graph.py | build_progress_graph.py | py | 3,209 | python | en | code | 0 | github-code | 13 |
32591192681 | import random
import requests
from .globals import bitcoin
def get_fee(tx):
    """Return the fee of *tx* in satoshis (input sum minus output sum)."""
    # bitcoind returns amounts in BTC; convert to satoshis.  Use round()
    # rather than int(): int() truncates and binary floats make values like
    # 0.29 * 100000000 come out as 28999999.999999996.
    inputsum = sum(
        round(
            bitcoin.getrawtransaction(inp["txid"], True)["vout"][inp["vout"]][
                "value"
            ]
            * 100000000
        )
        for inp in tx["vin"]
    )
    outputsum = sum(round(out["value"] * 100000000) for out in tx["vout"])
    return inputsum - outputsum
def get_outspends(txid):
    """Return the spend status of each output of *txid* (esplora API)."""
    return call_esplora(f"/tx/{txid}/outspends")
# Known esplora-compatible API hosts; call_esplora tries them in random
# order until one answers successfully.
esploras = [
    "https://mempool.space/api",
    "https://blockstream.info/api",
    "https://mempool.ninja/api",
    "https://mempool.emzy.de/api",
]
def call_esplora(path):
    """GET *path* from the first esplora host that answers successfully.

    Hosts are tried in random order; raises once every host has failed.
    """
    random.shuffle(esploras)
    for host in esploras:
        try:
            # Bound the wait so one unresponsive host cannot hang forever.
            r = requests.get(host + path, timeout=10)
            if r.ok:
                return r.json()
        except requests.exceptions.RequestException:
            # Covers ConnectionError as before, plus timeouts and other
            # transport errors; fall through to the next host.
            pass
    raise Exception("ALL ESPLORAS HAVE FAILED")
| fiatjaf/lnchannels | getdata/utils.py | utils.py | py | 1,065 | python | en | code | 23 | github-code | 13 |
23886654065 | #!/usr/bin/env python3
import colorsys, io, logging, math, pantilthat, picamera, random, socketserver, sys, threading, time
from http import server
# Single-page UI served at "/": a fullscreen MJPEG <img> plus key handlers
# (arrow keys, "+", "-") that POST the pressed key back to this server.
HTML_PAGE = """<!doctype html>
<html lang="en">
<head>
<meta name="charset" value="utf-8">
<title> Raspberry Webcam </title>
<style type="text/css">
html, body, img {
background-color: #000;
cursor: progress;
width: 100%;
height: 100%;
overflow: hidden;
padding: 0px;
margin: 0px;
}
</style>
<script type="text/javascript">
window.addEventListener( 'keyup', function( event ) {
var valid = [ 'ArrowLeft', 'ArrowRight', 'ArrowUp', 'ArrowDown', '+', '-' ];
if( valid.indexOf( event.key ) > -1 ) {
var request = new XMLHttpRequest();
request.open( 'POST', '//' + location.host + '/' + event.key );
request.send( null );
}
} );
</script>
</head>
<body>
<img src="stream.mjpg" />
</body>
</html>
"""
class StreamingOutput(object):
    """File-like sink for an MJPEG stream that publishes complete frames.

    The camera writes one JPEG after another; a chunk starting with the
    JPEG start-of-image marker begins a new frame, at which point the
    bytes buffered so far are published and waiting readers are woken.
    """

    def __init__(self):
        self.condition = threading.Condition()
        self.buffer = io.BytesIO()
        self.frame = None

    def write(self, buffer):
        if buffer.startswith(b'\xff\xd8'):
            # A new JPEG begins: publish everything buffered so far.
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buffer)
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    # Handle each client on its own daemon thread so one stalled stream
    # cannot block other requests, and allow quick restarts on the port.
    allow_reuse_address = True
    daemon_threads = True
# https://www.raspberrypi.org/forums/viewtopic.php?t=196337
class WebcamHandler(server.BaseHTTPRequestHandler):
    """Serves the index page and the MJPEG stream, and accepts POSTed key
    commands to drive the pan/tilt servos and the light."""

    def do_GET(self):
        # Stream endpoint: push frames forever as multipart/x-mixed-replace.
        if self.path in ('/camera.mjpg', '/stream.mjpg'):
            self.send_response(200)
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=WEBCAMFRAME')
            self.end_headers()
            while True:
                # Block until StreamingOutput publishes the next frame.
                with output.condition:
                    output.condition.wait()
                    frame = output.frame
                try:
                    self.wfile.write(b'--WEBCAMFRAME\r\n')
                    self.send_header('Content-Length', len(frame))
                    self.send_header('Content-Type', 'image/jpeg')
                    self.end_headers()
                    self.wfile.write(frame)
                    self.wfile.write(b'\r\n')
                except Exception as error:
                    # Client went away: stop streaming to this connection.
                    logging.warning('disconnected %s: %s', self.client_address, str(error))
                    print(error)
                    break
        elif self.path in ('', '/', '/index.htm', '/index.html'):
            content = HTML_PAGE.encode('utf-8')
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len(content))
            self.end_headers()
            self.wfile.write(content)
        else:
            self.send_error(404)
            self.end_headers()

    def do_POST(self):
        # Acknowledge immediately; key commands carry no response body.
        self.send_response(204)
        self.end_headers()
        # NOTE(review): Up/Down drive pan() and Left/Right drive tilt(),
        # the opposite of the usual pan=horizontal / tilt=vertical
        # convention -- possibly the HAT is mounted rotated; confirm.
        if self.path == '/ArrowUp':
            return self.do_PAN(- 7)
        if self.path == '/ArrowDown':
            return self.do_PAN(7)
        if self.path == '/ArrowLeft':
            return self.do_TILT(7)
        if self.path == '/ArrowRight':
            return self.do_TILT(- 7)
        if self.path == '/+':
            return self.do_LIGHT(255)
        if self.path == '/-':
            return self.do_LIGHT(64)
        print(str(self.path))

    def do_PAN(self, value=0, maximum=70):
        # Move relative to the current pan angle, clamped to +/-maximum.
        target = pantilthat.get_pan() + value
        if target < - maximum: target = - maximum
        if target > maximum: target = maximum
        print(value, target)
        pantilthat.pan(target)

    def do_TILT(self, value=0, maximum=70):
        # Move relative to the current tilt angle, clamped to +/-maximum.
        target = pantilthat.get_tilt() + value
        if target < - maximum: target = - maximum
        if target > maximum: target = maximum
        print(value, target)
        pantilthat.tilt(target)

    def do_LIGHT(self, value=0):
        # Set every WS2812 LED to the same level, clamped to 0..255.
        if value < 0: value = 0
        if value > 255: value = 255
        pantilthat.clear()
        pantilthat.set_all(value, value, value)
        pantilthat.show()
# http://docs.pimoroni.com/pantilthat/
if __name__ == '__main__':
    # Configure the HAT's WS2812 light strip and centre both servos.
    pantilthat.light_mode(pantilthat.WS2812)
    pantilthat.light_type(pantilthat.GRBW)
    pantilthat.pan(0)
    pantilthat.tilt(0)
    pantilthat.clear()
    pantilthat.show()
if __name__ == '__main__':
    # Wait for the system to settle (this runs as a boot-time service)
    # before grabbing the camera.
    time.sleep(13)
    # resolution = ( 1920, 1080 )
    resolution = (1024, 768)
    with picamera.PiCamera(resolution=resolution, framerate=25) as camera:
        camera.rotation = 180
        camera.led = True
        output = StreamingOutput()
        camera.start_recording(output, format='mjpeg')
        try:
            StreamingServer(('', 80), WebcamHandler).serve_forever()
        except BaseException:
            # The original used the Python 2 statement
            # "print >>sys.stderr, ...", which fails at runtime under the
            # python3 shebang of this script.
            print(sys.exc_info()[1], file=sys.stderr)
        finally:
            camera.stop_recording()
| hacker-bastl/raspberry | ansible/roles/camera/files/webcam.py | webcam.py | py | 5,540 | python | en | code | 2 | github-code | 13 |
39637045120 | """
Module of manager for managing multiple bulbs.
"""
import logging
import platform
import subprocess
import threading
import time
from copy import copy
from queue import Queue
from typing import List
import ifaddr
from sqlalchemy import func
from yeelight import discover_bulbs, BulbException
from .bulb import Bulb, session
from .color import Color
from .exception import BulbConnectionLostException
from .generator import ColorGenerator
class ThreadExtension:
    """ Container for threading objects """
    def __init__(self):
        # Worker thread that polls colors; created lazily by the manager.
        self.polling_thread = None
        # Set to request that the polling thread exits.
        self.polling_thread_exit_event = threading.Event()
        # Queue of pending events/commands for the worker.
        self.events_queue = Queue()
class BulbManager:
"""
Class for managing bulbs.
"""
def __init__(self, use_last_bulb: bool, bulb_ip_address: str,
effect: str = 'smooth', timeout: int = 5):
self.use_last_bulb = use_last_bulb
self.effect = effect
self.bulb_ip_address = bulb_ip_address
self.timeout = timeout
self.cached_bulbs = None
self.chosen_bulbs = set()
self.thread_ext = ThreadExtension()
self.cmd_map = {}
self._init_cmd_map()
def run_atmosphere(self, strategy, delay) -> None:
"""
Main entrypoint. Chooses bulb, starts colors polling thread.
:param strategy: defines screens areas to capture
:param delay: delay (secs) between generator yield colors
:return:
"""
self.choose()
if isinstance(self.thread_ext.polling_thread, threading.Thread):
# Stop current thread.
self.thread_ext.polling_thread_exit_event.set()
logging.info("Waiting for thread exit.")
time.sleep(1)
self.thread_ext.polling_thread_exit_event.clear()
self.thread_ext.polling_thread = threading.Thread(
target=self._colors_polling,
args=(strategy, delay, self.thread_ext.polling_thread_exit_event))
self.thread_ext.polling_thread.setDaemon(True)
self.thread_ext.polling_thread.start()
def gather_commands(self) -> None:
"""
Entrypoint for interaction with running program.
:return:
"""
desc = self._get_cmd_description()
while True:
cmd = input(desc)
cmd_num = int(cmd) if cmd.isnumeric() else None
if cmd_num in self.cmd_map:
cmd_execution = self.cmd_map[cmd_num]
func_cmd = cmd_execution["f"]
func_args = cmd_execution["args"]
func_kwargs = cmd_execution["kwargs"]
func_cmd(*func_args, **func_kwargs)
else:
logging.info("Bad command.")
def choose(self, reset=False) -> Bulb:
"""
Choose bulbs to interact with.
:param reset:
:return:
"""
result = None
if reset:
self.use_last_bulb = False
self.bulb_ip_address = None
self.cached_bulbs = None
if self.use_last_bulb:
last_bulb = self.get_last_bulb()
if last_bulb and last_bulb.is_valid():
last_bulb.last_ip = self.get_current_ip_by_bulb_id(last_bulb.id)
last_bulb.last_usage = func.now()
self.send_to_db(last_bulb)
result = last_bulb
else:
logging.info("Last bulb was not found.")
elif self.bulb_ip_address:
if self.is_bulb_alive(self.bulb_ip_address):
result = self.get_bulb_by_ip(self.bulb_ip_address)
else:
logging.info("IP %s is not active.", self.bulb_ip_address)
if not result:
result = self.choose_alive()
if result and not self.bulb_chosen(result):
self.chosen_bulbs.add(result)
return result
def choose_alive(self) -> (Bulb, None):
"""
Command line function to choose bulb.
:return: chosen bulb
"""
while True:
try:
bulbs = self.get_alive_bulbs()
variants = set()
for i, bulb in enumerate(bulbs):
print(f"{i}) {bulb.get('ip', None)}")
variants.add(str(i))
while True:
inp = input("Enter bulb number ('' for none): ")
if inp == '':
return None
if inp in variants:
break
choice = bulbs[int(inp)]
new_bulb = self.new_bulb_from_dict(choice)
if not self.bulb_chosen(new_bulb):
new_bulb.init_obj(new_bulb.last_ip, self.effect)
self.send_to_db(new_bulb)
break
except BulbConnectionLostException:
time.sleep(3)
self.get_alive_bulbs(True)
return new_bulb
def add_bulb(self) -> Bulb:
"""
Chose alive bulb and add to chosen set.
:return:
"""
result = self.choose_alive()
if result and not self.bulb_chosen(result):
self.chosen_bulbs.add(result)
return result
def get_alive_bulbs(self, reset=False) -> List[dict]:
"""
:param reset: flag to reset cached list of bulbs
:return: result of discover_bulbs(), list of dicts:
{'ip': '192.168.1.4', 'port': 55443, 'capabilities': {...}}
"""
if (not self.cached_bulbs) or reset:
tmp_res = []
srt_adapters = sorted(
ifaddr.get_adapters(), key=lambda a: tuple(sorted(
[ip.ip for ip in a.ips if isinstance(ip.ip, str)]
)),
reverse=True) # Sorting of adapters by IP, to visit local 192.* first
for adapter in srt_adapters:
logging.info("Start discover bulbs with %s s timeout "
"at interface %s.",
self.timeout, adapter.nice_name)
try:
tmp_res = discover_bulbs(self.timeout, adapter.name)
except OSError:
tmp_res = []
if tmp_res:
break
self.cached_bulbs = tmp_res
logging.info("Found %s bulbs.", len(self.cached_bulbs))
return self.cached_bulbs
def get_current_ip_by_bulb_id(self, id_) -> str:
"""
:param id_: bulb id given by discover_bulbs(), example: '0x00000000129f22a6'
:return: ip address string
"""
alive_bulbs = self.get_alive_bulbs()
current_ip = None
for bulb_d in alive_bulbs:
capabilities = bulb_d.get('capabilities', None)
if isinstance(capabilities, dict):
cur_id = capabilities.get('id', None)
if cur_id == id_:
cur_ip = bulb_d.get("ip", None)
current_ip = cur_ip
return current_ip
    def get_bulb_by_ip(self, ip_address) -> Bulb:
        """
        Build (and persist) a Bulb object for the given IP, if it is alive.

        :param ip_address: IP to look up among the discovered bulbs
        :return: a Bulb built from the matching discover_bulbs() entry,
                 or None when no alive bulb has this IP
        """
        bulbs = self.get_alive_bulbs()
        res = None
        for bulb in bulbs:
            if bulb.get('ip') == ip_address:
                res = bulb
                break
        new_bulb = None
        if res:
            new_bulb = self.new_bulb_from_dict(res)
            # Only initialise/persist bulbs that are not already chosen.
            if not self.bulb_chosen(new_bulb):
                new_bulb.init_obj(new_bulb.last_ip, self.effect)
                self.send_to_db(new_bulb)
        return new_bulb
@staticmethod
def get_last_bulb() -> Bulb:
"""
:return: last used bulb from database
"""
max_query = session.query(func.max(Bulb.last_usage))
bulb = session.query(Bulb).filter(Bulb.last_usage == max_query.scalar_subquery()).first()
if bulb and bulb.last_ip is None:
bulb = None
return bulb if bulb else None
def is_bulb_alive(self, ip_address) -> bool:
"""
:param ip_address: ip address of bulb
:return: True if a bulb is pinging
"""
res = self._ping_bulb(ip_address)
return res
def bulb_chosen(self, bulb: Bulb) -> bool:
"""
Check if bulb is already chosen by its IP.
:param bulb:
:return:
"""
return bulb.last_ip in [b.last_ip for b in self.chosen_bulbs]
    @staticmethod
    def send_to_db(inst: Bulb) -> None:
        """
        Saves instance to database.

        Uses merge (insert-or-update by primary key) followed by commit.

        :param inst: instance of Bulb
        :return: None
        """
        session.merge(inst)
        session.commit()
@staticmethod
def new_bulb_from_dict(b_dict) -> Bulb:
"""
Create Bulb object from dictionary (given by discover_bulbs())
:param b_dict: dict of bulb
:return: Bulb object
"""
tmp_bulb = Bulb()
caps = b_dict.get("capabilities", {})
tmp_bulb.id = caps.get("id", '')
tmp_bulb.name = caps.get("name", '')
tmp_bulb.last_usage = func.now()
tmp_bulb.last_ip = b_dict.get("ip", '')
return tmp_bulb
def change_color(self, color: Color, bulb: Bulb = None) -> None:
"""
Interface of changing bulb color, with catching connection errors.
:param color: Color object to set.
:param bulb: Bulb object to be changed,
if None given operation applies to all chosen bulbs of BulbManager.
:return: None
"""
bulbs = self.chosen_bulbs if not bulb else set(bulb)
# Safe iteration over copy to prevent changing by other threads.
for bulb_item in copy(bulbs):
try:
bulb_item.change_color(color)
except BulbException:
logging.info("Connection lost. Retrying... ")
try:
bulb_item.init_obj()
except BulbConnectionLostException:
self.chosen_bulbs.clear()
logging.info("Connection was not established.")
self.choose(True)
@staticmethod
def _ping_bulb(host) -> bool:
param = '-n' if platform.system().lower() == 'windows' else '-c'
command = ['ping', param, '1', host]
with subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True) as pipe:
_, errors = pipe.communicate()
return not errors
    def _colors_polling(self, strategy, delay, stop_event) -> None:
        """
        Creates color generator and changes colors of bulbs.

        Runs until stop_event is set; the stop check happens *after* each
        color is applied, so one extra color may be sent before stopping.

        :param strategy: defines screens areas to capture
        :param delay: delay (secs) between generator yield colors
        :param stop_event: if set the thread stops
        :return: None
        """
        for new_color in ColorGenerator(strategy).generator(delay):
            self.change_color(new_color)
            if stop_event.is_set():
                logging.info("Thread %s stopped.", threading.get_ident())
                break
    def _init_cmd_map(self):
        """
        Creates the command-line menu map.

        Each entry maps a command number to its description, the callable
        to invoke ("f") and the positional/keyword arguments to pass.

        :return: None
        """
        self.cmd_map = {
            1: {
                "desc": "Add bulb.",
                "f": self.add_bulb,
                "args": (),
                "kwargs": {}
            },
        }
def _get_cmd_description(self) -> str:
"""
:return: String representation of available commands.
"""
start = "Available commands:"
lines = []
for cmd_num, cmd_dict in self.cmd_map.items():
desc = cmd_dict.get("desc")
lines.append(f"{cmd_num}. {desc}")
return "\n".join([start, ] + lines + ["\n", ])
| NikSavilov/yeelight-atmosphere | src/yeelight_atmosphere/manager.py | manager.py | py | 11,882 | python | en | code | 0 | github-code | 13 |
12229802582 | from aiogram import Bot, types
import logging
from datetime import datetime
import asyncio
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from aiogram.types import *
from zqmpwork import Parcer
from time import sleep
# Bot and dispatcher setup. NOTE(review): the token is a placeholder and
# should come from configuration/environment, not source code.
bot = Bot(token='YOUR TOKKEN')
dp = Dispatcher(bot)
logging.basicConfig(level=logging.INFO)
# Greeting keyboard with a single "start" button.
button_hello = KeyboardButton('🧨 Начать 🧨')
greet_kb = ReplyKeyboardMarkup(resize_keyboard=True)
greet_kb.add(button_hello)
# Main menu keyboard.
keyboardMain = ReplyKeyboardMarkup(resize_keyboard=True)
button_main = ['Список оружий 🛠']
keyboardMain.add(*button_main)
# Single "back" button shown next to every listed item.
button_back = ReplyKeyboardMarkup(resize_keyboard=True).add('Назад ↩️')
# Weapon-selection keyboard.
keyboardWeapon = ReplyKeyboardMarkup(resize_keyboard=True)
but_gun = ['AK', 'M4', 'GLOCK', 'AWP', 'DEAGLE']
keyboardWeapon.add(*but_gun)
@dp.message_handler(commands='start')
async def press_start(message: types.Message):
    """Handle /start: send the welcome text with the greeting keyboard."""
    await message.answer('Добро пожаловать в CEDIC. С помощью нашего бота ты сможешь:\n1)Узнавать скидки на скины 😁\n2) Забирать скины быстрее всех 🥵', reply_markup=greet_kb)
@dp.message_handler(lambda message: message.text=='🧨 Начать 🧨')
async def start(message: types.Message):
    """React to the greeting button: switch the user to the main menu."""
    await message.answer('Хорошо, давай начнем...\nВыбери операцию ниже 👇🏼', reply_markup=keyboardMain)
@dp.message_handler(lambda message: message.text=='Список оружий 🛠')
async def all_weapons(message: types.Message):
    """Show the weapon-selection keyboard."""
    await message.answer('Выбери, какое оружие тебе по душе!', reply_markup=keyboardWeapon)
@dp.message_handler(lambda message: message.text=='AK')
async def ak(message: types.Message):
    """List every AK-47 skin offer, then a single closing message.

    FIX: the original `else` inside the loop sent the "that's all" message
    for *every* non-matching item; it is now sent once, after the listing.
    """
    await message.reply('Загружаем информацию...')
    mess = Parcer().show_all()
    for item in mess:
        if 'AK-47' in item:
            await message.answer(f'{item}', reply_markup=button_back)
    await message.answer('На этом всё 😥')
@dp.message_handler(lambda message: message.text=='M4')
async def m4(message: types.Message):
    """List every M4A4/M4A1-S skin offer, then a single closing message.

    FIX: renamed from the duplicate `ak` (four handlers shared that name),
    and the "that's all" message is now sent once after the listing rather
    than once per non-matching item.
    """
    await message.reply('Загружаем информацию...')
    mess = Parcer().show_all()
    for item in mess:
        if 'M4A4' in item or 'M4A1-S' in item:
            await message.answer(f'{item}', reply_markup=button_back)
    await message.answer('На этом всё 😥')
@dp.message_handler(lambda message: message.text=='GLOCK')
async def glock(message: types.Message):
    """List every GLOCK skin offer, then a single closing message.

    FIX: renamed from the duplicate `ak`; closing message sent once.
    """
    await message.reply('Загружаем информацию...')
    mess = Parcer().show_all()
    for item in mess:
        if 'GLOCK' in item:
            await message.answer(f'{item}', reply_markup=button_back)
    await message.answer('На этом всё 😥')
@dp.message_handler(lambda message: message.text=='AWP')
async def awp(message: types.Message):
    """List every AWP skin offer, then a single closing message.

    FIX: renamed from the duplicate `ak`; closing message sent once.
    """
    await message.reply('Загружаем информацию...')
    mess = Parcer().show_all()
    for item in mess:
        if 'AWP' in item:
            await message.answer(f'{item}', reply_markup=button_back)
    await message.answer('На этом всё 😥')
@dp.message_handler(lambda message: message.text=='DEAGLE')
async def deagle(message: types.Message):
    """List every DEAGLE skin offer, then a single closing message.

    FIX: renamed from the duplicate `ak`; closing message sent once.
    """
    await message.reply('Загружаем информацию...')
    mess = Parcer().show_all()
    for item in mess:
        if 'DEAGLE' in item:
            await message.answer(f'{item}', reply_markup=button_back)
    await message.answer('На этом всё 😥')
@dp.message_handler(lambda message: message.text=='Назад ↩️')
async def get_back(message: types.Message):
    """Return the user from a weapon listing to the main menu."""
    await message.answer('Возвращаемся...')
    await message.answer('Хорошо, давай начнем...\nВыбери операцию ниже 👇🏼', reply_markup=keyboardMain)
async def update_data(time_for):
    """Periodically refresh parser data, appending an audit line to the log.

    :param time_for: seconds to sleep between refresh attempts
    FIX: corrected the misspelled log messages
    ("was updates succesfully" / "wasnt updates").
    """
    while True:
        now = datetime.utcnow()
        try:
            Parcer().get_data()
            with open('data/log.txt', 'a', encoding='utf-8') as file:
                file.write(f'[+] {now} | Data was updated successfully\n')
        except Exception as ex:
            # Broad catch is deliberate: the refresh loop must survive any
            # parser failure; the error is recorded in the log file.
            with open('data/log.txt', 'a', encoding='utf-8') as file:
                file.write(f'[-] {now} | {ex} | Data was not updated\n')
        await asyncio.sleep(time_for)
if __name__ == '__main__':
    # Schedule the periodic data refresh (every 40 s) on the event loop
    # before the polling loop starts.
    # NOTE(review): asyncio.get_event_loop() at startup is deprecated on
    # newer Python versions — confirm against the target runtime.
    loop = asyncio.get_event_loop()
    loop.create_task(update_data(40))
executor.start_polling(dp, skip_updates=True) | prn-ic/myfsttgbot | bot.py | bot.py | py | 4,360 | python | ru | code | 0 | github-code | 13 |
38760984852 | # import necessary modules
from bs4 import BeautifulSoup
import requests
import datetime
import pandas as pd
# create the file
# Create the Excel workbook that will receive the scraped data.
writer = pd.ExcelWriter('data.xlsx', engine='xlsxwriter')
# data source
url = "https://coinmarketcap.com/"
# get the data from the source
result = requests.get(url).text
doc = BeautifulSoup(result, "html.parser")
tbody = doc.tbody
trs = tbody.contents
names = []
current_price = []
mkap = []
supply = []
volume = []
# Extract the first 10 coins. NOTE(review): column positions (indices 2:4,
# 7, 8:10) are hard-coded against the current page layout — fragile.
for tr in trs[:10]:
    name, price = tr.contents[2:4]
    fixed_name = name.p.string
    fixed_prices = price.a.string
    mcap, sup = tr.contents[8:10]
    fixed_mcap = mcap.a.string
    fixed_sup = sup.p.string
    vol = tr.contents[7]
    fixed_vol = vol.span.string
    names.append(fixed_name)
    current_price.append(fixed_prices)
    mkap.append(fixed_mcap)
    supply.append(fixed_sup)
    volume.append(fixed_vol)
# zip the extracted data together, one tuple per coin
final = (list(zip(names, current_price, mkap, volume, supply)))
# get the time of document creation
tm = datetime.datetime.now()
date_time = tm.strftime("%m/%d/%Y, %H:%M:%S")
stamp = f"Data generated at: {date_time}"
df = pd.DataFrame(final, columns=["Name", "Price", "Market Cap", "Volume", "Supply"])
# write the data in the workbook
df.to_excel(writer, sheet_name="Crypto Data", index=False)
# get workbook
workbook = writer.book
# get Crypto Data sheet
worksheet = writer.sheets['Crypto Data']
# Timestamp below the data table (row 16).
worksheet.write(15, 0, stamp)
# widen all five data columns
worksheet.set_column(0, 4, 20)
writer.close()
| GKK7/Projects | Web projects/Crypto.py | Crypto.py | py | 1,613 | python | en | code | 2 | github-code | 13 |
74632619216 | # Jason Morris
# Work in progress
# June 1, 2016
import json, requests
print('')

# Load all records dumped by the upstream scraper.
# FIX: both files are now opened with context managers so handles are
# closed even if parsing fails (the originals were closed manually).
with open('airQualityIn.json') as data_in_file:
    dataIn = json.load(data_in_file)

# Group AUSTIN records per site, keyed by "name*&&*lat*&&*lng" so the
# coordinates can be recovered from the key later.
dataOut = {}
for row in dataIn:
    if row['locality'] == 'AUSTIN':
        siteIdentity = (str(row['facility_site_name']) + "*&&*"
                        + str(row['latitude_msr']) + "*&&*"
                        + str(row['longitude_msr']))
        # setdefault replaces the original if/else around first insertion.
        dataOut.setdefault(siteIdentity, []).append(row)

# Use the grouped data to emit one map marker per site.
uniqueDataDict = dataOut
dataOut = {}
for count, site in enumerate(uniqueDataDict):
    siteArray = site.split("*&&*")
    buildingJson = {
        'layer': 'air_quality',
        'message': "SITE: " + str(siteArray[0]),
        'data': uniqueDataDict[site],
        'lat': float(siteArray[1]),
        'lng': float(siteArray[2]),
        'enable': ['click'],
        'logic': 'emit',
        'icon': {
            'type': 'awesomeMarker',
            'icon': 'cloud',
            'markerColor': 'blue',
            'iconColor': 'white',
            'prefix': 'fa',
        },
    }
    dataOut['air' + str(count)] = buildingJson

with open('airQualityOut.json', 'w') as data_out_file:
    data_out_file.write(json.dumps(dataOut, indent=4, sort_keys=True))
print('') | blayne/atxgreenatlas | public/data/airQualityParser.py | airQualityParser.py | py | 1,570 | python | en | code | 0 | github-code | 13 |
17733262827 | '''pmf.py: probabilistic matrix factorization using variational inference
Model:
U: user vectors of rank n, Ixn
V: item vectors of rank n, Jxn
M: Ratings matrix, IxJ
m_ij = rating for item j given by user i.
Likelihood:
P(m_ij | u_i, v_j) = N((u_i)^T v_j, tau^2)
Priors:
P(u_i) = prod_l=1^n N(u_il, diag(nu_il^2))
P(v_j) = prod_l=1^n N(v_jl, diag(rho_i^2))
Posterior:
P(U,V|M) = P(M|U,V)P(U)P(V)/P(M)
=> Intractable due to P(M)
Maximum a Posteriori:
U, V = argmax(U, V) P(U,V|M)
(MaP gets rid of P(M), so it is doable)
Approximate posterior estimation (This project):
Exact inference is intractable
=> So, perform variational inference
Let Q(U, V) be the approximate posterior distribution.
Applying the mean-field approximation, Q is factorized as:
Q(U, V) = Q(U) Q(V)
Let Q(U) and Q(V) be gaussians such that:
Q(u_i) = N(u_i, phi_i)
V(v_j) = N(v_j, psi_j)
The code below maximizes the variational lower bound F(Q(U), Q(V)),
with respect to variational parameters U, Phi, V and Psi (E-step) and
model parameters sigma^2, rho^2 and tau^2.
'''
import numpy as np
from tqdm.auto import trange
def user_params(user_count, rank):
    """Initialise variational parameters for the user factors.

    Returns (u, phi): u is a (user_count, rank) matrix of posterior means
    drawn from N(0, 1); phi is a (user_count, rank, rank) stack of
    posterior covariances, initialised to zero.
    """
    means = np.random.normal(loc=0, scale=1, size=(user_count, rank))
    covariances = np.zeros((user_count, rank, rank))
    return (means, covariances)
def item_params(item_count, rank):
    """Initialise variational parameters for the item factors.

    Returns (v, psi): v is an (item_count, rank) matrix of posterior means
    drawn from N(0, 1); psi is an (item_count, rank, rank) stack of
    posterior covariances, initialised to zero.
    """
    means = np.random.normal(loc=0, scale=1, size=(item_count, rank))
    covariances = np.zeros((item_count, rank, rank))
    return (means, covariances)
def model_params(user_count, item_count, rank):
    """Initialise the model (hyper)parameters.

    Returns (tau2, sigma2, rho2): observation noise variance, user-prior
    variances (ones) and item-prior variances (1/rank each).

    FIX: removed the unused locals u0 and v0 that were computed but never
    returned. user_count/item_count are kept for interface compatibility
    even though the initial values do not depend on them.
    """
    tau2 = 1
    sigma2 = np.ones(rank)
    rho2 = np.ones(rank) * 1/rank
    return (tau2, sigma2, rho2)
def var_inference(ratings, tau2, sigma2, rho2, u, phi, v, psi):
    """One round of mean-field variational updates for PMF.

    ratings is an iterable of (user, item, rating) triples. Updates the
    variational parameters u, phi (user means/covariances) and v, psi
    (item means/covariances) IN PLACE, then also returns them as
    ((u, phi), (v, psi)).
    """
    user_count = len(u)
    item_count = len(v)
    rank = len(sigma2)
    # Per-item accumulators for the item update: S starts at the item
    # prior precision diag(rho2); t accumulates the weighted means.
    S = np.array([np.diag(rho2) for _ in range(item_count)])
    t = np.zeros((item_count, rank))
    inv = np.linalg.inv
    for user in range(user_count):
        # Covariance update: prior precision + sum of item second moments.
        sum_term = sum(psi[j] + np.outer(v[j], v[j])
                       for i, j, r in ratings if i == user)
        sum_term = sum_term/tau2 + np.zeros((rank, rank))
        phi_ = inv(np.diag(1/sigma2) + sum_term)
        phi[user] = phi_
        # Mean update: covariance times the rating-weighted item means.
        sum_term = sum(r * v[j]
                       for i, j, r in ratings if i == user)
        sum_term = sum_term/tau2 + np.zeros(rank)
        u[user] = np.dot(phi_, sum_term)
        # Fold this user's fresh statistics into the item accumulators.
        for item, r in ((j, r) for i, j, r in ratings if i == user):
            S[item] += (phi_ + np.outer(u[user], u[user])) / tau2
            t[item] += (r * u[user]) / tau2
    for item in range(item_count):
        psi[item] = inv(S[item])
        v[item] = np.dot(psi[item], t[item])
    return ((u, phi), (v, psi))
def expectation(ratings, tau2, sigma2, rho2, u, phi, v, psi):
    """E-step of EM: delegate to the variational-inference update."""
    return var_inference(ratings, tau2, sigma2, rho2, u, phi, v, psi)
def maximization(ratings, tau2, sigma2, rho2, u, phi, v, psi):
    """M-step: re-estimate model parameters from the variational posteriors.

    Updates sigma2 in place (per-dimension user-prior variance) and
    recomputes tau2 (observation noise variance).

    NOTE(review): rho2 is returned unchanged and item_count is unused,
    although the module docstring says rho2 is also maximised — confirm
    whether an rho2 update was intended here.
    """
    user_count = len(u)
    item_count = len(v)
    rank = len(sigma2)
    for l in range(rank):
        # Second moment of u along dimension l: variance + squared mean.
        sum_term = sum(phi[j, l, l] + u[j, l]**2 for j in range(user_count))
        sigma2[l] = sum_term/(user_count-1)
    sum_term = 0.0
    for i, j, r in ratings:
        # E[(r - u_i^T v_j)^2] using the full second moments of u_i and v_j.
        part1 = phi[i] + np.outer(u[i], u[i])
        part2 = psi[j] + np.outer(v[j], v[j])
        sum_term += r**2 - 2*r*np.dot(u[i], v[j]) + np.sum(part1 * part2) # tr(AB)
    tau2 = sum_term/(len(ratings)-1)
    return tau2, sigma2, rho2
def error(ratings, u, v):
    """Root-mean-square error of the predictions u_i . v_j over the
    observed (user, item, rating) triples.

    FIX: the closing line of this function was fused with non-Python
    dataset residue, breaking the file's syntax; the residue is removed.
    """
    return np.sqrt(np.mean([(np.dot(u[i], v[j]) - m)**2 for i, j, m in ratings]))
37951534158 |
# Athena jobOptions: configure the InDet beam-spot finder.
# Input files
import AthenaPoolCnvSvc.ReadAthenaPool
ServiceMgr.EventSelector.InputCollections = ["aod.pool.root"]
# Number of events to be processed (-1 = all; framework default is 10)
theApp.EvtMax = -1
ToolSvc = Service('ToolSvc')
# Select Vertex or Track Algorithms
useVertex = True
# Do Histogramming
doHists = False
if useVertex:
    from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotVertex
    InDetBeamSpotVertex = InDet__InDetBeamSpotVertex(OutputLevel = INFO,
                                                     DoHists = doHists,
                                                     UseLikelihood = True
                                                     )
    ToolSvc += InDetBeamSpotVertex
else:
    from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotTrackChi2
    InDetBeamSpotTrackChi2 = InDet__InDetBeamSpotTrackChi2(OutputLevel = INFO)
    ToolSvc += InDetBeamSpotTrackChi2
# Optional chi2-tool tuning, left at defaults:
# InDetBeamSpotTrackChi2.nTracks_0 = 500
# InDetBeamSpotTrackChi2.nTracks_1 = 1500
# InDetBeamSpotTrackChi2.nTracks = InDetBeamSpotTrackChi2.nTracks_0 + InDetBeamSpotTrackChi2.nTracks_1
# Use default or TrackParticleCandidate for AOD
# InDetBeamSpotTrackChi2.TrackContainer = "TrackParticleCandidate"
# For output to a Database
from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotDbWriterTool
InDetBeamSpotDbWriterTool = InDet__InDetBeamSpotDbWriterTool(OutputLevel = INFO,
                                                             Tag = "nominal")
ToolSvc += InDetBeamSpotDbWriterTool
# Main Algorithm
from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotFinder as InDetBeamSpotFinder
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
topSequence += InDetBeamSpotFinder()
# Rebind the name to the configured instance on the sequence.
InDetBeamSpotFinder = InDetBeamSpotFinder(OutputLevel = INFO,
                                          DoHists = doHists,
                                          WriteDb = True)
if useVertex:
    InDetBeamSpotFinder.BeamSpotTool = InDetBeamSpotVertex
else:
    InDetBeamSpotFinder.BeamSpotTool = InDetBeamSpotTrackChi2
# Call the histogramming service
if doHists:
    # produce ROOT ntuple using THistSvc
    from AthenaCommon.AppMgr import ServiceMgr
    if not hasattr(ServiceMgr, 'THistSvc'):
        from GaudiSvc.GaudiSvcConf import THistSvc
        ServiceMgr += THistSvc()
    ServiceMgr.THistSvc.Output+=[ "INDETBEAMSPOTFINDER DATAFILE='beamspot.root' OPT='RECREATE'"]
# Write database to a sqlite file
from AthenaCommon.AppMgr import ToolSvc
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from IOVDbSvc.IOVDbSvcConf import IOVDbSvc
svcMgr += IOVDbSvc()
#IOVDbSvc.dbConnection = "impl=cool;techno=sqlite;schema=beamspot.db;X:TESTCOOL"
IOVDbSvc.dbConnection = "sqlite://;schema=beamspot.db;dbname=BEAMSPOT"
| rushioda/PIXELVALID_athena | athena/InnerDetector/InDetCalibAlgs/InDetBeamSpotFinder/share/InDetBeamSpotFinder_jobOptions.py | InDetBeamSpotFinder_jobOptions.py | py | 2,856 | python | en | code | 1 | github-code | 13 |
21871943494 | import RPi.GPIO as GPIO
import time
# Fade an LED on BCM pin 24 up and down ten times using software PWM.
leds = [24]
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(leds, GPIO.OUT)
# 120 Hz PWM on pin 24; p.start(duty) restarts with the new duty cycle.
p = GPIO.PWM(24, 120)
for n in range(10):
    # Ramp brightness up: duty cycle 0 -> 99.
    for i in range(100):
        p.start(i)
        time.sleep(0.003)
    p.stop()
    # Ramp brightness down: duty cycle 100 -> 1.
    for i in range(100):
        p.start(100 - i)
        time.sleep(0.003)
    p.stop()
GPIO.cleanup() | mershavka/get | examples/4-3-dac-pwm.py | 4-3-dac-pwm.py | py | 364 | python | en | code | 0 | github-code | 13 |
71130597139 | import numpy as np
# Hard-margin SVM on three points, solved symbolically via the dual problem.
x = np.array([[3,3],[4,3],[1,1]])
x_0 = np.array([[3,4,1],[3,3,1]])
y = np.array([1,1,-1])
N,M = x.shape
w = np.ones(M)
b = 0.0
import sympy as sp
# Primal objective ||w||^2/2 and the three margin constraints (unused below,
# kept for reference).
w1,w2,B,lam1,lam2,lam3 = sp.symbols('w1 w2 B lam1 lam2 lam3')
f = (w1**2+w2**2)/2
st1 = 1 - (3*w1 + 3*w2 + B)
st2 = 1 - (4*w1 + 3*w2 + B)
st3 = 1 - (-1*w1 + -1*w2 + -B)
# Build the dual quadratic form sum_ij lam_i lam_j y_i y_j <x_i, x_j>.
sigma = 0
lams = [lam1,lam2,lam3]
for i in range(N):
    for j in range(N):
        sigma += lams[i] * lams[j] * y[i] * y[j] * x[i].dot(x[j])
print(sigma)
# Dual objective (to minimise) and the equality constraint sum_i y_i lam_i = 0,
# used here to eliminate lam3 = lam1 + lam2.
f = sigma/2 - lam1 - lam2 - lam3
st = y[0]*lam1 + y[1]*lam2 + y[2]*lam3
f = f.subs(lam3,lam1+lam2)
d_lam1 = sp.diff(f,lam1)
d_lam2 = sp.diff(f,lam2)
# print(d_lam1)
# print(d_lam2)
# Unconstrained stationary point (may violate lam >= 0), then the two
# boundary candidates lam1 = 0 and lam2 = 0; keep whichever gives the
# smaller objective value.
solve = sp.solve([d_lam1,d_lam2],lam1,lam2)
solve1 = sp.solve([sp.diff(f.subs(lam1,0),lam2)],lam2)
solve1[lam1] = 0
min_subs = f.subs(solve1)
solve = solve1
solve2 = sp.solve([sp.diff(f.subs(lam2,0),lam1)],lam1)
solve2[lam2] = 0
if f.subs(solve2) < min_subs:
    solve = solve2
result = np.array([
    float(solve[lam1]),
    float(solve[lam2]),
    float(solve[lam1]+solve[lam2])
])
print(result)
# Recover w = sum_i lam_i y_i x_i and b from a support vector (index 0).
w = np.dot(result*y,x)
print(w)
b = y[0] - np.dot(result*y,np.dot(x,x[0]))
print(b)
print(result*y,np.dot(x,x[0]))
print(x,x[0]) | patricklan2/test | SVM/SVM2.py | SVM2.py | py | 1,216 | python | en | code | 0 | github-code | 13 |
1029823680 | # Program to check if a sentence is Pangram or not.
"""
A pangram is a sentence where every letter of the English alphabet appears at least once.
Given a string sentence containing only lowercase English letters, return true if sentence is a pangram, or false otherwise.
"""
class Solution(object):
    def checkIfPangram(self, sentence):
        """
        Return True if *sentence* contains every lowercase English letter.

        :type sentence: str
        :rtype: bool
        """
        # Set containment does the whole 26-letter check in one pass,
        # replacing the hand-written alphabet list and the O(26*n)
        # membership scans over a character list.
        return set('abcdefghijklmnopqrstuvwxyz') <= set(sentence)
# Quick manual check: 'the' is not a pangram, so this prints False.
obj = Solution()
print(obj.checkIfPangram('the'))
| kumarvaibhav2003/LeetCodeProblems | pangram_check.py | pangram_check.py | py | 897 | python | en | code | 0 | github-code | 13 |
27014271335 | # Sample Gunicorn configuration file.
import multiprocessing
#
# Server socket
#
bind = "127.0.0.1:8018"
# bind = 'unix:/tmp/gunicorn.api-nethub.sock'
backlog = 2048
#
# Worker processes
#
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
timeout = 120
keepalive = 2
#
# Debugging
#
# NOTE(review): debug=True on what looks like a production config — confirm.
debug = True
spew = False
#
# Server mechanics
#
daemon = False
pidfile = "/var/python-apps/alquiler_perfil/run/lock.lock"
umask = 0
user = "www-data"
group = None
tmp_upload_dir = "/var/python-apps/alquiler_perfil/tmp/"
#
# Logging
#
loglevel = 'info'
errorlog = '/var/python-apps/alquiler_perfil/log/error.log'
accesslog = '/var/python-apps/alquiler_perfil/log/access.log'
#
# Process naming
#
proc_name = "alquiler_perfil-unix"
preload = True | arleincho/publish | facenew/gunicorn.py | gunicorn.py | py | 758 | python | en | code | 0 | github-code | 13 |
3721174060 | '''
A transformation sequence from word beginWord to word endWord using a dictionary wordList is a sequence of words beginWord -> s1 -> s2 -> ... -> sk such that:
Every adjacent pair of words differs by a single letter.
Every si for 1 <= i <= k is in wordList. Note that beginWord does not need to be in wordList.
sk == endWord
Given two words, beginWord and endWord, and a dictionary wordList, return the number of words in the shortest transformation sequence from beginWord to endWord, or 0 if no such sequence exists.
'''
class Solution:
    """Word Ladder (LeetCode 127), BFS solution. Credit noted in the
    original: kevined51.

    FIX: the BFS statements sat loose in the class body, referencing names
    (wordList, beginWord, endWord) that were never defined there — the
    enclosing method definition was missing entirely, so the class could
    not even be executed. The code is restored as the `ladderLength`
    method; the dead commented-out (time-limit-exceeded) draft and the
    dataset residue fused to the final line are removed.
    """

    def ladderLength(self, beginWord, endWord, wordList):
        """Return the number of words in the shortest transformation
        sequence from beginWord to endWord, or 0 if none exists."""
        from collections import deque

        words = set(wordList)
        # Each queue entry: [word, number of words on the path so far].
        queue = deque([[beginWord, 1]])
        while queue:
            word, length = queue.popleft()
            if word == endWord:
                # BFS guarantees the first arrival is via a shortest path.
                return length
            for i in range(len(word)):
                for c in 'abcdefghijklmnopqrstuvwxyz':
                    next_word = word[:i] + c + word[i + 1:]
                    if next_word in words:
                        # Remove so each word is enqueued at most once.
                        words.remove(next_word)
                        queue.append([next_word, length + 1])
        return 0
73693523217 | # -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
# Beecrowd 1008: read employee number, hours worked and hourly rate from
# stdin, then print the number and the monthly salary.
# FIX: the final print line was fused with non-Python dataset residue,
# breaking the file's syntax; the residue is removed.
func_num = int(input())
horas_trab = int(input())
valor_hora = float(input())
salario = horas_trab * valor_hora
print(f"NUMBER = {func_num}")
print(f"SALARY = U$ {salario:.2f}")
29718420631 | import numpy as np
import cv2
# Load an image, draw a red rectangle and large text on it, then display.
img = cv2.imread('watch.jpg',cv2.IMREAD_COLOR)# read
#cv2.line(img,(0,0),(200,300),(255,255,255),50) #line
# BGR colour (0,0,255) = red, thickness 15.
cv2.rectangle(img,(500,250),(1000,500),(0,0,255),15) # rectangle
font = cv2.FONT_HERSHEY_SIMPLEX
# Negative x origin deliberately clips the text at the left edge.
cv2.putText(img,'YOLO',(-100,100), font, 6, (200,255,155), 13, cv2.LINE_AA)
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows() | aman15012/stuffPrecog | opencv/code3.py | code3.py | py | 377 | python | en | code | 0 | github-code | 13 |
18994412933 | import disnake
from disnake.ext import commands
from utils import database, checks
class CloseSession(commands.Cog):
    """Slash command that closes the currently active (casino) session.

    Admin-only; all responses are ephemeral embeds.
    """
    def __init__(self, bot: disnake.ext.commands.Bot):
        self.bot = bot
    @commands.slash_command(
        guild_only=True,
        name="закрыть-активную-сессию",
        description="Закрыть активную сессию",
    )
    async def close_session(self, inter: disnake.ApplicationCommandInteraction):
        """Close the active session, refusing non-admins and the no-session case."""
        # Reject non-administrators with an error embed.
        if not await checks.is_admin(inter=inter):
            return await inter.response.send_message(
                embed=disnake.Embed(
                    title="ОШИБКА",
                    description="Вы должны быть администратором для использования данной команды!",
                    color=disnake.Color.red(),
                ),
                ephemeral=True,
            )
        current_session = await database.get_current_session_name()
        # No active session: nothing to close.
        if current_session is None:
            return await inter.response.send_message(
                embed=disnake.Embed(
                    title="ОШИБКА",
                    description="Активной сессии не существует!",
                    color=disnake.Color.red(),
                ),
                ephemeral=True,
            )
        await database.close_current_session()
        return await inter.response.send_message(
            embed=disnake.Embed(
                title="УСПЕХ",
                description=f"Сессия `{current_session}` закрыта!",
                color=disnake.Color.orange(),
            ),
            ephemeral=True,
        )
def setup(bot: commands.Bot):
    """Extension entry point called by disnake when this cog is loaded."""
    bot.add_cog(CloseSession(bot))
    print(f">{__name__} is launched")
| Komo4ekoI/DiscordCasinoBot | cogs/commands/close_session.py | close_session.py | py | 1,812 | python | ru | code | 0 | github-code | 13 |
10569109750 | n = int(input())
numbers = list(map(int,input().split()))
dp = [0 for _ in range(n)]
dp[0] = numbers[0]
maxx = dp[0]
# dp[n] = n + dp[n-1]
for i in range(1,n):
if dp[i-1] < 0:
dp[i] = max(dp[i-1], numbers[i])
elif dp[i-1] + numbers[i] > 0 :
dp[i] = numbers[i] + dp[i-1]
if maxx < dp[i]:
maxx = dp[i]
print(maxx) | sossont/Pycharmpractice | Baekjoon Online Judge/다이나믹 프로그래밍/1912 연속합.py | 1912 연속합.py | py | 349 | python | en | code | 1 | github-code | 13 |
35448583315 | records = ["Enter uid1234 Muzi", "Enter uid4567 Prodo","Leave uid1234","Enter uid1234 Prodo","Change uid4567 Ryan"]
def solution(records):
    """Replay open-chat records and render the visible Enter/Leave feed.

    Each record is "Enter uid nick", "Leave uid" or "Change uid nick";
    the feed shows every Enter/Leave event with the user's FINAL nickname.
    """
    nickname = dict()
    events = []
    for record in records:
        parts = record.split()
        action, uid = parts[0], parts[1]
        if action == 'Enter':
            # An Enter always carries the current nickname.
            nickname[uid] = parts[2]
            events.append(('Enter', uid))
        elif action == 'Leave':
            events.append(('Leave', uid))
        else:  # action == 'Change'
            nickname[uid] = parts[2]
    feed = []
    for action, uid in events:
        if action == 'Enter':
            feed.append(nickname[uid] + "님이 들어왔습니다.")
        else:
            feed.append(nickname[uid] + "님이 나갔습니다.")
    return feed
print(solution(records)) | xjayleex/problem_solving | programmers/오픈채팅방.py | 오픈채팅방.py | py | 882 | python | en | code | 0 | github-code | 13 |
17403751467 | import time
import datetime as dt
from pytz import timezone
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import dbAdapter
# Shared DB adapter and the mutable record that scrapeVideo fills in.
da = dbAdapter.adapter()
# NOTE(review): module-level mutable state — reused across scrapes.
videoData = {
    'id': '',
    'title': '',
    'description': '',
    'videoThumbnail': '',
    'interactionCount': 0,
    'likeCount': 0,
    'dislikeCount': 0,
    'uploadDate': dt.datetime(2021, 1, 1),
    'datePublished': dt.datetime(2021, 1, 1),
    'channelID': '',
    'genre': '',
    'comments': []
}
class videoScraper:
    """Scrapes a YouTube watch page with headless Chrome and saves the
    collected metadata plus the first comments through dbAdapter."""
    def scrapeVideo(self, id):
        """Scrape the video with the given YouTube id into the module-level
        videoData dict and persist it via da.saveVideoData."""
        baseUrl = 'https://www.youtube.com/watch?v=' + id
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        driver = webdriver.Chrome('./chromedriver', options=chrome_options)
        driver.get(baseUrl)
        # Scroll to trigger lazy-loaded content, then wait for it to render.
        driver.execute_script('window.scrollTo(1, 500);')
        time.sleep(5)
        driver.execute_script('window.scrollTo(1, 10000);')
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # Video info from the page's meta tags.
        title = soup.find('meta', property='og:title')
        videoThumbnail = soup.find('meta', property="og:image")
        interactionCount = soup.find('meta', itemprop='interactionCount')
        datePublished = soup.find('meta', itemprop='datePublished')
        genre = soup.find('meta', itemprop='genre')
        channelID = soup.find('meta', itemprop='channelId')
        videoData['id'] = id
        videoData['title'] = title['content']
        videoData['videoThumbnail'] = videoThumbnail['content']
        videoData['interactionCount'] = interactionCount['content']
        videoData['genre'] = genre['content']
        videoData['channelID'] = channelID['content']
        uploadDate_meta = soup.find('meta', itemprop='uploadDate')
        uploadDate = uploadDate_meta['content'].split('-')
        # NOTE(review): datePublished is re-read from the 'uploadDate' meta
        # tag here (looks like a copy-paste slip) — confirm intent.
        datePublished_meta = soup.find('meta', itemprop='uploadDate')
        datePublished = datePublished_meta['content'].split('-')
        videoData['uploadDate'] = dt.datetime(int(uploadDate[0]), int(uploadDate[1]), int(uploadDate[2]), tzinfo=timezone('Asia/Kuala_Lumpur'))
        videoData['datePublished'] = dt.datetime(int(datePublished[0]), int(datePublished[1]), int(datePublished[2]), tzinfo=timezone('Asia/Kuala_Lumpur'))
        # Like/dislike counts parsed out of the buttons' aria-labels;
        # the hard-coded XPaths are brittle against page-layout changes.
        likeCount = driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[8]/div[2]/ytd-video-primary-info-renderer/div/div/div[3]/div/ytd-menu-renderer/div/ytd-toggle-button-renderer[1]/a/yt-icon-button/button').get_attribute('aria-label')
        dislikeCount = driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[5]/div[1]/div/div[8]/div[2]/ytd-video-primary-info-renderer/div/div/div[3]/div/ytd-menu-renderer/div/ytd-toggle-button-renderer[2]/a/yt-icon-button/button').get_attribute('aria-label')
        videoData['likeCount'] = int(likeCount.replace(',', '').replace('like this video along with ', '').replace(' other people', '').replace(' other person', '').replace('I like this', '0'))
        videoData['dislikeCount'] = int(dislikeCount.replace(',', '').replace('dislike this video along with ', '').replace(' other people', '').replace(' other person', '').replace('I dislike this', '0'))
        description_div = soup.find('div', {'id': 'description', 'slot': 'content', 'class': 'style-scope ytd-video-secondary-info-renderer'})
        description_formatted = description_div.find('yt-formatted-string')
        videoData['description'] = description_formatted.get_text()
        # Comments: scroll further down to load the comment section.
        driver.execute_script('window.scrollTo(1, 40000);')
        time.sleep(5)
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        commentThreads = soup.findAll('ytd-comment-thread-renderer', limit=5)
        for x in commentThreads:
            commentContents = x.find('yt-formatted-string', {'class': 'style-scope ytd-comment-renderer', 'id': 'content-text'})
            videoData['comments'].append(commentContents.get_text())
        driver.quit()
        da.saveVideoData(videoData)
videoData['comments'] = [] | tohlh/Summer2021-Homework-2-Scraper | videoPage.py | videoPage.py | py | 4,166 | python | en | code | 0 | github-code | 13 |
33020320416 | BASE_TEMP_ADR = 5
# VM command keywords handled by this translator.
PUSH_CMD = "push"
POP_CMD = "pop"
POSSIBLE_COMMANDS = [PUSH_CMD, POP_CMD]
# Segments whose base address lives in a dedicated pointer register.
SOLID_MEMORY_segmentS = {"local": "LCL",
                         "argument": "ARG",
                         "this": "THIS",
                         "that": "THAT"}
# Segments handled by dedicated translation logic (no base pointer).
VIRTUAL_MEMORY_segmentS = ["constant", "static", "pointer", "temp"]
# pointer 0 -> THIS, pointer 1 -> THAT.
POINTER = ["THIS", "THAT"]
def solid_memory_segment_parser(command, segment, value):
    """Translate a push/pop touching a base-pointer segment
    (local/argument/this/that) into Hack assembly lines."""
    base_symbol = SOLID_MEMORY_segmentS[segment]
    if command == PUSH_CMD:
        # addr = base + value; push RAM[addr] onto the stack.
        return [
            "\t@" + base_symbol,
            "\tD=M",
            "\t@" + value,
            "\tA=D+A",
            "\tD=M",
            "\t@SP",
            "\tM=M+1",
            "\tA=M-1",
            "\tM=D"
        ]
    if command == POP_CMD:
        # addr = base + value; stash addr at the stack top, then pop the
        # value below it into RAM[addr].
        return [
            "\t@" + base_symbol,
            "\tD=M",
            "\t@" + value,
            "\tD=D+A",
            "\t@SP",
            "\tA=M",
            "\tM=D",
            "\tA=A-1",
            "\tD=M",
            "\tA=A+1",
            "\tA=M",
            "\tM=D",
            "\t@SP",
            "\tM=M-1"
        ]
def virtual_push(symbol, register):
    """Emit Hack assembly pushing @symbol's A or M register onto the stack.

    :param symbol: A-instruction target (constant, label or register name)
    :param register: "A" to push the address/constant, "M" to push RAM[symbol]
    """
    load = ["\t@" + symbol, "\tD=" + register]
    push = ["\t@SP", "\tM=M+1", "\tA=M-1", "\tM=D"]
    return load + push
def virtual_pop(symbol):
    """Emit Hack assembly popping the top of the stack into RAM[symbol]."""
    pop_top = ["\t@SP", "\tM=M-1", "\tA=M", "\tD=M"]
    store = ["\t@" + symbol, "\tM=D"]
    return pop_top + store
def virtual_memory_segment_parser(command, segment, value, name):
    """Translate push/pop for the virtual segments
    (constant/static/pointer/temp) into Hack assembly lines.

    :param name: VM file name, used to namespace static symbols
    """
    if segment == "constant":
        # constant only ever appears with a push command
        return virtual_push(value, "A")
    if segment == "pointer":
        symbol = POINTER[int(value)]
    elif segment == "static":
        symbol = name + "." + value
    elif segment == "temp":
        symbol = 'R' + str(BASE_TEMP_ADR + int(value))
    else:
        return None
    if command == PUSH_CMD:
        return virtual_push(symbol, "M")
    if command == POP_CMD:
        return virtual_pop(symbol)
def parse(line, name):
    """Dispatch one VM memory-access line ("cmd segment value") to the
    matching translator; returns the Hack assembly lines."""
    command, segment, value = line.split(" ")
    if segment in SOLID_MEMORY_segmentS:
        return solid_memory_segment_parser(command, segment, value)
    if segment in VIRTUAL_MEMORY_segmentS:
        return virtual_memory_segment_parser(command, segment, value, name)
| damebrown/NAND_ex8 | memory_parser.py | memory_parser.py | py | 2,651 | python | en | code | 0 | github-code | 13 |
27977355893 | """Demo app using SQLAlchemy."""
from flask_debugtoolbar import DebugToolbarExtension
from flask import Flask, request, redirect, render_template
from models import db, connect_db, Pet
app = Flask(__name__)

# Database configuration: local Postgres DB, no modification tracking,
# echo generated SQL for debugging.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///pet_shop_db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True

connect_db(app)

app.debug = False
# NOTE(review): the secret key is hard-coded; move it to environment/config
# for anything beyond local development.
app.config['SECRET_KEY'] = "SECRET!"
debug = DebugToolbarExtension(app)
@app.route("/")
def list_pets():
    """List pets and show add form."""
    # All Pet rows, in database order.
    pets = Pet.query.all()
    return render_template("list.html", pets=pets)
@app.route("/users/new", methods=["POST"])
def create_user():
    """Create a pet from the submitted form and redirect to its detail page."""
    name = request.form["name"]
    species = request.form["species"]
    raw_hunger = request.form["hunger"]
    # An empty hunger field means "unspecified" rather than zero.
    hunger = int(raw_hunger) if raw_hunger else None

    pet = Pet(name=name, species=species, hunger=hunger)
    db.session.add(pet)
    db.session.commit()

    return redirect(f"/{pet.id}")
@app.route("/<int:pet_id>")
def show_pet(pet_id):
    """show details of a pet"""
    # Responds 404 when no pet with this id exists.
    pet = Pet.query.get_or_404(pet_id)
    return render_template("detail.html", pet=pet)
@app.route("/species/<species_id>")
def show_pets_by_species(species_id):
    """List all pets of the given species."""
    pets = Pet.get_by_species(species_id)
    return render_template("species.html", pets=pets, species=species_id)
| petitepirate/sqlAlchemyFundamentals | petsDemo/app.py | app.py | py | 1,413 | python | en | code | 0 | github-code | 13 |
17062296314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
def _zhima_order_property(field):
    """Build a read/write property proxying `field` to its private slot."""
    slot = "_" + field

    def _get(self):
        return getattr(self, slot)

    def _set(self, value):
        setattr(self, slot, value)

    return property(_get, _set)


class ZhimaMerchantOrderCreditCreateModel(object):
    """Request model for creating a Zhima merchant credit order.

    Every field is optional (initialized to None); only truthy fields are
    serialized by to_alipay_dict().  Nested models that expose a
    to_alipay_dict() method are serialized recursively.
    """

    # Serializable field names, in the order the SDK declares them.
    _FIELDS = (
        "amount",
        "category",
        "channel",
        "deposit",
        "from_channel",
        "item_id",
        "order_process_url",
        "out_order_no",
        "overdue_time",
        "subject",
    )

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, "_" + field, None)

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = ZhimaMerchantOrderCreditCreateModel()
        for field in ZhimaMerchantOrderCreditCreateModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o


# Attach one read/write property per serializable field.
for _field in ZhimaMerchantOrderCreditCreateModel._FIELDS:
    setattr(
        ZhimaMerchantOrderCreditCreateModel,
        _field,
        _zhima_order_property(_field),
    )
del _field
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ZhimaMerchantOrderCreditCreateModel.py | ZhimaMerchantOrderCreditCreateModel.py | py | 5,343 | python | en | code | 241 | github-code | 13 |
38131554534 | import numpy as np
# Build per-frame cluster labels from a condensed adjacency matrix via BFS.
adjacencymatrix = np.loadtxt("3adjacencymatrix.dat")
clustersperframe = np.zeros((len(adjacencymatrix), 60))
Nmono = 60  # monomers per frame
for ts in range(len(adjacencymatrix)):
    nclu = 0                         # clusters found so far in this frame
    clu = np.zeros(60, dtype=int)    # cluster id per monomer (0 = unassigned)
    todo = np.zeros(60, dtype=int)   # BFS work queue of monomer indices
    for i in range(60):
        if clu[i] == 0:
            nclu += 1
            clu[i] = nclu
            todo[0] = i
            n = 0   # index of last queued monomer
            m = 0   # index of next monomer to expand
            while n >= m:
                j = todo[m]
                for k in range(60):
                    # Map the (j, k) pair to its index in the condensed
                    # (upper-triangular, no diagonal) adjacency row.
                    # Bug fix: the original used "/", which produces a float
                    # in Python 3 and makes the numpy indexing below fail;
                    # integer division "//" is required.
                    if j <= k:
                        jp = k - 1 + ((j) * (2 * Nmono - j - 3)) // 2
                    else:
                        jp = j - 1 + ((k) * (2 * Nmono - k - 3)) // 2
                    # print ts, jp, j, k
                    # if adjacencymatrix[ts, jp] >= 3.0:
                    if adjacencymatrix[ts, jp] == 2.0 or adjacencymatrix[ts, jp] == 4.0:
                        # if adjacencymatrix[ts, jp] >= 2.0 and not adjacencymatrix[ts, jp] == 4.0:
                        if clu[k] == 0:
                            clu[k] = nclu
                            n += 1
                            todo[n] = k
                m += 1
    clustersperframe[ts] = clu
# np.savetxt("6clustersperframeevery25stepsGT3.dat", clustersperframe, fmt='%d')
np.savetxt("3clustersperframeevery25steps2and4.dat", clustersperframe, fmt='%d')
# np.savetxt("6clustersperframeevery25stepswithsidechains.dat", clustersperframe, fmt='%d')
| mccullaghlab/FFInitialAggregationOrdering | MakeTreeOfDimerClusters.py | MakeTreeOfDimerClusters.py | py | 1,388 | python | en | code | 0 | github-code | 13 |
class Solution:
    def getRow(self, rowIndex: int) -> list[int]:
        """Return row `rowIndex` (0-based) of Pascal's triangle.

        Uses O(rowIndex) extra memory by updating a single row in place,
        instead of allocating a fresh list for every intermediate row.
        """
        row = [1] * (rowIndex + 1)
        for i in range(2, rowIndex + 1):
            # Walk right-to-left so each entry still reads the previous
            # row's values.
            for j in range(i - 1, 0, -1):
                row[j] += row[j - 1]
        return row
| W00SUNGLEE/leetcode | 119-pascals-triangle-ii/119-pascals-triangle-ii.py | 119-pascals-triangle-ii.py | py | 474 | python | en | code | 0 | github-code | 13 |
73996341138 | from imutils import paths
from sklearn.cluster import DBSCAN
from imutils import build_montages
import face_recognition
import numpy as np
import argparse
import pickle
import cv2
import os
# Input image directory and cache file for the pickled face encodings.
img_path='all'
enc_path='./pkl/dsface.pkl'
def face_cfg():
    """Return face encodings for all images, using `enc_path` as a cache.

    Each entry is a dict with keys "imagePath", "box", "enc".  On a cache
    hit the list is returned as a numpy object array; on a miss the raw
    list is pickled and returned.  NOTE(review): the cached and fresh
    return types differ (ndarray vs list) - confirm callers accept both.
    """
    if os.path.exists(enc_path):
        data = pickle.loads(open(enc_path, "rb").read())
        data = np.array(data)
        return data
    imagePaths = list(paths.list_images(img_path))
    data = []
    for (i, imagePath) in enumerate(imagePaths):
        print("img {}".format(i + 1))
        image = cv2.imread(imagePath)
        # face_recognition expects RGB, OpenCV loads BGR.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(rgb,
            model='cnn')
        encodings = face_recognition.face_encodings(rgb, boxes)
        d = [{"imagePath": imagePath, "box": box, "enc": enc}
            for (box, enc) in zip(boxes, encodings)]
        data.extend(d)
    f = open(enc_path, "wb")
    f.write(pickle.dumps(data))
    f.close()
    return data
def db_clu(data):
    """Cluster face encodings with DBSCAN and save montage images.

    For each cluster label (including -1 = noise) up to 25 face crops are
    sampled, resized to 96x96 and written as a 5x5 montage to ./dbout/.
    """
    encs = [d["enc"] for d in data]
    clt = DBSCAN(metric="euclidean",eps=0.5,min_samples=2)
    clt.fit(encs)
    print(clt.labels_)
    labelIDs = np.unique(clt.labels_)
    print(labelIDs)
    # Label -1 marks noise, so only labels > -1 are distinct faces.
    numUniqueFaces = len(np.where(labelIDs > -1)[0])
    print("[INFO] # unique faces: {}".format(numUniqueFaces))
    for labelID in labelIDs:
        print("[INFO] faces for face ID: {}".format(labelID))
        idxs = np.where(clt.labels_ == labelID)[0]
        idxs = np.random.choice(idxs, size=min(25, len(idxs)),
            replace=False)
        faces = []
        for i in idxs:
            image = cv2.imread(data[i]["imagePath"])
            # Face boxes are (top, right, bottom, left).
            (top, right, bottom, left) = data[i]["box"]
            face = image[top:bottom, left:right]
            face = cv2.resize(face, (96, 96))
            faces.append(face)
        montage = build_montages(faces, (96, 96), (5, 5))[0]
        cv2.imwrite('./dbout/lab_'+str(labelID+1)+'_img.jpg', montage)
# Script entry: compute (or load cached) face encodings, then cluster them.
data=face_cfg()
db_clu(data)
| hry8310/ai | ml/same_face/dsc_face.py | dsc_face.py | py | 2,008 | python | en | code | 2 | github-code | 13 |
26621894489 | import pandas as pd
import numpy as np
import re
import os
import sys
import transformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import RobertaTokenizer, RobertaForSequenceClassification
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from transformers import T5Tokenizer, T5ForConditionalGeneration
import gradio as gr
def is_false_alarm(code_text):
    """Classify a static-analyzer finding in C code as true/false positive.

    Four fine-tuned classifiers score `code_text`; their outputs are
    combined with fixed weights (0.1/0.1/0.7/0.1) and the argmax of the
    weighted sum decides the verdict.

    Returns:
        "false positive !!" or "true positive !!".
    """
    # Strip /* ... */ and // comments, collapse repeated "\n" escapes.
    code_text = re.sub('\/\*[\S\s]*\*\/', '', code_text)
    code_text = re.sub('\/\/.*', '', code_text)
    code_text = re.sub('(\\\\n)+', '\\n', code_text)

    # Bug fix: model paths were built with hard-coded "\" separators,
    # which only works on Windows; os.path.join is portable.
    # 1. CFA-CodeBERTa-small.pt -> CodeBERTa-small-v1 finetunig model
    path = os.path.join(os.getcwd(), 'models', 'CFA-CodeBERTa-small.pt')
    tokenizer = AutoTokenizer.from_pretrained("huggingface/CodeBERTa-small-v1")
    input_ids = tokenizer.encode(
        code_text, max_length=512, truncation=True, padding='max_length')
    input_ids = torch.tensor([input_ids])
    model = RobertaForSequenceClassification.from_pretrained(
        path, num_labels=2)
    model.to('cpu')
    pred_1 = model(input_ids)[0].detach().cpu().numpy()[0]
    # model(input_ids)[0].argmax().detach().cpu().numpy().item()

    # 2. CFA-codebert-c.pt -> codebert-c finetuning model
    path = os.path.join(os.getcwd(), 'models', 'CFA-codebert-c.pt')
    tokenizer = AutoTokenizer.from_pretrained(path)
    input_ids = tokenizer(code_text, padding=True, max_length=512,
                          truncation=True, return_token_type_ids=True)['input_ids']
    input_ids = torch.tensor([input_ids])
    model = AutoModelForSequenceClassification.from_pretrained(
        path, num_labels=2)
    model.to('cpu')
    pred_2 = model(input_ids)[0].detach().cpu().numpy()[0]

    # 3. CFA-codebert-c-v2.pt -> undersampling + codebert-c finetuning model
    path = os.path.join(os.getcwd(), 'models', 'CFA-codebert-c-v2.pt')
    tokenizer = RobertaTokenizer.from_pretrained(path)
    input_ids = tokenizer(code_text, padding=True, max_length=512,
                          truncation=True, return_token_type_ids=True)['input_ids']
    input_ids = torch.tensor([input_ids])
    model = RobertaForSequenceClassification.from_pretrained(
        path, num_labels=2)
    model.to('cpu')
    # NOTE(review): unlike pred_1/pred_2 this keeps the leading batch axis
    # (no trailing [0]); the weighted sum below relies on broadcasting.
    # Confirm the asymmetry is intended.
    pred_3 = model(input_ids)[0].detach().cpu().numpy()

    # 4. codeT5 finetuning model
    path = os.path.join(os.getcwd(), 'models', 'CFA-codeT5')
    model_params = {
        # model_type: t5-base/t5-large
        "MODEL": path,
        "TRAIN_BATCH_SIZE": 8,  # training batch size
        "VALID_BATCH_SIZE": 8,  # validation batch size
        "VAL_EPOCHS": 1,  # number of validation epochs
        "MAX_SOURCE_TEXT_LENGTH": 512,  # max length of source text
        "MAX_TARGET_TEXT_LENGTH": 3,  # max length of target text
        "SEED": 2022,  # set seed for reproducibility
    }
    data = pd.DataFrame({'code': [code_text]})
    pred_4 = T5Trainer(
        dataframe=data,
        source_text="code",
        model_params=model_params
    )
    pred_4 = int(pred_4[0])

    # Weighted ensemble of the four classifiers.
    tot_result = (pred_1 * 0.1 + pred_2 * 0.1 +
                  pred_3 * 0.7 + pred_4 * 0.1).argmax()
    if tot_result == 0:
        return "false positive !!"
    else:
        return "true positive !!"
# codeT5
class YourDataSetClass(Dataset):
    """Torch Dataset over a dataframe column of source-code strings.

    Each item is the tokenized (id + attention mask) representation of one
    code snippet, truncated/padded to `source_len` tokens.
    """

    def __init__(
            self, dataframe, tokenizer, source_len, source_text):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.source_len = source_len
        # self.summ_len = target_len
        # self.target_text = self.data[target_text]
        self.source_text = self.data[source_text]

    def __len__(self):
        return len(self.source_text)

    def __getitem__(self, index):
        source_text = str(self.source_text[index])

        # Collapse all whitespace runs to single spaces before tokenizing.
        source_text = " ".join(source_text.split())

        source = self.tokenizer.batch_encode_plus(
            [source_text],
            max_length=self.source_len,
            pad_to_max_length=True,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )

        source_ids = source["input_ids"].squeeze()
        source_mask = source["attention_mask"].squeeze()

        return {
            "source_ids": source_ids.to(dtype=torch.long),
            "source_mask": source_mask.to(dtype=torch.long),
        }
def validate(epoch, tokenizer, model, device, loader):
    """Generate predictions for every batch in `loader`.

    Decoded outputs are validated individually: anything that is not
    exactly "0" or "1" is coerced to "0", so the caller always receives a
    list of binary labels.
    """
    model.eval()
    predictions = []
    with torch.no_grad():
        for _, data in enumerate(loader, 0):
            ids = data['source_ids'].to(device, dtype=torch.long)
            mask = data['source_mask'].to(device, dtype=torch.long)

            generated_ids = model.generate(
                input_ids=ids,
                attention_mask=mask,
                max_length=150,
                num_beams=2,
                repetition_penalty=2.5,
                length_penalty=1.0,
                early_stopping=True
            )
            preds = [tokenizer.decode(
                g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
            # Bug fix: the original tested the whole *list* against "0"/"1"
            # with a bitwise OR - that condition is always true, so every
            # batch was replaced by the single string "0".  Validate each
            # decoded string on its own instead.
            preds = [p if p in ("0", "1") else "0" for p in preds]
            predictions.extend(preds)
    return predictions
def T5Trainer(dataframe, source_text, model_params, step="test",):
    """Run CodeT5 inference over `dataframe[source_text]`, return labels.

    Despite the name, only the validation/inference path is implemented;
    `step` is currently unused.  Returns the prediction list from the
    last VAL_EPOCHS iteration.
    """
    torch.manual_seed(model_params["SEED"])  # pytorch random seed
    np.random.seed(model_params["SEED"])  # numpy random seed
    torch.backends.cudnn.deterministic = True

    tokenizer = T5Tokenizer.from_pretrained(model_params["MODEL"])
    model = T5ForConditionalGeneration.from_pretrained(model_params["MODEL"])
    model = model.to('cpu')

    # Keep only the source column.
    dataframe = dataframe[[source_text]]

    val_dataset = dataframe
    val_set = YourDataSetClass(
        val_dataset, tokenizer, model_params["MAX_SOURCE_TEXT_LENGTH"], source_text)
    val_params = {
        'batch_size': model_params["VALID_BATCH_SIZE"],
        'shuffle': False,
        'num_workers': 0
    }
    val_loader = DataLoader(val_set, **val_params)
    for epoch in range(model_params["VAL_EPOCHS"]):
        predictions = validate(epoch, tokenizer, model, 'cpu', val_loader)
    return predictions
#################################################################################
'''demo = gr.Interface(
fn = greet,
inputs = "text",
outputs= "number")
demo.launch(share=True)
'''
# Gradio UI: one code textbox in, one classification verdict out.
with gr.Blocks() as demo1:
    gr.Markdown(
        """
    <h1 align="center">
    False-Alarm-Detector
    </h1>
    """)
    gr.Markdown(
        """
    정적 분석기를 통해 오류라고 보고된 C언어 코드의 함수를 입력하면,
    오류가 True-positive 인지 False-positive 인지 분류 해 주는 프로그램입니다.
    """)
    '''
    with gr.Accordion(label='모델에 대한 설명 ( 여기를 클릭 하시오. )',open=False):
        gr.Markdown(
        """
        총 3개의 모델을 사용하였다.
        1. codeBERTa-small-v1
            - codeBERTa-small-v1 설명
        2. codeBERT - C
            - codeBERT - C 설명
        3. codeT5
            - codeT5 설명
        """
        )
    '''
    with gr.Row():
        with gr.Column():
            inputs = gr.Textbox(
                lines=10, placeholder="코드를 입력하시오.", label='Code')
            with gr.Row():
                btn = gr.Button("결과 출력")
        with gr.Column():
            output = gr.Text(label='Result')
    # Clicking the button runs the classifier on the textbox contents.
    btn.click(fn=is_false_alarm, inputs=inputs, outputs=output)

if __name__ == "__main__":
    demo1.launch()
| hyomin14/Classifying-false-alarm | app.py | app.py | py | 7,486 | python | en | code | 0 | github-code | 13 |
19121052760 | import torch
from torch import nn
from torch.nn import functional as F
class PositionWiseFeedForward(nn.Module):
    """Transformer position-wise feed-forward sublayer.

    Applies a two-layer MLP (GELU activation, dropout after each stage)
    independently to every position, then adds a residual connection and
    layer normalization.
    """

    def __init__(self, config) -> None:
        super(PositionWiseFeedForward, self).__init__()
        model_dim = config.D_MODEL
        hidden_dim = config.D_FF
        drop_rate = config.DROPOUT

        # Expansion / projection pair (d_model -> d_ff -> d_model).
        self.fc1 = nn.Linear(model_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, model_dim)
        self.dropout_1 = nn.Dropout(p=drop_rate)
        self.dropout_2 = nn.Dropout(p=drop_rate)
        self.layer_norm = nn.LayerNorm(model_dim)

    def forward(self, input) -> torch.Tensor:
        hidden = self.dropout_1(F.gelu(self.fc1(input)))
        projected = self.dropout_2(self.fc2(hidden))
        # Residual connection followed by layer normalization.
        return self.layer_norm(input + projected)
17057561914 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
def _red_apply_property(field):
    """Build a read/write property proxying `field` to its private slot."""
    slot = "_" + field

    def _get(self):
        return getattr(self, slot)

    def _set(self, value):
        setattr(self, slot, value)

    return property(_get, _set)


class OutputInvoiceRedApplyVO(object):
    """Red-invoice (credit-note) application against a blue invoice.

    Every field is optional (initialized to None); only truthy fields are
    serialized by to_alipay_dict().  Nested models that expose a
    to_alipay_dict() method are serialized recursively.
    """

    # Serializable field names, in the order the SDK declares them.
    _FIELDS = (
        "blue_invoice_code",
        "blue_invoice_no",
        "invoice_material",
        "invoice_type",
        "operator",
        "red_amt",
        "red_invalid_reason_type",
        "red_notice_no",
        "red_reason",
    )

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, "_" + field, None)

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = OutputInvoiceRedApplyVO()
        for field in OutputInvoiceRedApplyVO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o


# Attach one read/write property per serializable field.
for _red_field in OutputInvoiceRedApplyVO._FIELDS:
    setattr(
        OutputInvoiceRedApplyVO,
        _red_field,
        _red_apply_property(_red_field),
    )
del _red_field
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/OutputInvoiceRedApplyVO.py | OutputInvoiceRedApplyVO.py | py | 5,360 | python | en | code | 241 | github-code | 13 |
21831128126 | # python debug
import pdb
def doubleval(argsum, val):
    # NOTE(review): argsum is immediately overwritten, so the function just
    # returns val - presumably intentional, as a quirk to step through with
    # pdb; confirm before "fixing".
    argsum = 0
    newval = argsum + val
    return newval

# Stop here and inspect the loop below interactively with pdb.
pdb.set_trace() # break point

values = [1,2,3,4,5]
mysum = 0
for val in values:
    # mysum is reset every iteration, so each print shows only val.
    mysum = 0
    mysum = doubleval(mysum, val)
    print(mysum)
# c or continue to go to next step of loop
# n - next line - keep the current context, not enetering e.g. functions that you calls
# s - next step - with changing contecst
# l - list show the code and your position
# h - help
# quit
| cepheus87/pythonStuff | pdbExample.py | pdbExample.py | py | 527 | python | en | code | 0 | github-code | 13 |
40289098625 | # 1.Write a program to loop through a list of numbers and add +2 to every value to elements in list
# Bug fix: the original named this variable "list", shadowing the builtin.
numbers = [2, 3, 5, 6, 9, 6, 10]
New_list = [x + 2 for x in numbers]
print(New_list)
# 2.Write a program to get the below pattern
rows = int(input("Enter the number of rows: "))
# Row i prints the countdown rows-i, rows-i-1, ..., 1.
for i in range(0, rows + 1):
    # inner loop for decrement in i values
    for j in range(rows - i, 0, -1):
        print(j, end=' ')
    print()
# Python Program to Print the Fibonacci sequence
nterms = int(input("How many terms you want? "))
# first two terms
n1 = 0
n2 = 1
count = 2
# check if the number of terms is valid
if nterms <= 0:
    # typo fix: "Plese" -> "Please"
    print("Please enter a positive integer")
elif nterms == 1:
    print("Fibonacci sequence:")
    print(n1)
else:
    print("Fibonacci sequence:")
    print(n1,",",n2,end=', ')
    # Two terms are already printed; generate the remaining nterms - 2.
    while count < nterms:
        nth = n1 + n2
        print(nth,end=' , ')
        # update values
        n1 = n2
        n2 = nth
        count += 1
# • Explain Armstrong number and write a code with a function
# An Armstrong number equals the sum of its digits each raised to the
# power of the digit count (e.g. 153 = 1^3 + 5^3 + 3^3).
from math import *
number = int(input("Enter the number : "))
result = 0
n = 0
temp = number;
# First pass: count the digits of the number.
while (temp != 0):
    temp =int(temp / 10)
    n = n + 1
#Checking if the number is armstrong
# Second pass: sum each digit raised to the digit count.
temp = number
while (temp != 0):
    remainder = temp % 10
    result = result + pow(remainder, n)
    temp = int(temp/10)
if(result == number):
    print("Armstrong number")
else:
    print("Not an Armstrong number")
# Write a program to print the multiplication table of 9
num = int(input(" Enter the number : "))
# using for loop to iterate multiplication 10 times
print("Multiplication Table of : ")
for i in range(1,11):
    print(num,'x',i,'=',num*i)
# • Check if a number is negative, zero, or positive
num = int(input("Enter a number:"))
if num > 0:
    # typo fix: "Number us positive" -> "Number is positive"
    print("Number is positive")
elif num == 0:
    # Bug fix: the original reported 0 as negative.
    print("Number is zero")
else:
    print("Number is negative")
# Write a program to convert the number of days to ages
days = int(input("Enter number of days:"))
# Approximate age in years; ignores leap days.
years = days/365
print(years)
# solve trigonometry problem using math function WAP to solve using math function
import math
# NOTE: math.sin/cos/tan take the angle in radians, not degrees.
x = int(input("Enter value"))
print(math.sin(x),"sin")
print(math.cos(x),"cos")
print(math.tan(x),"tan")
# Create a calculator on a code level using if conditions.
a = int(input("num1 "))
b = int(input("num2 "))
c = input("Enter operator")
if c == '+':
    print("Addition of two numbers", a + b)
elif c == '-':
    print("Subtraction of two numbers", a - b)
elif c == '*':
    print("Multiplication of two numbers", a * b)
elif c == '/':
    # Bug fix: only the divisor matters - 0 / b is a valid division, so
    # the original "numerator cannot be zero" branch (and the unreachable
    # a==0 and b==0 branch) are gone.  Also fixes the "numbes" typo.
    if b == 0:
        print("Zero division error")
    else:
        print("Division of two numbers", a / b)
else:
    print("not found")
| abhishekkumar116/python-internship-code-expreance | Day9_solution.py | Day9_solution.py | py | 2,834 | python | en | code | 0 | github-code | 13 |
12703191666 | import argparse
import warnings
import cv2
import torch
from matplotlib import pyplot as plt
from tqdm import tqdm
from detector import (
distance_calculation,
height_calculation,
input_handling,
output_creation,
pose_estimation,
synchrony_detection,
visualization,
)
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.load_state import load_state
warnings.filterwarnings("ignore")
def run(
    video="",
    show_livestream=True,
    save_livestream=False,
    save_output_table=False,
    save_camera_input=False,
    net="",
    frame_skip=1,
):
    """Run the pose-estimation pipeline over a video file or camera stream.

    Args:
        video: path to a video file, or a camera id string such as "0".
        show_livestream: display the annotated stream in a window
            ('esc' quits, 'p' toggles pause).
        save_livestream: write the annotated stream to output_video.avi.
        save_output_table: create a CSV output handler.
        save_camera_input: write the raw input stream to input_video.avi.
        net: loaded pose-estimation network (required).
        frame_skip: analyze only every i-th frame (positive integer);
            ignored for webcam input ("0").

    Raises:
        ValueError: when video/net are missing or frame_skip is invalid.
    """
    plt.ion()
    # Check arguments
    if video == "":
        raise ValueError("--video has to be provided")
    if net == "":
        raise ValueError("--checkpoint-path has to be provided")
    if frame_skip < 1 or not isinstance(frame_skip, int):
        raise ValueError("--frame-skip needs to be a positive integer")
    print("system setup starting...")
    # Setup input handling
    frame_provider = input_handling.VideoReader(video)
    # Setup pose estimation
    height_size = 256
    stride = 8
    upsample_ratio = 4
    previous_poses = []
    track = 1
    smooth = 1
    pose_estimator = pose_estimation.PoseEstimator(
        net, height_size, stride, upsample_ratio
    )
    # Setup output generation
    delay = 1
    if save_livestream:
        output_handler_video = output_creation.OutputHandler(
            output_type="video",
            file_name="output_video.avi",
            fps=frame_provider.fps,
        )
    if save_camera_input:
        output_handler_video_raw = output_creation.OutputHandler(
            output_type="video",
            file_name="input_video.avi",
            fps=frame_provider.fps,
        )
    # NOTE(review): output_handler_table is created and released but its
    # build_outputs is never called in this loop - the CSV stays empty;
    # confirm whether per-frame rows were meant to be written.
    if save_output_table:
        output_handler_table = output_creation.OutputHandler(
            output_type="table", file_name="output_table.csv", fps=None
        )
    # Setup visualization
    visualizer = visualization.Visualizer(trace_len=100)
    print("Setup finished.")
    # Frame analysis
    print(
        "Starting frame analysis. "
        "To interrupt analysis, press 'esc' in the livestream window."
    )
    for frame_idx, img in enumerate(
        tqdm(
            frame_provider,
            desc="Frame processing",
            total=frame_provider.total_frames,
        )
    ):
        # Mirror the frame horizontally.
        img = cv2.flip(img, 1)
        # For non-webcam input, skip frames if desired
        if video != "0" and frame_idx % frame_skip != 0:
            continue
        # Attach input frame to output video
        if save_camera_input:
            output_handler_video_raw.build_outputs(img)
        # Estimate poses
        all_poses = pose_estimator.img_to_poses(img)
        # Track poses between frames
        if track:
            pose_estimation.track_poses(
                previous_poses, all_poses, smooth=smooth
            )
            previous_poses = all_poses
        # Build visualization
        visualizer.update(
            img,
            all_poses,
            frame_idx
        )
        visualizer.create_plot()
        visualizer.counter_overlay()
        if show_livestream:
            cv2.imshow("diid2", visualizer.img)
            key = cv2.waitKey(delay)
            if key == 27:  # esc quits
                break
            elif key == 112:  # 'p' toggles pause (waitKey(0) blocks)
                if delay == 1:
                    delay = 0
                else:
                    delay = 1
        # Attach illustrated frame to output video
        if save_livestream:
            output_handler_video.build_outputs(visualizer.img)
    # Iteration over frames finished (No frames left or keyboard interrupt)
    print("Frame analysis stopped. Closing files. Releasing outputs.")
    # Release resources
    if save_livestream:
        output_handler_video.release_outputs()
    if save_camera_input:
        output_handler_video_raw.release_outputs()
    if save_output_table:
        output_handler_table.release_outputs()
    if show_livestream:
        cv2.destroyAllWindows()
cv2.destroyAllWindows()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        "--video", type=str, default="", help="path to video file or camera id"
    )
    # NOTE(review): these boolean options have no type/action, so any value
    # passed on the command line arrives as a (truthy) string; and
    # --frame-skip lacks type=int, so a CLI value would fail run()'s
    # isinstance check.  Consider action="store_true" / type=int.
    parser.add_argument(
        "--show-livestream", default=True, help="show detection on stream"
    )
    parser.add_argument(
        "--save-livestream", default=False, help="save illustrated input video"
    )
    parser.add_argument(
        "--save-output-table", default=False, help="save entanglement as csv"
    )
    parser.add_argument(
        "--save-camera-input", default=False, help="save input from camera"
    )
    parser.add_argument(
        "--frame-skip", default=1, help="only every i-th frame gets analyzed"
    )
    args = parser.parse_args()
    #if torch.backends.mps.is_available():
    #    print("M1 GPU avail")
    #    torch.device("mps")
    # Setup pose estimation with OpenPose Lightweight
    net = PoseEstimationWithMobileNet()
    checkpoint = torch.load(
        "models/checkpoint_iter_370000.pth", map_location="cpu"
    )
    load_state(net, checkpoint)
    net = net.eval()
    #net.to(torch.device("mps"))
    run(
        args.video,
        args.show_livestream,
        args.save_livestream,
        args.save_output_table,
        args.save_camera_input,
        net,
        args.frame_skip
    )
    exit()
| StrgFJojo/diid2 | main.py | main.py | py | 5,425 | python | en | code | 0 | github-code | 13 |
383217623 | from __future__ import absolute_import, unicode_literals
import logging
import os
from abc import ABCMeta
from six import add_metaclass
from virtualenv.info import fs_supports_symlink
from virtualenv.util.path import Path
from virtualenv.util.zipapp import ensure_file_on_disk
from ..creator import Creator, CreatorMeta
class ViaGlobalRefMeta(CreatorMeta):
    """Creator metadata recording why copy/symlink installs are unavailable."""

    def __init__(self):
        super(ViaGlobalRefMeta, self).__init__()
        self.copy_error = None
        self.symlink_error = None
        if not fs_supports_symlink():
            # Bug fix: this previously assigned to ``self.symlink``, which
            # nothing reads - ``can_symlink`` checks ``symlink_error`` - so
            # symlink support was reported even on filesystems without it.
            self.symlink_error = "the filesystem does not supports symlink"

    @property
    def can_copy(self):
        """True when no copy error has been recorded."""
        return not self.copy_error

    @property
    def can_symlink(self):
        """True when no symlink error has been recorded."""
        return not self.symlink_error
@add_metaclass(ABCMeta)
class ViaGlobalRefApi(Creator):
    """Base creator for virtualenvs that reference the global interpreter."""

    def __init__(self, options, interpreter):
        super(ViaGlobalRefApi, self).__init__(options, interpreter)
        copies = getattr(options, "copies", False)
        symlinks = getattr(options, "symlinks", False)
        # Symlinks win only when explicitly requested and copies are not.
        self.symlinks = symlinks is True and copies is False
        self.enable_system_site_package = options.system_site

    @classmethod
    def add_parser_arguments(cls, parser, interpreter, meta, app_data):
        """Register CLI flags; --symlinks/--copies only when supported."""
        super(ViaGlobalRefApi, cls).add_parser_arguments(parser, interpreter, meta, app_data)
        parser.add_argument(
            "--system-site-packages",
            default=False,
            action="store_true",
            dest="system_site",
            help="give the virtual environment access to the system site-packages dir",
        )
        group = parser.add_mutually_exclusive_group()
        if meta.can_symlink:
            group.add_argument(
                "--symlinks",
                default=True,
                action="store_true",
                dest="symlinks",
                help="try to use symlinks rather than copies, when symlinks are not the default for the platform",
            )
        if meta.can_copy:
            group.add_argument(
                "--copies",
                "--always-copy",
                default=not meta.can_symlink,
                action="store_true",
                dest="copies",
                help="try to use copies rather than symlinks, even when symlinks are the default for the platform",
            )

    def create(self):
        self.install_patch()

    def install_patch(self):
        """Install the _virtualenv import hook into site-packages."""
        text = self.env_patch_text()
        if text:
            pth = self.purelib / "_virtualenv.pth"
            logging.debug("create virtualenv import hook file %s", pth)
            pth.write_text("import _virtualenv")
            dest_path = self.purelib / "_virtualenv.py"
            logging.debug("create %s", dest_path)
            dest_path.write_text(text)

    def env_patch_text(self):
        """Patch the distutils package to not be derailed by its configuration files"""
        with ensure_file_on_disk(Path(__file__).parent / "_virtualenv.py", self.app_data) as resolved_path:
            text = resolved_path.read_text()
            return text.replace('"__SCRIPT_DIR__"', repr(os.path.relpath(str(self.script_dir), str(self.purelib))))

    def _args(self):
        return super(ViaGlobalRefApi, self)._args() + [("global", self.enable_system_site_package)]

    def set_pyenv_cfg(self):
        """Record system-site-packages access in pyvenv.cfg."""
        super(ViaGlobalRefApi, self).set_pyenv_cfg()
        self.pyenv_cfg["include-system-site-packages"] = "true" if self.enable_system_site_package else "false"
| alexnathanson/solar-protocol | backend/createHTML/venv-bk/lib/python3.7/site-packages/virtualenv/create/via_global_ref/api.py | api.py | py | 3,500 | python | en | code | 207 | github-code | 13 |
25902876216 | import os
from datetime import datetime
from web2qgis.tinycss.color3 import parse_color_string
from PyQt5.QtCore import QDir
def getTempDir():
    """Create (if needed) and return a unique timestamped temp directory."""
    # Bug fix: ``unicode`` does not exist in Python 3 (this PyQt5-based
    # module targets Python 3), so str() is used instead.
    tempDir = os.path.join(
        str(QDir.tempPath()),
        'web2qgis',
        datetime.now().strftime("%Y_%m_%d-%H_%M_%S_%f"))
    if not QDir(tempDir).exists():
        QDir().mkpath(tempDir)
    return tempDir
def getScript(scriptFolder, scriptFilename):
    """Return the full text of `scriptFilename` located in `scriptFolder`."""
    full_path = os.path.join(scriptFolder, scriptFilename)
    with open(full_path, 'r') as handle:
        return handle.read()
def getRGBA(color):
    """Convert a CSS color string to a "r,g,b,a" string of 0-255 ints."""
    red, green, blue, alpha = parse_color_string(color)
    channels = [int(component * 255) for component in (red, green, blue, alpha)]
    return ",".join(str(channel) for channel in channels)
20645607564 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
from collections import Counter
from tqdm import tqdm
from sentencepiece import SentencePieceTrainer
import dill
import sentencepiece as sp
# ==================================================
class SentencepieceTokenizer:
    """Wrapper class for sentencepiece tokenizer

    This class supports methods for NLP task based on google sentencepiece
    """

    def __init__(
        self,
        model_path: str = None,
        pad_token: str = "[PAD]",
        cls_token: str = "[CLS]",
        sep_token: str = "[SEP]",
        unknown_token: str = "<unk>",
        start_token: str = "<s>",
        end_token: str = "</s>",
    ):
        """Initialize SentencepieceTokenizer with special tokens

        Args:
            model_path (str): path to a trained sentencepiece model file
            pad_token (str): pad token for padding of sentence
            cls_token (str): cls token
            sep_token (str): separate token for set separation of sentence
            unknown_token (str): unknown token
            start_token (str): start token for set start of sentence
            end_token (str): end token for set end of sentence
        """
        self._unknown_token = unknown_token
        self._pad_token = pad_token
        self._cls_token = cls_token
        self._start_token = start_token
        self._sep_token = sep_token
        self._end_token = end_token
        self.tok = sp.SentencePieceProcessor()
        # NOTE(review): except_idx is computed before Load() below, so when
        # model_path is given the ids come from an *empty* processor --
        # confirm this ordering is intended.
        self.except_idx = self.get_except_idx()
        self.model_name = ""
        if model_path is not None:
            self.tok.Load(model_path)

    def get_except_idx(self):
        """Return the ids of all special tokens (skipped when decoding).

        Bug fix: the original appended the unknown-token id twice; the
        duplicate was harmless for membership tests but is dropped here.
        """
        special_tokens = (
            self._unknown_token,
            self._pad_token,
            self._cls_token,
            self._start_token,
            self._sep_token,
            self._end_token,
        )
        return [self.token_to_idx(token) for token in special_tokens]

    def tokenize(self, text, to_id=True):
        """Tokenize *text* into piece ids (to_id=True) or piece strings."""
        if to_id:
            return self.tok.EncodeAsIds(text)
        else:
            return self.tok.EncodeAsPieces(text)

    def token_to_text(self, token):
        """Decode a list of piece strings back into plain text."""
        return self.tok.decode_pieces(token)

    def idx_to_token(self, idx):
        """Map a single piece id to its piece string."""
        return self.tok.IdToPiece(idx)

    def token_to_idx(self, token):
        """Map a single piece string to its id."""
        return self.tok.PieceToId(token)

    def idx_to_text(self, idx):
        """Decode a list of ids to text, skipping special-token ids."""
        text = list()
        for i in idx:
            if i not in self.except_idx:
                text.append(self.idx_to_token(i))
        return self.tok.DecodePieces(text)

    def text_to_idx(
        self,
        text: str,
        max_seq_len: int = None,
        use_pad: bool = False,
        cls_token: bool = False,
        start_token: bool = False,
        end_token: bool = False,
    ):
        """
        convert text to token indices

        Args:
            text(str): target text
            max_seq_len(int): max sequence length
            use_pad(bool): whether use padding(default: False)
            cls_token(bool): whether to prepend the CLS token(default: False)
            start_token(bool): whether use start_token(default: False)
            end_token(bool): whether use end_token before padding(default: False)
        Return:
            token indices(list), truncated to max_seq_len
        """
        idx = self.tokenize(text)
        if max_seq_len is None:
            max_seq_len = len(idx)
        if cls_token:
            idx = [self.token_to_idx(self._cls_token)] + idx
        if start_token:
            idx = [self.token_to_idx(self._start_token)] + idx
        if end_token:
            idx = idx + [self.token_to_idx(self._end_token)]
        if use_pad:
            # NOTE(review): with start/end tokens this pads one element short
            # of max_seq_len -- confirm the off-by-one is intentional.
            if start_token or end_token:
                idx += [self.token_to_idx(self._pad_token)] * (
                    max_seq_len - (len(idx) + 1)
                )
            else:
                idx += [self.token_to_idx(self._pad_token)] * (max_seq_len - len(idx))
        return idx[:max_seq_len]

    def train(
        self,
        input_path: list,
        model_prefix: str,
        character_coverage=0.9995,
        vocab_size=None,
        model_type: str = "bpe",
        control_symbols: list = ["[PAD]", "[SEP]", "[MASK]", "[CLS]", "<s>", "</s>"],
    ):
        """
        Function for train tokenizer

        Args:
            input_path (list): list of training-corpus file paths
            model_prefix (str): output prefix for the .model/.vocab files
            character_coverage (float): coverage target passed to sentencepiece
            vocab_size (int): vocabulary size, used only when
                character_coverage is explicitly None (it has a non-None
                default, so vocab_size is otherwise ignored)
            model_type (str): sentencepiece model type, e.g. "bpe"
            control_symbols (list): special tokens reserved in the vocab
        """
        if character_coverage is None and vocab_size is None:
            print("at least character_coverage or vocab_size should be given!")
        assert character_coverage or vocab_size
        # Bug fix: the original first assigned a dead, mistyped variable
        # 'coverage_conditions' that was never read.
        if character_coverage is not None:
            coverage_condition = f" --character_coverage={str(character_coverage)} "
        else:
            coverage_condition = f" --vocab_size={str(vocab_size)} "
        symbol_list = ""
        for i in control_symbols:
            symbol_list += i + ","
        input_list = ""
        for i in input_path:
            input_list += i + ","
        args = (
            "--input={} "
            "--model_prefix={} "
            "--model_type={} "
            "--control_symbols={} "
            "--bos_id=5 --eos_id=6 --unk_id=1".format(
                input_list, model_prefix, model_type, symbol_list
            )
        )
        args += coverage_condition
        print(args)
        SentencePieceTrainer.Train(args)

    def __repr__(self):
        unk = '"{}"'.format(self._unknown_token) if self._unknown_token else "None"
        return "Vocab(size={}, unk={}, pad={})".format(
            len(self.tok), unk, self._pad_token
        )

    def __len__(self):
        return len(self.tok)
| seujung/electra_pytorch_kr | tokenizer.py | tokenizer.py | py | 6,073 | python | en | code | 1 | github-code | 13 |
19623020290 | # -*- coding: utf-8 -*-
class elevator:
def processData(self, fileName):
count = 0
fp = open(fileName, 'r')
lines = fp.readlines()
if len(lines) > 2:
maxCil = int(lines[0])
curCil = int(lines[1])
entries = []
for line in lines[2:]:
entries.append(int(line))
# Direita para esquerda
maxRead = max(entries)
for i in range(curCil - 1, -1, -1):
count += 1
if i in entries:
entries.remove(i)
for i in range(1, maxRead + 1):
count += 1
if i in entries:
entries.remove(i)
fp.close()
print('SCAN ', count)
| streeg/pseudo-os | inout/elevator.py | elevator.py | py | 796 | python | en | code | 0 | github-code | 13 |
36492502562 | import threading
from homealone import *
# import whichever gpio library is installed
try:
import RPi.GPIO as gpio
gpioLibrary = "RPi.GPIO"
except ImportError:
import RPIO as gpio
gpioLibrary = "RPIO"
# dictionary of MCP23017Interfaces indexed by their interrupt pins
gpioInterfaces = {}
# initial interrupt callback routine that is called when an interrupt pin goes low
def interruptCallback(pin, value=1):
    """Dispatch a GPIO interrupt to the MCP23017Interface registered for *pin*."""
    debug('debugGPIO', "interruptCallback", "pin:", pin, "value:", value)
    iface = gpioInterfaces.get(pin)
    if iface is None:
        # the interrupt occurred on a pin not associated with a MCP23017Interface
        log("interruptCallback", "unknown interrupt", "pin:", pin, "value:", value, "gpioInterfaces:", gpioInterfaces)
    else:
        # wake the interface's interrupt-handler thread
        iface.interruptEvent.set()
# Interface to GPIO via MCP23017 I2C I/O expander
class MCP23017Interface(Interface):
    """Homealone Interface to GPIO through an MCP23017 I2C I/O expander.

    Each instance drives one 8-bit bank (A=0 or B=1) of the chip. When no
    underlying I2C interface is supplied it falls back to direct,
    output-only Raspberry Pi GPIO pins.
    """
    # MCP23017 I2C I/O expander register addresses (bank A; bank B adds 1)
    IODIR = 0x00    # I/O direction
    IPOL = 0x02     # input polarity
    GPINTEN = 0x04  # interrupt on change
    DEFVAL = 0x06   # default value
    INTCON = 0x08   # interrupt control
    IOCON = 0x0a    # configuration
    GPPU = 0x0c     # pull up resistor
    INTF = 0x0e     # interrupt flag
    INTCAP = 0x10   # interrupt capture
    GPIO = 0x12     # I/O data
    OLAT = 0x14     # output latch
    # direct GPIO
    gpioPins = [12, 16, 18, 22, 15, 13, 11, 7]  # A/B
    # 32, 36, 38, 40, 37, 35, 33, 31] # B+

    def __init__(self, name, interface=None, event=None,
                 addr=0x20,          # I2C address of MCP23017
                 bank=0,             # bank within MCP23017 A=0, B=1
                 inOut=0x00,         # I/O direction out=0, in=1
                 interruptPin=17,    # RPIO pin used for interrupt (BCM number)
                 config=None):       # additional (register, value) configuration pairs
        """Register the interface and (if I2C-backed) hook up its interrupt pin.

        Bug fix: config previously defaulted to a mutable [] that start()
        mutated via insert(), so instances created without an explicit
        config shared and grew a single default list across constructions.
        """
        Interface.__init__(self, name, interface=interface, event=event)
        global gpioInterfaces
        self.name = name
        if interface:
            self.addr = addr
            self.bank = bank
            self.inOut = inOut
            self.interruptPin = interruptPin+self.bank  # offset pin with bank
            # copy so start() cannot mutate a caller's list (or a shared default)
            self.config = list(config) if config is not None else []
            self.state = 0x00
            # register for interrupt dispatch by interruptCallback()
            gpioInterfaces[self.interruptPin] = self
            self.interruptEvent = threading.Event()
        else:
            self.interface = None
            self.bank = 0

    def start(self):
        """Configure the expander (or raw pins) and start interrupt handling."""
        debug('debugGPIO', self.name, "using GPIO library", gpioLibrary)
        gpio.setwarnings(False)
        if self.interface:
            gpio.setmode(gpio.BCM)
            # configure the MCP23017
            self.config.insert(0, (MCP23017Interface.IODIR, self.inOut))    # I/O direction
            self.config.insert(1, (MCP23017Interface.GPINTEN, self.inOut))  # enable interrupts for inputs
            self.config.insert(2, (MCP23017Interface.GPPU, self.inOut))     # pull up resistors on inputs
            self.config.insert(3, (MCP23017Interface.IOCON, 0x04))          # interrupt pins are open drain
            # write the configuration
            for config in self.config:
                if config[0] != MCP23017Interface.IOCON:
                    reg = config[0]+self.bank  # offset register with bank
                else:  # except for IOCON
                    reg = config[0]
                debug('debugGPIO', self.name, "start", "addr: 0x%02x"%self.addr, "reg: 0x%02x"%reg, "value: 0x%02x"%config[1])
                self.interface.write((self.addr, reg), config[1])
            # get the current state
            self.readState()
            # set up the interrupt handling
            if gpioLibrary == "RPIO":
                gpio.add_interrupt_callback(self.interruptPin, interruptCallback, edge="falling", pull_up_down=gpio.PUD_UP)
                gpio.wait_for_interrupts(threaded=True)
            elif gpioLibrary == "RPi.GPIO":
                gpio.setup(self.interruptPin, gpio.IN, pull_up_down=gpio.PUD_UP)
                gpio.add_event_detect(self.interruptPin, gpio.FALLING, callback=interruptCallback)
                startThread(self.name, self.interrupt)
        else:   # direct only supports output - FIXME
            gpio.setmode(gpio.BOARD)
            for pin in MCP23017Interface.gpioPins:
                debug('debugGPIO', self.name, "setup", pin, gpio.OUT)
                gpio.setup(pin, gpio.OUT)
                debug('debugGPIO', self.name, "write", pin, 0)
                gpio.output(pin, 0)

    # interrupt handler thread for this interface
    def interrupt(self):
        """Wait for interruptEvent, then notify sensors whose input bits changed."""
        debug('debugGPIO', self.name, "starting interrupt thread")
        self.lastState = self.interface.read((self.addr, MCP23017Interface.GPIO+self.bank))
        debug('debugGPIO', self.name, "read    ", "addr: 0x%02x"%self.addr, "reg: 0x%02x"%(MCP23017Interface.GPIO+self.bank), "value: 0x%02x"%self.lastState)
        self.interruptEvent.clear()
        while True:
            self.interruptEvent.wait()
            self.interruptEvent.clear()
            # intFlags = self.interface.read((self.addr, MCP23017Interface.INTF+self.bank))
            self.state = self.interface.read((self.addr, MCP23017Interface.INTCAP+self.bank))
            debug('debugGPIO', self.name, "read    ", "addr: 0x%02x"%self.addr, "reg: 0x%02x"%(MCP23017Interface.INTCAP+self.bank), "value: 0x%02x"%self.state)
            # because INTF register isn't reliable, compare current state to previous for input pins
            intFlags = (self.state ^ self.lastState) & self.inOut
            self.lastState = self.state
            debug('debugGPIO', self.name, "int     ", "addr: 0x%02x"%self.addr, "reg: 0x%02x"%(MCP23017Interface.INTF+self.bank), "intFlags: 0x%02x"%intFlags)
            for i in range(8):
                if (intFlags >> i) & 0x01:
                    try:
                        sensor = self.sensorAddrs[i]
                        state = (self.state >> i) & 0x01
                        if sensor.event:    # don't notify polled sensors
                            debug('debugGPIO', self.name, "notifying", sensor.name, state)
                            sensor.notify(state)
                    except KeyError:
                        debug('debugGPIO', self.name, "no sensor for interrupt on addr", i, self.sensorAddrs)

    def read(self, addr):
        """Return the current value (0/1) of bit *addr*; 0 when not I2C-backed."""
        if self.interface:
            self.readState()
            return (self.state >> addr) & 0x01
        else:
            return 0

    def readState(self):
        """Refresh self.state from the bank's GPIO data register."""
        byte = self.interface.read((self.addr, MCP23017Interface.GPIO+self.bank))
        debug('debugGPIORead', self.name, "read    ", "addr: 0x%02x"%self.addr, "reg: 0x%02x"%(MCP23017Interface.GPIO+self.bank), "value: 0x%02x"%byte)
        self.state = byte

    def write(self, addr, value):
        """Set bit *addr* to *value*, via I2C when available, else direct GPIO."""
        if self.interface:
            byte = self.state
            mask = 0x01<<addr
            # clear the target bit, then set it to the requested value
            byte = (byte & (~mask)) | ((value << addr) & mask)
            debug('debugGPIO', self.name, "write", "addr: 0x%02x"%self.addr, "reg: 0x%02x"%(MCP23017Interface.GPIO+self.bank), "value: 0x%02x"%byte)
            self.interface.write((self.addr, MCP23017Interface.GPIO+self.bank), byte)
            self.state = byte
        else:
            debug('debugGPIO', self.name, "write", "addr: 0x%02x"%addr, "value: 0x%02x"%value)
            gpio.output(MCP23017Interface.gpioPins[addr], value)
| jbuehl/homealone | homealone/interfaces/mcp23017Interface.py | mcp23017Interface.py | py | 7,639 | python | en | code | 0 | github-code | 13 |
74607400976 | # Smoothing tool for solar neutrino spectra
# Davide Basilico - Dec 2020 - davide.basilico@mi.infn.it
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import matplotlib.pylab as pylab
from scipy import signal
from scipy import interpolate
from scipy import ndimage
import sys
from scipy.interpolate import UnivariateSpline
from scipy.interpolate import CubicSpline
from sklearn.linear_model import LinearRegression
from patsy import cr
from scipy.fft import fft, fftfreq
from scipy.signal.signaltools import wiener
def Convolution(x, window_len, window):
    """Smooth the 1-D array *x* by convolving it with a window.

    Args:
        x (np.ndarray): input signal.
        window_len (int): window length; if < 3 the input is returned unchanged.
        window (str): one of 'flat' (moving average), 'hanning', 'hamming',
            'bartlett', 'blackman'.

    Returns:
        np.ndarray: smoothed signal, same length as *x* (the input is
        mirror-extended at both ends before convolving).

    Raises:
        ValueError: for an unknown window name. (The original printed a
        message and called quit(), which depends on the site module and
        kills the whole interpreter -- library code should raise instead.)
    """
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError(
            "Window is none of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
        )
    # mirror-extend the signal so the edges are smoothed symmetrically
    s = np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        w = getattr(np, window)(window_len)
    y = np.convolve(w/w.sum(), s, mode='same')
    # drop the mirrored padding to restore the original length
    return y[window_len-1:-window_len+1]
def SavitzkyGolay(yy, Window, Pol):
    """Smooth *yy* with a Savitzky-Golay filter (window length *Window*, polynomial order *Pol*)."""
    return signal.savgol_filter(yy, Window, Pol)
def GaussianFilter(yy, Sigma):
    """Smooth *yy* with a 1-D Gaussian kernel of standard deviation *Sigma*."""
    return ndimage.gaussian_filter1d(yy, Sigma)
def ButterFilter(yy, Window):
    """Zero-phase low-pass Butterworth smoothing of *yy*.

    Bug fix: the original called scipy.signal.butter(yy, 0.4, mysize=Window),
    which is not a valid call -- butter() designs filter coefficients from an
    (order, cutoff) pair and has no 'mysize' keyword (that belongs to
    scipy.signal.wiener), so the function always raised TypeError. The
    coefficients must then be applied with filtfilt() to actually filter.

    *Window* is kept for interface compatibility but is no longer used.
    """
    b, a = scipy.signal.butter(4, 0.4)
    yy_smoothed = scipy.signal.filtfilt(b, a, yy)
    return yy_smoothed
def SplineFilterOld(xx, yy):
    """Smooth *yy* over *xx* with a univariate smoothing spline (smoothing factor 0.5)."""
    spline = UnivariateSpline(xx, yy)
    spline.set_smoothing_factor(0.5)
    return np.array(spline(xx))
def SplineFilter(xx, yy):
    """Smooth *yy* by fitting a natural cubic spline basis (200 df) via linear regression."""
    basis = cr(xx, df=200, constraints="center")
    fit = LinearRegression().fit(basis, yy)
    # fitted values at the original sample points
    yy_smoothed = np.array(fit.predict(basis))
    print(yy_smoothed)
    return yy_smoothed
if __name__=='__main__':
    # CLI driver: load a spectrum column, smooth it with the chosen filter,
    # plot original/smoothed/residuals/FFT, and append summary metrics to a file.

    # filename is the text input file. It must contain the energy bins as the first column.
    # The other columns (you can have as many columns as you want) contains the events for each of the bin spectra energies (first column).

    if(len(sys.argv)<5 or (sys.argv[4]!='WF' and sys.argv[4]!='US' and sys.argv[4]!='SG' and sys.argv[4]!='GF' and sys.argv[4]!='CV')):
        print('python3 SmoothingTool.py [InputFilename] [ColumnToBeSmoothed] [OutputName] [WhichFilter] [Additional]\n')
        print('- SavitzkyGolay Filter => [WhichFilter] = SG	2 additional par: [WindowLength] [PolDegree]')
        print('- Gaussian Filter => [WhichFilter] = GF	1 additional par: [Sigma]')
        print('- Convolution => [WhichFilter] = CV	2 additional par: [WindowLength] [Shape=]')
        print('WindowLength (for SG and CV): the dimension of the smoothing window. It must be odd')
        print('')
        quit()

    filename = sys.argv[1] # input filename
    i = sys.argv[2] # which column (= which species spectrum) do you want to smooth
    spectrum = np.loadtxt(filename,usecols=(0,int(i)))
    OutName = sys.argv[3] + '.data'
    OutNamePDF = sys.argv[3] + '.pdf'
    Mode = sys.argv[4] # which filter
    xx = spectrum[:,0] # energy binning
    yy = spectrum[:,1] # counts

    # yy = np.exp(-0.5*(xx-700)*(xx-700)/(100*100)) + 0.005*xx*np.exp(-xx/400)+ 0.02*np.random.randn(xx.size)
    # yy = np.sin(2*3.14/30.*xx) + 0.2*np.random.randn(xx.size)
    # yy = 0.1*np.random.normal(0,1,8001)

    # Plotting section
    yy_smoothed = np.empty(len(xx))

    if(Mode=="SG"):
        if (int(sys.argv[5]) % 2) == 0 :
            print("Smoothing window must be odd. Please retry!")
            quit()
        yy_smoothed = SavitzkyGolay(yy,int(sys.argv[5]),int(sys.argv[6]))
        l = ['Original signal', 'Filtered ' + Mode+'_Window' + sys.argv[5] + '_Pol' + sys.argv[6]]

    if(Mode=="GF"):
        yy_smoothed = GaussianFilter(yy,int(sys.argv[5]))
        l = ['Original signal', 'Filtered ' + Mode+'_Sigma' + sys.argv[5]]

    if(Mode=="WF"):
        # NOTE(review): WienerFilter is not defined anywhere in this module,
        # so Mode=="WF" raises NameError here; presumably the imported
        # scipy wiener was intended.
        yy_smoothed = WienerFilter(yy,int(sys.argv[5]))
        l = ['Original signal', 'Filtered ' + Mode+'_Window' + sys.argv[5]]

    if(Mode=="CV"):
        if (int(sys.argv[5]) % 2) == 0 :
            print("Smoothing window must be odd. Please retry!")
            quit()
        yy_smoothed = Convolution(yy,int(sys.argv[5]),sys.argv[6])
        l = ['Original signal', 'Filtered ' + Mode+'_Window' + sys.argv[5] + '_Shape-' + sys.argv[6]]

    if(Mode=="US"):
        yy_smoothed = SplineFilter(xx,yy)
        l = ['Original signal', 'Filtered ' + Mode+'_k' + sys.argv[5]]

    params = {'legend.fontsize': 'x-large',
              'figure.figsize': (7, 5),
             'axes.labelsize': 'x-large',
             'axes.titlesize':'x-large',
             'xtick.labelsize':'x-large',
             'ytick.labelsize':'x-large'}
    pylab.rcParams.update(params)

    print(type(yy_smoothed))

    # 5-sample moving RMS, before and after smoothing
    MovingRMSArrayPostSmoothing = []
    MovingRMSArrayPreSmoothing = []
    for i in range(0,int(xx.size),1):
        #print(np.std(yy_smoothed[i:i+5]))
        MovingRMSArrayPreSmoothing.append(np.std(yy[i:i+5]))
        MovingRMSArrayPostSmoothing.append(np.std(yy_smoothed[i:i+5]))
        #print(MovingRMSArray[i])

    MovingRMSArrayPreSmoothing=np.array(MovingRMSArrayPreSmoothing)
    MovingRMSArrayPostSmoothing=np.array(MovingRMSArrayPostSmoothing)

    fig, axs = plt.subplots(3,figsize=(8,12))
    axs[0].plot(xx,yy,color='xkcd:medium blue',lw=0,marker='o',ms=0.5)	# plotting original un-smoothed histo
    axs[0].plot(xx,yy_smoothed,color='xkcd:vermillion',lw=1)		# plotting smoothed histo
    #plt.yscale('log')

    # Residuals = np.abs((yy_smoothed-yy)/(np.sqrt(yy)))
    Residuals = np.abs(yy_smoothed-yy)
    axs[1].plot(xx,Residuals,color='xkcd:jade',lw=1)
    axs[1].set_ylim(0,0.0001)
    axs[2].set_ylim(1e-9,1)
    #plt.yscale('log')

    #axs[3].plot(xx,MovingRMSArrayPreSmoothing,color='xkcd:medium blue',lw=1)
    #axs[3].plot(xx,MovingRMSArrayPostSmoothing,color='xkcd:vermillion',lw=1)

    #x_Frequency = fftfreq(EndpointPlots+1,EndpointPlots/10.)
    # FFT of both signals to compare frequency content
    samplingFrequency = 1
    samplingInterval = 1/samplingFrequency
    beginTime = 0
    endTime=len(xx)
    time = np.arange(beginTime,endTime,samplingInterval)
    y_FrequencyPreSmoothing = np.fft.fft(yy)
    y_FrequencyPostSmoothing = np.fft.fft(yy_smoothed)
    Frequency = np.fft.fftfreq(time.shape[-1])

    #for i in range(5,65,4):
    #	yy_smoothed_2 = GaussianFilter(yy,int(i))
    #	axs[0].plot(xx,yy_smoothed_2,lw=0.5)	# plotting original un-smoothed histo

    for i in range(0,2):
        axs[i].set_xlim(0,len(xx))
        axs[i].grid()
        axs[i].set_xlim(0,3000)

    #axs[0].legend(l,fontsize=18)
    axs[2].legend(l,fontsize=18)

    #axs[2].plot(x_Frequency,y_FrequencyPreSmoothing,color='xkcd:medium blue',lw=1)
    #axs[2].plot(x_Frequency,y_FrequencyPostSmoothing,color='xkcd:vermillion',lw=1)

    # sort frequencies so the spectrum plots as a continuous curve
    arr1inds = Frequency.argsort()
    Frequency = Frequency[arr1inds[::-1]]
    y_FrequencyPostSmoothing = y_FrequencyPostSmoothing[arr1inds[::-1]]
    y_FrequencyPreSmoothing = y_FrequencyPreSmoothing[arr1inds[::-1]]

    axs[2].plot(Frequency,np.abs(y_FrequencyPreSmoothing),color='xkcd:medium blue',lw=1)
    axs[2].plot(Frequency,np.abs(y_FrequencyPostSmoothing),color='xkcd:vermillion',lw=1)
    #axs[3].set_xlim(0,3000)
    #axs[3].set_yscale('log')

    axs[0].set_ylabel('PDFs')
    axs[1].set_ylabel('Difference')
    axs[2].set_ylabel('Amplitude')
    axs[0].set_xlabel('Energy [p.e.]')
    axs[1].set_xlabel('Energy [p.e.]')
    axs[2].set_xlabel('Frequency [1/p.e.]')

    #Residuals = np.isfinite(Residuals)

    print("High frequency average")
    HighFrequencyAvg = np.log10(np.average(np.abs(y_FrequencyPostSmoothing[0:200])/np.abs(y_FrequencyPreSmoothing[0:200])))
    print(HighFrequencyAvg)
    print("Residuals average")
    ResidualsAverage = np.sum(Residuals[np.isfinite(Residuals)])
    print(ResidualsAverage)

    # axs[2].plot(abs(y_FrequencyPostSmoothing),color='xkcd:vermillion',lw=1)
    # axs[0].set_yscale('log')
    axs[2].set_yscale('log')
    # axs[1].set_ylim(-0.01,0.01)

    #for i in Residuals:
    #	print(i, end = '\n')

    #axs[2].set_xlim(-100,100)
    axs[2].grid()
    #axs[2].set_xlim(-100,100)
    plt.savefig(OutNamePDF,bbox_inches='tight')

    # Textfile output
    #np.savetxt(OutName, yy_smoothed)
    #np.savetxt(OutName, sys.argv[5] + ' ' + str(HighFrequencyAvg) + ' ' + str(ResidualsAverage))
    #plt.show()
    with open(OutName, 'a') as f:
        # NOTE(review): sys.stdout is redirected to the output file and never
        # restored afterwards -- any later prints would go to the file.
        sys.stdout = f # Change the standard output to the file we created.
        print(sys.argv[5] + ' ' + str(HighFrequencyAvg) + ' ' + str(ResidualsAverage))
| davidebas/SmoothingPDFs | SmoothingTool.py | SmoothingTool.py | py | 8,266 | python | en | code | 0 | github-code | 13 |
73078923539 | from grabscreen import grab_screen
from cs_model import load_model
import cv2
import win32gui
import win32con
import torch
import numpy as np
from utils.general import non_max_suppression, scale_coords, xyxy2xywh
from utils.augmentations import letterbox
import pynput
from mouse_control import lock, recoil_control
from threading import Thread
import argparse
## Debug/configuration section: set the parameters below before running
parser = argparse.ArgumentParser()
parser.add_argument('--model-path', type=str, default='./models/aim-csgo2.pt')
parser.add_argument('--imgsz', type=int, default=640)
parser.add_argument('--conf-thres', type=float, default=0.4)
parser.add_argument('--iou-thres', type=float, default=0.05)
parser.add_argument('--use-cuda', type=bool, default=True)
parser.add_argument('--color', type=tuple, default=(0, 255, 0))
parser.add_argument('--thickness', type=int, default=3)
parser.add_argument('--show-window', type=bool, default=True)
parser.add_argument('--fullscreen-detect', type=bool, default=True)
parser.add_argument('--resolution', type=tuple, default=(1920, 1080))
parser.add_argument('--region', type=tuple, default=(0, 0, 1920, 1080))
parser.add_argument('--hold-lock', type=bool, default=False)
parser.add_argument('--lock-button', type=str, default='shift')
parser.add_argument('--head-first', type=bool, default=True)
parser.add_argument('--lock-tag', type=list, default=[0, 1, 2, 3])
parser.add_argument('--lock-choice', type=list, default=[0, 1, 2, 3])
parser.add_argument('--recoil-button', type=str, default='x1')
parser.add_argument('--recoil-sen', type=float, default=-2)
args = parser.parse_args()

# NOTE(review): the bare string below and the bare literal 4 after it are
# no-op expression statements -- leftovers that can safely be deleted.
'------------------------------------------------------------------------------------'
4
# class tags are compared as strings against the model's label output
args.lock_tag = [str(i) for i in args.lock_tag]
args.lock_choice = [str(i) for i in args.lock_choice]

device = 'cuda' if args.use_cuda else 'cpu'
half = device != 'cpu'
imgsz = args.imgsz

conf_thres = args.conf_thres
iou_thres = args.iou_thres

# capture region: whole screen or a sub-rectangle
if args.fullscreen_detect:
    top_x, top_y = 0, 0
    x, y = args.resolution
    len_x, len_y = args.resolution
else:
    top_x, top_y, x, y = args.region
    len_x, len_y = args.region[2] - args.region[0], args.region[3] - args.region[1]

model = load_model(args)
stride = int(model.stride.max())
names = model.module.names if hasattr(model, 'module') else model.names

lock_mode = False
mouse = pynput.mouse.Controller()

# background thread for recoil compensation
t = Thread(target=recoil_control, kwargs={'args': args})
t.start()

cv2.namedWindow('csgo-detect', cv2.WINDOW_NORMAL)
cv2.resizeWindow('csgo-detect', len_x // 3, len_y // 3)
with pynput.mouse.Events() as events:
print('enjoy yourself!')
while True:
it = next(events)
while it is not None and not isinstance(it, pynput.mouse.Events.Click):
it = next(events)
if args.hold_lock:
if it is not None and it.button == eval('it.button.' + args.lock_button) and it.pressed:
lock_mode = True
print('lock mode on')
if it is not None and it.button == eval('it.button.' + args.lock_button) and not it.pressed:
lock_mode = False
print('lock mode off')
else:
if it is not None and it.button == eval('it.button.' + args.lock_button) and it.pressed:
lock_mode = not lock_mode
print('lock mode', 'on' if lock_mode else 'off')
img0 = grab_screen(region=(top_x, top_y, x, y))
img0 = cv2.resize(img0, (len_x, len_y))
img = letterbox(img0, imgsz, stride=stride)[0]
img = img.transpose((2, 0, 1))[::-1]
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float()
img /= 255.
if len(img.shape) == 3:
img = img[None] # img = img.unsqueeze(0)
pred = model(img, augment=False, visualize=False)[0]
pred = non_max_suppression(pred, conf_thres, iou_thres, agnostic=False)
aims = []
for i, det in enumerate(pred):
gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]
if len(det):
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
for *xyxy, conf, cls in reversed(det):
# bbox:(tag, x_center, y_center, x_width, y_width)
"""
0 ct_head 1 ct_body 2 t_head 3 t_body
"""
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh) # label format
aim = ('%g ' * len(line)).rstrip() % line
aim = aim.split(' ')
# print(aim)
aims.append(aim)
if len(aims):
if lock_mode:
lock(aims, mouse, top_x, top_y, len_x, len_y, args)
for i, det in enumerate(aims):
_, x_center, y_center, width, height = det
x_center, width = len_x * float(x_center), len_x * float(width)
y_center, height = len_y * float(y_center), len_y * float(height)
top_left = (int(x_center - width / 2.), int(y_center - height / 2.))
bottom_right = (int(x_center + width / 2.), int(y_center + height / 2.))
color = args.color # RGB
cv2.rectangle(img0, top_left, bottom_right, color, thickness=args.thickness)
cv2.imshow('csgo-detect', img0)
hwnd = win32gui.FindWindow(None, 'csgo-detect')
CVRECT = cv2.getWindowImageRect('csgo-detect')
win32gui.SetWindowPos(hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break | MacroXie/CSGO-Copilot | aim-csgo/main.py | main.py | py | 6,050 | python | en | code | 1 | github-code | 13 |
72915524498 | import io
import struct
import pytest
import hypothesis
from hypothesis import strategies as hst
from qutebrowser.misc import elf
from qutebrowser.utils import utils
@pytest.mark.parametrize('fmt, expected', [
    (elf.Ident._FORMAT, 0x10),
    (elf.Header._FORMATS[elf.Bitness.x64], 0x30),
    (elf.Header._FORMATS[elf.Bitness.x32], 0x24),
    (elf.SectionHeader._FORMATS[elf.Bitness.x64], 0x40),
    (elf.SectionHeader._FORMATS[elf.Bitness.x32], 0x28),
])
def test_format_sizes(fmt, expected):
    """Ensure the struct format have the expected sizes.

    See https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
    and https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#Section_header
    """
    # struct.calcsize gives the byte size described by the format string;
    # the expected values are the header sizes fixed by the ELF spec.
    assert struct.calcsize(fmt) == expected
@pytest.mark.skipif(not utils.is_linux, reason="Needs Linux")
def test_result(webengine_versions, qapp, caplog):
    """Test the real result of ELF parsing.

    NOTE: If you're a distribution packager (or contributor) and see this test failing,
    I'd like your help with making either the code or the test more reliable! The
    underlying code is susceptible to changes in the environment, and while it's been
    tested in various environments (Archlinux, Ubuntu), might break in yours.

    If that happens, please report a bug about it!
    """
    pytest.importorskip('qutebrowser.qt.webenginecore')
    versions = elf.parse_webenginecore()

    # ELF parsing is only expected to work for QtWebEngine < 6.5
    if webengine_versions.webengine >= utils.VersionNumber(6, 5):
        assert versions is None
        pytest.xfail("ELF file structure not supported")

    assert versions is not None

    # No failing mmap
    assert len(caplog.messages) == 2
    assert caplog.messages[0].startswith('QtWebEngine .so found at')
    assert caplog.messages[1].startswith('Got versions from ELF:')

    # cross-check the parsed versions against the user agent string
    from qutebrowser.browser.webengine import webenginesettings
    webenginesettings.init_user_agent()
    ua = webenginesettings.parsed_user_agent

    assert ua.qt_version == versions.webengine
    assert ua.upstream_browser_version == versions.chromium
@pytest.mark.parametrize("data, expected", [
    # Simple match
    (
        b"\x00QtWebEngine/5.15.9 Chrome/87.0.4280.144\x00",
        elf.Versions("5.15.9", "87.0.4280.144"),
    ),
    # Ignoring garbage string-like data
    (
        b"\x00QtWebEngine/5.15.9 Chrome/87.0.4xternalclearkey\x00\x00"
        b"QtWebEngine/5.15.9 Chrome/87.0.4280.144\x00",
        elf.Versions("5.15.9", "87.0.4280.144"),
    ),
    # Piecing stuff together
    (
        (
            b"\x00QtWebEngine/6.4.0 Chrome/98.0.47Navigation to external protocol "
            b"blocked by sandb/webengine\x00"
            b"lots-of-other-stuff\x00"
            b"98.0.4758.90\x0099.0.4844.84\x00"
        ),
        elf.Versions("6.4.0", "98.0.4758.90"),
    ),
])
def test_find_versions(data, expected):
    """Version strings embedded in .rodata-like bytes are parsed correctly."""
    assert elf._find_versions(data) == expected
@pytest.mark.parametrize("data, message", [
    # No match at all
    (
        b"blablabla",
        "No match in .rodata"
    ),
    # Piecing stuff together: too short partial match
    (
        (
            b"\x00QtWebEngine/6.4.0 Chrome/98bla\x00"
            b"lots-of-other-stuff\x00"
            b"98.0.4758.90\x0099.0.4844.84\x00"
        ),
        "Inconclusive partial Chromium bytes"
    ),
    # Piecing stuff together: no full match
    (
        (
            b"\x00QtWebEngine/6.4.0 Chrome/98.0.47blabla"
            b"lots-of-other-stuff\x00"
            b"98.0.1234.56\x00"
        ),
        "No match in .rodata for full version"
    ),
])
def test_find_versions_invalid(data, message):
    """Malformed version data raises ParseError with a specific message."""
    with pytest.raises(elf.ParseError) as excinfo:
        elf._find_versions(data)
    assert str(excinfo.value) == message
# Prefix the random payload with nothing, a bare ELF magic, or a magic plus
# 64-bit/little-endian ident bytes, so parsing gets past the early checks.
@hypothesis.given(data=hst.builds(
    lambda *a: b''.join(a),
    hst.sampled_from([b'', b'\x7fELF', b'\x7fELF\x02\x01\x01']),
    hst.binary(min_size=0x70),
))
def test_hypothesis(data):
    """Fuzz ELF parsing and make sure no crashes happen."""
    fobj = io.BytesIO(data)
    try:
        elf._parse_from_file(fobj)
    except elf.ParseError as e:
        # ParseError is the only acceptable failure mode for bad input
        print(e)
| qutebrowser/qutebrowser | tests/unit/misc/test_elf.py | test_elf.py | py | 4,115 | python | en | code | 9,084 | github-code | 13 |
39976980609 | # -*- coding: utf-8 -*-
CURRENT_STAGE = {
"no":29,
"date":"2014/5/7 - 5/26",
"manager":u"鄭安琪"}
APP_URL="http://ntulifeguardapp.appspot.com/"
APP_ADMIN_EMAIL="ntulifeguard@gmail.com"
APP_LOGIN_MAX_RETRY=3
APP_NOTICE_EMAIL="ntulifeguardreg@groups.facebook.com"
APP_EMAIL_GREETING=u"[ntulifeguard] 謝謝使用台大救生班資料管理系統"
APP_SPREADSHEET_ID="0Aht604Cbunc4dFVReDZUMVVfeWxLTVpuaF9MaU9kS1E"
APP_SPREADSHEET_WORKSHEET_ID="od6"
APP_IMG_UPLOADER_URL="https://script.google.com/macros/exec?service=AKfycbxXvu47J7iovp8A_sADMqi9lQYVIASoj9UFxT4sRfzbaPsANuo&id="
| timchen86/ntulifeguardapp | ntulifeguardapp/globals.py | globals.py | py | 603 | python | en | code | 0 | github-code | 13 |
71139097619 | import argparse
import math
import random
import os
from types import GeneratorType
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torchvision
from tqdm import tqdm
import pdb
import wandb
import sys
sys.path.append("../")
from common import ROOT
from datasets.pizza10 import Pizza10Dataset
from sefa_new.models import SimpleLabelEncoder, LabelEncoder, Generator, Discriminator
from mpg.non_leaking import augment
from torchvision import transforms
from common import infinite_loader, count_parameters
from ingr_classifier.train import load_classifier
# from angle_classifier.val_all import load_classifier as load_viewpoint_classifier
# Training-time preprocessing: PIL image -> float tensor in [0, 1], then
# normalized with mean 0.5 / std 0.5, i.e. mapped into [-1, 1].
train_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5)])
def data_sampler(dataset, shuffle):
    """Return a RandomSampler over *dataset* when *shuffle* is true, else a SequentialSampler."""
    sampler_cls = data.RandomSampler if shuffle else data.SequentialSampler
    return sampler_cls(dataset)
def requires_grad(model, flag=True):
    """Enable (flag=True) or disable gradient tracking on every parameter of *model*."""
    for param in model.parameters():
        param.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
    """EMA update: model1 <- decay * model1 + (1 - decay) * model2, parameter-wise in place."""
    params1 = dict(model1.named_parameters())
    params2 = dict(model2.named_parameters())
    for name, param in params1.items():
        param.data.mul_(decay).add_(params2[name].data, alpha=1 - decay)
def infinite_loader(loader):
    """Yield batches from *loader* forever, restarting it each time it is exhausted.

    NOTE(review): this local definition shadows the infinite_loader imported
    from common at the top of the file.
    """
    while True:
        yield from loader
def d_logistic_loss(real_pred, fake_pred, wrong_pred=None):
    """Logistic (non-saturating) discriminator loss, unconditional or conditional.

    Args:
        real_pred (torch.tensor): D outputs for real images (pushed up).
        fake_pred (torch.tensor): D outputs for fake images (pushed down).
        wrong_pred (torch.tensor, optional): D outputs for mismatched
            conditional images (also pushed down). Defaults to None.

    Returns:
        torch.tensor: scalar sum of the mean softplus terms.
    """
    loss = F.softplus(-real_pred).mean() + F.softplus(fake_pred).mean()
    if wrong_pred is not None:
        loss = loss + F.softplus(wrong_pred).mean()
    return loss
def d_r1_loss(real_pred, real_img):
    """R1 gradient penalty: mean per-sample squared norm of dD/d(real image)."""
    (grad_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True
    )
    per_sample = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1)
    return per_sample.mean()
def g_nonsaturating_loss(fake_pred):
    """Non-saturating generator loss: mean softplus of the negated D outputs."""
    return F.softplus(-fake_pred).mean()
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """StyleGAN2 path-length regularizer.

    Args:
        fake_img (torch.tensor): generated images (N, C, H, W), a function of *latents*.
        latents (torch.tensor): latent codes (N, n_latent, dim) with requires_grad.
        mean_path_length: running EMA of the path length.
        decay (float): EMA decay for the running mean.

    Returns:
        tuple: (path_penalty scalar, detached updated mean_path_length,
        per-sample path_lengths).

    Bug fix: removed a leftover pdb.set_trace() breakpoint that froze
    every call to this function.
    """
    # random image-space direction, scaled so the expected norm is resolution-free
    noise = torch.randn_like(fake_img) / math.sqrt(
        fake_img.shape[2] * fake_img.shape[3]
    )
    grad, = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True,
    )
    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))

    # exponential moving average of the path length
    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)

    path_penalty = (path_lengths - path_mean).pow(2).mean()

    return path_penalty, path_mean.detach(), path_lengths
def make_noise(batch, latent_dim, n_noise, device):
    """Return one (batch, latent_dim) noise tensor, or a tuple of *n_noise* of them."""
    if n_noise == 1:
        return torch.randn(batch, latent_dim, device=device)
    stacked = torch.randn(n_noise, batch, latent_dim, device=device)
    return stacked.unbind(0)
def mixing_noise(batch, latent_dim, prob, device):
if prob > 0 and random.random() < prob:
return make_noise(batch, latent_dim, 2, device)
else:
return [make_noise(batch, latent_dim, 1, device)]
def set_grad_none(model, targets):
    """Clear (set to None) the .grad of every parameter whose name is in *targets*."""
    for name, param in model.named_parameters():
        if name in targets:
            param.grad = None
def load_mpg(ckpt_path, device=None):
    """Restore a full MPG training state from a checkpoint file.

    Args:
        ckpt_path: path to a .pt checkpoint written by train_cond().
        device: optional map_location for torch.load. Backward-compatible
            addition — the ``__main__`` block calls
            ``load_mpg(args.ckpt_path, device=device)``, which the previous
            one-argument signature rejected with a TypeError; it also lets
            CUDA checkpoints load on CPU-only machines.

    Returns:
        (ckpt_args, start_batch, label_encoder, generator, discriminator,
         g_ema, label_encoder_optim, g_optim, d_optim)
        NOTE(review): start_batch is hard-coded to 1 rather than read from the
        checkpoint — confirm whether the saved iteration should be restored.
    """
    print(f'load mpg from {ckpt_path}')
    if device is None:
        ckpt = torch.load(ckpt_path)
    else:
        ckpt = torch.load(ckpt_path, map_location=device)
    ckpt_args = ckpt['args']
    # Rebuild fresh modules/optimizers, then overwrite their state in place.
    label_encoder, generator, discriminator, g_ema, label_encoder_optim, g_optim, d_optim = create_mpg(ckpt_args)
    label_encoder.load_state_dict(ckpt['label_encoder'])
    generator.load_state_dict(ckpt["g"])
    discriminator.load_state_dict(ckpt["d"])
    g_ema.load_state_dict(ckpt["g_ema"])
    label_encoder_optim.load_state_dict(ckpt["label_encoder_optim"])
    g_optim.load_state_dict(ckpt["g_optim"])
    d_optim.load_state_dict(ckpt["d_optim"])
    return ckpt_args, 1, label_encoder, generator, discriminator, g_ema, label_encoder_optim, g_optim, d_optim
def create_mpg(args, device=None):
    """Build the MPG model family and its optimizers.

    Args:
        args: parsed namespace; reads size, embed_dim, n_mlp,
            channel_multiplier, lr, encoder, device. args.z_dim is forcibly
            set to 256 here (mutates the namespace).
        device: optional device override. Backward-compatible addition — the
            ``__main__`` block calls ``create_mpg(args, device)``, which the
            previous one-argument signature rejected with a TypeError.

    Returns:
        (label_encoder, generator, discriminator, g_ema,
         label_encoder_optim, g_optim, d_optim)
        NOTE(review): the ``__main__`` caller unpacks only four of these seven
        values and builds its own optimizers — confirm the intended contract
        at that call site.
    """
    args.z_dim = 256
    if device is None:
        device = args.device
    # Older checkpoints' args may predate the 'encoder' option.
    if 'encoder' not in args.__dict__ or args.encoder == 'normal':
        label_encoder = LabelEncoder(
            size=args.size, input_dim=10, embed_dim=args.embed_dim
        ).to(device)
    elif args.encoder == 'simple':
        label_encoder = SimpleLabelEncoder(
            size=args.size, input_dim=10, embed_dim=args.embed_dim
        ).to(device)
    generator = Generator(
        size=args.size, embed_dim=args.embed_dim, z_dim=args.z_dim, n_mlp=args.n_mlp, channel_multiplier=args.channel_multiplier
    ).to(device)
    discriminator = Discriminator(
        args.size, channel_multiplier=args.channel_multiplier, z_dim=args.z_dim
    ).to(device)
    # g_ema holds an exponential moving average of the generator weights.
    g_ema = Generator(
        size=args.size, embed_dim=args.embed_dim, z_dim=args.z_dim, n_mlp=args.n_mlp, channel_multiplier=args.channel_multiplier
    ).to(device)
    g_ema.eval()
    accumulate(g_ema, generator, 0)  # decay=0: copy generator weights verbatim
    print(f'label_encoder parameters: {count_parameters(label_encoder)}')
    print(f'generator parameters: {count_parameters(generator)}')
    print(f'discriminator parameters: {count_parameters(discriminator)}')
    label_encoder_optim = optim.Adam(label_encoder.parameters(), lr=args.lr, betas=(0, 0.99))
    g_optim = optim.Adam(generator.parameters(), lr=args.lr, betas=(0, 0.99))
    d_optim = optim.Adam(discriminator.parameters(), lr=args.lr, betas=(0, 0.99))
    return label_encoder, generator, discriminator, g_ema, label_encoder_optim, g_optim, d_optim
def handle_viewpoint_img_input(input_img):
    """Prepare an image batch for the viewpoint classifier: min-max normalize
    to [0, 1], apply ImageNet channel statistics, and resize to 224x224.

    Args:
        input_img: image batch, shape (B, 3, H, W).

    Returns:
        Normalized batch of shape (B, 3, 224, 224).
    """
    lo = input_img.min()
    hi = input_img.max()
    # Fix: a constant batch (hi == lo) previously produced 0/0 = NaN images;
    # clamp the denominator so such batches normalize to zeros instead.
    img = (input_img - lo) / (hi - lo).clamp_min(1e-8)
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    for channel in range(3):
        img[:, channel] = (img[:, channel] - means[channel]) / stds[channel]
    # img: resize
    img = F.interpolate(img, size=(224, 224), mode='bilinear', align_corners=False)
    return img
def train_cond(args, loader, label_encoder, discriminator_unused_placeholder=None, *unused) if False else None
def train_cond(args, loader, label_encoder, generator, discriminator, label_encoder_optim, g_optim, d_optim, g_ema, classifier,viewpoint_classifier, device, save_dir):
    """Conditional StyleGAN2-style training loop.

    Alternates discriminator and generator updates with label+viewpoint
    conditioning, optional adaptive discriminator augmentation (ADA), lazy R1
    regularization, classifier/viewpoint regularizers, CSV + wandb logging,
    periodic image sampling and checkpointing.

    NOTE(review): assumes *loader* yields (text, image, wrong_image,
    binary_label) tuples — confirm against the dataset definition.
    """
    img_save_dir = os.path.join(save_dir, 'images')
    os.makedirs(img_save_dir, exist_ok=True)
    # Wrap the loader so batches can be drawn indefinitely.
    loader = infinite_loader(loader)
    pbar = range(args.iter)
    pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.3)
    # Running state for the regularizers (path-length reg is disabled below).
    mean_path_length = 0
    r1_loss_cond = torch.tensor(0.0, device=device)
    r1_loss_uncond = torch.tensor(0.0, device=device)
    path_loss = torch.tensor(0.0, device=device)
    path_lengths = torch.tensor(0.0, device=device)
    mean_path_length_avg = 0
    loss_dict = {}
    # Unwrap DataParallel so checkpoints store the bare modules.
    if args.device == 'cuda':
        label_encoder_module = label_encoder.module
        g_module = generator.module
        d_module = discriminator.module
    else:
        label_encoder_module = label_encoder
        g_module = generator
        d_module = discriminator
    # EMA decay for g_ema (standard StyleGAN2 half-life constant).
    accum = 0.5 ** (32 / (10 * 1000))
    # ADA state: [sum of sign(D(real)), number of predictions seen].
    ada_augment = torch.tensor([0.0, 0.0], device=device)
    ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0 # 0.0
    ada_aug_step = args.ada_target / args.ada_length # 0.6 / 500k
    r_t_stat = 0
    # Fixed latent batch for periodic qualitative samples.
    sample_z = torch.randn(args.batch, args.style_dim, device=device)
    # save to file
    # NOTE(review): "G Loss view" is logged to wandb below but is absent from
    # this CSV header, so it never reaches log.csv — confirm if intentional.
    keys = [
        "Real Score Uncond", "Fake Score Uncond",
        "Real Score Cond", "Fake Score Cond", "Wrong Score Cond",
        "D Loss Uncond", "D Loss Cond", "D Loss",
        "G Loss FID", "G Loss Cls", "G Loss Uncond", "G Loss Cond", "G Loss",
        "Augment", "Rt", "R1 Loss Uncond", "R1 Loss Cond",
        "Path Length Regularization", "Mean Path Length", "Path Length",
    ]
    f = open(os.path.join(save_dir, 'log.csv'), 'w')
    f.write(','.join(keys))
    f.write('\n')
    # Grab one fixed batch; its labels/viewpoints condition all sample grids.
    sample_txt, sample_img, _, sample_binary_label = next(loader)
    sample_viewpoint_img = handle_viewpoint_img_input(sample_img)
    sample_viewpoint = viewpoint_classifier(sample_viewpoint_img.to(device))
    sample_viewpoint_label = sample_viewpoint[:,0:args.viewpoint_dim]
    # Save the reference texts and real images for visual comparison.
    with open(os.path.join(save_dir, f'real_txt.txt'), 'w') as file:
        for i,txt in enumerate(sample_txt):
            file.write(str(i+1)+'\n')
            file.write(txt)
            file.write('\n')
    torchvision.utils.save_image(
        sample_img,
        os.path.join(save_dir, f"real_img.png"),
        nrow=int(args.batch ** 0.5),
        normalize=True,
        range=(-1, 1),
    )
    # classifier regularier
    cls_criterion = nn.BCEWithLogitsLoss()
    # regression regularier
    reg_criterion = nn.MSELoss()
    # FIDLoss
    if 'fid' not in args.__dict__:
        args.fid = 0.0
    if args.fid:
        sys.path.append('../fid_loss')
        from fid_loss import FIDLoss
        import pickle
        if 'pizzaGANdata_new_concise' in args.lmdb_file:
            with open('../metrics/pizzaGANdata_new_concise.pkl', 'rb') as f_stat:
                embeds = pickle.load(f_stat)
                real_mean = torch.tensor(embeds['mean']).to(device)
                real_cov = torch.tensor(embeds['cov']).to(device)
        fid_criterion = FIDLoss(real_mean, real_cov)
    for idx in pbar:
        i = idx + args.start_iter
        if i > args.iter:
            print("Done!")
            break
        _, real_img, wrong_img, binary_label = next(loader)
        real_img = real_img.to(device)
        wrong_img = wrong_img.to(device)
        binary_label = binary_label.to(device)
        # Pseudo-label the batch's viewpoint with the frozen classifier.
        viewpoint_img = handle_viewpoint_img_input(real_img)
        viewpoint_output = viewpoint_classifier(viewpoint_img)
        viewpoint_label = viewpoint_output[:,0:args.viewpoint_dim]
        # ---- Discriminator step: freeze G / label encoder, train D. ----
        requires_grad(label_encoder, False)
        requires_grad(generator, False)
        requires_grad(discriminator, True)
        noise = mixing_noise(args.batch, args.style_dim, args.mixing, device)
        text_outputs = label_encoder(torch.cat((binary_label,viewpoint_label),dim=1))
        fake_img, _ = generator(noise, text_outputs, input_is_latent=args.input_is_latent)
        if args.augment:
            real_img_aug, _ = augment(real_img, ada_aug_p)
            fake_img, _ = augment(fake_img, ada_aug_p)
        else:
            real_img_aug = real_img
        if args.uncond>0:
            # pdb.set_trace()
            fake_pred_uncond = discriminator(fake_img)
            real_pred_uncond = discriminator(real_img_aug)
            d_loss_uncond = d_logistic_loss(real_pred_uncond, fake_pred_uncond)
        else:
            fake_pred_uncond = real_pred_uncond = d_loss_uncond = torch.tensor(0.0, device=device)
        fake_pred_cond = discriminator(fake_img, text_outputs)
        real_pred_cond = discriminator(real_img, text_outputs)
        # "wrong" pairs: real images with mismatched conditioning.
        wrong_pred_cond = discriminator(wrong_img, text_outputs)
        if args.wrong:
            d_loss_cond = d_logistic_loss(real_pred_cond, fake_pred_cond, wrong_pred_cond)
        else:
            d_loss_cond = d_logistic_loss(real_pred_cond, fake_pred_cond, None)
        d_loss = args.uncond * d_loss_uncond + args.cond * d_loss_cond
        loss_dict["real_score_uncond"] = real_pred_uncond.mean()
        loss_dict["fake_score_uncond"] = fake_pred_uncond.mean()
        loss_dict["real_score_cond"] = real_pred_cond.mean()
        loss_dict["fake_score_cond"] = fake_pred_cond.mean()
        loss_dict["wrong_score_cond"] = wrong_pred_cond.mean()
        loss_dict["d_loss_uncond"] = d_loss_uncond
        loss_dict["d_loss_cond"] = d_loss_cond
        loss_dict["d_loss"] = d_loss
        discriminator.zero_grad()
        d_loss.backward()
        d_optim.step()
        # ---- ADA: adapt augmentation probability from sign(D(real)). ----
        if args.augment and args.augment_p == 0:
            if args.uncond > 0:
                ada_augment_data_uncond = torch.tensor(
                    (torch.sign(real_pred_uncond).sum().item(), real_pred_uncond.shape[0]), device=device
                )
                ada_augment += ada_augment_data_uncond
            ada_augment_data_cond = torch.tensor(
                (torch.sign(real_pred_cond).sum().item(), real_pred_cond.shape[0]), device=device
            )
            ada_augment += ada_augment_data_cond
            if ada_augment[1] > 255:
                pred_signs, n_pred = ada_augment.tolist()
                r_t_stat = pred_signs / n_pred
                if r_t_stat > args.ada_target: # => overfitted
                    sign = 1
                else: # not overfit
                    sign = -1
                ada_aug_p += sign * ada_aug_step * n_pred # each image will increase/decrease ∆p=ada_aug_step
                ada_aug_p = min(1, max(0, ada_aug_p))
                ada_augment.mul_(0)
        # ---- Lazy R1 regularization every d_reg_every iterations. ----
        d_regularize = i % args.d_reg_every == 0
        if d_regularize:
            real_img.requires_grad = True
            real_pred_cond = discriminator(real_img, text_outputs)
            r1_loss_cond = d_r1_loss(real_pred_cond, real_img)
            if args.uncond > 0:
                real_pred_uncond = discriminator(real_img)
                r1_loss_uncond = d_r1_loss(real_pred_uncond, real_img)
            else:
                r1_loss_uncond = torch.tensor(0.0, device=device)
            discriminator.zero_grad()
            # The "+ 0 * real_pred_cond[0]" term keeps D's output in the graph.
            (args.r1 / 2 * (r1_loss_cond+r1_loss_uncond) * args.d_reg_every + 0 * real_pred_cond[0]).backward()
            d_optim.step()
        loss_dict["r1_loss_cond"] = r1_loss_cond
        loss_dict["r1_loss_uncond"] = r1_loss_uncond
        # ---- Generator step: freeze D, train G and the label encoder. ----
        requires_grad(label_encoder, True)
        requires_grad(generator, True)
        requires_grad(discriminator, False)
        noise = mixing_noise(args.batch, args.style_dim, args.mixing, device)
        text_outputs = label_encoder(torch.cat((binary_label,viewpoint_label),dim=1))
        fake_img, _ = generator(noise, text_outputs, input_is_latent=args.input_is_latent)
        if args.augment:
            fake_img, _ = augment(fake_img, ada_aug_p)
        g_loss_fid = torch.tensor(0.0)
        if args.fid:
            g_loss_fid = fid_criterion(fake_img)
        g_loss_cls = torch.tensor(0.0)
        g_loss_viewpoint = torch.tensor(0.0)
        if args.cls:
            # img: normalize
            img = (fake_img-fake_img.min())/(fake_img.max()-fake_img.min())
            means = [0.485, 0.456, 0.406]
            stds = [0.229, 0.224, 0.225]
            for channel in range(3):
                img[:,channel] = (img[:,channel]-means[channel])/stds[channel]
            # img: resize
            img = F.interpolate(img, size=(224, 224), mode='bilinear', align_corners=False)
            # retrieve
            output = classifier(img)
            g_loss_cls = cls_criterion(output, binary_label.float())
            output = viewpoint_classifier(img)
            # print(output[:,0:args.viewpoint_dim])
            # print(viewpoint_label)
            g_loss_viewpoint = reg_criterion(output[:,0:args.viewpoint_dim], viewpoint_label.float())
        if args.uncond > 0:
            fake_pred_uncond = discriminator(fake_img)
            g_loss_uncond = g_nonsaturating_loss(fake_pred_uncond)
        else:
            g_loss_uncond = torch.tensor(0.0, device=device)
        fake_pred_cond = discriminator(fake_img, text_outputs)
        g_loss_cond = g_nonsaturating_loss(fake_pred_cond)
        g_loss = args.uncond * g_loss_uncond + args.cond * g_loss_cond + args.cls * g_loss_cls + args.fid * g_loss_fid+args.viewpoint*g_loss_viewpoint
        loss_dict["g_loss_fid"] = g_loss_fid
        loss_dict["g_loss_cls"] = g_loss_cls
        loss_dict["g_loss_uncond"] = g_loss_uncond
        loss_dict["g_loss_cond"] = g_loss_cond
        loss_dict["g_loss"] = g_loss
        loss_dict["g_loss_view"] = g_loss_viewpoint
        label_encoder.zero_grad()
        generator.zero_grad()
        g_loss.backward()
        label_encoder_optim.step()
        g_optim.step()
        # *************************
        # NOTE: generator regularizer is not working on multi-gpu training for now
        # g_regularize = i % args.g_reg_every == 0
        g_regularize = 0
        # *************************
        if g_regularize:
            path_batch_size = max(1, args.batch // args.path_batch_shrink)
            noise = mixing_noise(path_batch_size, args.style_dim, args.mixing, device)
            text_outputs = label_encoder(binary_label)
            fake_img, latents = generator(noise, text_outputs, input_is_latent=args.input_is_latent, return_latents=True)
            path_loss, mean_path_length, path_lengths = g_path_regularize(
                fake_img, latents, mean_path_length
            )
            label_encoder.zero_grad()
            generator.zero_grad()
            weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss
            if args.path_batch_shrink:
                weighted_path_loss += 0 * fake_img[0, 0, 0, 0]
            weighted_path_loss.backward()
            label_encoder_optim.step()
            g_optim.step()
            # mean_path_length_avg = (
            #     reduce_sum(mean_path_length).item() / get_world_size()
            # )
            mean_path_length_avg = mean_path_length
        loss_dict["path_loss"] = path_loss
        loss_dict["path_length"] = path_lengths.mean()
        # Track an exponential moving average of the generator weights.
        accumulate(g_ema, g_module, accum)
        # Single-process run: no distributed reduction of the loss dict.
        loss_reduced = loss_dict
        real_score_uncond_val = loss_reduced["real_score_uncond"].mean().item()
        fake_score_uncond_val = loss_reduced["fake_score_uncond"].mean().item()
        real_score_cond_val = loss_reduced["real_score_cond"].mean().item()
        fake_score_cond_val = loss_reduced["fake_score_cond"].mean().item()
        wrong_score_cond_val = loss_reduced["wrong_score_cond"].mean().item()
        d_loss_uncond_val = loss_reduced["d_loss_uncond"].mean().item()
        d_loss_cond_val = loss_reduced["d_loss_cond"].mean().item()
        d_loss_val = loss_reduced["d_loss"].mean().item()
        g_loss_fid_val = loss_reduced["g_loss_fid"].mean().item()
        g_loss_cls_val = loss_reduced["g_loss_cls"].mean().item()
        g_loss_uncond_val = loss_reduced["g_loss_uncond"].mean().item()
        g_loss_cond_val = loss_reduced["g_loss_cond"].mean().item()
        g_loss_val = loss_reduced["g_loss"].mean().item()
        g_loss_view_val = loss_reduced["g_loss_view"].mean().item()
        r1_loss_uncond_val = loss_reduced["r1_loss_uncond"].mean().item()
        r1_loss_cond_val = loss_reduced["r1_loss_cond"].mean().item()
        path_loss_val = loss_reduced["path_loss"].mean().item()
        path_length_val = loss_reduced["path_length"].mean().item()
        pbar.set_description(
            (
                f"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1 loss cond: {r1_loss_cond_val:.4f}; "
                f"path loss: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; "
                f"augment: {ada_aug_p:.4f}"
            )
        )
        log = {
            "Real Score Uncond": real_score_uncond_val,
            "Fake Score Uncond": fake_score_uncond_val,
            "Real Score Cond": real_score_cond_val,
            "Fake Score Cond": fake_score_cond_val,
            "Wrong Score Cond": wrong_score_cond_val,
            "D Loss Uncond": d_loss_uncond_val,
            "D Loss Cond": d_loss_cond_val,
            "D Loss": d_loss_val,
            "G Loss FID": g_loss_fid_val,
            "G Loss Cls": g_loss_cls_val,
            "G Loss view": g_loss_view_val,
            "G Loss Uncond": g_loss_uncond_val,
            "G Loss Cond": g_loss_cond_val,
            "G Loss": g_loss_val,
            "Augment": ada_aug_p,
            "Rt": r_t_stat,
            "R1 Loss Uncond": r1_loss_uncond_val,
            "R1 Loss Cond": r1_loss_cond_val,
            "Path Length Regularization": path_loss_val,
            # "Mean Path Length": mean_path_length.item(),
            "Mean Path Length": mean_path_length,
            "Path Length": path_length_val,
        }
        line = ','.join([str(log[k]) for k in keys])
        f.write(line)
        f.write('\n')
        wandb.log(log)
        # ---- Periodic qualitative samples from the EMA generator. ----
        if i % 100 == 0:
            with torch.no_grad():
                g_ema.eval()
                text_outputs = label_encoder(torch.cat((sample_binary_label,sample_viewpoint_label.to('cpu')),dim=1))
                sample, _ = g_ema([sample_z], text_outputs, input_is_latent=args.input_is_latent)
                torchvision.utils.save_image(
                    sample,
                    os.path.join(img_save_dir, f"{str(i).zfill(6)}.png"),
                    nrow=int(args.batch ** 0.5),
                    normalize=True,
                    range=(-1, 1),
                )
        # ---- Periodic checkpoint of all modules and optimizers. ----
        if i % 10000 == 0:
            filename = os.path.join(save_dir, f"{str(i).zfill(6)}.pt")
            # NOTE(review): message does not interpolate the actual filename —
            # presumably meant f'saving mpg to {filename}'; confirm.
            print(f'saving mpg to (unknown)')
            torch.save(
                {
                    'label_encoder': label_encoder_module.state_dict(),
                    "g": g_module.state_dict(),
                    "d": d_module.state_dict(),
                    "g_ema": g_ema.state_dict(),
                    'label_encoder_optim': label_encoder_optim.state_dict(),
                    "g_optim": g_optim.state_dict(),
                    "d_optim": d_optim.state_dict(),
                    "ada_aug_p": ada_aug_p,
                    "args": args,
                },
                filename,
            )
    f.close()
if __name__ == "__main__":
    # ---- CLI arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--lmdb_file", type=str,
        default=f'{ROOT}/data/pizzaGANdata_new_concise/pizzaGANdata.lmdb')
    parser.add_argument("--classifier_path", type=str, default='../ingr_classifier/runs/1t5xrvwx/batch5000.ckpt')
    # parser.add_argument("--viewpoint_classifier_path", type=str, default='../angle_classifier/runs/2lfc8wxr/batch2000.ckpt')
    # parser.add_argument("--ckpt_path", type=str, default='../mpg_plusView/runs/2e1wjaiz/020000.pt')
    parser.add_argument("--viewpoint_classifier_path", type=str, default='../angle_classifier/runs/dodui0og/batch4999.ckpt')
    parser.add_argument("--ckpt_path", type=str, default='')
    parser.add_argument("--encoder", type=str, default='normal', choices=['simple', 'normal'])
    parser.add_argument("--device", type=str, default='cuda', choices=['cuda', 'cpu'])
    parser.add_argument("--seed", type=int, default=8)
    parser.add_argument("--input_is_latent", type=int, default=0)
    parser.add_argument("--workers", type=int, default=8)
    parser.add_argument("--iter", type=int, default=800000)
    parser.add_argument("--batch", type=int, default=4)
    parser.add_argument("--n_sample", type=int, default=64)
    parser.add_argument("--embed_dim", type=int, default=256)
    parser.add_argument("--style_dim", type=int, default=256)
    parser.add_argument("--size", type=int, default=256)
    parser.add_argument("--n_mlp", type=int, default=8)
    parser.add_argument("--r1", type=float, default=10)
    parser.add_argument("--path_regularize", type=float, default=2)
    # parser.add_argument("--path_batch_shrink", type=int, default=2)
    parser.add_argument("--path_batch_shrink", type=int, default=1, choices=[1])
    parser.add_argument("--d_reg_every", type=int, default=16)
    parser.add_argument("--g_reg_every", type=int, default=4)
    parser.add_argument("--mixing", type=float, default=0.9)
    parser.add_argument("--lr", type=float, default=0.002)
    parser.add_argument("--channel_multiplier", type=int, default=2)
    parser.add_argument("--augment", action="store_true")
    parser.add_argument("--augment_p", type=float, default=0)
    parser.add_argument("--ada_target", type=float, default=0.6)
    parser.add_argument("--ada_length", type=int, default=500 * 1000)
    parser.add_argument("--wrong", type=int, default=1, choices=[0,1])
    parser.add_argument("--cond", type=float, default=1.0)
    parser.add_argument("--uncond", type=float, default=1.0)
    parser.add_argument("--cls", type=float, default=1.0)
    parser.add_argument("--fid", type=float, default=0.0)
    parser.add_argument("--viewpoint", type=float, default=1.0)
    parser.add_argument("--viewpoint_dim", type=int, default=4)
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    device = args.device
    torch.backends.cudnn.benchmark = True
    # ---- Build or restore the models ----
    if args.ckpt_path:
        # NOTE(review): verify load_mpg accepts a device kwarg — its definition
        # in this file may need updating to match this call.
        ckpt_args, batch, label_encoder, generator, discriminator, g_ema, label_encoder_optim, g_optim, d_optim = load_mpg(args.ckpt_path, device=device)
        args.start_iter = batch + 1
    else:
        args.start_iter = 0
        # NOTE(review): verify create_mpg's signature (device argument) and its
        # return arity — it appears to return 7 values while only 4 are
        # unpacked here; confirm the intended contract.
        label_encoder, generator, discriminator, g_ema = create_mpg(args, device)
    # Lazy-regularization learning-rate correction (StyleGAN2 convention).
    g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
    label_encoder_optim = optim.Adam(
        label_encoder.parameters(),
        lr=args.lr * g_reg_ratio,
        betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
    )
    g_optim = optim.Adam(
        generator.parameters(),
        lr=args.lr * g_reg_ratio,
        betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
    )
    d_optim = optim.Adam(
        discriminator.parameters(),
        lr=args.lr * d_reg_ratio,
        betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
    )
    if args.device == 'cuda':
        label_encoder, generator, discriminator = [nn.DataParallel(x) for x in [label_encoder, generator, discriminator]]
    # ---- Frozen auxiliary classifiers used as training regularizers ----
    _, _, classifier, _ = load_classifier(args.classifier_path)
    _,_,viewpoint_classifier = load_viewpoint_classifier(args.viewpoint_classifier_path)
    viewpoint_classifier = viewpoint_classifier.eval().to(device)
    requires_grad(viewpoint_classifier, False)
    classifier = classifier.eval().to(device)
    requires_grad(classifier, False)
    # ---- Data ----
    dataset = Pizza10Dataset(
        lmdb_file=args.lmdb_file,
        transform=train_transform, resolution=args.size)
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        num_workers=args.workers,
        sampler=data_sampler(dataset, shuffle=True),
        drop_last=True,
    )
    # ---- Experiment tracking and output directory ----
    wandb.init(project="mpg")
    wandb.config.update(args)
    save_dir = os.path.join(os.path.dirname(__file__), 'runs', wandb.run.id)
    os.makedirs(save_dir, exist_ok=True)
    print(f'save_dir: {save_dir}')
train_cond(args, loader, label_encoder, generator, discriminator, label_encoder_optim, g_optim, d_optim, g_ema, classifier,viewpoint_classifier, device, save_dir) | klory/MPG2 | sefa_new/train_cs.py | train_cs.py | py | 26,949 | python | en | code | 5 | github-code | 13 |
7178569110 | """
Code for day 4 of the 2021 Advent of Code challenge :)
"""
# SYSTEM IMPORTS
# THIRD PARTY IMPORTS
# LOCAL APPLICATION IMPORTS
def read_input():
    """Read the file "day_04_input" and return its lines with trailing
    newlines stripped; blank lines are replaced by the placeholder "_"
    (used downstream as a block separator).
    """
    cleaned_input = []
    # Fix: the file handle was previously opened without being closed;
    # the with-statement guarantees it is released.
    with open("day_04_input", "r") as input_data:
        for item in input_data:
            line = item.split("\n")[0]
            cleaned_input.append(line if line else "_")
    # Output the cleaned data
    return cleaned_input
def bingo_p1():
    """Part 1: load the puzzle input, echo every line, and return the data."""
    data = read_input()
    for line in data:
        print(line)
    return data
def bingo_p2():
    # Part 2 stub: reads the input but computes and returns nothing yet.
    data = read_input()
# Run part 1 when the script executes; the result prints below are still disabled.
bingo_p1()
# print("day_04_part01 result: {}".format(bingo_p1()))
# print("day_04_part02 result: {}".format(bingo_p2()))
| CatAndDogSoup/AdventOfCode_2021 | day_04.py | day_04.py | py | 730 | python | en | code | 1 | github-code | 13 |
33021737942 | from abc import ABC, abstractmethod
import pytube
import os
from os import path
from config import DOWNLOAD_PATH
from utils.logging.logger import logger
import json
class VideoDownloadBase(ABC):
    """Abstract interface for video downloaders."""

    @abstractmethod
    def download(self, href, config):
        """Fetch the video at *href* according to *config*."""
class VideoDownload(VideoDownloadBase):
    """YouTube downloader with a JSON-file cache of hrefs that previously failed."""
    def __init__(self):
        # Map of href -> True for downloads that failed before; persisted on disk.
        self.errored = {}
        if os.path.exists('error_cache.txt'):
            with open('error_cache.txt', 'r') as inp:
                data = str(inp.read())
                self.errored = json.loads(data)
    def __save_cache__(self):
        # Persist the error cache so failed hrefs are skipped across runs.
        logger.debug('Saving cache')
        with open('error_cache.txt', 'w') as out:
            out.write(json.dumps(self.errored))
    def download(self, href, config):
        """Download the video at *href* as MP4 at up to config['height'] pixels.

        Returns the local file path, or None when the video is unavailable or
        the download fails (the href is then remembered in the error cache).
        """
        # NOTE(review): logs the entire cache on every call — likely debug leftover.
        logger.warning(self.errored)
        # Cache key "<token>-<height>"; assumes href contains exactly one '='
        # (e.g. a ...watch?v=<id> URL) — confirm against the callers.
        token = href.split('=')[1]
        file_name = token + '-' + str(config['height'])
        file_path = DOWNLOAD_PATH + "/" + file_name + ".mp4"
        if path.exists(file_path):
            logger.debug('Already exists')
            return file_path
        if href in self.errored:
            logger.warning('Was errored before', href)
            return None
        else:
            logger.debug('Wasn\'t errored')
        try:
            yt = pytube.YouTube(href)
            # Restrict to MP4, non-progressive (DASH) streams.
            video_filter = yt.streams\
                .filter(subtype='mp4') \
                .filter(progressive=False)
            quality = 0
            # Pick the highest resolution not exceeding the requested height.
            for video in video_filter.all():
                resolution = video.resolution
                logger.debug(f"get {video.url}")
                if resolution is not None:
                    resolution = int(video.resolution.replace('p', ''))
                    if resolution <= config['height'] and resolution >= quality:
                        quality = resolution
            video_filter = video_filter.filter(resolution=str(quality) + "p")
            video = video_filter.first()
            logger.info("Quality: " + str(quality) + "p")
            if video is None:
                # No acceptable stream: remember the failure and give up.
                self.errored[href] = True
                self.__save_cache__()
                return None
            subtype = video.subtype  # NOTE(review): unused — confirm before removing
            print(f"Downloading {DOWNLOAD_PATH}")
            video.download(
                DOWNLOAD_PATH,
                filename=file_name
            )
            return file_path
        except Exception as error:
            # Any pytube/network failure marks the href as bad in the cache.
            logger.error('Error handled', error)
            self.errored[href] = True
            self.__save_cache__()
            return None
| timothyxp/Text2VideoServer | utils/video_download.py | video_download.py | py | 2,538 | python | en | code | 1 | github-code | 13 |
25579204842 | """
Converting DIMACS to Z3 expr
"""
from typing import List
import z3
def int_clauses_to_z3(clauses: List[List[int]]) -> z3.BoolRef:
    """
    Convert DIMACS-style integer clauses to a Z3 CNF formula.

    The function returns the conjunction (AND) of one disjunction (OR) per
    clause. Each positive integer k names the atomic proposition "k!k";
    a negative literal is the negation of that atom. Atoms are cached so the
    same variable index always maps to the same Z3 Boolean.

    :param clauses: the clauses of a CNF, each a list of non-zero ints
    :return: a z3 expr (z3.And of z3.Or terms)
    """
    z3_clauses = []
    # Fix: renamed the cache from `vars` (shadowed the builtin) and used
    # descriptive names for the per-literal temporaries.
    atom_cache = {}
    for clause in clauses:
        literals = []
        for lit in clause:
            index = abs(lit)
            if index in atom_cache:
                atom = atom_cache[index]
            else:
                atom = z3.Bool("k!{}".format(index))
                atom_cache[index] = atom
            literals.append(z3.Not(atom) if lit < 0 else atom)
        z3_clauses.append(z3.Or(*literals))
    return z3.And(*z3_clauses)
| ZJU-Automated-Reasoning-Group/arlib | arlib/translator/intclauses2z3.py | intclauses2z3.py | py | 893 | python | en | code | 6 | github-code | 13 |
# -*- coding: utf-8 -*-
import os
import django
# Scrapy settings for scraper project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'scraper'
SPIDER_MODULES = ['scraper.spiders']
NEWSPIDER_MODULE = 'scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'scraper (+http://www.yourdomain.com)'
# Pipeline that persists scraped items (priority 300 in the pipeline order).
ITEM_PIPELINES = {
    'scraper.pipelines.EslCafePipeline': 300,
}
# Django Item requires explicit Django settings in Scrapy settings
# http://doc.scrapy.org/en/latest/topics/djangoitem.html#django-settings-set-up
# NOTE(review): this overwrites any DJANGO_SETTINGS_MODULE already set in the
# environment — os.environ.setdefault(...) would be gentler; confirm intent.
os.environ['DJANGO_SETTINGS_MODULE'] = 'jobboardscraper.settings'
# Although not documented anywhere, I *think* Scrapy falls under the
# use case of `AppRegistryNotReady` error "if you forget to call
# django.setup() in a standalone Python script."
# https://docs.djangoproject.com/en/1.7/ref/applications/#troubleshooting
django.setup()
| dillonko/jobboardscraper | jobboardscraper/scraper/settings.py | settings.py | py | 1,073 | python | en | code | 1 | github-code | 13 |
from setuptools import setup, find_packages
from codecs import open
from os import path

from pyway.version import __version__

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
# Fix: also skip blank lines (split('\n') yields '' for the trailing newline);
# empty strings are not valid install_requires entries. VCS requirements
# ("git+..." URLs) are excluded as before.
install_requires = [x.strip() for x in all_reqs if x.strip() and 'git+' not in x]

setup(
    name='pyway',
    version=__version__,
    description='Pyway is a database versioning and migration tool inspired on Flyway',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/sergiosbx/pyway',
    download_url='https://github.com/sergiosbx/pyway/tarball/' + __version__,
    license='GPL',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    author='Sérgio Ferreira Filho',
    install_requires=install_requires,
    author_email='sergio.ferreira.filho@gmail.com',
    py_modules=['pyway'],
    entry_points={
        'console_scripts': ['pyway=pyway.scripts.main:cli']
    }
)
| sergiosbx/pyway | setup.py | setup.py | py | 1,400 | python | en | code | null | github-code | 13 |
def encrypt(message: str, rails: int) -> str:
    """
    Encrypts a message using the Rail Fence Cipher with the specified number of rails.

    Characters are written along a zig-zag of *rails* rows, then the rows are
    read off top to bottom and concatenated.

    Args:
        message (str): The message to encrypt.
        rails (int): The number of rails to use.

    Returns:
        str: The encrypted message.

    Raises:
        ValueError: If the number of rails is less than 2.
    """
    if rails < 2:
        raise ValueError("Number of rails must be at least 2.")
    rows = [[] for _ in range(rails)]
    row, step = 0, 1
    for ch in message:
        rows[row].append(ch)
        row += step
        # Bounce off the top and bottom rails.
        if row in (0, rails - 1):
            step = -step
    return ''.join(''.join(r) for r in rows)
def decrypt(ciphertext: str, rails: int) -> str:
    """
    Decrypts a message that has been encrypted using the Rail Fence Cipher with
    the specified number of rails.

    Args:
        ciphertext (str): The encrypted message.
        rails (int): The number of rails that were used to encrypt the message.

    Returns:
        str: The decrypted message.

    Raises:
        ValueError: If the number of rails is less than 2.
    """
    # Fix: the previous implementation built a placeholder fence of spaces and
    # rebuilt each rail string with fence[rail][1:] per character, which is
    # O(n^2); this version does the same zig-zag bookkeeping with lists and
    # iterators instead.
    if rails < 2:
        raise ValueError("Number of rails must be at least 2.")
    # First pass: record which rail each plaintext position sits on.
    rail_of = []
    rail, step = 0, 1
    for _ in ciphertext:
        rail_of.append(rail)
        rail += step
        if rail == rails - 1 or rail == 0:
            step = -step
    # Slice the ciphertext into one contiguous run per rail.
    rail_iters = []
    index = 0
    for r in range(rails):
        length = rail_of.count(r)
        rail_iters.append(iter(ciphertext[index:index + length]))
        index += length
    # Second pass: walk the zig-zag again, consuming each rail in order.
    return ''.join(next(rail_iters[r]) for r in rail_of)
| eshabaweja/classicciphers | classicciphers/railfence.py | railfence.py | py | 1,862 | python | en | code | 0 | github-code | 13 |
'''
Upload an avatar: send an image file from the client to the server.

Approach:
    client - read the avatar image content -> send it over the network
    server - receive the content from the network -> write it to a file
'''
from socket import *
import time
# Server address (local loopback, port 8888).
ADDR = ('127.0.0.1', 8888)
# Create the TCP socket.
s = socket()
# Connect to the server.
s.connect(ADDR)
# Send the file in 1 KiB chunks.
f=open('TCP.png','rb')
while True:
    data=f.read(1024)
    if not data:
        # EOF: pause briefly, then send an in-band end-of-transfer marker.
        # NOTE(review): the marker bytes could in principle collide with
        # binary file content — confirm the server relies on this sentinel.
        time.sleep(1)
        s.send('文件发送完毕'.encode())
        break
    s.send(data)
f.close()
s.close() | FAREWELLblue/AID1912_personal | day05/send_file.py | send_file.py | py | 554 | python | zh | code | 0 | github-code | 13 |
36783910266 | from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    # NOTE(review): hard-coded session secret — load it from settings or the
    # environment for production deployments.
    my_session_factory = SignedCookieSessionFactory('itsaseekreet')
    config = Configurator(settings=settings, session_factory=my_session_factory)
    # NOTE(review): scan() runs before add_route/include below — Pyramid defers
    # most configuration until commit, but confirm views resolve the 'home'
    # route as expected.
    config.scan()
    # Serve ./static under /static with a one-hour client cache.
    config.add_static_view('static', 'scrape_wikipedia:static', cache_max_age=3600)
    config.add_route('home', '/')
    config.include('pyramid_jinja2')
    config.add_jinja2_search_path("scrape_wikipedia:templates")
    return config.make_wsgi_app()
| ontiyonke/scrape_wikipedia | scrape_wikipedia/__init__.py | __init__.py | py | 621 | python | en | code | 0 | github-code | 13 |
42705253850 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
train=pd.read_csv('train_IQ.csv')
test=pd.read_csv('test_IQ.csv')
# In[3]:
print(test.shape)
print(train.shape)
# In[4]:
print(train.dtypes.value_counts())
# In[4]:
train
# In[5]:
print(train.info())
# In[6]:
train.loc[:,train.dtypes==np.object]
# In[6]:
train.drop(['Id','idhogar'],axis=1,inplace=True)
# In[7]:
train['dependency'].value_counts()
# In[7]:
def map(i):
    """Convert the mixed 'yes'/'no'/numeric column values to float:
    'yes' -> 1.0, 'no' -> 0.0, anything else -> float(i).

    NOTE(review): the name shadows the built-in map(); it is referenced by the
    .apply() calls on the dependency/edjefe/edjefa columns, so it is kept.
    """
    lookup = {'yes': 1.0, 'no': 0.0}
    if i in lookup:
        return lookup[i]
    return float(i)
# In[8]:
train['dependency']=train['dependency'].apply(map)
# In[9]:
train['dependency']
# In[10]:
for i in train.columns:
a=train[i].dtype
if a == 'object':
print(i)
# In[11]:
train.info()
# In[12]:
train['edjefe']=train['edjefe'].apply(map)
train['edjefa']=train['edjefa'].apply(map)
# In[13]:
train.info()
# In[14]:
var_df=pd.DataFrame(np.var(train,0),columns=['variance'])
var_df.sort_values(by='variance').head(15)
print('Below are columns with variance 0.')
col=list((var_df[var_df['variance']==0]).index)
print(col)
# From above it is shown that all values of elimbasu5 is same so there is no variablity in dataset therefor we will drop this variable
# In[15]:
contingency_tab=pd.crosstab(train['r4t3'],train['hogar_total'])
Observed_Values=contingency_tab.values
import scipy.stats
b=scipy.stats.chi2_contingency(contingency_tab)
Expected_Values = b[3]
no_of_rows=len(contingency_tab.iloc[0:2,0])
no_of_columns=len(contingency_tab.iloc[0,0:2])
df=(no_of_rows-1)*(no_of_columns-1)
print("Degree of Freedom:-",df)
from scipy.stats import chi2
chi_square=sum([(o-e)**2./e for o,e in zip(Observed_Values,Expected_Values)])
chi_square_statistic=chi_square[0]+chi_square[1]
print("chi-square statistic:-",chi_square_statistic)
alpha=0.05
critical_value=chi2.ppf(q=1-alpha,df=df)
print('critical_value:',critical_value)
p_value=1-chi2.cdf(x=chi_square_statistic,df=df)
print('p-value:',p_value)
print('Significance level: ',alpha)
print('Degree of Freedom: ',df)
print('chi-square statistic:',chi_square_statistic)
print('critical_value:',critical_value)
print('p-value:',p_value)
if chi_square_statistic>=critical_value:
print("Reject H0,There is a relationship between 2 categorical variables")
else:
print("Retain H0,There is no relationship between 2 categorical variables")
if p_value<=alpha:
print("Reject H0,There is a relationship between 2 categorical variables")
else:
print("Retain H0,There is no relationship between 2 categorical variables")
# Therefore,variables ('r4t3','hogar_total') have relationship between them. For good result we can use any one of them.
# In[16]:
# Chi-square test of independence between 'tipovivi3' and 'v2a1'.
contingency_tab=pd.crosstab(train['tipovivi3'],train['v2a1'])
Observed_Values=contingency_tab.values
import scipy.stats
b=scipy.stats.chi2_contingency(contingency_tab)
# chi2_contingency returns (stat, p, dof, expected); only the expected
# frequencies are reused below for the hand-rolled statistic.
Expected_Values = b[3]
# NOTE(review): the slices below cap the table at 2x2, and the statistic
# below sums only the first two rows; this is only valid if the crosstab
# really is 2x2 - confirm for these variables.
no_of_rows=len(contingency_tab.iloc[0:2,0])
no_of_columns=len(contingency_tab.iloc[0,0:2])
df=(no_of_rows-1)*(no_of_columns-1)
print("Degree of Freedom:-",df)
from scipy.stats import chi2
chi_square=sum([(o-e)**2./e for o,e in zip(Observed_Values,Expected_Values)])
chi_square_statistic=chi_square[0]+chi_square[1]
print("chi-square statistic:-",chi_square_statistic)
alpha=0.05
critical_value=chi2.ppf(q=1-alpha,df=df)
print('critical_value:',critical_value)
# Right-tail p-value of the statistic under the chi-square distribution.
p_value=1-chi2.cdf(x=chi_square_statistic,df=df)
print('p-value:',p_value)
print('Significance level: ',alpha)
print('Degree of Freedom: ',df)
print('chi-square statistic:',chi_square_statistic)
print('critical_value:',critical_value)
print('p-value:',p_value)
if chi_square_statistic>=critical_value:
    print("Reject H0,There is a relationship between 2 categorical variables")
else:
    print("Retain H0,There is no relationship between 2 categorical variables")
if p_value<=alpha:
    print("Reject H0,There is a relationship between 2 categorical variables")
else:
    print("Retain H0,There is no relationship between 2 categorical variables")
# Therefore,variables ('tipovivi3','v2a1') have relationship between them. For good result we can use any one of them
# In[17]:
# Same chi-square procedure, now for 'v18q' vs 'v18q1'.
contingency_tab=pd.crosstab(train['v18q'],train['v18q1'])
Observed_Values=contingency_tab.values
import scipy.stats
b=scipy.stats.chi2_contingency(contingency_tab)
Expected_Values = b[3]
no_of_rows=len(contingency_tab.iloc[0:2,0])
no_of_columns=len(contingency_tab.iloc[0,0:2])
df=(no_of_rows-1)*(no_of_columns-1)
print("Degree of Freedom:-",df)
from scipy.stats import chi2
chi_square=sum([(o-e)**2./e for o,e in zip(Observed_Values,Expected_Values)])
chi_square_statistic=chi_square[0]+chi_square[1]
print("chi-square statistic:-",chi_square_statistic)
alpha=0.05
critical_value=chi2.ppf(q=1-alpha,df=df)
print('critical_value:',critical_value)
p_value=1-chi2.cdf(x=chi_square_statistic,df=df)
print('p-value:',p_value)
print('Significance level: ',alpha)
print('Degree of Freedom: ',df)
print('chi-square statistic:',chi_square_statistic)
print('critical_value:',critical_value)
print('p-value:',p_value)
if chi_square_statistic>=critical_value:
    print("Reject H0,There is a relationship between 2 categorical variables")
else:
    print("Retain H0,There is no relationship between 2 categorical variables")
if p_value<=alpha:
    print("Reject H0,There is a relationship between 2 categorical variables")
else:
    print("Retain H0,There is no relationship between 2 categorical variables")
# Therefore,variables ('v18q','v18q1') have relationship between them. For good result we can use any one of them.
# *Therefore, there is bias in our dataset.*
# In[18]:
train.drop('r4t3',axis=1,inplace=True)
# In[19]:
train.parentesco1.value_counts()
# In[20]:
pd.crosstab(train['edjefa'],train['edjefe'])
# *Above cross tab shows 0 male head and 0 female head which implies that there are 435 families with no family head.*
# In[21]:
train.isna().sum().value_counts()
# In[22]:
train['Target'].isna().sum()
# There are no null values in Target variable. Now lets proceed further and identify and fillna of other variable.
# In[23]:
# Collect the names of all float64 columns (the ones that can carry NaNs here).
float_col=[]
for i in train.columns:
    a=train[i].dtype
    if a == 'float64':
        float_col.append(i)
print(float_col)
# In[24]:
train[float_col].isna().sum()
# In[25]:
train['v18q1'].value_counts()
# In[26]:
pd.crosstab(train['tipovivi1'],train['v2a1'])
# In[27]:
pd.crosstab(train['v18q1'],train['v18q'])
# we can drop a column tipovivi3,v18q
# In[28]:
# Missing rent / tablet counts are treated as "none", i.e. 0.
train['v2a1'].fillna(0,inplace=True)
train['v18q1'].fillna(0,inplace=True)
# In[29]:
train.drop(['tipovivi3', 'v18q','rez_esc','elimbasu5'],axis=1,inplace=True)
# In[30]:
# Remaining numeric NaNs are imputed with the column mean.
train['meaneduc'].fillna(np.mean(train['meaneduc']),inplace=True)
train['SQBmeaned'].fillna(np.mean(train['SQBmeaned']),inplace=True)
print(train.isna().sum().value_counts())
# In[31]:
int_col=[]
for i in train.columns:
    a=train[i].dtype
    if a == 'int64':
        int_col.append(i)
print(int_col)
# In[32]:
train[int_col].isna().sum().value_counts()
# Now there is no null value in our datset.
# In[33]:
train.Target.value_counts()
# In[34]:
# Restrict to households that actually pay rent (v2a1 != 0).
Poverty_level=train[train['v2a1'] !=0]
# In[35]:
Poverty_level.shape
# In[36]:
# Median rent per area (rural vs urban) drives the thresholds used below.
poverty_level=Poverty_level.groupby('area1')['v2a1'].apply(np.median)
# In[37]:
poverty_level
# For rural area level if people paying rent less than 8000 is under poverty level.
# For Urban area level if people paying rent less than 140000 is under poverty level.
# In[38]:
def povert(x):
    """Classify a monthly rent amount into a poverty-level bucket.

    Thresholds come from the per-area median rents computed above:
    ~8000 for rural households, ~140000 for urban households.
    """
    if x<8000:
        # Below the rural median: below poverty level in every area.
        return('Below poverty level')
    elif x>140000:
        # Above the urban median: above poverty level in every area.
        return('Above poverty level')
    else:
        # Between the two thresholds the label depends on the area.
        # Bugfix: the original used `elif x<140000`, so x == 140000
        # silently returned None; `else` closes that gap.
        return('Below poverty level: Ur-ban ; Above poverty level : Rural ')
# In[39]:
# Label every rent-paying household with the povert() bucket.
c=Poverty_level['v2a1'].apply(povert)
# In[40]:
c.shape
# In[41]:
# Cross-tabulate the bucket labels against the area indicator.
pd.crosstab(c,Poverty_level['area1'])
# Rural :
#
# Above poverty level= 445
#
# Urban :
#
# Above poverty level =1103
#
# Below poverty level=1081
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# In[43]:
X_data=train.drop('Target',axis=1)
Y_data=train.Target
# In[45]:
X_data_col=X_data.columns
# In[46]:
# Standardize features for the first (full-feature) model fit.
from sklearn.preprocessing import StandardScaler
SS=StandardScaler()
X_data_1=SS.fit_transform(X_data)
X_data_1=pd.DataFrame(X_data_1,columns=X_data_col)
# In[48]:
X_train,X_test,Y_train,Y_test=train_test_split(X_data_1,Y_data,test_size=0.25,stratify=Y_data,random_state=0)
# In[50]:
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
rfc=RandomForestClassifier(random_state=0)
parameters={'n_estimators':[10,50,100,300],'max_depth':[3,5,10,15]}
# Although written as a loop over (model, grid) pairs, only the random
# forest is searched here; the loop keeps the best-scoring search object.
grid=zip([rfc],[parameters])
best_=None
for i, j in grid:
    a=GridSearchCV(i,param_grid=j,cv=3,n_jobs=1)
    a.fit(X_train,Y_train)
    if best_ is None:
        best_=a
    elif a.best_score_>best_.best_score_:
        best_=a
print ("Best CV Score",best_.best_score_)
print ("Model Parameters",best_.best_params_)
print("Best Estimator",best_.best_estimator_)
# In[51]:
RFC=best_.best_estimator_
Model=RFC.fit(X_train,Y_train)
pred=Model.predict(X_test)
# In[52]:
print('Model Score of train data : {}'.format(Model.score(X_train,Y_train)))
print('Model Score of test data : {}'.format(Model.score(X_test,Y_test)))
# In[53]:
Important_features=pd.DataFrame(Model.feature_importances_,X_data_col,columns=['feature_importance'])
# In[54]:
Top50Features=Important_features.sort_values(by='feature_importance',ascending=False).head(50).index
# In[55]:
Top50Features
# In[56]:
# Sanity check: every selected feature must exist in the original columns.
for i in Top50Features:
    if i not in X_data_col:
        print(i)
# In[57]:
# Retrain on the *unscaled* data restricted to the 50 top features.
X_data_Top50=X_data[Top50Features]
# In[58]:
X_train,X_test,Y_train,Y_test=train_test_split(X_data_Top50,Y_data,test_size=0.25,stratify=Y_data,random_state=0)
# In[59]:
Model_1=RFC.fit(X_train,Y_train)
pred=Model_1.predict(X_test)
# In[60]:
from sklearn.metrics import confusion_matrix,f1_score,accuracy_score
# In[61]:
confusion_matrix(Y_test,pred)
# In[62]:
f1_score(Y_test,pred,average='weighted')
# In[63]:
accuracy_score(Y_test,pred)
# In[64]:
# Apply the same preprocessing to the test frame.
test.drop('r4t3',axis=1,inplace=True)
test.drop(['Id','idhogar'],axis=1,inplace=True)
# NOTE(review): `map` here is Python's builtin, so Series.apply(map) raises
# TypeError; the notebook presumably defined a custom converter named `map`
# for these mixed yes/no/numeric columns - confirm against the full source.
test['dependency']=test['dependency'].apply(map)
test['edjefe']=test['edjefe'].apply(map)
test['edjefa']=test['edjefa'].apply(map)
# In[65]:
test['v2a1'].fillna(0,inplace=True)
test['v18q1'].fillna(0,inplace=True)
# In[66]:
test.drop(['tipovivi3', 'v18q','rez_esc','elimbasu5'],axis=1,inplace=True)
# In[67]:
# Fill the remaining numeric NaNs in the *test* frame with column means.
# Bugfix: the original filled `train` here a second time (copy-paste from
# In[30]), leaving the test frame untouched.
test['meaneduc'].fillna(np.mean(test['meaneduc']),inplace=True)
test['SQBmeaned'].fillna(np.mean(test['SQBmeaned']),inplace=True)
# In[68]:
test_data=test[Top50Features]
# In[69]:
test_data.isna().sum().value_counts()
# In[70]:
test_data.SQBmeaned.fillna(np.mean(test_data['SQBmeaned']),inplace=True)
# In[71]:
test_data.meaneduc.fillna(np.mean(test_data['meaneduc']),inplace=True)
# In[72]:
# NOTE(review): this scaled copy is never used - Model_1 was trained on the
# unscaled Top-50 features, so predicting on the raw test_data below is
# consistent; the fit_transform here is dead code (and refitting the scaler
# on test data would leak anyway).
Test_data_1=SS.fit_transform(test_data)
X_data_1=pd.DataFrame(Test_data_1)
# In[73]:
test_prediction=Model_1.predict(test_data)
# In[74]:
test_prediction
# Above is our prediction for test data.*
# Using RandomForest Classifier we can predict test_data with accuracy of 90%.
# In[ ]:
| sskatti/Income-Qualification | Income Qualification Project.py | Income Qualification Project.py | py | 11,503 | python | en | code | 0 | github-code | 13 |
import sys
sys.setrecursionlimit(10 ** 8)
# One-line stdin helpers: int, iterator of ints, list of ints, stripped string.
ini = lambda: int(sys.stdin.readline())
inm = lambda: map(int, sys.stdin.readline().split())
inl = lambda: list(inm())
ins = lambda: sys.stdin.readline().rstrip()
# Debug printer: writes to stderr in yellow so it survives stdout redirection.
debug = lambda *a, **kw: print("\033[33m", *a, "\033[0m", **dict(file=sys.stderr, **kw))
N = ini()
def solve():
    """Read N-1 edges from stdin and return the answer.

    Every vertex i (1-based) lies in (i) * (N - i + 1) intervals [l, r];
    each edge (u, v) with u < v lies in u * (N - v + 1) of them and merges
    two components there, so the answer is the vertex total minus the
    edge total.
    """
    total_edges = 0
    for _ in range(N - 1):
        lo, hi = sorted(inl())
        total_edges += lo * (N - hi + 1)
    total_nodes = sum((i + 1) * (N - i) for i in range(N))
    return total_nodes - total_edges


print(solve())
| keijak/comp-pub | atcoder/abc173/F/main.py | main.py | py | 569 | python | en | code | 0 | github-code | 13 |
30113998690 | import os
import sys
import io
import zipfile
import math as Math
CWD = os.path.dirname(__file__)
sys.path.append(CWD)
from get_config import SCRIPT_FILE
import parse_lua as PL
def get_tunings():
    """Parse ``scripts/tuning.lua`` out of the game's script archive and
    return its ``TUNING`` table as a plain dict.

    The file is read straight from the zip and evaluated with the project's
    small Lua interpreter, seeded with stubs for the engine globals the
    script touches (TechTree, math, RADIANS, FRAMES).
    """
    with zipfile.ZipFile(SCRIPT_FILE) as zip:
        content = zip.read("scripts/tuning.lua")
    fp = io.StringIO(content.decode("utf8"))
    lp = PL.LUAParser(global_=PL.LUA_BUILTINS)
    lp.global_.update(TechTree=TechTree)
    # RADIANS/FRAMES mirror the engine constants tuning.lua expects.
    lp.global_.update(RADIANS=180/3.1415, math=math, FRAMES=1/30)
    # Skip the file's preamble and stop once ORIGINAL_TUNING appears.
    lp.parse_lua(fp, start_line=18, end_cond=lambda x: "ORIGINAL_TUNING" in x)
    return lp.global_['TUNING']
class TechTree:
    """Python stand-in for the game's TechTree Lua module.

    Only ``Create`` is needed by tuning.lua: it fills in a zero entry for
    every known tech category the caller did not supply.
    """

    # All tech-tree categories recognised by tuning.lua.
    AVAILABLE_TECH = {
        "SCIENCE",
        "MAGIC",
        "ANCIENT",
        "CELESTIAL",
        "MOON_ALTAR",
        "SHADOW",
        "CARTOGRAPHY",
        "SEAFARING",
        "SCULPTING",
        "ORPHANAGE",
        "PERDOFFERING",
        "WARGOFFERING",
        "PIGOFFERING",
        "CARRATOFFERING",
        "BEEFOFFERING",
        "CATCOONOFFERING",
        "MADSCIENCE",
        "CARNIVAL_PRIZESHOP",
        "CARNIVAL_HOSTSHOP",
        "FOODPROCESSING",
        "FISHING",
        "WINTERSFEASTCOOKING",
        "HERMITCRABSHOP",
        "TURFCRAFTING",
        "MASHTURFCRAFTING",
        "SPIDERCRAFT",
        "ROBOTMODULECRAFT",
        "BOOKCRAFT",
    }

    @classmethod
    def Create(cls, t=None):
        """Return *t* (or a new dict) with every tech category defaulted to 0.

        Falsy existing values (None, 0) are normalised to 0, matching the
        Lua idiom ``t[v] = t[v] or 0``. ``t`` now defaults to None so the
        method can be called with no argument; passing None explicitly
        still works as before.
        """
        t = t or {}
        # The set order is irrelevant here; the original's enumerate()
        # produced an unused index over an unordered set.
        for tech in cls.AVAILABLE_TECH:
            t[tech] = t.get(tech) or 0
        return t
class math:
    """Lua-style ``math`` table backed by Python's math module (``Math``)."""

    pi = Math.pi
    huge = Math.inf

    @staticmethod
    def pow(*args):
        # Delegate to the builtin pow().
        return pow(*args)

    @staticmethod
    def ceil(*args):
        return Math.ceil(*args)

    @staticmethod
    def floor(*args):
        return Math.floor(*args)
if __name__ == "__main__":
    # Dump the parsed TUNING table to a text file for inspection.
    res = get_tunings()
    with open("tunings.txt", 'w') as fp:
        fp.write(repr(res))
| NullOnSpace/dst-qq | utils/dst/manage_server/parse_tunings.py | parse_tunings.py | py | 1,879 | python | en | code | 0 | github-code | 13 |
39789223802 | import torch
from functools import lru_cache
from unicore.data import BaseWrapperDataset
class PrependAndAppend2DDataset(BaseWrapperDataset):
    """Dataset wrapper that pads each 2-D item with a one-cell border of
    ``token`` on all four sides; with ``token=None`` items pass through
    unchanged.
    """
    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        # Border value; None disables padding entirely.
        self.token = token
    @lru_cache(maxsize=16)
    def __getitem__(self, idx):
        # lru_cache on a method keys on (self, idx) and keeps `self` alive;
        # the small maxsize bounds memory across repeated epoch access.
        item = self.dataset[idx]
        if self.token is not None:
            # Assumes item is a 2-D (h, w) tensor: the padded tensor is
            # allocated as (h+2, w+2), so higher-rank inputs would lose
            # leading dims - TODO confirm callers only pass 2-D.
            h, w = item.size(-2), item.size(-1)
            new_item = torch.full((h + 2, w + 2), self.token).type_as(item)
            new_item[1:-1, 1:-1] = item
            return new_item
        return item
| dptech-corp/Uni-Mol | unimol/unimol/data/prepend_and_append_2d_dataset.py | prepend_and_append_2d_dataset.py | py | 590 | python | en | code | 453 | github-code | 13 |
def homework_1(nums):
    """Return the length of the longest run of consecutive equal elements.

    E.g. [0,0,1,1,1,1,0,0,0,1] -> 4 (the run of four 1s); an empty list
    returns 0.
    """
    from itertools import groupby
    # groupby splits the list into maximal runs of equal values; the answer
    # is the longest run. This replaces the original manual index/counter
    # scan that tracked run boundaries by hand.
    return max((sum(1 for _ in run) for _, run in groupby(nums)), default=0)
if __name__ == '__main__':
    # Quick self-check: the longest run here is the four consecutive 1s.
    lst = [0,0,1,1,1,1,0,0,0,1]
    print(homework_1(lst))
| daniel880423/Member_System | file/hw1/1100342/s1100342_1.py | s1100342_1.py | py | 522 | python | en | code | 0 | github-code | 13 |
4742946263 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Function to load data from CKAN object classes.
'''
import app.utilities.load as Load
from app.classes.user import User
from app.classes.country import Country
from app.classes.dataset import Dataset
from app.classes.revision import Revision
from app.classes.resource import Resource
from app.classes.gallery_item import GalleryItem
from app.classes.organization import Organization
config = Load.loadJSONFile('config/dev.json')
def _fields(config, key):
'''
Extract field names from the database
property in the configuration file.
'''
result = None
for table in config['database']:
if table['name'] == key:
result = [ i['field_name'] for i in table['columns'] ]
return result
def fetchClassData(key=None, id=None):
    '''
    Loads data from the specified CKAN object class.

    Only the class matching *key* is instantiated; the original eagerly
    constructed every CKAN object on each call, which was wasteful and
    could fail for classes that do not accept the given id. Returns a
    dict restricted to the fields configured for *key*.
    '''
    classes = {
        'users': User,
        'countries': Country,
        'datasets': Dataset,
        'revisions': Revision,
        'resources': Resource,
        'gallery_items': GalleryItem,
        'organizations': Organization
    }
    #
    # Instantiate only the requested class, then select only the
    # configured fields from its info dictionary.
    #
    info = classes[key](id).info()
    result = { k: info[k] for k in _fields(config, key) }
    return result
| luiscape/hdx-monitor-sql-collect | app/functions/fetch_class_data.py | fetch_class_data.py | py | 1,269 | python | en | code | 0 | github-code | 13 |
42834416786 | from bisect import bisect, insort
from ..cores import ICore
from ..util import coerce_to_uri
_IMPL_REG_KEYS = []
_IMPL_REGISTRY = {}
def register_implementation(uri_prefix):
    """Registers a subclass of :class:`.interface.ICore`.

    This is to be used as a decorator generator, as in::

        @register_implementation("xtp://")
        class XtpResource(rdfrest.interface.ICore):
            '''Implementation of REST resource over the XTP protocol.'''
            #...

    :param str uri_prefix: the URI prefix that this implementation can handle

    :return: the class decorator

    The decorated class must implement
    :meth:`factory <rdfrest.interface.ICore.factory>` as a classmethod.
    """
    uri_prefix = str(uri_prefix)
    def decorator(cls):
        """Decorator created by :func:`register_implementation`"""
        assert issubclass(cls, ICore)
        # `factory` must be bound to the class itself (i.e. a classmethod),
        # since no instance exists yet when the registry calls it.
        assert cls.factory.__self__ is cls, \
            "%s.factory should be a classmethod" % cls.__name__
        assert uri_prefix not in _IMPL_REGISTRY
        # Keep the registry dict and the sorted key list in sync; the
        # sorted list lets factory() scan prefixes deterministically.
        _IMPL_REGISTRY[uri_prefix] = cls.factory
        insort(_IMPL_REG_KEYS, uri_prefix)
        return cls
    return decorator
def register_service(service):
    """Register a `.local.Service`:class:.

    NB: this need normally not be called directly, as
    :meth:`.local.Service.__init__` already does it.
    """
    # `rdfrest.cores.local` is imported at the bottom of this module (to
    # avoid a circular import); by the time a service is registered the
    # name resolves.
    assert isinstance(service, rdfrest.cores.local.Service)
    assert service.root_uri not in _IMPL_REGISTRY
    # A service's `get` plays the same role as an implementation factory.
    _IMPL_REGISTRY[service.root_uri] = service.get
    insort(_IMPL_REG_KEYS, service.root_uri)
def unregister_service(service):
    """Unregister a `.local.Service`:class:.

    Silently does nothing if the service is not registered.
    """
    assert isinstance(service, rdfrest.cores.local.Service)
    if service.root_uri in _IMPL_REGISTRY:
        assert _IMPL_REGISTRY[service.root_uri] == service.get
        del _IMPL_REGISTRY[service.root_uri]
        # bisect returns the insertion point *after* equal elements, so the
        # entry sits at index-1 in the sorted key list.
        i = bisect(_IMPL_REG_KEYS, service.root_uri) - 1
        # Identity (not just equality) is expected: register_service stored
        # this very string object.
        assert _IMPL_REG_KEYS[i] is service.root_uri
        del _IMPL_REG_KEYS[i]
def factory(uri, rdf_types=None, _no_spawn=False):
    """I return an instance for the resource identified by `uri`.

    All registered implementations are searched, and the one registered
    with the *longest* URI prefix matching `uri` is used. If none matches
    (or if the implementation does not recognize the URI), None is
    returned.

    If ``rdf_types`` is provided, the returned instance will inherit
    all the `registered <register_wrapper>`:meth: mix-in classes
    corresponding to those types.

    :param uri: the URI of the resource to instanciate
    :type uri: basestring
    :param rdf_types: if provided, a list of expected RDF types of the resource
    :type rdf_types: list of :class:`rdflib.term.URIRef`
    :param _no_spawn: if True, only *pre-existing* python objects will be
        returned (may not be honnored by all implementations)
    :type _no_spawn: bool
    :rtype: :class:`.interface.ICore`

    Callers are encouraged to state the expected (usually abstract) return
    type, e.g. with ``assert isinstance(returned_object, expected_class)``.
    """
    uri = coerce_to_uri(uri)
    candidates = [prefix for prefix in _IMPL_REG_KEYS if uri.startswith(prefix)]
    if not candidates:
        return None
    # The longest registered prefix wins, as the most specific handler.
    best = max(candidates, key=len)
    return _IMPL_REGISTRY[best](uri, rdf_types, _no_spawn)
# ensure all shipped implementations are registered
import rdfrest.cores.http_client # unused import #pylint: disable=W0611
# needed by some assertions
import rdfrest.cores.local
| ktbs/ktbs | lib/rdfrest/cores/factory.py | factory.py | py | 3,781 | python | en | code | 24 | github-code | 13 |
26010444038 | """Account DTO"""""
from src.dtos.transactions_dto import TransactionDTO
class AccountDTO:
    """Data Transfer Object describing one account and its transactions."""

    def __init__(self, name: str, account_id: int, account_uuid: str, balance: int, currency: str, transactions: list[TransactionDTO]):
        self.name = name
        self.account_id = account_id
        self.account_uuid = account_uuid
        self.balance = balance
        self.currency = currency
        self.transactions = transactions

    def __str__(self):
        """Human-readable dump of the account, including all transactions."""
        transactions_str = ", ".join(str(transaction) for transaction in self.transactions)
        return f"AccountDTO(account_id={self.account_id}, account_uuid={self.account_uuid}, balance={self.balance}, currency = {self.currency}, transactions=[{transactions_str}])"
| razvanmarinn/expense-tracker | src/dtos/accounts_dto.py | accounts_dto.py | py | 884 | python | en | code | 0 | github-code | 13 |
72929070417 | #! /usr/bin/env python2
import numpy as np
from scipy import interpolate
import cv2
import sys,os
import time
# Local imports
import parameters as defaults
cpath = os.path.split(os.path.abspath(__file__))[0]
print(cpath)
sys.path.append(cpath)
from utils import pcautils
from utils.cprint import cprint
libviso_available=True
try:
from features.FeatureMatcherLibviso import FeatureMatcherLibviso as FeatureMatcherLibviso
except:
print('*** ERROR *** : Libviso features are not available, falling back to FAST.')
print(' Please see README for instructions on how to install Libviso.')
libviso_available=False
FeatureMatcherLibviso = None
from features.FeatureMatcherFast import FeatureMatcherFast as FeatureMatcherFast
from features.FeatureMatcherORB import FeatureMatcherORB as FeatureMatcherORB
from features.FeatureMatcherAKAZE import FeatureMatcherAKAZE as FeatureMatcherAKAZE
from solver.RobustQuadraticSolverCython import RobustQuadraticSolverCython as RobustQuadraticSolver
from solver.EMSolver import EMSolver
import homographytools as ht
from collections import deque
class PCAFlow:
"""
Basic PCAFlow class.
"""
def __init__(self,pc_file_u,pc_file_v,
covfile,
covfile_sublayer=None,
pc_size=-1,
params={},
preset=None):
"""
Initialize PCAFlow object.
Parameters
----------
pc_file_u, pc_file_v : string
Files containing the principal components in horizontal and
vertical direction, respectively.
These files should be .npy files, in which each row is a flattened
principal component (i.e., the total size of these principal
component matrices is NUM_PC x (WIDTH*HEIGHT).
cov_file : string
File containing the covariance matrix of size NUM_PC x NUM_PC for
PCA-Flow.
covfile_sublayer : string, optional
File containing the covariance matrix for the layers (usually
biased towards the first PCs).
If PCA-Layers is used and this file is not given, use cov_file.
pc_size : tuple, optional
Size of principal components. Only required if PCs are not of size
512x256 or 1024x436.
params : dict, optional
Parameters. See parameters.py for documentation of parameters.
preset : string
Preset with useful parameter values for different datasets.
Can be one of
'pcaflow_sintel'
'pcalayers_sintel'
'pcaflow_kitti'
'pcalayers_kitti'
"""
np.random.seed(1)
self.params = defaults.get_parameters(params,preset)
cprint('[PCAFlow] Initializing.', self.params)
NC = int(self.params['NC'])
self.NC = NC
pc_u = np.load(pc_file_u)
pc_v = np.load(pc_file_v)
cov_matrix = np.load(covfile).astype('float32')
if covfile_sublayer is not None:
cov_matrix_sublayer = np.load(covfile_sublayer).astype('float32')
else:
cov_matrix_sublayer = None
pc_w = 0
pc_h = 0
if pc_size==-1:
# Try to guess principal component dimensions
if pc_u.shape[1] == 1024*436:
cprint('[PCAFLOW] Using PC dimensionality 1024 x 436', self.params)
pc_w = 1024
pc_h = 436
elif pc_v.shape[1] == 512*256:
cprint('[PCAFLOW] Using PC dimensionality 512 x 256', self.params)
pc_w = 512
pc_h = 256
else:
print('[PCAFLOW] *** ERROR *** ')
print('[PCAFLOW] Could not guess dimensionality of principal components.')
print('[PCAFLOW] Please provide as parameter.')
sys.exit(1)
self.PC = []
# Smooth principal components.
self.pc_u = self.filter_pcs(pc_u,(pc_w,pc_h)).astype('float32')
self.pc_v = self.filter_pcs(pc_v,(pc_w,pc_h)).astype('float32')
self.cov_matrix = cov_matrix
self.pc_w = pc_w
self.pc_h = pc_h
self.reshape_features=True
###############################
# Feature matcher
###############################
if self.params['features'].lower() == 'libviso' and libviso_available:
self.feature_matcher = FeatureMatcherLibviso(self.params)
elif self.params['features'].lower() == 'orb':
self.feature_matcher = FeatureMatcherORB(self.params)
elif self.params['features'].lower() == 'fast':
self.feature_matcher = FeatureMatcherFast(self.params)
elif self.params['features'].lower() == 'akaze' or not libviso_available:
self.feature_matcher = FeatureMatcherAKAZE(self.params)
else:
print('[PCAFLOW] *** ERROR ***')
print('[PCAFLOW] Unknown feature type {}. Please use "libviso" or "fast".'.format(self.params['features']))
sys.exit(1)
if self.params['n_models'] <= 1:
##############################
# Solver for PCA-Flow
##############################
self.solver = RobustQuadraticSolver(self.pc_u,
self.pc_v,
self.cov_matrix,
pc_size=(pc_w,pc_h),
params=self.params)
else:
##############################
# Solver for PCA-Layers
##############################
self.solver = EMSolver(self.pc_u, self.pc_v,
self.cov_matrix,
pc_size = (pc_w,pc_h),
params=self.params,
cov_matrix_sublayer=cov_matrix_sublayer)
self.images = deque(maxlen=2)
cprint('[PCAFLOW] Finished initializing.',self.params)
def filter_pcs(self,matrix,size):
"""
Apply Gaussian filter to principal components.
This makes them somewhat better behaved.
"""
matrix_out = np.zeros_like(matrix)
#pdb.set_trace()
for i,m in enumerate(matrix):
m_ = m.reshape((size[1],size[0]))
matrix_out[i,:] = cv2.GaussianBlur(m_,
ksize=(0,0),
sigmaX=size[0]/200.0).flatten()
return matrix_out
    def push_back(self,I):
        """
        Push back frame.

        When processing a streaming video, this allows to pre-compute
        features only once per frame.

        Parameters
        ----------
        I : array_like
            Image, usually given as H x W x 3 color image.
        """
        cprint('[PCAFlow] Adding image...', self.params)
        # If the frame size differs from the PC grid, remember the original
        # shape so compute_flow() can rescale keypoints and flow fields.
        if not (I.shape[0] == self.pc_h and I.shape[1] == self.pc_w):
            self.reshape_features = True
            self.shape_I_orig = I.shape
        # Optional pre-blur to stabilize feature detection.
        if self.params['image_blur'] > 0:
            I = cv2.GaussianBlur(
                    I,
                    ksize=(int(self.params['image_blur']),int(self.params['image_blur'])),
                    sigmaX=-1)
        cprint('[PCAFlow] Adding image to feature matcher.', self.params)
        self.feature_matcher.push_back(I)
        # self.images is a deque(maxlen=2): only the last two frames are kept.
        self.images.append(I)
        cprint('[PCAFlow] Done adding image.',self.params)
    def compute_flow(self,
                     kp1=None,kp2=None,
                     return_additional=[],
                     **kwargs
                     ):
        """
        Compute the flow.

        Parameters
        ----------
        kp1, kp2 : array_like, shape (NUM_KP,2), optional
            Matrices containing keypoints in image coordinates for
            first and second frame, respectively.
            The first column of both matrices contains the x coordinates,
            the second contains the y coordinates.
            If kp1 and kp2 are given, no additional feature matching is
            performed.
        return_additional: array of strings, optional.
            If set, return additional data. Possible entries are:
                'weights'         : Return flow coefficients
                'keypoints'       : Return matched feature points
                'keypoint_labels' : Return assigned layers for keypoints
                                    (PCA-Layers only).
                'segments'        : Return segmentation map
                                    (PCA-Layers only)
                'segment_flows'   : For each layer, return flow.
                                    (PCA-Layers only)
            The additional data is returned as a dict with the same keys.
            Example:
                u,v,data = pcaflow.compute_flow(return_additional=['weights',])
                weights = data['weights']

        Returns
        -------
        u, v : array_like
            U and V flow fields.
        data_additional : dict, optional
            See above for details. The return formats are:
                'weights'         : array_like, shape (NUM_PC,)
                'keypoints'       : tuple (array_like, array_like)
                                    Each array has shape (NUM_KP,2).
                'keypoint_labels' : array_like, shape (NUM_KP,)
                'segments'        : array_like, shape (WIDTH,HEIGHT)
                'segment_flows'   : array_like, shape (WIDTH, HEIGHT, 2, NUM_LAYERS)

        NOTE(review): `return_additional=[]` is a mutable default argument;
        it is only read here, so this is safe, but callers must not mutate
        the default. Also, `u`/`v` are only assigned when
        self.reshape_features is True (which __init__ sets) - the
        non-reshaping path would raise NameError; confirm before disabling.
        """
        # Parse return_additional.
        return_weights = False
        return_keypoints = False
        return_keypoint_labels = False
        return_segments = False
        return_segment_flows = False
        if 'weights' in return_additional:
            return_weights = True
        if 'keypoints' in return_additional:
            return_keypoints = True
        if 'keypoint_labels' in return_additional:
            return_keypoint_labels = True
        if 'segments' in return_additional:
            return_segments = True
        if 'segment_flows' in return_additional:
            return_segment_flows = True
        if kp1 is not None and kp2 is not None:
            # We got some initial features.
            kp1_ = kp1.copy()
            kp2_ = kp2.copy()
        else:
            kp1_,kp2_ = self.feature_matcher.get_features()
        if len(kp1_) == 0:
            # No matches at all: degrade gracefully to a zero flow field.
            print('[PCAFlow] Warning: No features found. Setting flow to 0.')
            u = np.zeros(self.shape_I_orig[:2])
            v = np.zeros_like(u)
            return (u,v)
        if self.params['remove_homography'] == 1:
            # Estimate and *remove* the dominant homography: keypoints are
            # transformed and the second image is warped to compensate.
            cprint('[PCAFlow] Removing homography...', self.params)
            kp1_h, kp2_h, H, H_inv, inliers_ = ht.remove_homography_from_points(kp1_,kp2_)
            # Matches moving less than 2px after compensation are inliers.
            dists_new = np.sqrt(np.sum((kp1_h - kp2_h)**2,axis=1))
            inliers = dists_new < 2
            kp1_ = kp1_h
            kp2_ = kp2_h
            #kp1[inliers,:] = kp0[inliers,:]
            I1_warped = cv2.warpPerspective(self.images[1],
                    H,
                    (self.images[1].shape[1],self.images[1].shape[0]),
                    flags=cv2.WARP_INVERSE_MAP+cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_REPLICATE,
                    )
        elif self.params['remove_homography'] == 2:
            # Estimate the homography for the solver, but keep the original
            # keypoints and image.
            cprint('[PCAFlow] Computing homography...', self.params)
            kp1_h, kp2_h, H, H_inv, inliers_ = ht.remove_homography_from_points(kp1_,kp2_)
            dists_new = np.sqrt(np.sum((kp1_h - kp2_h)**2,axis=1))
            inliers = dists_new < 2
            I1_warped = self.images[1]
        else:
            inliers = None
            I1_warped = self.images[1]
            H = None
        # Keep untouched copies for the 'keypoints' return option.
        kp1_orig = kp1_.copy()
        kp2_orig = kp2_.copy()
        if self.reshape_features:
            # Map keypoints and images from original resolution to the PC grid.
            h_orig,w_orig = self.shape_I_orig[:2]
            h_orig_f = float(h_orig)
            w_orig_f = float(w_orig)
            scale = [self.pc_w / w_orig_f, self.pc_h / h_orig_f]
            kp1_ *= scale
            kp2_ *= scale
            I0_ = cv2.resize(self.images[0],(self.pc_w,self.pc_h))
            I1_ = cv2.resize(I1_warped,(self.pc_w,self.pc_h))
        else:
            I0_ = self.images[0]
            I1_ = I1_warped
        cprint('[PCAFLOW] %s features detected...'%kp1_.shape[0], self.params)
        # Solve
        if self.params['n_models'] > 1:
            # PCA-Layers: the EM solver also produces the layer data asked
            # for via return_additional.
            u_,v_,weights,data_additional_em = self.solver.solve(kp1_,kp2_,
                    I0=I0_,
                    I1=I1_,
                    inliers=inliers,
                    H=H,
                    shape_I_orig=self.shape_I_orig,
                    return_additional=return_additional,
                    **kwargs)
        else:
            # PCA-Flow: a single robust quadratic solve.
            if return_weights:
                u_,v_,weights = self.solver.solve(kp1_,kp2_,return_coefficients=True)
            else:
                u_,v_ = self.solver.solve(kp1_,kp2_)
            data_additional_em = {}
        if self.reshape_features:
            # Resize flow back to original resolution and rescale magnitudes.
            u = cv2.resize(u_,(w_orig,h_orig))
            v = cv2.resize(v_,(w_orig,h_orig))
            u *= w_orig_f / self.pc_w
            v *= h_orig_f / self.pc_h
        if self.params['remove_homography']==1:
            # Re-compose the removed homography into the final flow field.
            cprint('[PCAFlow] Re-applying homography...', self.params)
            u2,v2 = ht.apply_homography_to_flow(u,v,H)
            u = u2
            v = v2
        if len(return_additional) == 0:
            return u,v
        else:
            # Return more additional data
            data_additional = {}
            if return_weights:
                data_additional['weights'] = weights
            if return_keypoints:
                data_additional['keypoints'] = (kp1_orig,kp2_orig)
            # Get additional data from EMSolver
            for key,value in data_additional_em.items():
                data_additional[key] = value
            return u, v, data_additional
| jswulff/pcaflow | pcaflow/PCAFlow.py | PCAFlow.py | py | 14,213 | python | en | code | 83 | github-code | 13 |
import math
# For a = 2.3, find the ceil of a
a = 2.3
ceil_result = math.ceil(a)
print(f"The ceil of {a} is: {ceil_result}")
# For a = 2.3, find the floor of a
floor_result = math.floor(a)
print(f"The floor of {a} is: {floor_result}")
# For a = 5, find the factorial of a
b = 5
factorial_result = math.factorial(b)
print(f"The factorial of {b} is: {factorial_result}")
# Find the value of 23^5
c = 23
# Note: math.pow works in floats, so this prints 6436343.0 (23 ** 5 would
# give the exact integer).
power_result = math.pow(c, 5)
print(f"The result of {c} raised to the power of 5 is: {power_result}")
# For a = 16, find the square root of a
d = 16
sqrt_result = math.sqrt(d)
print(f"The square root of {d} is: {sqrt_result}")
| debsicat22/AdvanceProgramming | Assessment1/Chapter 6/Exercise1.py | Exercise1.py | py | 633 | python | en | code | 0 | github-code | 13 |
9521303457 | from __future__ import absolute_import
from django.test import override_settings
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from silver.fixtures.factories import AdminUserFactory
from silver.fixtures.test_fixtures import PAYMENT_PROCESSORS
from silver.tests.utils import build_absolute_test_url
@override_settings(PAYMENT_PROCESSORS=PAYMENT_PROCESSORS)
class TestPaymentProcessorsEndpoints(APITestCase):
    """Read-only API tests for the payment-processor list/detail endpoints."""
    def setUp(self):
        # The endpoints require authentication; act as an admin throughout.
        admin_user = AdminUserFactory.create()
        self.client.force_authenticate(user=admin_user)
    def test_payment_processors_list(self):
        url = reverse('payment-processor-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Both processors from the PAYMENT_PROCESSORS fixture must appear,
        # each serialized with its absolute detail URL.
        self.assertIn(
            {
                "name": "triggered",
                "type": "triggered",
                "allowed_currencies": ['RON', 'USD'],
                "url": build_absolute_test_url(reverse('payment-processor-detail', ['triggered']))
            },
            response.data
        )
        self.assertIn(
            {
                "name": "manual",
                "type": "manual",
                "allowed_currencies": [],
                "url": build_absolute_test_url(reverse('payment-processor-detail', ['manual']))
            },
            response.data
        )
    def test_payment_processors_detail(self):
        url = reverse('payment-processor-detail', kwargs={
            'processor_name': 'manual'
        })
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, {
            'name': u'manual',
            'type': u'manual',
            'allowed_currencies': [],
            'url': build_absolute_test_url(reverse('payment-processor-detail', ['manual']))
        })
    def test_payment_processors_detail_not_found(self):
        # Unknown processor names map to a 404 with DRF's standard body.
        url = reverse('payment-processor-detail', kwargs={
            'processor_name': 'unexisting'
        })
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(response.data, {"detail": "Not found."})
| silverapp/silver | silver/tests/api/test_payment_processors.py | test_payment_processors.py | py | 2,326 | python | en | code | 292 | github-code | 13 |
72033244178 | from __future__ import annotations
from typing import TYPE_CHECKING, List
from ..ns import *
from .SpatialObject import SpatialObject
if TYPE_CHECKING:
from rdflib import Graph, Literal
from ..Thing import Thing
from .GeometryType import GeometryType
class Geometry(SpatialObject):
    """CLV Geometry resource: a spatial object carrying coordinates and
    links back to the things it describes."""

    __type__ = CLV["Geometry"]

    hasGeometryType: GeometryType = None
    lat: Literal = None
    long: Literal = None
    alt: Literal = None
    coordinate: Literal = None
    coordinateSystem: Literal = None
    serialization: Literal = None
    isGeometryFor: List[Thing] = None

    def _addProperties(self, g: Graph):
        """Add this geometry's CLV triples to graph *g*."""
        super()._addProperties(g)
        if self.hasGeometryType:
            g.add((self.uriRef, CLV["hasGeometryType"],
                   self.hasGeometryType.uriRef))
        # The literal-valued properties all follow one pattern: the CLV
        # predicate name equals the attribute name and the object is the
        # literal itself.
        for name in ("lat", "long", "alt", "coordinate",
                     "coordinateSystem", "serialization"):
            value = getattr(self, name)
            if value:
                g.add((self.uriRef, CLV[name], value))
        if self.isGeometryFor:
            for target in self.isGeometryFor:
                g.add((self.uriRef, CLV["isGeometryFor"], target.uriRef))
| luca-martinelli-09/ontopia-py | ontopia_py/clv/Geometry.py | Geometry.py | py | 1,559 | python | en | code | 0 | github-code | 13 |
29977147523 | import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
import cv2 as cv
import numpy as np
def box_cxcywh_to_xyxy(x):
    """Convert (N, 4) boxes from (cx, cy, w, h) to (x0, y0, x1, y1)."""
    cx, cy, w, h = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    half_w = 0.5 * w
    half_h = 0.5 * h
    return torch.stack((cx - half_w, cy - half_h, cx + half_w, cy + half_h), dim=1)
def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to int16 pixel corner boxes."""
    img_w, img_h = size
    corners = box_cxcywh_to_xyxy(out_bbox)
    scale = torch.tensor([img_w, img_h, img_w, img_h],
                         dtype=torch.float32, device=out_bbox.device)
    # .to(int16) truncates toward zero, matching the original cast.
    return (corners * scale).to(dtype=torch.int16)
def detr_pred_to_bbox(pred, result_img_size=(513, 513), num_classes=21, conf=0.7):
    """ This function converts raw detr output to bbox mask tensor """
    # One mask channel per class; channel 0 (background) starts all-ones
    # and is cleared wherever a detection box lands.
    bbox = torch.zeros((pred['pred_logits'].shape[0], num_classes, *result_img_size), dtype=torch.float32, device=pred['pred_logits'].device)
    bbox[:, 0, :, :] = 1
    # Softmax over classes; the last logit is DETR's "no object" class and
    # is dropped before thresholding at `conf`.
    probas = pred['pred_logits'].softmax(-1)[:, :, :-1]
    keep = probas.max(-1).values > conf
    for i in range(probas.shape[0]):
        image_bboxes = rescale_bboxes(pred['pred_boxes'][i, keep[i]], result_img_size)
        image_probs = probas[i, keep[i]]
        for class_probs, (x_min, y_min, x_max, y_max) in zip(image_probs, image_bboxes.tolist()):
            class_index = class_probs.argmax()
            # NOTE(review): the channel is written at class_index directly
            # (no +1 background offset as in yolo_pred_to_bbox) - presumably
            # DETR's class 0 is unused here; confirm the label convention.
            bbox[i, class_index, y_min:y_max, x_min:x_max] = 1
            bbox[i, 0, y_min:y_max, x_min:x_max] = 0
    return bbox
def yolo_pred_to_bbox(predictions, input_img_size, result_img_size=(513, 513), num_classes=21):
    """Convert YOLO detections to a per-class bounding-box mask tensor.

    :param predictions: iterable of per-detection tensors laid out as
        (x_min, y_min, x_max, y_max, conf, class_index)
    :param input_img_size: (H, W) the predictions refer to
    :param result_img_size: (H, W) the mask is resized to (nearest neighbour)
    :param num_classes: number of mask channels; channel 0 is background
    :return: float32 tensor of shape (num_classes, *result_img_size)
    """
    bbox = torch.zeros((num_classes, *input_img_size))
    bbox[0, :, :] = 1  # everything is background until a box covers it
    for pred in predictions:
        (x_min, y_min, x_max, y_max, _, class_index) = pred.int()  # cast to int so conf level equals to 0
        bbox[class_index + 1, y_min:y_max, x_min:x_max] = 1
        bbox[0, y_min:y_max, x_min:x_max] = 0
    bbox = F.resize(bbox, size=result_img_size, interpolation=T.InterpolationMode.NEAREST)
    # Fix: the original called bbox.to(torch.float32) without assigning the
    # result; torch.Tensor.to is not in-place, so the conversion was a no-op.
    bbox = bbox.to(torch.float32)
    return bbox
def masks_to_bboxes(input_mask, num_classes=21):
    """Generate per-class bounding-box masks from a segmentation mask.

    :param input_mask: (B, H, W) tensor of per-pixel class indices
    :param num_classes: number of output channels; channel 0 is background
    :return: float tensor (B, num_classes, H, W) with 1s inside each class's
        contour bounding boxes and background zeroed out under every box
    """
    bbox = torch.zeros((input_mask.shape[0], num_classes, *input_mask.shape[1:]), dtype=torch.float)
    bbox[:, 0, :, :] = 1  # start with everything marked as background
    for class_index in range(1, num_classes):
        # Binary mask of all pixels belonging to this class, per batch item.
        batch, y, x = torch.where(input_mask == class_index)
        class_masks = np.zeros_like(input_mask, dtype=np.uint8)
        class_masks[batch, y, x] = 1
        for batch_index, class_mask in enumerate(class_masks):
            # One axis-aligned box per connected contour of the class mask.
            contours, _ = cv.findContours(class_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                # NOTE(review): (x, y) here shadow the index tensors above --
                # harmless today because the tensors are rebuilt each class
                # iteration, but worth renaming.
                (x, y, w, h) = cv.boundingRect(contour)
                bbox[batch_index, class_index, y:y+h, x:x+w] = 1
                bbox[batch_index, 0, y:y+h, x:x+w] = 0
    return bbox
| OneMagicKey/TwoStageSegmentation | utilities/bbox_generators.py | bbox_generators.py | py | 2,882 | python | en | code | 0 | github-code | 13 |
28561661642 | # -*- coding: utf-8 -*-
import sys
import threading
import functools
import contextlib
import click
from ._compat import reraise
try:
import queue
except ImportError:
import Queue as queue
# The docs state that "Future should not be instantiated directly, only by
# Executors", but since I'm basically implementing my own executor here, I
# think we're fine.
try:
from concurrent.futures import Future as _Future
except ImportError:
from futures import Future as _Future
__version__ = '0.4.4'
_CTX_WORKER_KEY = __name__ + '.uiworker'
def _is_main_thread(thread=None):
thread = thread or threading.current_thread()
return type(thread).__name__ == '_MainThread'
class Thread(threading.Thread):
    '''A thread that automatically pushes the parent thread's context in the
    new thread.

    Since version 5.0, click maintains global stacks of context objects. The
    topmost context on that stack can be accessed with
    :py:func:`get_current_context`.

    There is one stack for each Python thread. That means if you are in the
    main thread (where you can use :py:func:`get_current_context` just fine)
    and spawn a :py:class:`threading.Thread`, that thread won't be able to
    access the same context using :py:func:`get_current_context`.

    :py:class:`Thread` is a subclass of :py:class:`threading.Thread` that
    preserves the current thread context when spawning a new one, by pushing it
    on the stack of the new thread as well.
    '''

    def __init__(self, *args, **kwargs):
        # Capture the spawning (parent) thread's click context now; it will be
        # re-entered inside run() on the child thread.
        self._click_context = click.get_current_context()
        super(Thread, self).__init__(*args, **kwargs)

    def run(self):
        # Push the captured context onto this (new) thread's context stack so
        # the target callable sees the same context as its parent.
        with self._click_context:
            return super(Thread, self).run()
class UiWorker(object):
    '''
    A worker-queue system to manage and synchronize output and prompts from
    other threads.

    >>> import click
    >>> from click_threading import UiWorker, Thread, get_ui_worker
    >>> ui = UiWorker()  # on main thread
    >>> def target():
    ...     click.echo("Hello world!")
    ...     get_ui_worker().shutdown()
    ...
    >>>
    >>> @click.command()
    ... def cli():
    ...     with ui.patch_click():
    ...         t = Thread(target=target)
    ...         t.start()
    ...         ui.run()
    >>> runner = click.testing.CliRunner()
    >>> result = runner.invoke(cli, [])
    >>> assert result.output.strip() == 'Hello world!'

    Using this class instead of just spawning threads brings a few advantages:

    - If one thread prompts for input, other output from other threads is
      queued until the :py:func:`click.prompt` call returns.
    - If you call echo with a multiline-string, it is guaranteed that this
      string is not interleaved with other output.

    Disadvantages:

    - The main thread is used for the output (using any other thread produces
      weird behavior with interrupts). ``ui.run()`` in the above example blocks
      until ``ui.shutdown()`` is called.
    '''

    # Sentinel task object; when run() dequeues it, the worker loop exits.
    SHUTDOWN = object()

    def __init__(self):
        # UI work must happen on the main thread (interrupt handling breaks
        # if prompts/echo run elsewhere).
        if not _is_main_thread():
            raise RuntimeError('The UiWorker can only run on the main thread.')
        self.tasks = queue.Queue()

    def shutdown(self):
        """Ask run() to return; safe to call from any thread."""
        self.put(self.SHUTDOWN, wait=False)

    def run(self):
        """Process queued UI tasks on the main thread until SHUTDOWN arrives.

        Each task's result (or exception) is forwarded to the future the
        submitting thread is waiting on.
        """
        while True:
            func, future = self.tasks.get()
            if func is self.SHUTDOWN:
                return
            try:
                result = func()
            except BaseException as e:
                future.set_exception(e)
            else:
                future.set_result(result)

    def put(self, func, wait=True):
        """Schedule *func* to run on the UI (main) thread.

        Called from the main thread, *func* runs immediately. Otherwise it is
        queued; with ``wait=True`` this blocks until *func* finishes and
        returns its result (exceptions are re-raised via the future).
        """
        if _is_main_thread():
            return func()
        future = _Future()
        self.tasks.put((func, future))
        if not wait:
            return
        return future.result()

    @contextlib.contextmanager
    def patch_click(self):
        """Context manager that reroutes click's UI functions through this worker.

        While active, the worker is registered on the current click context's
        ``meta`` so worker threads can find it via :py:func:`get_ui_worker`.
        """
        from .monkey import patch_ui_functions

        def wrapper(f, info):
            @functools.wraps(f)
            def inner(*a, **kw):
                # Interactive calls (prompts) must block the caller until the
                # main thread has produced an answer.
                return get_ui_worker() \
                    .put(lambda: f(*a, **kw), wait=info.interactive)
            return inner

        ctx = click.get_current_context()
        with patch_ui_functions(wrapper):
            ctx.meta[_CTX_WORKER_KEY] = self
            try:
                yield
            finally:
                assert ctx.meta.pop(_CTX_WORKER_KEY) is self
def get_ui_worker():
    """Return the UiWorker registered on the current click context.

    :raises RuntimeError: if there is no active click context or no worker
        was registered on it (i.e. outside ``UiWorker.patch_click()``).
    """
    try:
        return click.get_current_context().meta[_CTX_WORKER_KEY]
    except (RuntimeError, KeyError):
        raise RuntimeError('UI worker not found.')
| ag1455/OpenPLi-PC | pre/python/lib/python2.7/dist-packages/click_threading/__init__.py | __init__.py | py | 4,625 | python | en | code | 19 | github-code | 13 |
20215645929 | import os
import argparse
import os.path as osp
import pickle
import sys
import numpy as np
from tqdm import tqdm
from opt import opt
from PIL import Image
import cv2
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
from dataloader import WebcamLoader, DataWriter, crop_from_dets, Mscoco,PoseLoader,DetectionLoader,FeatureLoader,IOULoader
from yolo.darknet import Darknet
# from reid import reid_interface
from yolo.preprocess import prep_frame
from SPPE.src.main_fast_inference import *
from yolo.util import write_results, dynamic_write_results
from pPose_nms import pose_nms
from SPPE.src.utils.img import im_to_torch
from queue import Queue, LifoQueue
from pPose_nms import write_json
from fn import getTime
# #############################
from src.init import init_frames # 初始化器
from src.tools import load_data # 正常帧加载器
from src.tools import Visualize # 可视化过程,其中含有路径配置
from src.match import Hungarian_match, Feature_match, Update_tracker # 单通道匹配函数
from src.match import Inter_cam_match_1, Inter_cam_match_2
from src.tools import YOLO
import time
cam_number = 1
match_id_cam, match_id = [], [] # 记录五帧内帧间关联信息(由mvpose结果进行联合分析)
init_flag = 1 # 是否在初始化追踪器期间,默认处于
init_info = [[], [], [], []] # 对初始化25帧进行记录
# #############################
args = opt
def loop():
    """Yield the non-negative integers 0, 1, 2, ... forever."""
    counter = 0
    while True:
        yield counter
        counter += 1
class for_store_match:
    """LIFO buffer holding the most recent (match_id_cam, match_id) pairs."""

    def __init__(self):
        # Stack-like queue so read() always returns the newest match info.
        self.Q = LifoQueue(maxsize=1024)  # for update match_id_cam,match_id

    def write(self, match_id_cam, match_id):
        """Push one (match_id_cam, match_id) pair."""
        self.Q.put((match_id_cam, match_id))

    def read(self):
        """Pop and return the most recently written pair (blocks when empty)."""
        return self.Q.get()

    def isfull(self):
        """Return True once the buffer holds its maximum of 1024 entries."""
        return self.Q.full()
class for_store_tracker:
    """LIFO buffer holding the most recent tracker state tuples."""

    def __init__(self):
        # Stack-like queue so read() always returns the newest tracker state.
        self.Q = LifoQueue(maxsize=1024)  # for update match_id_cam,match_id

    def write(self, Frames_Lib, tracker, tracker_id, tracker_cnt):
        """Push one (Frames_Lib, tracker, tracker_id, tracker_cnt) tuple."""
        self.Q.put((Frames_Lib, tracker, tracker_id, tracker_cnt))

    def read(self):
        """Pop and return the most recently written tuple (blocks when empty)."""
        return self.Q.get()

    def isfull(self):
        """Return True once the buffer holds its maximum of 1024 entries."""
        return self.Q.full()
if __name__ == '__main__':
    # RTSP sources for the two cameras (credentials are embedded in the URLs).
    url_1 = "rtsp://linye:linye123@192.168.200.253:554/Streaming/Channels/101"
    # url_1 = 0
    url_2 = "rtsp://linye:linye123@192.168.200.253:554/Streaming/Channels/301"
    # url_2 = 0
    webcam = args.webcam
    if not os.path.exists(args.outputpath):
        os.mkdir(args.outputpath)
    # Load input video
    fvs_0 = WebcamLoader(url_1).start()
    fvs_1 = WebcamLoader(url_2).start()
    (fourcc, fps1, frameSize1) = fvs_0.videoinfo()
    (fourcc, fps2, frameSize2) = fvs_1.videoinfo()
    # read the camera parameter of this dataset
    # with open ( opt.camera_parameter_path,'rb' ) as f:
    # camera_parameter = pickle.load (f)
    # detection module
    print('Loading detection model ')
    sys.stdout.flush()
    det_loader_1 = DetectionLoader(fvs_0, batchSize=1).start()
    det_loader_2 = DetectionLoader(fvs_1, batchSize=1).start()
    save_path = os.path.join(args.outputpath, 'AlphaPose_webcam'+webcam+'.avi')
    # writer1 = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps1, frameSize1).start()
    # writer2 = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps2, frameSize2).start()
    runtime_profile = {
        'ld': [],
        'dt': [],
        'dn': [],
        'pt': [],
        'pn': []
    }
    # NOTE(review): shadows the module-level loop(); both yield 0, 1, 2, ...
    def loop():
        n = 0
        while True:
            yield n
            n += 1
    print('Initing tracker...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    store_tracker1 = for_store_tracker()
    store_tracker2 = for_store_tracker()
    # --- initialisation loop for camera 1: accumulate 25 frames, build the tracker ---
    for i in im_names_desc:
        try:
            with torch.no_grad():
                (orig_img,frame_id,bbox,bbox_score,kp,kp_score,imgs) = det_loader_1.read()
                if bbox == None or len(bbox) == 0:
                    continue
                if init_flag == 1:
                    init_info[0].append(frame_id)
                    init_info[1].append(bbox)
                    init_info[2].append(bbox_score)
                    init_info[3].append(imgs)
                    if len(init_info[0]) != 25: # fewer than 25 frames so far: keep accumulating
                        continue
                    else:
                        # initialise the tracker (tracker / tracker_id)
                        init_flag, Frames_Lib, tracker, tracker_id, tracker_cnt, feature_model = init_frames(cam_number, init_info)
                        init_info = [[], [], [], []] # reset the accumulator
                        continue
                # normal processing state
                elif init_flag == 0:
                    iouloader1 = IOULoader(store_tracker1,det_loader_1,Frames_Lib,tracker,tracker_id, tracker_cnt, feature_model)
                    store_tracker1.write(Frames_Lib,tracker,tracker_id,tracker_cnt)
                    print('init success')
                    break
        except KeyboardInterrupt:
            exit('Initing fail...')
    # Reset the globals and repeat the same 25-frame initialisation for camera 2.
    # NOTE(review): cam_number stays 1 for both cameras -- confirm init_frames
    # does not need a distinct camera index here.
    cam_number = 1
    match_id_cam, match_id = [], [] # inter-frame association info over five frames (joint analysis of mvpose results)
    init_flag = 1 # 1 while the tracker is still being initialised (the default)
    init_info = [[], [], [], []] # accumulator for the 25 initialisation frames
    im_names_desc = tqdm(loop())
    for i in im_names_desc:
        try:
            with torch.no_grad():
                (orig_img,frame_id,bbox,bbox_score,kp,kp_score,imgs) = det_loader_2.read()
                if bbox == None or len(bbox) == 0:
                    continue
                if init_flag == 1:
                    init_info[0].append(frame_id)
                    init_info[1].append(bbox)
                    init_info[2].append(bbox_score)
                    init_info[3].append(imgs)
                    if len(init_info[0]) != 25: # fewer than 25 frames so far: keep accumulating
                        continue
                    else:
                        # initialise the tracker (tracker / tracker_id)
                        init_flag, Frames_Lib, tracker, tracker_id, tracker_cnt, feature_model = init_frames(cam_number, init_info)
                        init_info = [[], [], [], []] # reset the accumulator
                        continue
                # normal processing state
                elif init_flag == 0:
                    iouloader2 = IOULoader(store_tracker2,det_loader_2,Frames_Lib,tracker,tracker_id, tracker_cnt, feature_model)
                    store_tracker2.write(Frames_Lib,tracker,tracker_id,tracker_cnt)
                    print('init success')
                    break
        except KeyboardInterrupt:
            exit('Initing fail...')
    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    iouloader1.start();iouloader2.start()
    store_match1 = for_store_match()
    store_match1.write([],[])
    store_match2 = for_store_match()
    store_match2.write([],[])
    featureloader1 = FeatureLoader(store_tracker1,iouloader1,store_match1).start()
    featureloader2 = FeatureLoader(store_tracker2,iouloader2,store_match2).start()
    # --- main tracking loop: fuse per-camera results into global IDs ---
    for i in im_names_desc:
        try:
            start_time = time.time()
            with torch.no_grad():
                #orig_img,box_1,box_s_1,roi_1,kp_1,kp_s_1 = pose_loader_1.read()
                #(result, orig_img, im_name) = det_loader_1.read()
                # writer1.save(result, orig_img, str(i)+'.jpg')
                # (result, orig_img, im_name) = det_loader_2.read()
                # writer2.save(result, orig_img, str(i)+'.jpg')
                # (orig_img,frame_id,bbox,bbox_score,kp,kp_score,imgs) = det_loader_1.read()
                (orig_img1,frame_id1,cam_number,ID_list_cam1, match_id_cam1,match_id1,\
                features1,tracker1,tracker_id1,tracker_cnt1) = featureloader1.read()
                (orig_img2,frame_id2,cam_number,ID_list_cam2, match_id_cam2,match_id2,\
                features2,tracker2,tracker_id2,tracker_cnt2) = featureloader2.read()
                # multi-camera joint analysis (first build the ID-link info, then derive the global IDs)
                # print('ID_list_cam1:') # [[1]]
                # print(tracker1) # ROI coordinates
                tracker = [tracker1[0],tracker2[0]]
                ID_list_cam = [ID_list_cam1[0],ID_list_cam2[0]]
                features = [features1[0],features2[0]]
                match_id_cam1.extend(match_id_cam2)
                match_id1.extend(match_id2)
                tracker_id = [tracker_id1[0],tracker_id2[0]]
                match_id_cam = match_id_cam1
                match_id = match_id1
                # if tracker_id == None:
                # continue
                # NOTE(review): frame_id here is left over from the camera-2
                # init loop, not frame_id1/frame_id2 of this iteration -- confirm.
                match_id_cam, match_id = Inter_cam_match_1(cam_number, frame_id, ID_list_cam, \
                match_id_cam,match_id,features) # obtain cross-camera matches
                fuse_ID = Inter_cam_match_2(tracker, tracker_id, match_id_cam, match_id)
                #Visualize([orig_img1,orig_img2],2, frame_id, tracker, tracker_id, fuse_ID)
                end_time = time.time()
                print(' %f FPS' % (1 / (end_time - start_time)))
        except KeyboardInterrupt:
            break
| linye-boli/boli-tracking-0.0.1 | tracking_pj/tracking-multi-thread/main.py | main.py | py | 9,543 | python | en | code | 0 | github-code | 13 |
5419166620 | from shiny import *
from pathlib import Path
from test import treatment_file, data_separation, data_split, LR, evaluate_linear_regression, train_and_evaluate_random_forest, plot_linear_regression_results, create_shap_waterfall_chart, shap_beeswarm_plot, shap_violin_plot, Split_and_Shap,create_and_display_graph_test, Joblib
from test import plot_RF_results, RF, patient_data, pred_plot
import shiny as x
import requests_fhir as requests
import pandas as pd
from sklearn import *
import matplotlib.pyplot as plt
from shinywidgets import output_widget, render_widget
import plotly.express as px
from asyncio import sleep
import numpy as np
import plotly.graph_objs as go
from shiny import App, reactive, ui, Session, render, Inputs, Outputs
from sklearn.linear_model import LinearRegression
import base64
import random
import shinyswatch
from htmltools import css
import seaborn as sns
from flask import send_file
import plotly.express as px
import streamlit as st
import tempfile
import os
import joblib
from shiny.types import NavSetArg
from typing import List
BASE_URL = 'https://test/fhir'
#Opening the Treatment CSV file
infile = Path(__file__).parent / "data/treat_data.csv"
treat = pd.read_csv(infile)
#Loading the pre-trained model and Opening the Simulated Data
model = joblib.load('model.joblib')
data = pd.read_csv('simulated_data.csv')
#Opening the Snomed CSV file
infiles = Path(__file__).parent / "data/snomed.csv"
snomed = pd.read_csv(infiles)
#new navigation bar
def nav_controls(prefix: str) -> List[NavSetArg]:
    """Build the list of navbar pages for the dashboard.

    :param prefix: string prepended to each page's title (e.g. "Page")
    :return: list of shiny nav elements consumed by ui.page_navbar
    """
    return [
        ui.nav_spacer(),
        # Page 1: patient lookup by ID plus observation history.
        ui.nav("Patient Information", prefix + ": Patient Informations",
               x.ui.card(
                   x.ui.card_header("Patient Information"),
                   ui.input_numeric("patient_id", "Enter the Patient ID", 2, min=1, max=1000000000),
                   ui.p(ui.input_action_button("send", "Enter", class_="btn-primary")),
                   ui.output_table("patient_table"),
               ),
               x.ui.card(
                   x.ui.card_header("Patient History"),
                   ui.input_text("snowmed", "Snowmed code", value='chol'),
                   ui.input_numeric("patient2", "Enter the Patient ID", 2, min=1, max=1000000000),
                   ui.p(ui.input_action_button("send2", "Enter", class_="btn-primary")),
                   ui.output_table("history"),
               ),
               ),
        # Page 2: model plots (LR and RF) with their SHAP waterfall charts.
        ui.nav("Linear Regression & Random Forest", prefix + ": Linear Regression & Random Forest",
               ui.row(
                   ui.column(
                       6,
                       x.ui.card(
                           x.ui.card_header("Linear Regression "),
                           ui.output_plot("Data_test"),
                       ),
                       x.ui.card(
                           x.ui.card_header("Linear Regression Waterfall chart"),
                           ui.output_plot("WaterfallPNG"),
                       ),
                   ),
                   ui.column(
                       6,
                       x.ui.card(
                           x.ui.card_header("Random Forest"),
                           ui.output_plot("Random_Forest_plot"),
                       ),
                       x.ui.card(
                           x.ui.card_header("Waterfall Random Forest"),
                           ui.output_plot("WaterRF")
                       ),
                   ),
               ),
               ),
        # Page 3: additional SHAP visualisations.
        ui.nav(
            "BeeSwarm & Violin Graphs",prefix + ": BeeSwarm & Violin Graphs",
            x.ui.card(
                x.ui.card_header("Positive and negative SHAP features"),
                ui.output_text("positive_negative"),
                x.ui.card_header("BeeSwarm"),
                ui.output_plot("plot_bee"),
            ),
            x.ui.card(
                x.ui.card_header("Violin Chart"),
                ui.output_plot("plot_violin")
            ),
        ),
        # Page 4: interactive what-if inputs feeding a fresh prediction plot.
        ui.nav(
            "What if analysis",prefix + ": What if analysis",
            x.ui.card(
                x.ui.card_header("What if"),
                ui.p(ui.input_action_button("pred", "Create a new Prediction!", class_="btn-primary")),
                ui.input_slider(
                    "age",
                    "Age",
                    0,
                    120,
                    65,
                ),
                ui.input_slider(
                    "blood_pressure",
                    "Blood_Pressure",
                    60,
                    150,
                    100,
                    step=0.01,
                    animate = True
                ),
                ui.input_selectize(
                    "gender",
                    "Choose your gender:",
                    {
                        "Gender": {"M": "Male", "F": "Female", "O": "Others"},
                    },
                    multiple=False,
                    selected=False,
                ),
                ui.input_selectize(
                    "diabetes",
                    "Diabetes or not:",
                    {
                        "Diabetes": {"Y": "Yes", "N": "No"},
                    },
                    multiple=False,
                    selected=False,
                ),
                ui.output_text("New_Prediction"),
            ),
            x.ui.card(
                x.ui.card_header("Current plot"),
                ui.output_plot("Current"),
            ),
            x.ui.card(
                x.ui.card_header("What if plot"),
                ui.output_plot("new_LR_plt"),
            ),
        ),
        # Page 5: predictions from the pre-trained joblib model.
        ui.nav(
            "Joblib Prediction", prefix + ": Joblib Prediction",
            x.ui.card(
                x.ui.card_header("Patient ROW"),
                ui.input_numeric("patient_row", "Enter the Patient ROW", 1, min=1, max=len(data)),
                ui.p(ui.input_action_button("send3", "Enter", class_="btn-primary")),
                ui.output_text("patient_Row"),
            ),
            x.ui.card(
                x.ui.card_header("Predictions results"),
                ui.output_text("Pred"),
            ),
            x.ui.card(
                x.ui.card_header("Predictions Plot"),
                ui.output_plot("Pred_plot"),
            )
        ),
        # Page 6: treatment-plan widget controls.
        ui.nav(
            "Treatment Plans", prefix + ": Treatment Plans",
            ui.div(
                ui.input_select(
                    "x", label="Variable",
                    choices=["total_bill", "tip", "size"]
                ),
                ui.input_select(
                    "color", label="Color",
                    choices=["smoker", "sex", "day", "time"]
                ),
                class_="d-flex gap-3",
            ),
            output_widget("my_widget"),
        ),
        # Page 7: placeholder, no content yet.
        ui.nav(
            "Feedback and Support", prefix + ": Feedback and Support",
        )
    ]
# Top-level UI: all navbar pages assembled with a dark (darkly) theme.
app_ui = ui.page_navbar(
    *nav_controls("Page"),
    shinyswatch.theme.darkly(),
    title="AI Dashboard for Cancer Care",
    id="navbar_id",
)
#Server part of the Shiny for Python code :
def server(input: Inputs, output: Outputs, session: Session):
    """Server function: registers every reactive output used by app_ui."""
    @reactive.Effect
    def _():
        print("Current navbar page: ", input.navbar_id())
    #Loading the treat CSV file
    @output
    @render.table
    def Treattable():
        infile = Path(__file__).parent / "data/treat_data.csv"
        treat = pd.read_csv(infile)
        return treat
    #Trying to display the Linear Regression Graphs and Waterfall chart on the Dashboard by creating
    #PNG images and using them to display them on the dashboard
    @output
    @render.plot
    def Data_test():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x,y)
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        linear_regression_plot = plot_linear_regression_results(Y_train, y_lr_train_pred)
        return linear_regression_plot
    #Function for WaterFall chart for Linear Regression
    @output
    @render.plot
    def WaterfallPNG():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x,y)
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        Water = create_shap_waterfall_chart(lr, x, X_test, sample_index=14, max_display=14)
        return Water
    # Random forest fit and its predictions-vs-truth scatter plot.
    @output
    @render.plot
    def Random_Forest_plot():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x,y)
        rf, y_rf_train_pred, y_rf_test_pred = RF(X_train,Y_train, X_test, max_depth=2, random_state=100)
        RF_plot = plot_RF_results(Y_train, y_rf_train_pred)
        return RF_plot
    # SHAP waterfall chart for the random forest model.
    @output
    @render.plot
    def WaterRF():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x,y)
        rf, y_rf_train_pred, y_rf_test_pred = RF(X_train,Y_train, X_test, max_depth=2, random_state=100)
        RFWater = create_shap_waterfall_chart(rf, x, X_test, sample_index=14, max_display=14)
        return RFWater
    #Function for nav"Other Types of SHAP charts" to display two lists of the
    #positive and negative features
    @output
    @render.text
    def positive_negative():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x,y)
        # Drop rows containing NaN values
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        positive_feature_names, negative_feature_names = Split_and_Shap(lr, x, X_test, sample_index=14, max_display=14)
        # Return the two lists as a tuple
        return f" The positive features are : {positive_feature_names}\n & the negative features are :{negative_feature_names}"
    #Function for nav"Other Types of SHAP charts" to display two other SHAP chart "Beeswarm" and "Violin"
    @output
    @render.plot
    def plot_bee():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x, y)
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        bee = shap_beeswarm_plot(lr, x, X_test, sample_index=14, max_display=14)
        return bee # Return the SHAP graph
    #Same but for Violin SHAP chart
    @output
    @render.plot
    def plot_violin():
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x = x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x, y)
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        violin = shap_violin_plot(lr, x, X_test, sample_index=14, max_display=14)
        return violin
    #Shiny for Python function to display Patient info on the Joblib prediction tab
    """@output
    @render.text
    @reactive.event(input.send3, ignore_none=False)
    def patient_Row():
        patient_id = input.patient_row
        pred = Joblib(patient_id())
        return f"Prediction for the Patient {pred}"""
    #Tab Joblib Prediction, Text to display Prediction made with Model and dataset
    @output
    @render.text
    @reactive.event(input.send3, ignore_none=False)
    def Pred():
        selected_row = input.patient_row
        prediction = Joblib(selected_row())
        return prediction
    # Plot of the joblib-model prediction for the selected row.
    @output
    @render.plot
    @reactive.event(input.send3, ignore_none=False)
    def Pred_plot():
        selected_row = input.patient_row
        plot = pred_plot(selected_row())
        return plot
    #Shiny for Python for the What if navigation bar and what is inside
    @output
    @render.plot
    @reactive.event(input.pred, ignore_none=False)
    def new_LR_plt():
        age = input.age.get()
        blood = input.blood_pressure.get()
        # NOTE(review): gender/diabetes/model are read but never used below -- confirm.
        gender = input.gender
        diabetes = input.diabetes
        model = LinearRegression()
        #Setting up x and y :
        np.random.seed(19680801)
        x = age * np.random.randn(437)
        y = blood * np.random.randn(437)
        X_train, X_test, Y_train, Y_test = data_split(x, y)
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        plot = plot_linear_regression_results(Y_train, y_lr_train_pred)
        return plot
    #Shiny for Python function for the What if tab
    @output
    @render.plot
    def Current():
        #Setting up x and y :
        x, y = data_separation(treat)
        x = pd.DataFrame(x)
        x = x.fillna(0)
        x= x.dropna()
        X_train, X_test, Y_train, Y_test = data_split(x, y)
        lr, y_lr_train_pred, y_lr_test_pred = LR(X_train, X_test, Y_train)
        Cur = plot_linear_regression_results(Y_train, y_lr_train_pred)
        return Cur
    #Shiny for Python function to display Patient info
    @output
    @render.table
    @reactive.event(input.send, ignore_none=False)
    def patient_table():
        patient_ids = input.patient_id
        response = requests.get('{}/{}/{}'.format(BASE_URL, 'Patient', patient_ids()))
        patient_df = pd.json_normalize(response.json())[['id', 'gender', 'birthDate']]
        patient_df = patient_df.astype({'birthDate': 'datetime64[ns]'})
        return patient_df
    #Shiny for Python function to display Patient history
    @output
    @render.table
    @reactive.event(input.send2, ignore_none=False)
    def history() :
        patient_id=input.patient_id
        patient2 =input.patient2
        code = input.snowmed
        # NOTE(review): the format string has 4 placeholders but 5 arguments --
        # code() is silently dropped, so the snomed code never reaches the
        # query; likely '&code={}' should consume code() and patient2() is
        # the intended patient. Confirm against the FHIR API before fixing.
        response = requests.get('{}/{}?patient={}&code={}'.format(BASE_URL, 'Observation', patient_id(), patient2(), code()))
        history_df = pd.json_normalize(response.json())
        return history_df
app = App(app_ui, server) | karlcise222/Cancer_Care_Dashboard | app.py | app.py | py | 14,598 | python | en | code | 0 | github-code | 13 |
74638119376 | n = int(input())
arr = []
# read the command strings
for i in range(n):
    arr.append(input())
# process each command string independently
for i in range(len(arr)):
    # reset current position, visited x/y coordinate lists, and heading
    cur=[0,0]
    x,y =[0],[0]
    move = [0,1]
    # walk through every command character
    for j in range(len(arr[i])):
        # reset the step distance for this command
        distance = 0
        # turn right by 90 degrees
        if arr[i][j]=='R':
            move[0],move[1] = move[1],-move[0]
        # turn left by 90 degrees
        elif arr[i][j]=='L':
            move[0],move[1] = -move[1],move[0]
        # move forwards or backwards
        else:
            # one step forward
            if arr[i][j] =='F':
                distance+=1
            # one step backward
            elif arr[i][j] =='B':
                distance-=1
            # update the current position
            cur[0]+=(move[0]*distance)
            cur[1]+=(move[1]*distance)
            # record the visited coordinates
            x.append(cur[0])
            y.append(cur[1])
    # area = (extent travelled in x) * (extent travelled in y)
    print(abs(max(x)-min(x)) * abs(max(y)-min(y)))
16426891221 | """
@author Mrinal Pandey
@date: 2nd April, 2019
@day_time Tuesday 22:31
"""
def displayData(a, n):
    """Print the first n elements of a, each followed by a tab, then a newline."""
    row = ''.join('{}\t'.format(a[i]) for i in range(n))
    print(row)
#Sub-routine to sort elements in the array using Selection Sort
def selectionSort(a, n):
    """Sort the first n elements of a in ascending order, in place.

    Classic selection sort: on each pass, find the smallest remaining
    element and swap it into the current boundary position.
    """
    for boundary in range(n - 1):
        # Index of the minimum in a[boundary:n] (earliest on ties,
        # matching the strict '<' comparison of the original scan).
        smallest = min(range(boundary, n), key=a.__getitem__)
        a[boundary], a[smallest] = a[smallest], a[boundary]
#Driver code: sort a sample array and show it before and after
a = [8, 6, 9, 4, 2] #Array of elements
print ("\nArray before sorting:")
n = len(a) #Calculate length of array
displayData(a, n) #To Display data
selectionSort(a, n) #To sort the data
print ("\nArray after sorting")
displayData(a, n) #To Display data
print() #To change line
| primyt/SelectionSort | SelectionSort.py | SelectionSort.py | py | 1,020 | python | en | code | 0 | github-code | 13 |
15220681772 | import random
import discord
from discord.ext import commands
class Speech(commands.Cog):
    """Speech Cog: chat commands for greetings, roasts, and dog jokes."""
    def __init__(self, client):
        # sets client variable so it can be used in cog
        self.client = client
        # Last member greeted by !hello, used to vary the reply on repeats.
        self._last_member = None
    @commands.command()
    async def hello(self, ctx, *, member: discord.Member = None):
        """Says hello"""
        member = member or ctx.author
        if self._last_member is None or self._last_member.id != member.id:
            # NOTE(review): .format(member) on an already-interpolated f-string
            # is a no-op -- probably left over from a plain-string version.
            await ctx.send(f'Hello {member.mention}! *wags tail*'.format(member))
        else:
            await ctx.send('Hello {0.name}... This feels familiar.'.format(member))
        self._last_member = member
    @commands.command()
    async def roast_me(self, ctx):
        """Reply with a random roast aimed at the invoking user."""
        name = ctx.author.mention
        roasts = [
            f'{name} you\'re my favorite person besides every other person I\'ve ever met. Woof!',
            f'{name} I envy people who have never met you. Woof!',
            f'Bark! {name} if you were an inanimate object, you’d be a participation trophy. *wags tail*',
            f'{name} you are a pizza burn on the roof of the world\'s mouth. Woof!',
            f'{name} you have the charm and charisma of a burning orphanage. *wags tail contently*',
            f'Bark! {name} if there was a single intelligent thought in your head it would have died from loneliness.',
            f'{name} I want you to be the pallbearer at my funeral so you can let me down one last time. Woof!'
        ]
        response = random.choice(roasts)
        await ctx.channel.send(response)
    @commands.command()
    async def joke(self, ctx):
        """Reply with a random dog-themed joke addressed to the invoking user."""
        name = ctx.author.mention
        jokes = [
            f'{name} What do you call a dog that has been left outside in the cold for an extended period of time?\n '
            f'\nA chili-dog.\n\nWoof!',
            f'{name} What kind of dog likes taking a bath every day?\n '
            f'\nA shampoo-dle.\n\n*wags tail*',
            f'{name} What do you call a dog magician?\n\nA labracadabrador.\n\n*wags tail*',
            f'{name} Why did the two-legged dog to come to an abrupt halt?\n\nIt had two paws.\n\nWoof!',
            f'{name} What do you get when you cross a dog with a phone?\n\nA golden receiver.\n\n*wags tail*',
            f'{name} What could be more incredible than a talking dog?\n\nA spelling bee.\n\nWoof!',
            f'{name} Why did the dog upgrade his phone plan?\n\nTo get collar ID.\n\n*wags tail*',
            f'{name} Why are dogs so loud?\n\nThey have built-in sub-woofers.\n\nWoof!',
            f'{name} What do you call a dog that can\'t bark?\n\nA hushpuppy.\n\n*wags tail*',
            f'{name} Where does a Labrador’s food go before it can be sold in stores?\n\nTo the lab for '
            f'testing.\n\n*wags tail contently* ',
            f'{name} Whenever I go to the park, the ducks always try to bite me.\n\nMust be because I\'m '
            f'pure bread.\n\n*wags tail contently*'
        ]
        response = random.choice(jokes)
        await ctx.channel.send(response)
def setup(client):
    # Entry point used by discord.py's load_extension to register this cog.
    client.add_cog(Speech(client))
| solorzao/wolf-pack-bot | cogs/speech.py | speech.py | py | 3,158 | python | en | code | 0 | github-code | 13 |
2686500447 | """Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
"""
from keras import Sequential
from keras.datasets import mnist
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from predictor import Predictor
class MLPPredictor(Predictor):
    """Two-hidden-layer MLP (512-512 with dropout) for MNIST digit classification."""

    batch_size = 128
    epochs = 20

    def __init__(self, x_train, y_train, x_test, y_test):
        # Flatten 28x28 images to 784-vectors and scale pixels into [0, 1].
        self.x_train = x_train.reshape(-1, 784).astype('float32') / 255
        self.x_test = x_test.reshape(-1, 784).astype('float32') / 255
        # One-hot encode labels; num_classes is provided by the Predictor base.
        self.y_train = to_categorical(y_train, self.num_classes)
        self.y_test = to_categorical(y_test, self.num_classes)
        self.model = self.create_model()

    def create_model(self):
        """Build and compile the MLP (Dense 512 -> Dropout -> Dense 512 -> Dropout -> softmax)."""
        model = Sequential()
        model.add(Dense(512, activation='relu', input_shape=(784,)))
        model.add(Dropout(0.2))
        # Fix: the original stacked two Dropout layers back to back and was
        # missing the second 512-unit hidden layer of the reference Keras
        # MNIST MLP this module's docstring describes.
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(self.num_classes, activation='softmax'))
        model.summary()
        model.compile(loss='categorical_crossentropy',
                      optimizer=RMSprop(),
                      metrics=['accuracy'])
        return model

    def fit(self):
        """Train on the training split, validating on the test split each epoch."""
        self.model.fit(self.x_train, self.y_train,
                       batch_size=self.batch_size,
                       epochs=self.epochs,
                       verbose=1,
                       validation_data=(self.x_test, self.y_test))

    def evaluate(self):
        """Print test loss and accuracy of the trained model."""
        score = self.model.evaluate(self.x_test, self.y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
(x_train, y_train), (x_test, y_test) = mnist.load_data()
| mcai/Tidbits.Foresight | predictor_mlp.py | predictor_mlp.py | py | 1,788 | python | en | code | 0 | github-code | 13 |
20691864807 | #!/usr/bin/python3.6
import os, sys, time, datetime, subprocess
import shutil
import yaml
AB_PATH = os.path.abspath(__file__)
CUR_DIR = os.getcwd()
timestr = time.strftime("%Y%m%d-%H%M%S")
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def load_yaml_list(target_yaml='vid_config.yaml', the_key='vid_list'):
    """
    Load a list of video URLs from a YAML configuration file.

    :param target_yaml: path of the YAML file to read
    :param the_key: key whose value should be the list of URLs
    :return: the list on success; [] if the key does not hold a list;
             False if the file cannot be read or parsed
    """
    try:
        with open(target_yaml) as stream:
            the_yaml = yaml.safe_load(stream)
            print("try Load Yaml: ", the_yaml[the_key])
            the_list = the_yaml[the_key]
            if isinstance(the_list, list):
                return the_list
            else:
                print("List not loaded. Returning empty list")
                # Fix: previously returned [''] here, which downstream code
                # treated as one bogus empty URL to download.
                return []
    # Fix: also catch file and lookup errors -- a missing file or key used
    # to escape the handler and crash the caller.
    except (yaml.YAMLError, OSError, KeyError, TypeError) as e:
        print("ERROR Loading Yaml: ", str(e))
        return False
def loop_pull_vid(thelist=None):
    """
    Download every YouTube video URL in thelist via youtube-dl.

    :param thelist: iterable of video URLs (defaults to no work)
    :return: None
    """
    # Fix: avoid the mutable-default-argument pitfall of thelist=[].
    if thelist is None:
        thelist = []
    for vid in thelist:
        # Fix: pass an argument list with no shell so a malicious URL cannot
        # inject shell commands (the old os.popen interpolated it into sh).
        subprocess.run(['python', '/usr/local/bin/youtube-dl', vid])
        print("Downlaoded: ", str(vid))
    print("LOOP DL DONE")
def move_videos(target_dir='videos'):
    """
    Move downloaded .mkv/.mp4 files from the current directory into target_dir.

    :param target_dir: destination directory (resolved relative to the cwd)
    :return: True on success, False if any move failed
    """
    # Fix: use the working directory at call time; the old module-level
    # CUR_DIR was frozen at import time and went stale after any chdir.
    cur_dir = os.getcwd()
    #MOVE FILE LOOP
    try:
        for file in os.listdir(cur_dir):
            if file.endswith(('.mkv', '.mp4')):
                tmp_path = os.path.join(target_dir, file)
                print("FOUND FILE", tmp_path)
                shutil.move(os.path.join(cur_dir, file), tmp_path)
        return True
    except Exception as e:
        print("Video File Move Error: ", str(e))
        return False
def delete_played_videos(target_dir='videos'):
    """
    Remove all .mkv/.mp4 files inside target_dir.

    :param target_dir: directory to purge of downloaded videos
    :return: True on success, False otherwise
    """
    try:
        for file in os.listdir(target_dir):
            if file.endswith(('.mkv', '.mp4')):
                # Fix: the old `rm {file}` shell call ran from the CWD with a
                # bare filename, so files inside target_dir were never removed
                # (and a hostile filename could have been shell-injected).
                os.remove(os.path.join(target_dir, file))
        return True
    except Exception as e:
        print("Could not Delete All Videos: ", str(e))
        return False
def stream_it():
    """
    Launch the stream_vids.sh helper script to stream the queued videos.

    :return: True if the script was launched, False on error
    """
    try:
        stream = os.popen('sh stream_vids.sh')
        print("STREAMING :")
        return True
    except Exception as e:
        print("FileGlob ERROR: ", str(e))
        # Fix: previously fell through and returned None on failure; the
        # sibling helpers in this module all return False for errors.
        return False
| maxnotmin/uotuw | dl.py | dl.py | py | 2,571 | python | en | code | 0 | github-code | 13 |
32466199242 | import PythonNmap as nmap
import json
import datetime
import time
import utils
def callback_print_and_record(host, ports, scan_data, output_file):
    """Print one scan result and, when output_file is given, append it there.

    The on-disk record is a separator line, "<host> <ports>", then either the
    scan data serialized as JSON or the literal string "PortScannerError".
    """
    print("--------------------")
    print(host)
    print(ports)
    print(scan_data)
    if not output_file:
        return
    record = [
        "----------------------------------------------------\n",
        host,
        " ",
        ports,
        "\n",
    ]
    if scan_data:
        record.append(json.dumps(scan_data))
    else:
        record.append("PortScannerError")
    record.append("\n")
    with open(output_file, "a") as fh:
        fh.writelines(record)
class PyNmapWrapper:
    """Runs nmap scans asynchronously in fixed-size batches.

    Targets are split into batches of `batch_size`; every target in a batch
    gets its own PortScannerAsync, and the whole batch is awaited before the
    next one starts.
    """
    def __init__(self):
        # Kept for interface compatibility; results are reported through the
        # per-scan callback, not collected on the instance.
        self.result_collect = []
        self.batch_size = 4
    def scan(self, ip_port_list, arguments = "-sV", timeout = 180, output_file = None):
        """Scan every (ip, port) pair, batch_size scans at a time.

        :param ip_port_list: list of (ip, port) tuples to scan
        :param arguments: extra nmap arguments for every scan
        :param timeout: per-scan timeout in seconds
        :param output_file: optional file the callback appends results to
        :return: list of collected results (currently always empty; results
                 are delivered via callback_print_and_record)
        """
        start_time = time.time()
        print("开始时间:", datetime.datetime.now())
        result_collect = []
        print("Many targets: ", str(len(ip_port_list)))
        batch_ip_port_list_collect = utils.split_array(ip_port_list, self.batch_size)
        for batch_index in range(0, len(batch_ip_port_list_collect)):
            print("batch = ", str(batch_index))
            batch_ip_port_list = batch_ip_port_list_collect[batch_index]
            # Reset the pool per batch: it was previously shared across
            # batches, so scanners skipped by the buggy wait loop leaked
            # into the next batch's bookkeeping.
            async_scanner_pool = []
            for i in range(0, len(batch_ip_port_list)):
                ip, port = batch_ip_port_list[i]
                print("No.", str(i), ip, str(port))
                # One fresh asynchronous scanner per target; scan() returns
                # immediately and reports through the callback.
                this_async_scanner = nmap.PortScannerAsync()
                async_scanner_pool.append(this_async_scanner)
                this_async_scanner.scan(hosts = ip, ports = str(port), arguments = arguments, callback = callback_print_and_record, timeout = timeout, output_file = output_file)
            # Block until every scanner in this batch has finished.  The
            # original removed items from the list *while iterating it*,
            # which skips elements; rebuilding the list avoids that.
            while async_scanner_pool:
                async_scanner_pool = [s for s in async_scanner_pool if s.still_scanning()]
                print("waiting for", str(len(async_scanner_pool)), "running scan in this batch")
                time.sleep(2)
        print("All finished")
        print("结束时间:", datetime.datetime.now())
        print("运行用时:", str(time.time()-start_time))
        for result in result_collect:
            print(result)
        return result_collect
if __name__ == '__main__':
    # Run against previously collected port-scan results and write the
    # findings to a file at the end.
    danny_ip_port_list = utils.getDannyIPandPorts()  # NOTE(review): unused below; list2 re-fetches the same data
    PyNmapWrapperInst = PyNmapWrapper()
    keep_record_file = "vuln incremental rerun danny.txt"
    #results = PyNmapWrapperInst.scan([('172.19.219.32', 7676), ('172.19.219.14', 8009), ('172.19.219.11', 631), ('172.19.221.34', 443)], arguments = "-sV --version-all --script vuln", timeout = 300, keep_record_file = keep_record_file)
    # Incremental rerun: only scan targets present on 08/09 but not in the
    # current target list.
    list1 = utils.getDannyIPandPorts0809()
    list2 = utils.getDannyIPandPorts()
    new_list = [item for item in list1 if item not in list2]
    results = PyNmapWrapperInst.scan(new_list, arguments = "-sV --version-all --script vuln", timeout = 480, output_file = keep_record_file)
| FlyTweety/ExtendNmapMetasploit | AsyncNmap.py | AsyncNmap.py | py | 3,627 | python | en | code | 0 | github-code | 13 |
71471438417 | import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class StringConcat(Base):
    @staticmethod
    def export() -> None:
        """Emit StringConcat test cases: basic, broadcast, 0-d, empty, UTF-8."""
        node = onnx.helper.make_node(
            "StringConcat",
            inputs=["x", "y"],
            outputs=["result"],
        )
        # (x data, y data, expected result, test name), emitted in order.
        cases = [
            (["abc", "def"], [".com", ".net"], ["abc.com", "def.net"],
             "test_string_concat"),
            (["cat", "dog", "snake"], ["s"], ["cats", "dogs", "snakes"],
             "test_string_concat_broadcasting"),
            ("cat", "s", "cats", "test_string_concat_zero_dimensional"),
            (["abc", ""], ["", "abc"], ["abc", "abc"],
             "test_string_concat_empty_string"),
            (["的", "中"], ["的", "中"], ["的的", "中中"],
             "test_string_concat_utf8"),
        ]
        for x_data, y_data, expected, name in cases:
            x = np.array(x_data).astype("object")
            y = np.array(y_data).astype("object")
            result = np.array(expected).astype("object")
            expect(node, inputs=[x, y], outputs=[result], name=name)
| onnx/onnx | onnx/backend/test/case/node/string_concat.py | string_concat.py | py | 1,862 | python | en | code | 15,924 | github-code | 13 |
3532010382 | import matplotlib.pyplot as plt
import numpy as np
import math
def show_histogram(data, bins=100, title=None, x_axis_label=None, y_axis_label=None):
    """
    Show the histogram of the data
    :param data: data with ND-array
    :param bins: num of bins in the histogram
    :param title: figure title
    :param x_axis_label: x axis label
    :param y_axis_label: y axis label
    """
    fig, axes = plt.subplots()
    # `normed` was removed from matplotlib (3.1+); `density=True` is the
    # supported way to normalize the histogram.
    n, bins, patches = axes.hist(data, bins=bins, density=True, facecolor='green', alpha=0.75)
    if title is not None:
        axes.set_title(title)
    if x_axis_label is not None:
        axes.set_xlabel(x_axis_label)
    if y_axis_label is not None:
        axes.set_ylabel(y_axis_label)
    # Clamp the x-axis to the data range.
    max_v = np.max(data)
    min_v = np.min(data)
    axes.set_xlim([min_v, max_v])
    axes.grid()
    plt.show()
def show_multiple_img(img_lists, title=None, num_cols=4, figsize=(16, 9), show=True):
    """
    Show several images in one figure.

    :param img_lists:
        [{'img': img, 'title':'Image Title', 'cmap': None},...]
        where the cmap could be 'gray', 'jet' etc., see imshow() in matplotlib
    :param title: Super title of the figure
    :param num_cols: number of columns in this figure
    :param figsize: figure size in inches (width, height)
    :param show: call plt.show() at the end when True
    Example:
    >>> show_multiple_img([{'img': gray_img, 'title': 'rgb'},
    >>>                    {'img': depth, 'title': 'depth', 'cmap': 'jet'},
    >>>                    {'img': normal2rgb(surface_normal), 'title': 'normal'}], title='Preview', num_cols=2)
    """
    len_figures = len(img_lists)
    rows = int(math.ceil(len_figures / num_cols))
    cols = num_cols
    fig, axs = plt.subplots(rows, cols, figsize=figsize)
    if title is not None:
        fig.suptitle(title)
    # Normalize axs to a flat array: plt.subplots returns a scalar Axes for
    # a 1x1 grid and a 1-D array for a single row, which broke the original
    # row/column indexing in those cases.
    axs = np.asarray(axs).reshape(-1)
    for idx in range(len_figures):
        ax = axs[idx]
        img = img_lists[idx]['img']
        sub_title = img_lists[idx]['title'] if 'title' in img_lists[idx] else None
        cmap_option = img_lists[idx]['cmap'] if 'cmap' in img_lists[idx] else None
        if cmap_option is None:
            ax.imshow(img)
        else:
            ax.imshow(img, cmap=cmap_option)
        if sub_title is not None:
            ax.title.set_text(sub_title)
    plt.tight_layout()
    if show:
        plt.show()
def normal2rgb(surface_normal):
    """
    Remapping the surface normal to the RGB map
    :param surface_normal: surface normal map
    :return: rgb visualization image
    """
    # Map each component from [-1, 1] into [0, 1] for display as RGB.
    return (surface_normal + 1.0) / 2.0 | sfu-gruvi-3dv/sanet_relocal_demo | visualizer/visualizer_2d.py | visualizer_2d.py | py | 2,683 | python | en | code | 51 | github-code | 13 |
10687571889 | import random
import copy
POPULATION_SIZE = 100  # chromosomes per generation
TOPK= 10  # fittest chromosomes kept between generations
QUEENS = int(input("Enter The Number Of Queens: "))  # board size / genes per chromosome
gen = 1  # current generation counter (incremented by the main loop)
def InitialPopulation(PoP):
    """Seed PoP with POPULATION_SIZE random chromosomes of length QUEENS."""
    for _ in range(POPULATION_SIZE):
        chromosome = [random.randint(1, QUEENS) for _ in range(QUEENS)]
        PoP.append(chromosome)
    return PoP
def IntersectQueen(Chromo):
    """Fitness: number of attacking queen pairs (same row or same diagonal).

    Chromo[i] is the 1-based row of the queen in column i, so two queens
    attack iff their rows are equal or the row distance equals the column
    distance.  0 means a valid solution.
    """
    attacks = 0
    n = len(Chromo)
    for a in range(n):
        for b in range(a + 1, n):
            same_row = Chromo[a] == Chromo[b]
            same_diag = abs(a - b) == abs(Chromo[a] - Chromo[b])
            if same_row or same_diag:
                attacks += 1
    return attacks
def EvaluatePopulation(PoP):
    """Compute the attack-count fitness of every chromosome in PoP."""
    return [IntersectQueen(chromosome) for chromosome in PoP]
def FindFittest(PoP, PoPFitness):
    """Keep only the TOPK fittest chromosomes (lowest attack counts)."""
    # Sort by (fitness, chromosome); ties break lexicographically on genes,
    # matching sorted(zip(fitness, population)).
    ranked = sorted(zip(PoPFitness, PoP))
    top = ranked[:TOPK]
    return [chromo for _, chromo in top], [fitness for fitness, _ in top]
def Crossover(Chromo1, Chromo2):
    """Produce one child: keep genes where parents agree, randomize the rest."""
    child = []
    for pos in range(QUEENS):
        if Chromo1[pos] == Chromo2[pos]:
            child.append(Chromo1[pos])
        else:
            child.append(random.randint(1, QUEENS))
    return child
def Mutate(Children):
    """Randomly reassign one gene in every child, mutating the lists in place."""
    for child in Children:
        # Keep this as one statement: Python evaluates the right-hand side
        # (the new gene) before the index, which fixes the RNG call order.
        child[random.randint(0,QUEENS-1)]=random.randint(1,QUEENS)
    return Children
def newGen(PoP):
    """Breed the next generation: replace duplicates, cross all pairs, mutate."""
    # NOTE(review): `gen` is declared global but never modified here — the
    # main loop increments it; confirm this declaration is intentional.
    global gen
    Children=[]
    for i in range((len(PoP))):
        for j in range(i+1,len(PoP)):
            # Identical parents would only breed clones; replace both with
            # fresh random chromosomes to preserve diversity.
            if(PoP[i] == PoP[j]):
                temp = []
                for k in range(QUEENS):
                    temp.append(random.randint(1,QUEENS))
                PoP[i] = copy.deepcopy(temp)
                temp = []
                for k in range(QUEENS):
                    temp.append(random.randint(1,QUEENS))
                PoP[j] = copy.deepcopy(temp)
            # Every unordered pair of parents contributes one child.
            Child=Crossover(PoP[i],PoP[j])
            Children.append(Child)
    # Mutate edits the child lists in place and returns the same list.
    Children = Mutate(Children)
    for i in range(len(Children)):
        PoP.append(Children[i])
    return PoP
def GoalTest(PoPFitness):
    """Index of the first perfect (zero-conflict) chromosome, or -1 if none."""
    for idx, fitness in enumerate(PoPFitness):
        if fitness == 0:
            return idx
    return -1
if __name__ == "__main__":
PoP = []
PoP = InitialPopulation(PoP)
PoPFitness = EvaluatePopulation(PoP)
print("Generation: ",gen,sep="")
print("Best Fitness Value: ",min(PoPFitness)," ","Worst Fitness Value: ",max(PoPFitness),sep="")
ind = GoalTest(PoPFitness)
while(ind==-1):
PoP,PoPFitness = FindFittest(PoP,PoPFitness)
print(PoP[0],"Fitness -> ",PoPFitness[0],sep="")
gen+=1
PoP = newGen(PoP)
PoPFitness = EvaluatePopulation(PoP)
print("Generation: ",gen,sep="")
print("Best Fitness Value: ",min(PoPFitness)," ","Worst Fitness Value: ",max(PoPFitness),sep="")
ind = GoalTest(PoPFitness)
print("SOLUTION FOUND!:")
print(PoP[ind])
| vineetjoshi253/CSE-643-Artificial-Intelligence | Assignment-1/N-Queen/GeneticN.py | GeneticN.py | py | 3,607 | python | en | code | 1 | github-code | 13 |
13614130570 | class Solution(object):
    # solution1: needs O(n) extra space and O(n log n) running time
def wiggleSort(self, nums):
temp = sorted(nums)
mid = (len(temp) + 1) / 2
k = 0
for i in reversed(xrange(mid)):
nums[k] = temp[i]
k += 2
j = 1
for i in reversed(xrange(mid, len(temp))):
nums[j] = temp[i]
j += 2
# solution2, can optimize the space compexcity to O(1)
| clovery410/mycode | leetcode_review2/324wiggle_sort2.py | 324wiggle_sort2.py | py | 502 | python | en | code | 1 | github-code | 13 |
32417951408 |
import numpy as np
import seaborn as sn
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as mpatches
from matplotlib.dates import DateFormatter
import datetime
def plot_test_train_splits(train, test):
    """
    Plot the train/test split as per-label instance counts.

    Parameters
    ----------
    train
        Training label set.
    test
        Testing label set.
    """
    train_labels, train_counts = np.unique(train, return_counts=True)
    _, test_counts = np.unique(test, return_counts=True)
    print(test_counts)
    print(train_counts)
    positions = np.arange(len(train_labels))  # one bar group per label
    bar_width = 0.35
    fig, ax = plt.subplots()
    ax.bar(positions - bar_width / 2, train_counts, bar_width, label='Train')
    ax.bar(positions + bar_width / 2, test_counts, bar_width, label='Test')
    ax.set_ylabel('Instances per set')
    ax.set_title('Stratification of train/test per label set')
    ax.set_xticks(positions)
    ax.legend()
def check_metrics(metric, string):
    """Return True when `string` occurs as a substring of the metric name.

    Parameters
    ----------
    metric
        Metric name in string format.
    string
        Substring to look for.

    Returns
    -------
    bool
        True when the metric name contains the substring.
    """
    return metric.count(string) > 0
def plot_metrics(metrics, date, labels_=None):
    """
    Plot one set of figures per date: activity durations (bar), activity and
    location proportions (pies), transfer matrices (heatmap) and velocity
    (line plot).

    (TODO: this is a placeholder function that only produces rudimentary
    visuals; it needs to be expanded.)

    Parameters
    ----------
    metrics
        Mapping of metric name -> per-date metric container.
    date
        Sequence of dates, one entry per metric index.
    labels_
        Optional label names used for the transfer heatmap axes.

    Returns
    -------
    dict
        Mapping of figure title (spaces replaced by underscores) -> figure.
    """
    figures_dict = {}
    for index, date_ in enumerate(date):
        props = []
        labs = []
        speed_list = []
        average_speed = []
        max_speed = []
        date_ = np.datetime64(date_, 'D')
        # --- durations: collect non-zero entries across all duration metrics.
        for metric in metrics:
            if check_metrics(metric, 'duration'):
                per_metric_container = metrics[metric]
                current_metrics_per_date = per_metric_container[index]
                for idx, proportion in enumerate(current_metrics_per_date):
                    for key in proportion:
                        if proportion[key] != 0.0:
                            props.append(proportion[key])
                            #labs.append((str(key) + ' ' + str("%.2f" % proportion[key])))
                            labs.append(str(key))
        if len(props) != 0:
            x = np.arange(len(props))
            fig, ax = plt.subplots()
            ax.bar(x, np.squeeze(props))
            ax.set_xticks(x)
            ax.set_xticklabels(labs)
            ax.set_ylabel('Time (s)')
            ax.set_xlabel('Metric')
            title = 'durations_of_activities' + ' ' + str(date_)
            ax.set_title(title)
            figures_dict[title.replace(' ', '_')] = fig
        props = []
        labs = []
        # --- activities: pie of non-zero proportions.
        for metric in metrics:
            if check_metrics(metric, 'activities'):
                per_metric_container = metrics[metric]
                current_metrics_per_date = per_metric_container[index]
                for idx, proportion in enumerate(current_metrics_per_date):
                    for key in proportion:
                        if proportion[key] != 0.0:
                            props.append(proportion[key])
                            labs.append(str(key))
            if len(props) != 0:
                fig, ax = plt.subplots()
                ax.pie(np.squeeze(props), labels=labs, autopct='%1.0f%%')
                ax.legend(loc='upper center', ncol=6, fancybox=True,
                          shadow=False, fontsize=9, framealpha=0.7)
                title = metric + ' ' + str(date_)
                ax.set_title(title)
                figures_dict[title.replace(' ', '_')] = fig
        # NOTE(review): props/labs are NOT reset between the 'activities' and
        # 'locations' sections (unlike after 'durations'), so the locations
        # pie can include leftover activities entries — confirm intent.
        for metric in metrics:
            if check_metrics(metric, 'locations'):
                per_metric_container = metrics[metric]
                current_metrics_per_date = per_metric_container[index]
                for idx, proportion in enumerate(current_metrics_per_date):
                    for key in proportion:
                        if proportion[key] != 0.0:
                            props.append(proportion[key])
                            labs.append(str(key))
            if len(props) != 0:
                fig, ax = plt.subplots()
                ax.pie(np.squeeze(props), labels=labs, autopct='%1.0f%%')
                ax.legend(loc='upper center', ncol=6, fancybox=True,
                          shadow=False, fontsize=9, framealpha=0.7)
                # Check difference between this and the previous plot
                title = metric + ' ' + str(date_)
                ax.set_title(title)
                figures_dict[title.replace(' ', '_')] = fig
        # --- transfers: label-by-label transition-count heatmap.
        for metric in metrics:
            if check_metrics(metric, 'transfers'):
                per_metric_container = metrics[metric]
                current_metrics_per_date = per_metric_container[index]
                xlabs = labels_
                ylabs = labels_
            # NOTE(review): this check sits outside the 'transfers' branch, so
            # it re-evaluates stale current_metrics_per_date for every metric.
            if len(current_metrics_per_date) != 0:
                data_frame_ = pd.DataFrame(np.squeeze(current_metrics_per_date),
                                           index=ylabs, columns=ylabs)
                fig, ax = plt.subplots()
                # TODO Rewrite not to use seaborn
                g = sn.heatmap(data_frame_, annot=True, ax=ax)
                g.set_yticklabels(g.get_yticklabels(), rotation = 0)
                ylim = list(ax.get_ylim())
                if ylim[1] == 0.5:
                    ylim[0] += 0.5
                    ylim[1] = 0
                ax.set_ylim(ylim)
                title = metric + ' ' + str(date_)
                ax.set_title(title)
                figures_dict[title.replace(' ', '_')] = fig
        # --- speed: raw velocity trace plus its average.
        for metric in metrics:
            if check_metrics(metric, 'speed'):
                per_metric_container = metrics[metric]
                speed_list = np.squeeze(per_metric_container[index][0])
                average_speed = np.squeeze(per_metric_container[index][1])
                max_speed = np.squeeze(per_metric_container[index][2])
        if len(speed_list) != 0:
            x = np.arange(len(speed_list))
            fig, ax = plt.subplots()
            ax.plot(x, speed_list, label='speed')
            ax.plot([x[0], x[-1]], [average_speed]*2, label='avg. speed')
            ax.set_xlabel('Sample')
            ax.set_ylabel(r'Velocity $ms^{-1}$')
            ax.legend(loc='upper center', ncol=6, fancybox=True,
                      shadow=False,
                      fontsize=9, framealpha=0.7)
            title = 'velocity_from_labels' + ' ' + str(date_)
            ax.set_title(title)
            figures_dict[title.replace(' ', '_')] = fig
        # for metric in metrics:
        #
        #     if check_metrics(metric, 'visit'):
        #
        #         per_metric_container = metrics[metric]
        #
        #         current_metrics_per_date = per_metric_container[index]
        #
        #         for idx, proportion in enumerate(current_metrics_per_date):
        #             for key in proportion:
        #                 if proportion[key] != 0.0:
        #                     props.append(proportion[key])
        #                     labs.append(str(key))
        #
        #         if len(props) != 0:
        #             x = np.arange(len(props))
        #             plt.figure()
        #             plt.bar(x, np.squeeze(props))
        #             plt.xticks(x, labs)
        #             plt.ylabel('Time (s)')
        #             plt.xlabel('Metric')
        #             plt.legend(loc='upper center', ncol=6, fancybox=True, shadow=False,
        #                       fontsize=9, framealpha=0.7)
        #             plt.title('specific_' + ' ' + str(date_))
    return figures_dict
def plot_features(X, ts=None, feature_names=None, xlab=None, ylab=None):
    """
    Plot every feature of X in its own stacked subplot.

    Parameters
    ----------
    X
        (N, D) matrix of raw feature values.
    ts
        Optional (N,) array of timestamps; defaults to sample indices.
    feature_names
        Optional names for the D features; defaults to integer indices.
    xlab
        Optional label for the X axis.
    ylab
        Optional label for the Y axis.
    """
    n_samples = X.shape[0]
    n_features = X.shape[1]
    if ts is None:
        ts = np.arange(n_samples)
    if feature_names is None:
        feature_names = np.arange(n_features)
    for idx, name in enumerate(feature_names):
        # One subplot per feature, stacked vertically.
        plt.subplot(n_features, 1, idx + 1)
        plt.plot(ts, X[:, idx], linewidth=0.5, markersize=12, label=name)
        plt.legend(loc='upper center', ncol=6, fancybox=True, shadow=False,
                   fontsize=9, framealpha=0.7)
        if ylab is not None:
            plt.ylabel(ylab)
        if xlab is not None:
            plt.xlabel(xlab)
def features_figure(X, ts=None, feature_names=None, fig=None, ax=None,
                    figsize=None):
    """
    Returns figure with lines for every feature in the y axis and time in the x axis.
    Parameters
    ----------
    X : (N, D) ndarray
        Matrix with N samples and D feature values.
    ts : (N, ) ndarray of datetime, optional
        One-dimensional array with the datetime of every sample. If None
        assigns numbers from 0 to N.
    feature_names : (D, ) array_like, optional
        List of names corresponding to each feature. It assumes that the order
        corresponds to the columns of matrix X. If None the names are integers
        from 0 to D.
    fig : matplotlib.figure.Figure, optional
        Matplotlib figure where to create the axes for the plot, if None a new
        figure is created.
    ax : matplotlib.axes.Axes, optional
        Maptlotlib Axes where to create the plot, if None a new axes is
        created.
    figsize : (float, float), optional
        width, height in inches. If not provided default from matplotlib.
    Returns
    -------
    fig : matplotlib figure
    ax : matplotlib axis
    Examples
    --------
    # >>> X = np.array([[25, 70], [26, 60], [23, 65], [25, 70], [23, 77]])
    # >>> ts = np.datetime64(0, 's') + np.arange(len(X))
    # >>> feature_names = ['temperature', 'humidity']
    # >>> features_figure(X, ts, feature_names)
    (<Figure size 640x480 with 1 Axes>,
     <matplotlib.axes._subplots.AxesSubplot at 0x7f8d8086f400>)
    """
    if fig is None:
        fig = plt.figure(figsize=figsize)
    if ax is None:
        ax = fig.add_subplot(111)
    if ts is None:
        ts = np.arange(X.shape[0]) + 1
    if feature_names is None:
        feature_names = np.arange(X.shape[1])
    for i, feature in enumerate(feature_names):
        ax.plot(ts, X[:, i], label=feature)
    #plt.gcf().autofmt_xdate()
    ax.legend(loc='upper center', ncol=6, fancybox=True, shadow=False,
              fontsize=9, framealpha=0.7)
    plt.title('raw_features')
    # Leave headroom above the data so the legend does not overlap the lines.
    ax.set_ylim(X.min(), X.max() + X.std(axis=0).max())
    ax.set_xlim(ts[0], ts[-1])
    plt.gcf().autofmt_xdate()
    # Positional True: the `b` keyword was removed in matplotlib 3.6
    # (renamed `visible`); the positional form works on all versions.
    ax.grid(True)
    ax.set_axisbelow(True)
    return fig, ax
def features_figure_scatter(X, ts=None, feature_names=None, fig=None, ax=None,
                            figsize=None):
    """
    Returns figure with scatter points for every feature in the y axis and
    time in the x axis.
    Parameters
    ----------
    X : (N, D) ndarray
        Matrix with N samples and D feature values.
    ts : (N, ) ndarray of datetime, optional
        One-dimensional array with the datetime of every sample. If None
        assigns numbers from 0 to N.
    feature_names : (D, ) array_like, optional
        List of names corresponding to each feature. It assumes that the order
        corresponds to the columns of matrix X. If None the names are integers
        from 0 to D.
    fig : matplotlib.figure.Figure, optional
        Matplotlib figure where to create the axes for the plot, if None a new
        figure is created.
    ax : matplotlib.axes.Axes, optional
        Maptlotlib Axes where to create the plot, if None a new axes is
        created.
    figsize : (float, float), optional
        width, height in inches. If not provided default from matplotlib.
    Returns
    -------
    fig : matplotlib figure
    ax : matplotlib axis
    Examples
    --------
    # >>> X = np.array([[25, 70], [26, 60], [23, 65], [25, 70], [23, 77]])
    # >>> ts = np.datetime64(0, 's') + np.arange(len(X))
    # >>> feature_names = ['temperature', 'humidity']
    # >>> features_figure(X, ts, feature_names)
    (<Figure size 640x480 with 1 Axes>,
     <matplotlib.axes._subplots.AxesSubplot at 0x7f8d8086f400>)
    """
    if fig is None:
        fig = plt.figure(figsize=figsize)
    if ax is None:
        ax = fig.add_subplot(111)
    if ts is None:
        ts = np.arange(X.shape[0]) + 1
    if feature_names is None:
        feature_names = np.arange(X.shape[1])
    for i, feature in enumerate(feature_names):
        ax.scatter(ts, X[:, i], label=feature, s=3)
    #plt.gcf().autofmt_xdate()
    ax.legend(loc='upper center', ncol=6, fancybox=True, shadow=False,
              fontsize=9, framealpha=0.7)
    plt.title('raw_features')
    ax.set_ylim(X.min(), X.max() + X.std(axis=0).max())
    ax.set_xlim(ts[0], ts[-1])
    plt.gcf().autofmt_xdate()
    # Positional True: the `b` keyword was removed in matplotlib 3.6
    # (renamed `visible`); the positional form works on all versions.
    ax.grid(True)
    ax.set_axisbelow(True)
    fig.show()
    return fig, ax
def labels_figure(y_array, ts=None, labels=None, fig=None, ax=None,
                  figsize=None):
    """
    Returns figure with labels in the y axis and time in the x axis.
    All the contiguous samples with the same label are aggregated and a
    horizontal box is drawn that extends from the first until de last sample.
    This is repeated for every label and sample.
    Parameters
    ----------
    y_array : (N, ) ndarray of integers
        One-dimensional array with all the labels in numerical discrete format.
    ts : (N, ) ndarray of datetime, optional
        One-dimensional array with the datetime of every label. If None assigns
        numbers from 0 to N.
    labels : (K, ) array_like, optional
        List of names corresponding to each label. It assumes that the order
        corresponds to the values in y_array. If None assigns numbers from 0 to
        K (where K is the number of unique elements in y_array).
    fig : matplotlib.figure.Figure, optional
        Matplotlib figure where to create the axes for the plot, if None a new
        figure is created.
    ax : matplotlib.axes.Axes, optional
        Maptlotlib Axes where to create the plot, if None a new axes is
        created.
    figsize : (float, float), optional
        width, height in inches. If not provided default from matplotlib.
    Returns
    -------
    fig : matplotlib figure
    ax : matplotlib axis
    Examples
    --------
    >>> y_array = np.array([0, 0, 0, 1, 1, 2, 2, 0])
    >>> ts = np.datetime64(0, 's') + np.arange(len(y_array))
    >>> labels = ['stand', 'walk', 'run']
    >>> labels_figure(y_array, ts, labels)
    (<Figure size 640x480 with 1 Axes>,
     <matplotlib.axes._subplots.AxesSubplot at 0x7f0f50361e10>)
    """
    if ax is None:
        if fig is None:
            # figsize must be passed by keyword; positionally it would be
            # interpreted as the figure number.
            fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot()
    if labels is None:
        # Default label names 0..K-1 (the original called the nonexistent
        # np.arrange on the unique values themselves).
        labels = np.arange(len(np.unique(y_array)))
    if ts is None:
        ts = np.arange(len(y_array))
    # One distinct color per label.
    norm = mpl.colors.Normalize(vmin=0, vmax=len(labels))
    cmap = cm.gist_rainbow
    m = cm.ScalarMappable(norm=norm, cmap=cmap)
    # Indices where the label differs from its predecessor (with wraparound).
    y_change = np.where(y_array != np.roll(y_array, 1))[0]
    # First label needs to be added manually
    # NOTE(review): `y_change[1]` raises IndexError when there is exactly one
    # change point, and a constant y_array draws nothing — confirm inputs.
    if len(y_change) > 0:
        y = y_array[0]
        interval = (ts[0], ts[y_change[1]-1])
        line_xs = np.array(interval)
        line_ys = np.array((y, y))
        ax.fill_between(line_xs, line_ys-0.5, line_ys+0.5,
                        facecolor=m.to_rgba(y), edgecolor='dimgray',
                        linewidth=0.2)
    for i, change_id in enumerate(y_change):
        if i == len(y_change)-1:
            y = y_array[-1]
            interval = (ts[change_id],
                        ts[-1])
        else:
            y = y_array[change_id]
            interval = (ts[change_id],
                        ts[y_change[i+1]])
        line_xs = np.array(interval)
        line_ys = np.array((y, y))
        ax.fill_between(line_xs, line_ys-0.5, line_ys+0.5,
                        facecolor=m.to_rgba(y), edgecolor='dimgray',
                        linewidth=0.2)
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(list(labels), rotation=45, ha='right')
    ax.set_ylim([-1, len(labels)])
    xfmt = mpl.dates.DateFormatter('%H:%M\n%d/%m')
    ax.xaxis.set_major_formatter(xfmt)
    ax.set_xlim((ts[0], ts[-1]))
    plt.gcf().autofmt_xdate()
    # Positional True: the `b` keyword was removed in matplotlib 3.6.
    ax.grid(True)
    ax.set_axisbelow(True)
    return fig, ax
def polar_labels_figure(labels, label_names, xticklabels, empty_rows=0,
                        leading_labels=0, spiral=False,
                        title=None, m=None, fig=None, ax=None, figsize=None):
    """
    Returns polar plot with categorical bins from a matrix.
    Parameters
    ----------
    labels : (R, C) ndarray of integers
        Matrix of integers from [-1, K] denoting different labels and with the
        special value of -1 denoting no label. The value is used as an index
        for the list label_names.
    label_names : (K, ) array_like of strings
        List of strings representing each of the K labels. (eg. bedroom,
        living room, ..., kitchen)
    xticklabels : (D, ) array_like of strings
        List of strings to print around the circle with equal spacing in
        between and with the first element corresponding to the 90 degree and
        the following in a clockwise order. (eg. Monday, Tuesday, ..., Sunday)
    empty_rows : integer, optional (default 0)
        Number of empty rows to insert at the beginning of the labels matrix.
        This can be used to reduce or increase the empty space at the centre of
        the circle.
    leading_labels : integer, optional (default 0)
        Number of empty labels to insert at the beginning of the first row in
        order to start the first label in a different position than 90 degrees.
    spiral : boolean, optional (default False)
        If True, the labels are arranged in a spiral in which a row starts at
        the same level than the end bin of the previous row.
        If False, each row is in its own concentric circle, the previous one
        always smaller than the following one.
    title : string, optional (default None)
        Title for the figure.
    m : matplotlib colormap, optional (default None)
        Colormap that is used for each of the K labels.
        If None:
            If K < 11:
                m = cm.get_cmap('tab10')
            Else if K < 21:
                m = cm.get_cmap('tab20')
            Else:
                m = cm.gist_rainbow (Normalised with maximum colour value at K)
    fig : matplotlib.figure.Figure, optional
        Matplotlib figure where to create the axes for the plot, if None a new
        figure is created.
    ax : matplotlib.axes.Axes, optional (default None)
        Maptlotlib Axes where to create the plot in polar form. If None a new
        axes is created.
    figsize : (float, float), optional (default None)
        width, height in inches. If not provided default from matplotlib.
    Returns
    -------
    fig : matplotlib figure
    ax : matplotlib axis
    """
    if ax is None:
        if fig is None:
            # figsize must be passed by keyword; positionally it would be
            # interpreted as the figure number.
            fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0, 0, 1.0, 0.9], polar=True)
    if labels is None:
        # The original defaulted to `np.arrange(np.unique(y_array))`, which
        # failed twice over (np.arrange does not exist and y_array is
        # undefined here); the labels matrix is genuinely required.
        raise ValueError("polar_labels_figure requires a labels matrix")
    n_rows = labels.shape[0] + empty_rows
    n_columns = labels.shape[1]
    labels = labels.flatten()
    ax.set_title(title)
    if m is None:
        if len(label_names) < 11:
            m = cm.get_cmap('tab10')
        elif len(label_names) < 21:
            m = cm.get_cmap('tab20')
        else:
            norm = mpl.colors.Normalize(vmin=0, vmax=len(label_names))
            cmap = cm.gist_rainbow
            colors = cm.ScalarMappable(norm=norm, cmap=cmap)
            m = colors.to_rgba
    width = 2 * np.pi / n_columns  # All boxes are the same width
    indices = np.arange(len(labels)) + leading_labels + empty_rows*n_columns
    x = indices * 2 * np.pi / n_columns
    bottom = indices / n_columns
    if not spiral:
        bottom = bottom.astype(int)
    # -1 means "no label"; draw it as a white (invisible) bin.
    colors = [m(y) if y!= -1 else 'white' for y in labels]
    ax.bar(x, height=1, width=width, bottom=bottom, align='edge', color=colors)
    if spiral:
        plt.ylim(0,n_rows+1)
    else:
        plt.ylim(0,n_rows)
    ax.set_yticks([])
    ax.set_xticks(2 * np.pi * np.arange(len(xticklabels)) /
                  len(xticklabels))
    ax.set_xticklabels(xticklabels)
    # Clockwise, starting at 12 o'clock.
    ax.set_theta_direction(-1)
    ax.set_theta_offset(np.pi/2.0)
    handles = [mpatches.Patch(color=m(i), label=y) for i, y in
               enumerate(label_names)]
    ax.legend(handles=handles, loc='upper left', bbox_to_anchor=(-0.2, 1.1),
              ncol=1, fontsize=7 )
    return fig, ax
| rymc/bHealth | bhealth/visualisations.py | visualisations.py | py | 21,861 | python | en | code | 2 | github-code | 13 |
585556547 | from pyowm import OWM
from pyowm.utils import config
from pyowm.utils import timestamps
# NOTE(review): hard-coded API key committed to source; move it to an
# environment variable or config file.
owm = OWM('4f1d4d06b8d0c706cbd6971dfd330188')
mgr = owm.weather_manager()
from pyowm.utils.config import get_default_config
config_dict = get_default_config()
# NOTE(review): config_dict is never passed to OWM(...) above, so setting
# the language here likely has no effect — verify.
config_dict['language'] = 'ru'
place = input("Введи город ")
observation = mgr.weather_at_place(place)
w = observation.weather
# Current temperature in degrees Celsius.
temp = w.temperature('celsius')["temp"]
print("В городе " + place + " сейчас " + w.detailed_status)
print("Температура в районе " + str(temp) + " градусов")
# Branches ordered from coldest to hottest so every one is reachable:
# the original checked `temp < 10` before `temp < 0`, which made the
# sub-zero branch dead code, and left gaps at exactly 20 and 25 degrees.
if temp < 0:
    print("Ты в Сибири или на Северном полюсе?")
elif temp < 10:
    print("На улице очень холодно, одевайся как танк!")
elif temp < 20:
    print("На улице холодно, одевайся теплее")
elif temp <= 25:
    print("Погода норм, одевай что хочешь")
elif temp < 30:
    print("Не забудь солнцезащитные очки и крем от загара!")
else:
    print("Ну и жарища...")
| TheSnow1/pogoda | mmm.py | mmm.py | py | 1,161 | python | ru | code | 0 | github-code | 13 |
24044234166 | # 在运行前或运行后替换
# To modify behavior before or after content defined in a parent class runs,
# override the method and then use super to call the parent's version.
class Parent(object):
    def altered(self):
        """Base implementation; Child wraps this with pre/post prints."""
        print("PARENT altered()")
class Child(Parent):
    def altered(self):
        """Run extra behavior before and after the parent's altered()."""
        print("CHILD, BEFORE PARENT altered()")
        # Explicit Python-2-style super() call; delegates to Parent.altered.
        super(Child, self).altered()
        print("CHILD, AFTER PARENT altered()")
# Demonstration: the parent prints one line; the child wraps the parent's
# output with its own before/after lines.
dad = Parent()
son = Child()
dad.altered()
son.altered()
| NullPointerC/Learn-Python-The-Hard-Way-Source-Code | ex44c.py | ex44c.py | py | 494 | python | zh | code | 0 | github-code | 13 |
12522775863 | from enum import Enum
import pandas as pd
import tkinter as tk
from tkinter import ttk
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib import gridspec
from dataclasses import dataclass
from pathlib import Path
import numpy as np
from learn_bot.recoil.weapon_id_name_conversion import weapon_id_to_name, weapon_name_to_id
# Input CSV produced by the analytics pipeline, plus cache files for the
# processed dataframes.
data_path = Path(__file__).parent / '..' / '..' / '..' / 'analytics' / 'csv_outputs' / 'engagementAim.csv'
saved_path = Path(__file__).parent / 'saved_dataframe.csv'
recoil_saved_path = Path(__file__).parent / 'saved_recoil_dataframe.csv'
# Column names in the engagement CSV.  "(t)" marks the current tick and
# "(t-N)" a value N ticks in the past.
core_id_columns = ["id", "round id", "tick id", "demo tick id", "game tick id", "game time", "engagement id"]
weapon_id_column = "weapon id (t)"
recoil_index_column = "recoil index (t)"
prior_recoil_index_column = "recoil index (t-1)"
base_x_recoil_column = "scaled recoil angle x"
cur_x_recoil_column = "scaled recoil angle x (t)"
delta_x_recoil_column = "delta scaled recoil angle x"
base_y_recoil_column = "scaled recoil angle y"
cur_y_recoil_column = "scaled recoil angle y (t)"
delta_y_recoil_column = "delta scaled recoil angle y"
x_vel_column = "attacker vel x (t)"
y_vel_column = "attacker vel y (t)"
z_vel_column = "attacker vel z (t)"
attacker_duck_amount_column = "attacker duck amount (t)"
ticks_since_last_fire_column = "ticks since last fire (t)"
# Duck-state filter choices shown in the UI dropdown.
duck_options = ["any state", "standing", "crouching"]
class FilteredRecoilData:
    """Rows of the engagement dataframe filtered by weapon, recoil index,
    speed, ticks-since-fire and duck state, with delta-recoil columns added.
    """
    # Filtered copy of the input dataframe with delta columns appended.
    all_cols_df: pd.DataFrame
    # Projection of all_cols_df onto the id + recoil-related columns.
    recoil_cols_df: pd.DataFrame
    def __init__(self, input_df: pd.DataFrame, weapon_id: int, min_recoil_index: float, max_recoil_index: float,
                 min_speed: float, max_speed: float,
                 min_ticks_since_fire: float, max_ticks_since_fire: float,
                 duck_option: str, delta_ticks: int):
        """Filter input_df and compute recoil deltas over delta_ticks ticks.

        duck_option is one of duck_options; delta_ticks selects which
        "(t-N)" recoil column to subtract from the current "(t)" value.
        """
        # Per-row speed magnitude from the three velocity components.
        speed_col = \
            np.sqrt((input_df[x_vel_column].pow(2) + input_df[y_vel_column].pow(2) + input_df[z_vel_column].pow(2)))
        # Boolean mask combining all range filters.
        conditions = (input_df[weapon_id_column] == weapon_id) & \
                     (input_df[recoil_index_column] >= min_recoil_index) & \
                     (input_df[recoil_index_column] <= max_recoil_index) & \
                     (speed_col >= min_speed) & (speed_col <= max_speed) & \
                     (input_df[ticks_since_last_fire_column] >= min_ticks_since_fire) & \
                     (input_df[ticks_since_last_fire_column] <= max_ticks_since_fire)
        # Duck amount > 0.5 is treated as "standing" per duck_options[1];
        # NOTE(review): the >/<= orientation looks inverted vs the option
        # names ("standing" vs "crouching") — confirm against the data.
        if duck_option == duck_options[1]:
            conditions = conditions & (input_df[attacker_duck_amount_column] > 0.5)
        elif duck_option == duck_options[2]:
            conditions = conditions & (input_df[attacker_duck_amount_column] <= 0.5)
        self.all_cols_df = input_df[conditions].copy()
        # Delta recoil = current tick value minus the value delta_ticks ago.
        old_x_recoil_column = base_x_recoil_column + f" (t-{delta_ticks})"
        self.all_cols_df[delta_x_recoil_column] = \
            self.all_cols_df[cur_x_recoil_column] - self.all_cols_df[old_x_recoil_column]
        old_y_recoil_column = base_y_recoil_column + f" (t-{delta_ticks})"
        self.all_cols_df[delta_y_recoil_column] = \
            self.all_cols_df[cur_y_recoil_column] - self.all_cols_df[old_y_recoil_column]
        self.recoil_cols_df = \
            self.all_cols_df.loc[:, core_id_columns +
                                    [weapon_id_column, recoil_index_column,
                                     ticks_since_last_fire_column, attacker_duck_amount_column,
                                     cur_x_recoil_column, delta_x_recoil_column, old_x_recoil_column,
                                     cur_y_recoil_column, delta_y_recoil_column, old_y_recoil_column]]
@dataclass
class RecoilPlot:
    """Renders 2-D recoil histograms onto a Tk-embedded matplotlib figure."""
    # Figure hosting both histogram axes and their colorbars.
    fig: plt.Figure
    # Tk canvas that the figure is drawn onto.
    canvas: FigureCanvasTkAgg
    def plot_recoil_distribution(self, abs_hist_ax: plt.Axes, delta_hist_ax: plt.Axes,
                                 selected_recoil_df: pd.DataFrame):
        """Draw absolute and delta recoil-angle heatmaps for the selection.

        abs_hist_ax gets the absolute scaled recoil (deg), delta_hist_ax the
        per-tick change; both use 41x41 bins over fixed ranges.
        """
        # Fixed [[x_min, x_max], [y_min, y_max]] histogram extents.
        abs_hist_range = [[-10, 10], [-1, 20]]
        delta_hist_range = [[-0.75, 0.75], [-0.75, 0.75]]
        # plot abs
        abs_recoil_heatmap, abs_recoil_x_bins, abs_recoil_y_bins = \
            np.histogram2d(selected_recoil_df[cur_x_recoil_column].to_numpy(),
                           selected_recoil_df[cur_y_recoil_column].to_numpy(),
                           bins=41, range=abs_hist_range)
        # histogram2d returns x-by-y counts; transpose so rows map to y.
        abs_recoil_heatmap = abs_recoil_heatmap.T
        abs_recoil_X, abs_recoil_Y = np.meshgrid(abs_recoil_x_bins, abs_recoil_y_bins)
        abs_recoil_im = abs_hist_ax.pcolormesh(abs_recoil_X, abs_recoil_Y, abs_recoil_heatmap)
        self.fig.colorbar(abs_recoil_im, ax=abs_hist_ax)
        abs_hist_ax.set_title("Absolute Scaled Recoil")
        abs_hist_ax.set_xlabel("X Recoil (deg)")
        abs_hist_ax.set_ylabel("Y Recoil (deg)")
        # Flip x so the plot matches the in-game view direction —
        # presumably positive x recoil moves the view left; confirm.
        abs_hist_ax.invert_xaxis()
        # plot delta
        delta_recoil_heatmap, delta_recoil_x_bins, delta_recoil_y_bins = \
            np.histogram2d(selected_recoil_df[delta_x_recoil_column].to_numpy(),
                           selected_recoil_df[delta_y_recoil_column].to_numpy(),
                           bins=41, range=delta_hist_range)
        delta_recoil_heatmap = delta_recoil_heatmap.T
        delta_recoil_X, delta_recoil_Y = np.meshgrid(delta_recoil_x_bins, delta_recoil_y_bins)
        delta_recoil_im = delta_hist_ax.pcolormesh(delta_recoil_X, delta_recoil_Y, delta_recoil_heatmap)
        self.fig.colorbar(delta_recoil_im, ax=delta_hist_ax)
        delta_hist_ax.set_title("Delta Scaled Recoil")
        delta_hist_ax.set_xlabel("Delta X Recoil (deg)")
        delta_hist_ax.set_ylabel("Delta Y Recoil (deg)")
        delta_hist_ax.invert_xaxis()
        self.fig.tight_layout()
        self.canvas.draw()
def vis(recoil_df: pd.DataFrame):
    """Launch the interactive Weapon Recoil Explorer GUI.

    Builds a tk window with a weapon dropdown, a duck-state dropdown, and
    (mid, range) slider pairs for recoil index, attacker speed, and ticks
    since fire, plus a delta-ticks slider. Every control change re-filters
    recoil_df through FilteredRecoilData and redraws the absolute/delta
    recoil heatmaps. Blocks in tk's mainloop until the window is closed.
    """
    # BUG FIX: derive the weapon list from the recoil_df parameter instead of
    # the module-level all_data_df global (identical when called as
    # vis(all_data_df), but correct for any other caller).
    weapon_ids = recoil_df.loc[:, weapon_id_column].unique().tolist()
    weapon_names = [weapon_id_to_name[index] for index in weapon_ids]
    weapon_names = sorted(weapon_names)
    # Main window of the application.
    window = tk.Tk()
    window.title("Weapon Recoil Explorer")
    window.resizable(width=False, height=False)
    window.configure(background='grey')
    fig = Figure(figsize=(12., 5.5), dpi=100)
    canvas = FigureCanvasTkAgg(fig, master=window)  # A tk.DrawingArea.
    recoil_plot = RecoilPlot(fig, canvas)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    # pack_toolbar=False will make it easier to use a layout manager later on.
    toolbar = NavigationToolbar2Tk(canvas, window, pack_toolbar=False)
    toolbar.update()
    toolbar.pack(side=tk.BOTTOM, fill=tk.X)

    def ignore_arg_update_graph(ignore_arg):
        # tk widget callbacks pass the new widget value; the redraw ignores it.
        update_graph()

    # Most recent filter result; also read by the two save callbacks below.
    last_filtered_recoil_data: FilteredRecoilData

    def update_graph():
        """Re-filter the data from the current control values and redraw."""
        nonlocal last_filtered_recoil_data
        fig.clear()
        abs_hist_ax = fig.add_subplot(1, 2, 1)
        delta_hist_ax = fig.add_subplot(1, 2, 2)
        # Each (mid, range) slider pair defines a [mid - range/2, mid + range/2] window.
        mid_recoil_index = float(mid_recoil_index_selector.get())
        range_recoil_index = float(range_recoil_index_selector.get())
        min_recoil_index = mid_recoil_index - range_recoil_index / 2.
        max_recoil_index = mid_recoil_index + range_recoil_index / 2.
        mid_speed = float(mid_speed_selector.get())
        range_speed = float(range_speed_selector.get())
        min_speed = mid_speed - range_speed / 2.
        max_speed = mid_speed + range_speed / 2.
        mid_ticks_since_fire = float(mid_ticks_since_fire_selector.get())
        range_ticks_since_fire = float(range_ticks_since_fire_selector.get())
        min_ticks_since_fire = mid_ticks_since_fire - range_ticks_since_fire / 2.
        max_ticks_since_fire = mid_ticks_since_fire + range_ticks_since_fire / 2.
        last_filtered_recoil_data = \
            FilteredRecoilData(recoil_df, weapon_name_to_id[weapon_selector_variable.get()],
                               min_recoil_index, max_recoil_index, min_speed, max_speed,
                               min_ticks_since_fire, max_ticks_since_fire,
                               duck_selector_variable.get(), int(delta_ticks_selector.get()))
        recoil_index_text_var.set(f"recoil index mid {mid_recoil_index} range {range_recoil_index},"
                                  f"speed mid {mid_speed} range {range_speed},"
                                  f"ticks since fire mid {mid_ticks_since_fire} range {range_ticks_since_fire},"
                                  f"delta ticks {int(delta_ticks_selector.get())},"
                                  f"num points {len(last_filtered_recoil_data.all_cols_df)}")
        recoil_plot.plot_recoil_distribution(abs_hist_ax, delta_hist_ax, last_filtered_recoil_data.all_cols_df)

    def save_graph_data():
        # Dump every column of the last filtered selection, transposed with
        # columns sorted, to the module-level saved_path.
        tmp_df = last_filtered_recoil_data.all_cols_df.copy()
        tmp_df.sort_index(axis=1).T.to_csv(saved_path)

    def save_graph_data_recoil_cols():
        # Dump only the recoil-focused columns to the module-level recoil_saved_path.
        last_filtered_recoil_data.recoil_cols_df.to_csv(recoil_saved_path)

    # Row of discrete controls: weapon, duck state, and the two save buttons.
    discrete_selector_frame = tk.Frame(window)
    discrete_selector_frame.pack(pady=5)
    weapon_selector_variable = tk.StringVar()
    weapon_selector_variable.set(weapon_names[0])  # default value
    weapon_selector = tk.OptionMenu(discrete_selector_frame, weapon_selector_variable, *weapon_names,
                                    command=ignore_arg_update_graph)
    weapon_selector.configure(width=20)
    weapon_selector.pack(side="left")
    duck_label = tk.Label(discrete_selector_frame, text="Duck Options")
    duck_label.pack(side="left")
    duck_selector_variable = tk.StringVar()
    duck_selector_variable.set(duck_options[0])  # default value
    duck_selector = tk.OptionMenu(discrete_selector_frame, duck_selector_variable, *duck_options,
                                  command=ignore_arg_update_graph)
    duck_selector.configure(width=20)
    duck_selector.pack(side="left")
    save_all_cols_button = tk.Button(discrete_selector_frame, text="Save All Cols", command=save_graph_data)
    save_all_cols_button.pack(side="left")
    save_recoil_cols_button = tk.Button(discrete_selector_frame, text="Save Recoil Cols",
                                        command=save_graph_data_recoil_cols)
    save_recoil_cols_button.pack(side="left")
    # Status line describing the current filter and point count.
    recoil_index_text_frame = tk.Frame(window)
    recoil_index_text_frame.pack(pady=5)
    recoil_index_text_var = tk.StringVar()
    recoil_index_label = tk.Label(recoil_index_text_frame, textvariable=recoil_index_text_var)
    recoil_index_label.pack(side="left")
    # Slider pairs; each Scale redraws via ignore_arg_update_graph on change.
    mid_recoil_index_frame = tk.Frame(window)
    mid_recoil_index_frame.pack(pady=5)
    mid_recoil_index_label = tk.Label(mid_recoil_index_frame, text="Recoil Index Mid")
    mid_recoil_index_label.pack(side="left")
    mid_recoil_index_selector = tk.Scale(
        mid_recoil_index_frame,
        from_=0,
        to=30,
        orient='horizontal',
        showvalue=0,
        resolution=0.5,
        length=300,
        command=ignore_arg_update_graph
    )
    mid_recoil_index_selector.pack()
    range_recoil_index_frame = tk.Frame(window)
    range_recoil_index_frame.pack(pady=5)
    range_recoil_index_label = tk.Label(range_recoil_index_frame, text="Recoil Index Range")
    range_recoil_index_label.pack(side="left")
    range_recoil_index_selector = tk.Scale(
        range_recoil_index_frame,
        from_=0.5,
        to=15,
        orient='horizontal',
        showvalue=0,
        resolution=0.5,
        length=300,
        command=ignore_arg_update_graph
    )
    #range_recoil_index_selector.set(30)
    range_recoil_index_selector.pack(side="left")
    mid_speed_frame = tk.Frame(window)
    mid_speed_frame.pack(pady=5)
    mid_speed_label = tk.Label(mid_speed_frame, text="Attacker Speed Mid")
    mid_speed_label.pack(side="left")
    mid_speed_selector = tk.Scale(
        mid_speed_frame,
        from_=0,
        to=250,
        orient='horizontal',
        showvalue=0,
        length=300,
        resolution=0.5,
        command=ignore_arg_update_graph
    )
    mid_speed_selector.pack(side="left")
    range_speed_frame = tk.Frame(window)
    range_speed_frame.pack(pady=5)
    range_speed_label = tk.Label(range_speed_frame, text="Attacker Speed Range")
    range_speed_label.pack(side="left")
    range_speed_selector = tk.Scale(
        range_speed_frame,
        from_=1,
        to=500,
        orient='horizontal',
        showvalue=0,
        length=300,
        command=ignore_arg_update_graph
    )
    range_speed_selector.set(500)
    range_speed_selector.pack(side="left")
    mid_ticks_since_fire_frame = tk.Frame(window)
    mid_ticks_since_fire_frame.pack(pady=5)
    mid_ticks_since_fire_label = tk.Label(mid_ticks_since_fire_frame, text="Ticks Since Fire Mid")
    mid_ticks_since_fire_label.pack(side="left")
    mid_ticks_since_fire_selector = tk.Scale(
        mid_ticks_since_fire_frame,
        from_=0,
        to=100,
        orient='horizontal',
        showvalue=0,
        length=300,
        command=ignore_arg_update_graph
    )
    mid_ticks_since_fire_selector.pack(side="left")
    range_ticks_since_fire_frame = tk.Frame(window)
    range_ticks_since_fire_frame.pack(pady=5)
    range_ticks_since_fire_label = tk.Label(range_ticks_since_fire_frame, text="Ticks Since Fire Range")
    range_ticks_since_fire_label.pack(side="left")
    range_ticks_since_fire_selector = tk.Scale(
        range_ticks_since_fire_frame,
        from_=1,
        to=200,
        orient='horizontal',
        showvalue=0,
        length=300,
        command=ignore_arg_update_graph
    )
    #range_ticks_since_fire_selector.set(200)
    range_ticks_since_fire_selector.pack(side="left")
    delta_ticks_frame = tk.Frame(window)
    delta_ticks_frame.pack(pady=5)
    delta_ticks_label = tk.Label(delta_ticks_frame, text="Delta Ticks")
    delta_ticks_label.pack(side="left")
    delta_ticks_selector = tk.Scale(
        delta_ticks_frame,
        from_=1,
        to=13,
        orient='horizontal',
        showvalue=0,
        length=300,
        command=ignore_arg_update_graph
    )
    delta_ticks_selector.pack(side="left")
    # Initial draw with the default control values.
    update_graph()
    # Start the GUI
    window.mainloop()
# CSV next to this script collecting rows whose recoil index looks wrong.
bad_recoil_index_path = Path(__file__).parent / 'bad_recoil_index.csv'

if __name__ == "__main__":
    all_data_df = pd.read_csv(data_path)
    # Rows fired on the very first tick of a burst.
    x_all = all_data_df[(all_data_df[ticks_since_last_fire_column] == 0)]
    print(len(x_all))
    # First-tick rows whose recoil index did not drop below the prior value
    # (i.e. the recoil index apparently failed to reset between bursts).
    x = all_data_df[(all_data_df[ticks_since_last_fire_column] == 0) &
                    (all_data_df[recoil_index_column] - 0.5 <= all_data_df[prior_recoil_index_column])]
    print(len(x))
    # Dump the suspect rows (transposed, columns sorted) for manual inspection.
    x.copy().sort_index(axis=1).T.to_csv(bad_recoil_index_path)
    vis(all_data_df)
| David-Durst/csknow | learn_bot/learn_bot/recoil/vis.py | vis.py | py | 14,690 | python | en | code | 13 | github-code | 13 |
40177493213 | import turtle as t
import random
def turn_up(): # called when the Up arrow key is pressed
    t.left(2) # rotate the turtle 2 degrees to the left (raises the aim)
def turn_down(): # called when the Down arrow key is pressed
    t.right(2) # rotate the turtle 2 degrees to the right (lowers the aim)
def fire(): # called when the spacebar is pressed
    ang = t.heading() # remember the current heading so it can be restored after the shot
    while t.ycor() > 0: # keep flying while the turtle is above the ground
        t.forward(15) # move 15 units forward
        t.right(5) # turn 5 degrees right each step, bending the flight into an arc
    d = t.distance(target, 0) # distance between the landing point and the target center
    t.sety(random.randint(10,100)) # pick a random height at which to write the result text
    if d < 25: # landing within 25 units of the target counts as a hit
        t.color("blue")
        t.write("Good!", False, "center", ("",15))
    else: # farther than 25 units is a miss
        t.color("red")
        t.write("Bad!", False, "center", ("",15))
    t.color("black") # restore the turtle color to black
    t.goto(-200, 10) # move the turtle back to the launch position
    t.setheading(ang) # restore the heading remembered at launch
# Draw the ground line.
t.goto(-300,0)
t.down()
t.goto(300,0)
# Choose the target position and draw the target zone.
target = random.randint(50,150) # target center at a random x between 50 and 150
t.pensize(3)
t.color("green")
t.up()
t.goto(target - 25,2)
t.down()
t.goto(target +25, 2)
# Set the turtle back to black and return it to the launch position.
t.color("black")
t.up()
t.goto(-200,10)
t.setheading(20)
# Key bindings needed for the turtle to respond.
t.onkeypress(turn_up, "Up")
t.onkeypress(turn_down, "Down")
t.onkeypress(fire, "space")
t.listen()
| mongsil0219/2023.01.18 | 16A_cannon.py | 16A_cannon.py | py | 1,825 | python | ko | code | 0 | github-code | 13 |
31740649655 | import numpy as np
from machinelearn.linear_model_03.closed_form_sol.LinearRegression_CFSol import LinearRegressionClosedFormSol
from machinelearn.model_evaluation_selection_02.Polynomial_feature import PolynomialFeatureData
import matplotlib.pyplot as plt
def objective_fun(x):
    """Ground-truth cubic 0.5*x**3 + 2*x**2 - 2.5*x + 2 used to generate samples."""
    return 0.5 * x ** 3 + 2 * x ** 2 - 2.5 * x + 2
np.random.seed(42) # 随机种子
n = 100 # 样本量
raw_x = np.sort(6 * np.random.rand(n, 1) - 3) # 采样数据 【-3,3】,均匀分布
raw_y = objective_fun(raw_x) + 0.5 * np.random.randn(n, 1) # 目标值,添加噪声
# raw_y = objective_fun(raw_x)
plt.figure(figsize=(15, 8))
degree = [1, 2, 5, 10, 15, 20]  # highest orders of the fitted polynomials
for i, d in enumerate(degree):
    print('0' * 100)
    feature_obj = PolynomialFeatureData(raw_x, d, with_bias=False)  # feature-data object
    X_sample = feature_obj.fit_transform()  # generate the polynomial feature matrix
    lr_cfs = LinearRegressionClosedFormSol()  # fit the polynomial via closed-form linear regression
    lr_cfs.fit(X_sample, raw_y)  # solve for the polynomial regression coefficients
    theta = lr_cfs.get_params()  # fetch the fitted coefficients
    print('degree: %d, theta is ' % d, theta[0].reshape(-1)[::-1], theta[1])
    y_train_pred = lr_cfs.predict(X_sample)  # predictions on the training set
    # Sample the test inputs.
    x_test_raw = np.linspace(-3, 3, 150)  # test data
    y_test = objective_fun(x_test_raw)  # ground truth on the test data
    feature_obj = PolynomialFeatureData(x_test_raw, degree=d, with_bias=False)  # feature-data object
    X_test = feature_obj.fit_transform()  # polynomial features for the test data
    y_test_pred = lr_cfs.predict(X_test)  # model predictions on the test samples
    # Visualize the fitted polynomial curve against the truth and raw samples.
    plt.subplot(231 + i)
    plt.scatter(raw_x, raw_y, edgecolors='k', s=10, label='Raw data')
    plt.plot(x_test_raw, y_test, 'k-', lw=1, label='Objective Fun')
    plt.plot(x_test_raw, y_test_pred, 'r--', lw=1.5, label='Polynomial Fit')
    plt.legend(frameon=False)
    plt.grid(ls=':')
    plt.xlabel("$x$", fontdict={'fontsize': 12})
    plt.ylabel("$y(x)$", fontdict={'fontsize': 12})
    test_ess = (y_test_pred.reshape(-1) - y_test.reshape(-1)) ** 2  # squared errors on the test set
    test_mse, test_std = np.mean(test_ess), np.std(test_ess)
    train_ess = (y_train_pred.reshape(-1) - raw_y.reshape(-1)) ** 2  # squared errors on the training set
    train_mse, train_std = np.mean(train_ess), np.std(train_ess)
    print(y_test_pred.shape, y_test.shape)
    plt.title('Degree {} Test MSE = {:.2e}(+/-{:.2e}) \n Train Mse = {:.2e}(+/-{:.2e}'
              .format(d, test_mse, test_std, train_mse, train_std), fontdict={'fontsize': 8})
plt.show()
| lixixi89055465/py_stu | machinelearn/model_evaluation_selection_02/learning_curve/test_polynomial_fit.py | test_polynomial_fit.py | py | 2,643 | python | en | code | 1 | github-code | 13 |
12210404993 | import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
from bottle import Bottle, request, redirect, debug
debug(True)  # enable bottle's debug mode (verbose tracebacks in responses)
bottle = Bottle()  # the WSGI application object
# Jinja2 environment rooted at this file's directory, with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
DEFAULT_GUESTBOOK_NAME = 'guestbook'  # guestbook used when no name is supplied
# We set a parent key on the 'Greetings' to ensure that they are all in the same
# entity group. Queries across the single entity group will be consistent.
# However, the write rate should be limited to ~1/second.
def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):
    """Constructs a Datastore key for a Guestbook entity with guestbook_name."""
    return ndb.Key('Guestbook', guestbook_name)
class Greeting(ndb.Model):
    """Models an individual Guestbook entry with author, content, and date."""
    author = ndb.UserProperty()  # the logged-in user who wrote the entry, if any
    content = ndb.StringProperty(indexed=False)  # free-form message text, not indexed
    date = ndb.DateTimeProperty(auto_now_add=True)  # set automatically on first put()
@bottle.get('/')
def MainPage():
    """Render the guestbook page: the 10 newest greetings plus a login/logout link.

    The guestbook name arrives as a query-string parameter (set by the
    redirect in /sign); falls back to DEFAULT_GUESTBOOK_NAME.
    """
    # BUG FIX: this is a GET route, so parameters live in request.query --
    # the original read request.forms, which bottle only populates from a
    # POST body, so the guestbook_name passed by the /sign redirect was
    # always ignored.
    guestbook_name = request.query.get('guestbook_name',
                                       DEFAULT_GUESTBOOK_NAME)
    greetings_query = Greeting.query(
        ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
    greetings = greetings_query.fetch(10)  # ten most recent entries
    # Offer a logout link when signed in, a login link otherwise.
    if users.get_current_user():
        url = users.create_logout_url('/')
        url_linktext = 'Logout'
    else:
        url = users.create_login_url('/')
        url_linktext = 'Login'
    template_values = {
        'greetings': greetings,
        'guestbook_name': urllib.quote_plus(guestbook_name),
        'url': url,
        'url_linktext': url_linktext,
    }
    template = JINJA_ENVIRONMENT.get_template('index.html')
    return template.render(template_values)
@bottle.post('/sign')
def Guestbook():
    """Store a new greeting from the sign form, then redirect back to '/'."""
    # We set the same parent key on the 'Greeting' to ensure each Greeting
    # is in the same entity group. Queries across the single entity group
    # will be consistent. However, the write rate to a single entity group
    # should be limited to ~1/second.
    guestbook_name = request.forms.get('guestbook_name',
                                       DEFAULT_GUESTBOOK_NAME)
    greeting = Greeting(parent=guestbook_key(guestbook_name))
    # Record the author only when a user is signed in; anonymous posts allowed.
    if users.get_current_user():
        greeting.author = users.get_current_user()
    greeting.content = request.forms.get('content')
    greeting.put()
    # Carry the guestbook name through the redirect as a query parameter.
    query_params = {'guestbook_name': guestbook_name}
    redirect('/?' + urllib.urlencode(query_params))
| Durell/appengine | bottle_guestbook/guestbook.py | guestbook.py | py | 2,529 | python | en | code | 0 | github-code | 13 |
2005403290 | import nibabel as nib
import glob as glob
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import os
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
from keras import backend as K
from keras.layers.normalization import BatchNormalization as bn
from keras import regularizers
from keras.preprocessing.image import *
#
# from sklearn.utils import shuffle
from keras.utils import multi_gpu_model
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin TensorFlow to GPU 0
import cv2
# iSeg-2019 dataset locations (T1/T2 volumes plus labels for training).
path = '/home/rutu/thesis/iSeg-2019-Training'
label_path = '/home/rutu/thesis/iSeg-2019-Training'
val_path = '/home/rutu/thesis/iSeg-2019-Validation'
smooth = 1.  # smoothing term that keeps the dice coefficient finite on empty masks
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between flattened truth and prediction tensors.

    A similarity score approaching 1 for perfect overlap; the module-level
    `smooth` term keeps the ratio defined when both masks are empty.
    """
    truth_flat = K.flatten(y_true)
    pred_flat = K.flatten(y_pred)
    overlap = K.sum(truth_flat * pred_flat)
    total = K.sum(truth_flat) + K.sum(pred_flat)
    return (2. * overlap + smooth) / (total + smooth)
def dice_coef_loss(y_true, y_pred):
    # Negated dice coefficient: maximizing overlap == minimizing this loss.
    return -dice_coef(y_true, y_pred)
def tversky(y_true, y_pred, smooth=1, alpha=0.7):
    """Tversky index between flattened truth and prediction tensors.

    alpha weights false negatives and (1 - alpha) weights false positives,
    so alpha > 0.5 penalizes missed foreground more than spurious foreground.
    `smooth` keeps the ratio defined when there is no overlap at all.
    """
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    true_positives = K.sum(truth * pred)
    false_negatives = K.sum(truth * (1 - pred))
    false_positives = K.sum((1 - truth) * pred)
    denominator = true_positives + alpha * false_negatives + (1 - alpha) * false_positives + smooth
    return (true_positives + smooth) / denominator
def tversky_loss(y_true, y_pred):
    # Loss form of the Tversky index (0 = perfect overlap, 1 = none).
    return 1 - tversky(y_true, y_pred)
def load_data(path, i):
    """Load subject ``i``'s T1, T2, and label volumes from ``path``.

    Returns (t1_data, t2_data, label_data) as arrays read from the
    Analyze-format .img files of the iSeg training set.
    """
    t1_data = nib.load(path + '/subject-%d-T1.img' % i).get_data()
    t2_data = nib.load(path + '/subject-%d-T2.img' % i).get_data()
    label_data = nib.load(path + '/subject-%d-label.img' % i).get_data()
    return t1_data, t2_data, label_data
# Eagerly load subject 1; NOTE(review): these names are rebound in the
# stacking loop below, so this looks like a leftover sanity check -- confirm.
train_T1, train_T2, label = load_data(path, 1)
def load_data_val(path, i):
    """Load subject ``i``'s T1 and T2 volumes from ``path``.

    Validation subjects have no label files, so only the two modalities
    are returned as (t1_data, t2_data).
    """
    t1_data = nib.load(path + '/subject-%d-T1.img' % i).get_data()
    t2_data = nib.load(path + '/subject-%d-T2.img' % i).get_data()
    return t1_data, t2_data
# NOTE(review): loads validation subject 1 from the training directory
# `path`, not `val_path` -- confirm which directory was intended.
val_T1, val_T2 = load_data_val(path, 1)
#
train_filenames = glob.glob('/home/rutu/thesis/iSeg-2019-Training/*T1.img')  # one entry per training subject
val_filenames = glob.glob('/home/rutu/thesis/iSeg-2019-Validation/*T1.img')  # one entry per validation subject
len(val_filenames)  # expression statement with no effect (likely a notebook leftover)
# Stack every training subject into (num_subjects, H, W, D, 1) arrays.
# Per-subject volumes are collected in lists and concatenated once at the
# end: this is O(n) instead of the original O(n^2) repeated-concatenate
# pattern, and it removes the uninitialized seed row that had to be sliced
# off afterwards.
t1_volumes = []
t2_volumes = []
label_volumes = []
for i in range(1, len(train_filenames) + 1):
    print("Running 500 epochs*********** ")
    print("i......", i)
    train_T1, train_T2, labels = load_data(path, i)
    t1_volumes.append(np.expand_dims(train_T1, axis=0))
    t2_volumes.append(np.expand_dims(train_T2, axis=0))
    label_volumes.append(np.expand_dims(labels, axis=0))
# astype(float64) preserves the dtype the old float64 seed row forced on the
# result via concatenate's type promotion (no copy if already float64).
np_train_T1 = np.concatenate(t1_volumes, axis=0).astype(np.float64, copy=False)
np_train_T2 = np.concatenate(t2_volumes, axis=0).astype(np.float64, copy=False)
np_label = np.concatenate(label_volumes, axis=0).astype(np.float64, copy=False)
print("np_train_T1", np_train_T1.shape)
print("[[[[[[[[np_label]]]]]]]]", np_label.shape)
print("unique np_label", np.unique(np_label))
#
def UNet(input_shape_1, input_shape_2):
new_inputs = Input(input_shape_1)
new_inputs_2 = Input(input_shape_2)
l2_lambda = 0.0002
DropP = 0.3
kernel_size = 3
#Change shape from (144, 192, 256) to (192, 256, 144)
new_inputs_permuted = Permute((2, 3, 1))(new_inputs)
print("new_inputs_permuted", new_inputs_permuted.shape)
#Channel squeeze, Spatial Excite
se1 = Conv2D(1, (1,1), activation = 'softmax')(new_inputs_permuted)
print("se1 shape....", K.int_shape(se1)) #
mul_1 = multiply([new_inputs_permuted, se1], name = 'mul_1') #
print("mul_1 shape....", K.int_shape(mul_1)) #
init_cs = K.int_shape(new_inputs_permuted)
print("Look here..........init_cs", init_cs)
#Spatial squeeze, Channel excite (ssce)
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(new_inputs_permuted)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
new_inputs_p = Permute( (2, 3, 1))(new_inputs) #
print("************new_inputs", new_inputs_p.shape)
print("************se1", se1.shape)
mul = multiply([new_inputs_p, se1])
print("mul.........", mul.shape)
mul_scse = Add()([mul_1, mul]) #
print("mul_scse........", mul_scse.shape)
# mul = Permute( (3, 2, 1))(mul)
mul = Permute( (3, 2, 1))(mul_scse)
print("*****mul",mul.shape)
mul1 = Permute((2, 3, 1))(mul)
print("!!!!!!!!!!!!!!!!!!!!mul1:", mul1.shape)
# pwc = Conv2D(1, (1, 1), activation = 'relu', name = 'pwc')(mul1)
pwc = Conv2D(1, (1, 1), activation = 'relu', name = 'pwc')(mul_scse)
print("!!!!!!!!!!!!!!!!!pwc_before:", pwc.shape)
#2d attention-augmented UNet begins with two slices combined together as input
#Downsample layer 1
combine = concatenate([pwc, new_inputs_2], name = 'combine')
sess = K.get_session()
conv1 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(combine)
conv1 = bn()(conv1)
conv1 = Conv2D(32, (kernel_size, kernel_size), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv1)
conv1 = bn()(conv1)
#MSE left branch layer1
se1 = Conv2D(1, (1,1), activation = 'softmax')(conv1)#, activation = ?)
print("se1 shape....", K.int_shape(se1))
mul_1 = multiply([conv1, se1], name = 'mul_11')
print("mul_1 shape....", K.int_shape(mul_1))
init_cs = K.int_shape(conv1)
print("Look here..........init_cs", init_cs)
#MSE right branch layer1
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv1)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv1, se1])
print("mul shape......", K.int_shape(mul))
mul_scse = Add()([mul_1, mul])
print("mul_scse shape......", K.int_shape(mul_scse))
print("*********************sen1 shape: ",K.int_shape(se1))
#Downsample layer 2
pool1 = MaxPooling2D(pool_size=(2, 2))(mul_scse)
pool1 = Dropout(DropP)(pool1)
conv2 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(pool1)
conv2 = bn()(conv2)
conv2 = Conv2D(64, (kernel_size, kernel_size), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv2)
conv2 = bn()(conv2)
#MSE left branch layer2
se1 = Conv2D(1, (1,1), activation = 'softmax')(conv2)#, activation = ?)
print("se1 shape....", K.int_shape(se1))
init_cs = K.int_shape(conv2)
print("Look here..........init_cs", init_cs)
mul_1 = multiply([conv2, se1])
print("mul_1 shape....", K.int_shape(mul_1))
#MSE right branch layer1
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv2)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv2, se1])
print("mul shape......", K.int_shape(mul))
mul_scse = Add()([mul_1, mul])
print("mul_scse shape......", K.int_shape(mul_scse))
print("*********************sen1 shape: ",K.int_shape(se1))
#Downsample layer 3
pool2 = MaxPooling2D(pool_size=(2, 2))(mul_scse)
pool2 = Dropout(DropP)(pool2)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(pool2)
conv3 = bn()(conv3)
conv3 = Conv2D(128, (3, 3), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv3)
conv3 = bn()(conv3)
se1 = Conv2D(1, (1,1), activation = 'softmax')(conv3)
init_cs = K.int_shape(conv3)
print("Look here..........init_cs", init_cs)
mul_1 = multiply([conv3, se1])
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv3)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv3, se1])
mul_scse = Add()([mul_1, mul])
print("*********************sen1 shape: ",K.int_shape(se1))
#Downsample layer 4
pool3 = MaxPooling2D(pool_size=(2, 2))(mul_scse)
pool3 = Dropout(DropP)(pool3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(pool3)
conv4 = bn()(conv4)
conv4 = Conv2D(256, (3, 3), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv4)
conv4 = bn()(conv4)
se1 = Conv2D(1, (1,1), activation='softmax')(conv4)#, activation = ?)
init_cs = K.int_shape(conv4)
print("Look here..........init_cs", init_cs)
mul_1 = multiply([conv4, se1])
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv4)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv4, se1])
mul_scse = Add()([mul_1, mul])
print("*********************sen1 shape: ",K.int_shape(se1))
#Bottom layer
pool4 = MaxPooling2D(pool_size=(2, 2))(mul_scse)
pool4 = Dropout(DropP)(pool4)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(pool4)
conv5 = bn()(conv5)
conv5 = Conv2D(512, (3, 3), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv5)
conv5 = bn()(conv5)
#Upsample layer 1
up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),
padding='same')(conv5), conv4], name='up6', axis=3)
up6 = Dropout(DropP)(up6)
conv6 = Conv2D(256, (3, 3), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(up6)
conv6 = bn()(conv6)
conv6 = Conv2D(256, (3, 3), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv6)
conv6 = bn()(conv6)
#MSE left branch
se1 = Conv2D(1, (1,1), activation = 'softmax')(conv6)#, activation = ?)
init_cs = K.int_shape(conv6)
print("Look here..........init_cs", init_cs)
mul_1 = multiply([conv6, se1])
#MSE right branch
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv6)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv6, se1])
mul_scse = Add()([mul_1, mul])
print("*********************sen1 shape: ",K.int_shape(se1))
#Upsample layer 2
up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(mul_scse), conv3], name='up7', axis=3)
up7 = Dropout(DropP)(up7)
conv7 = Conv2D(128, (3, 3), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(up7)
conv7 = bn()(conv7)
conv7 = Conv2D(128, (3, 3), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv7)
conv7 = bn()(conv7)
#MSE left branch
se1 = Conv2D(1, (1,1), activation='softmax')(conv7)#, activation = ?)
init_cs = K.int_shape(conv7)
print("\n\n\nLook here..........init_cs", init_cs)
mul_1 = multiply([conv7, se1])
print("conv7 shape....", K.int_shape(conv7))
print("se1 shape....", K.int_shape(se1))
print("mul_1 shape....", K.int_shape(mul_1))
#MSE right branch
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv7)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv7, se1])
print("conv7................", K.int_shape(conv7))
print("se1..............", K.int_shape(se1))
print("mul...............", K.int_shape(mul))
mul_scse = Add()([mul_1, mul])
print("*********************sen1 shape: ",K.int_shape(se1))
#Upsample layer 3
up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(mul_scse), conv2], name='up8', axis=3)
up8 = Dropout(DropP)(up8)
conv8 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(up8)
conv8 = bn()(conv8)
conv8 = Conv2D(64, (kernel_size, kernel_size), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv8)
conv8 = bn()(conv8)
#MSE left branch
se1 = Conv2D(1, (1,1), activation='softmax')(conv8)#, activation = ?)
init_cs = K.int_shape(conv8)
print("Look here..........init_cs", init_cs)
mul_1 = multiply([conv8, se1])
print("conv8 shape....", K.int_shape(conv8))
print("se1 shape....", K.int_shape(se1))
print("mul_1 shape....", K.int_shape(mul_1))
# MSE right branch
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv8)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv8, se1])
print("conv8 shape....", K.int_shape(conv8))
print("se1 shape....", K.int_shape(se1))
print("mul shape....", K.int_shape(mul))
mul_scse = Add()([mul_1, mul])
print("*********************sen1 shape: ",K.int_shape(se1))
# Upsample layer 4
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(mul_scse), conv1], name='up9', axis=3)
up9 = Dropout(DropP)(up9)
conv9 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(up9)
conv9 = bn()(conv9)
conv9 = Conv2D(32, (kernel_size, kernel_size), activation='softmax', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda))(conv9)
conv9 = bn()(conv9)
se1 = Conv2D(1, (1,1), activation='softmax')(conv9)#, activation = ?)
init_cs = K.int_shape(conv9)
print("Look here..........init_cs", init_cs)
mul_1 = multiply([conv9, se1])
conv1_int = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], x.shape[2]*x.shape[3], 1)))(conv9)
cs = K.int_shape(conv1_int)
print("Look here..........cs", cs)
se1 = DepthwiseConv2D((int(cs[1]), 1), activation = 'relu')(conv1_int)
print("H*1 filter", se1.shape)
se1_r = Lambda(lambda x: tf.reshape(x, (tf.shape(x)[0], x.shape[1], init_cs[2],init_cs[3])))(se1)
print("After reshaping wc", K.int_shape(se1_r))
se1_r_shape = K.int_shape(se1_r)
print("se1_r_shape", se1_r_shape)
se1_r = Lambda(lambda x: tf.reverse(x, [1]))(se1_r)
se1 = DepthwiseConv2D((1, int(se1_r_shape[2])), activation = 'softmax')(se1_r)
print("W*1 filter", se1.shape)
print("************se1", se1.shape)
mul = multiply([conv9, se1])
mul_scse = Add()([mul_1, mul])
#Final block
conv22 = Conv2D(1, (1, 1), activation = 'sigmoid', name='conv22')(mul_scse) #
conv23 = Conv2D(1, (1, 1), activation = 'sigmoid', name = 'conv23')(mul_scse)
conv24 = Conv2D(1, (1, 1), activation = 'sigmoid', name = 'conv24')(mul_scse)
model = Model(inputs = [new_inputs, new_inputs_2], outputs = [conv22, conv23, conv24])
model.compile(optimizer = Adam(lr = 5e-5), loss = {'conv22':dice_coef_loss, 'conv23': dice_coef_loss, 'conv24': dice_coef_loss}, metrics = [dice_coef])
model.summary()
return model
# X_train=np.load("8_after_sub_144_new.npy")
# X1_train=np.load("8_x_l_144_new.npy")
# y_train=np.load("8_y_l_144_new.npy")
# Held-out test fold (fold 8): subtraction volumes, raw slices, label masks.
X_test1=np.load("8_after_sub_144_new.npy")
X_test=np.load("8_x_l_144_new.npy")
y_test=np.load("8_y_l_144_new.npy")
# Training set starts with fold 2; folds 10, 6 and 4 are appended below.
X_train=np.load("2_after_sub_144_new.npy")
X1_train=np.load("2_x_l_144_new.npy")
y_train=np.load("2_y_l_144_new.npy")
print("[[[[[[[[[[[[[[[[[[[[[[[[[y_tets]]]]]]]]]]]]]]]]]]]]]]]]]", np.unique(y_test))
# Append fold 10; the *_train1 temporaries are reset to [] to free memory.
X_train1=np.load("10_after_sub_144_new.npy")
X1_train1=np.load("10_x_l_144_new.npy")
y_train1=np.load("10_y_l_144_new.npy")
#changed
X1_train=np.concatenate((X1_train,X1_train1),axis=0)
X1_train1=[]
y_train=np.concatenate((y_train,y_train1),axis=0)
y_train1=[]
X_train=np.concatenate((X_train,X_train1),axis=0)
X_train1=[]
# Append fold 6.
X_train1=np.load("6_after_sub_144_new.npy")
X1_train1=np.load("6_x_l_144_new.npy")
y_train1=np.load("6_y_l_144_new.npy")
X1_train=np.concatenate((X1_train,X1_train1),axis=0)
X1_train1=[]
y_train=np.concatenate((y_train,y_train1),axis=0)
y_train1=[]
X_train=np.concatenate((X_train,X_train1),axis=0)
X_train1=[]
#
# Append fold 4.
X_train1=np.load("4_after_sub_144_new.npy")
X1_train1=np.load("4_x_l_144_new.npy")
y_train1=np.load("4_y_l_144_new.npy")
X1_train=np.concatenate((X1_train,X1_train1),axis=0)
X1_train1=[]
y_train=np.concatenate((y_train,y_train1),axis=0)
y_train1=[]
X_train=np.concatenate((X_train,X_train1),axis=0)
X_train1=[]
print(np.array(X_train).shape,np.array(X1_train).shape,np.array(y_train).shape)
# Scale intensities down by 1000 -- presumably a rough normalisation; verify range.
X_train = X_train/1000
print("X_train",np.unique(X_train))
X1_train = X1_train/1000
print("X1_train", np.unique(X1_train))
X_test1 = X_test1/1000
print("X_test1",np.unique(X_test1))
X_test = X_test/1000
print("X_test",np.unique(X_test))
# X_test = X_test/1000
# print("X_test1",np.unique(X_test))
#
# X1_test = X1_test/1000
# print("X_test",np.unique(X1_test))
# Two-input model: a (10, 192, 256) stack plus a (192, 256, 1) single slice.
model = UNet((10, 192, 256),( 192, 256, 1))
# model=load_model('nrna_reverse_reshape_fold4_mulscse.h5', custom_objects={'dice_coef_loss' : dice_coef_loss, 'tf':tf})
print("************Running 300 epochs with softmax***************")
# print("plain_unet_ordering_raunak_parameters_test_2_500.h5")
# model = load_weights('/home/rutu_g/thesis/plain_unet_ordering_75_again.h5')
# NOTE(review): es and mc are created but never passed to model.fit below,
# so early stopping and checkpointing have no effect -- confirm intent.
es = EarlyStopping(monitor = 'val_loss', min_delta = -0.001 , patience = 5)
mc = ModelCheckpoint("server_no_reduction_modified_concurrent_144_100_3_regions_softmax_2intodice4_{epoch:02d}-{val_loss:.2f}.h5", monitor = 'val_loss', period = 100)
# parallel_model = multi_gpu_model(model, gpus = 3)
# Recompile with one Dice loss per output head (conv22/conv23/conv24).
model.compile(optimizer = Adam(lr = 5e-5), loss = {'conv22':dice_coef_loss, 'conv23': dice_coef_loss, 'conv24': dice_coef_loss}, metrics = [dice_coef_loss] )
history = model.fit([X_train, X1_train], [y_train[ :, :, :, 0],y_train[ :, :, :, 1], y_train[ :, :, :, 2]], verbose = 2, batch_size =3, epochs = 300, shuffle= True)#, validation_data = ([X_val1, X_val], [y_val]))#[ :, :, :, 0], y_val[ :, :, :, 1], y_val[ :, :, :, 2]]))
model.save("nrna_reverse_reshape_fold2_mulscse.h5")
y_pred = model.predict([X_test1, X_test])
y_pred = np.array(y_pred)
# y_pred[y_pred <= 0.5 ] = 0
# y_pred[y_pred > 0.5] = 1
print("y_test", y_test.shape)
print("*********************////////////y_pred", y_pred.shape)
print("y_pred unique", np.unique(y_pred))
def dice_coef(y_true, y_pred):
    """Dice similarity coefficient between ground truth and prediction.

    Both tensors are flattened; the module-level `smooth` term keeps the
    ratio defined when both masks are empty.
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denom = K.sum(flat_true) + K.sum(flat_pred) + smooth
    return (2. * overlap + smooth) / denom
def dice_coef_loss(y_true, y_pred):
    # Negated Dice coefficient: maximizing overlap minimizes the loss.
    return -dice_coef(y_true, y_pred)
"""Preprocessing For softmax"""
import random
"""Preprocess for four sigmoids"""
print("y_test", y_test.shape)
print("y_pred", y_pred.shape)
print("y_pred unique", np.unique(y_pred))
pred = y_pred
print("X_test......", X_test.shape)
# y_test = y_test[144:288]
print("-----------------y_pred", y_pred.shape)
d0 = 0
d1 = 0
d2 = 0
d3 = 0
count = 0
for i in range(0, (X_test.shape)[0]):#(X_test[:144].shape)[0]):
count = count + 1
dl0 = dice_coef_loss(y_test[i, : ,: , 0].astype(np.float32), y_pred[0, i, : ,: , :])
dl1 = dice_coef_loss(y_test[i, : ,: , 1].astype(np.float32), y_pred[1, i, : ,: , :])
dl2 = dice_coef_loss(y_test[i, : ,: , 2].astype(np.float32), y_pred[2, i, : ,: , :])
# dl3 = dice_coef_loss(y_test[i, : ,: , 3].astype(np.float64), new[i, : ,: , 3])
d0 = d0 + dl0
d1 = d1 + dl1
d2 = d2 + dl2
# d3 = d3 + dl3
print("count1 ", count)
sess = K.get_session()
print("------------all------dice")
print("nrna_reverse_reshape_fold2_mulscse")
print(sess.run(d0/count))
print(sess.run(d1/count))
print(sess.run(d2/count))
print(np.unique(y_test))
new = new*255
y_test = y_test*255
for i in range(48,100):
cv2.imwrite("0_4r_pred_%d.png" %i,new[i][ : ,: , 0])
cv2.imwrite("0_4r_test_%d.png" %i, y_test[i,:,:,0])
cv2.imwrite("1_4r_pred_%d.png" %i, new[i][ : ,: , 1])
cv2.imwrite("1_4r_test_%d.png" %i, y_test[i,:,:,1])
cv2.imwrite("2_4r_pred_%d.png" %i,new[i][ : ,: , 2])
cv2.imwrite("2_4r_test_%d.png" %i, y_test[i,:,:,2])
# cv2.imwrite("3_4r_pred_%d.png" %i,y_pred[i][ : ,: , 3])
# cv2.imwrite("3_4r_test_%d.png" %i, y_test[i,:,:,3])
| rutugandhi/MDA-Net | Sagittal/Proposed/mda_net_144.py | mda_net_144.py | py | 26,277 | python | en | code | 0 | github-code | 13 |
9556335324 | class Solution:
def findMaxAverage(self, nums: List[int], k: int) -> float:
s=sum(nums[:k])
ans=s/k
j=k
i=0
while(j<len(nums)):
s+=(nums[j]-nums[i])
ans=(max(ans,s/k))
i+=1
j+=1
return ans | eyoaab/Solution-_pack | maxAverage.py | maxAverage.py | py | 303 | python | en | code | 0 | github-code | 13 |
35644404655 | import pandas as pd
from sqli_detect import getFeatures
import time
import pickle
if __name__ == '__main__':
    # Interactive loop: read a URL, extract SQLi features, classify it.
    while 1:
        payload = input("please input your url:")
        print('start process data')
        start = time.time()
        # Feature layout expected by the trained model.
        result = pd.DataFrame(columns=('sql','length','key_num','capital_f','num_f','space_f','special_f','prefix_f','entropy','label'))
        results = getFeatures(payload, '1')
        result.loc[0] = results
        # Drop the raw query text and the label; keep only numeric features.
        result = result.drop(['sql','label'],axis=1).values
        print(result)
        end = time.time()
        print('Over process in %f s'%(end -start))
        # NOTE(review): the model is re-unpickled from disk on every
        # iteration; hoisting the load above the loop would avoid that.
        with open('models.model','rb') as fr:
            clf = pickle.load(fr)
        print('start Predict job')
        start = time.time()
        print(clf.predict(result))
        end = time.time()
print('Over Predict job in %f s'%(end - start)) | scusec/Data-Mining-for-Cybersecurity | Homework/2019/Task5/12/code/predict_sql.py | predict_sql.py | py | 868 | python | en | code | 66 | github-code | 13 |
72337985619 | """Adversarial attack class
"""
import os
import torch
class Attack(object):
"""Base class for attacks
Arguments:
object {[type]} -- [description]
"""
    def __init__(self, attack_type, model):
        # Human-readable attack identifier.
        self.attack_name = attack_type
        # Keep the target model in eval mode so dropout/batch-norm are frozen.
        self.model = model.eval()
        # Device inferred from the model's parameters.
        self.device = next(model.parameters()).device
    def forward(self, *args):
        """Generate adversarial examples.

        Must be overridden by every concrete attack subclass; invoked
        through ``__call__``.
        """
        raise NotImplementedError
    def inference(self, args, save_path, file_name, data_loader):
        """Attack every batch in data_loader and report adversarial accuracy.

        Arguments:
            args: namespace providing at least `device` and `save_adv`.
            save_path: directory for the saved adversarial tensors.
            file_name: file name for the saved (adversarials, labels) tuple.
            data_loader: iterable of (images, labels) batches.
        """
        adv_list = []
        label_list = []
        correct = 0
        accumulated_num = 0.
        total_num = len(data_loader)
        for step, (imgs, labels) in enumerate(data_loader):
            # Grayscale inputs are expanded to 3 channels.
            if imgs.size(1) == 1:
                imgs = imgs.repeat(1, 3, 1, 1)
            imgs = imgs.to(args.device)
            labels = labels.to(args.device)
            adv_imgs, labels = self.__call__(imgs, labels)
            adv_list.append(adv_imgs.cpu())
            label_list.append(labels.cpu())
            accumulated_num += labels.size(0)
            # Running accuracy of the model on the adversarial examples.
            outputs = self.model(adv_imgs)
            _, predicted = torch.max(outputs, 1)
            correct += predicted.eq(labels).sum().item()
            acc = 100 * correct / accumulated_num
            print('Progress : {:.2f}% / Accuracy : {:.2f}%'.format(
                (step+1)/total_num*100, acc), end='\r')
        print('Progress : {:.2f}% / Accuracy : {:.2f}%'.format(
            (step+1)/total_num*100, acc))
        if args.save_adv:
            adversarials = torch.cat(adv_list, 0)
            y = torch.cat(label_list, 0)
            os.makedirs(save_path, exist_ok=True)
            save_path = os.path.join(save_path, file_name)
            torch.save((adversarials, y), save_path)
def training(self, imgs):
adv_imgs, labels = self.__call__(imgs, labels)
return adv_imgs, labels
def __call__(self, *args):
adv_examples, labels = self.forward(*args)
return adv_examples, labels | lepoeme20/Adversarial-Detection | attacks.py | attacks.py | py | 2,230 | python | en | code | 0 | github-code | 13 |
74218108176 | def is_matrix_rectangular(matrix):
return all(len(row) == len(matrix[0]) for row in matrix)
def count_non_zero_columns(matrix):
    """Count the columns of `matrix` that contain no zero entries.

    Returns 0 for an empty matrix and -1 when the rows have differing
    lengths (non-rectangular input).
    """
    if not matrix:
        return 0
    if not is_matrix_rectangular(matrix):
        return -1  # non-rectangular: signalled with -1
    # zip(*matrix) yields the columns; count those without any zero.
    return sum(1 for column in zip(*matrix) if 0 not in column)
def main():
    """Read a matrix from stdin and report how many columns contain no zeros."""
    # Retry until the user supplies valid integer input.
    while True:
        try:
            # Prompts (Russian): number of rows / columns of the matrix.
            rows = int(input("Введите количество строк матрицы: "))
            cols = int(input("Введите количество столбцов матрицы: "))
            if rows <= 0 or cols <= 0:
                print("Количество строк и столбцов должно быть больше 0.")
                continue
            # Read the matrix element by element.
            matrix = []
            for i in range(rows):
                row = []
                for j in range(cols):
                    element = int(input(f"Введите элемент [{i+1}][{j+1}]: "))
                    row.append(element)
                matrix.append(row)
            result = count_non_zero_columns(matrix)
            # -1 signals a non-rectangular matrix.
            if result == -1:
                print("Матрица не прямоугольная.")
            else:
                print(f"Количество столбцов без нулей: {result}")
            break
        except ValueError:
            print("Введите корректное целое число.")
if __name__ == "__main__":
    main()
| lkrvtsk/Python | Лабораторная №2/Лаб_2_задание_3.py | Лаб_2_задание_3.py | py | 1,778 | python | ru | code | 0 | github-code | 13 |
73772401937 | def anima():#Animação
from random import randint
import colorama
from time import sleep
from colorama import Fore, Back, Style
colorama.init(autoreset=True)
lista = [Fore.RED, Fore.YELLOW, Fore.BLUE, Fore.MAGENTA, Fore.GREEN, Fore.CYAN, Fore.WHITE, Fore.LIGHTRED_EX]
nome = ["L", "u", "c","a","s"]
sobrenome=["W","a","s","i","l","e","w","s","k","i"]
for c in nome:
print(lista[randint(0, 7)]+f"{c}", end="", flush=True)
sleep(0.1)
print()
for c in sobrenome:
print(lista[randint(0, 7)]+f"{c}", end="", flush=True)
sleep(0.1)
print()
# Re-run the rainbow name animation forever (stop with Ctrl+C).
while True:
    anima()
| Firestormant/Alguns-projetos-que-eu-gostei | Animação*1000.py | Animação*1000.py | py | 648 | python | en | code | 1 | github-code | 13 |
29245575126 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA, SparsePCA
from sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.stats import pointbiserialr
from pandas.plotting import parallel_coordinates
from sklearn.neighbors import NearestNeighbors
from random import sample
from numpy.random import uniform
import numpy as np
from math import isnan
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
import scipy.spatial.distance as ssd
from scipy.spatial import distance_matrix
from sklearn import manifold
clustering =[1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 0,
0, 0, 2, 2, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2, 1, 0, 0, 0, 1, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 1,
2, 1, 1, 1, 2, 0, 2, 2, 1, 2, 1, 1, 1, 1, 0, 0, 1, 2, 2, 1, 2, 2, 0, 1, 2, 0, 0, 2, 2, 1, 2, 1,
1, 1, 0, 0, 0, 2, 2, 0, 1, 0, 0, 2, 2, 1, 0, 2, 1, 2, 2, 1, 2, 2, 0, 2, 1, 1, 1, 2, 2, 2, 2, 0,
0, 1, 2, 1, 2, 2, 2, 0, 2, 0, 2, 2, 1, 0, 2, 0, 0, 1, 2, 1, 2, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 2,
0, 2, 1, 0, 1, 0, 1, 2, 1, 2, 1, 0, 2, 2, 2, 0, 2, 0, 1, 1, 2, 0, 1, 2, 1, 1, 2, 2, 1, 0, 2, 1,
2, 1, 0, 2, 0, 1, 2, 0, 2, 1, 1, 1, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 1, 0, 2, 0, 0, 2, 2, 1, 2,
0, 2, 2, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 0, 0, 1, 1, 0, 2, 0, 2, 1, 0, 1, 1, 1, 2, 0, 1, 2,
1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 1, 1, 2, 0, 0, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 0, 0, 1, 0, 2, 1, 1,
2, 1, 1, 1, 2, 2, 1, 2, 1, 0, 2, 0, 0, 1, 0, 2, 2, 1, 0, 2, 0, 1, 2, 2, 1, 1, 1, 1, 1, 2, 1, 2,
1, 2, 0, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 0, 1, 2, 2, 2, 2, 1, 0, 2, 2, 1, 0,
1, 1, 0, 1, 2, 0, 1, 0, 0, 1, 0, 2, 1, 1, 1, 2, 1, 0, 0, 2, 1, 2, 2, 2, 2, 0, 2, 1, 1, 2, 0, 1,
0, 1, 0, 1, 1, 0, 1, 1, 0, 2, 2, 1, 0, 0, 1, 1, 0, 1, 0, 2, 1, 2, 1, 1, 2, 2, 2, 2, 0, 0, 0, 2,
1, 1, 0, 0, 1, 2, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 1, 1, 2, 1, 0, 1, 1, 2, 2, 1, 2, 0, 1, 0, 2, 1,
2, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 2, 1, 0, 2, 1, 2, 1, 2, 0, 2, 0, 2, 2, 2, 1, 0, 1, 2, 1, 0, 0,
2, 1, 0, 0, 0, 2, 0, 1, 1, 1, 1, 2, 0, 0, 2, 0, 2, 2, 0, 0, 0, 1, 1, 2, 0, 0, 2, 2, 1, 0, 0, 2,
0, 1, 0, 2, 1, 2, 0, 2, 1, 0, 2, 2, 2, 2, 0, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 0, 1, 0, 2, 2, 0, 2,
2, 1, 2, 1, 2, 1, 1, 2, 0, 1, 2, 2, 1, 1, 0, 2, 0, 1, 1, 0, 2, 2, 1, 2, 2, 1, 1, 2, 2, 2, 0, 1,
2, 1, 2, 0, 1, 2, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 0, 1, 2, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1,
1, 0, 2, 1, 1, 1, 2, 1, 0, 1, 0, 2, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 1, 0, 2, 2, 1, 2, 1,
1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 0, 1, 0, 0, 1, 1, 0, 2, 0, 1, 2, 2, 1, 2, 1, 1, 2, 1, 2, 1, 0, 2,
0, 0, 1, 0, 2, 1, 1, 0, 0, 1, 0, 1, 0, 1, 2, 0, 2, 0, 1, 2, 0, 1, 0, 2, 1, 1, 2, 1, 2, 0, 2, 0,
1, 1, 1, 0, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 1, 0, 2, 0, 1, 1, 2, 1, 0, 0, 0, 2, 1, 2, 0, 1,
0, 1, 0, 2, 1, 0, 1, 2, 1, 2, 0, 0, 0, 0, 1, 0, 1, 2, 2, 0, 1, 0, 2, 2, 0, 2, 2, 1, 1, 1, 2, 2,
1, 1, 1, 1, 2, 2, 0, 1, 2, 0, 2, 2, 0, 1, 1, 1, 0, 2, 2, 1, 0, 2, 0, 0, 0, 2, 1, 0, 2, 2, 2, 1,
0, 1, 0, 0, 2, 0, 2, 1, 0, 0, 0, 0, 2, 2, 1, 0, 1, 0, 1, 2, 1, 1, 0, 2, 0, 0, 0, 2, 2, 2, 1, 0,
0, 2, 2, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 1, 1, 0, 2, 2, 2, 2, 1, 0, 2, 1, 0, 1, 1, 2, 0, 0, 0, 0,
1, 2, 1, 0, 0, 1, 2, 0, 2, 1, 2, 2, 0, 2, 0, 1, 0, 0, 2, 1, 1, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2,
1, 0, 1, 2, 2, 0, 2, 2, 2, 2, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 2, 2, 0, 2, 1, 0, 1, 2, 0, 2, 2,
1, 1, 0, 2, 2, 1, 0, 1, 2, 1, 0, 2, 1, 2, 1, 0, 1, 2, 2, 2, 2, 2, 2, 0, 2, 2, 1, 1, 0, 1, 2, 1,
1, 2, 0, 2, 0, 0, 2, 2, 0, 0, 1, 0, 1, 1, 0, 2, 0, 0, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 2, 1,
0, 1, 1, 0, 0, 0, 1, 1, 2, 0, 0, 2, 1, 1, 2, 2, 1, 1, 1, 0, 2, 2, 2, 1, 1, 2, 0, 1, 1, 1, 1, 2,
2, 0, 1, 2, 2, 0, 2, 1, 2, 2, 2, 0, 0, 1, 2, 1, 0, 1, 2, 2, 2, 1, 1, 2, 0, 1, 2, 0, 2, 1, 0, 2,
0, 1, 0, 0, 0, 1, 1, 2, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 2, 2, 1, 1, 0, 1, 0, 1, 2, 2, 2, 0, 2,
2, 0, 0, 2, 2, 0, 2, 1, 0, 1, 2, 1, 1, 2, 0, 1, 2, 2, 1, 2, 0, 1, 0, 0, 0, 2, 2, 2, 2, 1, 1, 1,
2, 1, 1, 1, 2, 2, 1, 1, 0, 2, 1, 2, 2, 1, 1, 0, 0, 1, 2, 1, 2, 1, 2, 2, 0, 1, 0, 1, 2, 2, 2, 1,
2, 0, 1, 0, 2, 2, 2, 2, 2, 0, 1, 0, 1, 1, 2, 0, 2, 0, 2, 1, 0, 0, 2, 1, 0, 2, 0, 1, 1, 2, 1, 2,
1, 1, 1, 1, 1, 2, 0, 2, 2, 2, 1, 2, 2, 0, 2, 2, 1, 1, 0, 2, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 1,
2, 2, 2, 0, 2, 1, 0, 1, 0, 0, 1, 2, 0, 1, 1, 0, 2, 2, 1, 0, 2, 1, 1, 0, 1, 1, 1, 0, 1, 2, 1, 1,
1, 2, 2, 1, 1, 0, 0, 2, 0, 0, 2, 0, 0, 0, 1, 0, 1, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 2, 1, 1,
0, 2, 2, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 2, 1, 1, 0, 2, 0, 2, 2, 2, 1, 2, 1, 1, 1, 1, 0, 1, 1,
0, 2, 1, 1, 1, 1, 0, 2, 0, 0, 2, 1, 1, 2, 0, 2, 1, 1, 1, 0, 1, 1, 0, 2, 2, 1, 1, 0, 2, 2, 1, 2,
0, 2, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 2, 1, 2, 2, 2, 0, 1, 0, 0, 2, 2, 0, 1, 1, 2, 0, 0, 2, 0, 0,
0, 0, 0, 1, 0, 2, 0, 1, 2, 0, 2, 1, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2, 1, 2, 2, 2, 1, 2, 0, 1, 2, 2,
2, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 2, 0, 2, 2, 0, 1, 1, 0, 1, 2, 1, 1, 2, 2, 1,
1, 0, 0, 2, 0, 2, 1, 1, 0, 0, 1, 1, 1, 1, 2, 0, 0, 0, 2, 0, 0, 1, 2, 1, 1, 2, 0, 0, 0, 2, 0, 1,
0, 1, 1, 2, 0, 1, 2, 2, 1, 1, 1, 2, 0, 1, 0, 2, 1, 2, 2, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1, 2, 0, 2,
0, 0, 1, 2, 0, 1, 2, 2, 0, 2, 2, 0, 0, 1, 0, 0, 2, 1, 2, 1, 1, 2, 1, 2, 1, 1, 0, 0, 1, 2, 0, 2,
1, 2, 1, 2, 1, 0, 2, 1, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 0, 0, 0, 2, 1, 2, 0, 2, 0, 1, 1, 2, 1,
1, 2, 1, 1, 2, 1, 0, 2, 2, 1, 1, 1, 2, 2, 2, 0, 1, 0, 2, 1, 2, 1, 2, 2, 1, 1, 0, 2, 0, 1, 1, 0,
0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 0, 2, 2, 1, 2, 0, 2, 2, 2, 2,
2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 2, 2, 1, 2, 0, 2, 2, 1, 2, 0, 1, 1, 1, 0, 1, 0,
0, 0, 0, 2, 1, 0, 2, 2, 1, 2, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 2, 2, 1, 0, 2, 2, 0,
0, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 0, 1, 1, 0, 0, 2, 0, 0, 0, 2, 0, 2, 1, 1, 0, 1, 0, 2, 0, 1, 0,
1, 0, 1, 2, 0, 1, 2, 1, 1, 0, 1, 2, 1, 0, 0, 1, 0, 2, 2, 2, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 2,
0, 2, 1, 2, 1, 1, 2, 2, 0, 2, 0, 1, 2, 1, 0, 1, 1, 2, 2, 0, 1, 1, 1, 0, 2, 2, 0, 1, 0, 1, 0, 2,
2, 0, 2, 0, 0, 1, 1, 1, 0, 0, 0, 1, 2, 2, 0, 1, 2, 2, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0,
1, 1, 2, 0, 1, 0, 1, 1, 2, 1, 1, 2, 0, 2, 1, 2, 0, 1, 1, 2, 0, 2, 0, 2, 2, 2, 2, 0, 1, 0, 0, 2,
2, 1, 1, 1, 2, 2, 0, 1, 2, 1, 0, 0, 1, 1, 0, 2, 0, 2, 1, 0, 2, 1, 1, 0, 0, 1, 2, 1, 2, 2, 1, 0,
1, 2, 0, 1, 0, 2, 2, 1, 1, 0, 2, 1, 1, 1, 1, 1, 0, 0, 1, 1, 2, 1, 1, 2, 0, 0, 1, 1, 2, 2, 0, 1,
2, 0, 1, 0, 2, 2, 0, 1, 2, 2, 2, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 1, 2, 0, 1, 0,
0, 1, 2, 1, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 1, 0, 0, 2, 1, 2, 0, 2, 1, 2, 2, 0, 0, 0, 2, 0, 2, 0,
1, 2, 0, 0, 1, 1, 0, 1, 1, 2, 2, 1, 2, 0, 0]
def verification(feature, finalt):
    """Track one feature per replay across 720-tick steps up to `finalt`.

    Reads the precomputed distance matrix (for the replay id list) and the
    per-timestep cluster CSVs, attaches the hard-coded `clustering` labels,
    and writes/returns a frame of `feature` values per timestep.
    """
    dissim = pd.read_csv('../reaperCSVs/cluster data/singledistancematrixto' + str(finalt) + '.csv')
    dissim.index = dissim['0Replay_id']
    del dissim['0Replay_id']
    # Replay ids present in the distance matrix define the row set.
    matches = list(dissim.index)
    #mds = manifold.MDS(n_components=3, dissimilarity="precomputed", random_state=5, verbose=2)
    #results = mds.fit(dissim.values)
    #coords = pd.DataFrame(results.embedding_, index=dissim.index, columns=['Coord 1', 'Coord 2', 'Coord 3'])
    #Z = linkage(coords.values, 'ward')
    dffinal = pd.read_csv('../reaperCSVs/cluster data/cluster_data' + str(finalt) + '.csv')
    dffeature = pd.DataFrame(columns=range(720, finalt + 720, 720), index=matches)
    #dffeature['Names'] = fcluster(Z, 4, criterion='maxclust')
    # Labels come from the precomputed module-level `clustering` list;
    # its length must match `matches` -- TODO confirm for this finalt.
    dffeature['Names']=clustering
    #print(list(dffeature['Names'].values))
    # One column of `feature` values per 720-tick timestep.
    for t in range(720, finalt + 720, 720):
        df = pd.read_csv('../reaperCSVs/cluster data/cluster_data' + str(t) + '.csv')
        df.index = df['0Replay_id']
        del df['0Replay_id']
        df = df.loc[df.index.isin(matches)]
        dffeature[t] = df[feature]
    dffeature.to_csv('../reaperCSVs/cluster data/verificationfile'+ feature + str(finalt) + '.csv')
    return dffeature
def labelDf(labelDict, df_unlabeled):
    """Attach cluster labels to a replay frame and drop unlabeled rows.

    Replaces the direct `labelDict.__contains__` dunder call and manual
    append loop with a `dict.get` comprehension (same behavior).

    Arguments:
        labelDict: mapping replay name -> cluster label.
        df_unlabeled: frame with 'Name' and 'Unnamed: 0' columns.

    Returns:
        Frame with a 'Names' label column, without 'Name'/'Unnamed: 0',
        all-zero columns removed, rows without a label dropped.
    """
    # Names missing from labelDict get the sentinel 'Null'.
    df_unlabeled['Names'] = [labelDict.get(e, 'Null') for e in df_unlabeled['Name']]
    del df_unlabeled['Unnamed: 0']
    del df_unlabeled['Name']
    # Keep only columns with at least one non-zero entry.
    df_tmp = df_unlabeled.loc[:, (df_unlabeled != 0).any(axis=0)]
    return df_tmp[df_tmp['Names'] != 'Null']
def getReplays(df, names):
    """Return only the rows whose 'Name' value appears in `names`."""
    mask = df['Name'].isin(names)
    return df.loc[mask]
def getColumn(df, column):
    """Return the requested column of `df` as a Series.

    Fixes the original `df.loc[df['column']]`, which looked up the literal
    string 'column' (KeyError) instead of using the parameter.
    """
    return df.loc[:, column]
def makeLabelDict(names, labels):
    """Pair each name with its cluster label.

    Replaces the manual index loop with `dict(zip(...))`; behavior is
    unchanged for equal-length inputs (one entry per label).
    """
    return dict(zip(names, labels))
def readAndPrep(dir):
    """Load a replay CSV, dropping the index column and all-zero columns."""
    frame = pd.read_csv(dir)
    # The CSV was written with its index; pandas reads it back as 'Unnamed: 0'.
    del frame['Unnamed: 0']
    # Keep only columns with at least one non-zero entry.
    return frame.loc[:, (frame != 0).any(axis=0)]
def clusterAtTimestamp(timestamp, ids):
    """Cluster replays at one timestamp, then label the same replays at `ids`.

    Runs K-Means (k=3) on 4 principal components of the data at `timestamp`
    and propagates the resulting labels to the frames at each id in `ids`.
    """
    # NOTE(review): `dir` and `dict` below shadow builtins.
    dir = '../data/Replays6-' + str(timestamp) + 's.csv'
    df = readAndPrep(dir)
    df, nameCol = rmName(df)
    df_km = cluster_KMeans(getPCs(df, 4), 3, True)
    labels = df_km['Names']
    # Map replay name -> cluster label found at `timestamp`.
    dict = makeLabelDict(nameCol, labels)
    dfs = []
    for e in ids:
        df_tmp = pd.read_csv('../data/Replays6-' + str(e) + 's.csv')
        dfs.append(labelDf(dict, df_tmp))
    return dfs
def multicluster(ids):
    """Plot PCA projections of clusterings seeded at several timestamps.

    For each seed timestamp (90..600s) the labels are propagated to the
    frames at `ids`, projected onto 3 PCs and drawn as 2-D subplots.
    """
    i = 1
    dfs_list = [clusterAtTimestamp(90, ids), clusterAtTimestamp(180, ids), clusterAtTimestamp(270, ids), clusterAtTimestamp(390, ids), clusterAtTimestamp(510, ids), clusterAtTimestamp(600, ids)]
    for e in dfs_list:
        plt.figure()
        i = 1
        for e2 in e:
            #plt.figure()
            # One subplot per id in a 2x3 grid.
            plt.subplot(2, 3, i)
            e2 = getPCs(e2, 3)
            #Methods.project_onto_R3(e, ['PC 1', 'PC 2', 'PC 3'])
            project_onto_R2(e2, ['PC 1', 'PC 2'], False)
            i += 1
        plt.show()
    return dfs_list
def rmName(df):
    """Split off the 'Name' column of a replay frame.

    Returns a (frame_without_name, name_column) pair; name_column is None
    when the frame has no 'Name' column.  The original returned bare None
    in that case, which crashed every `df, names = rmName(df)` call site.
    """
    if 'Name' in df.columns:
        return df.drop('Name', axis=1), df['Name']
    # No 'Name' column: hand the frame back untouched.
    return df, None
def compare():
    """Plot 2-cluster K-Means PCA projections for timestamps 60s..210s.

    Top row: 60/90/120s; bottom row: 150/180/210s, one subplot each.
    """
    f, axes = plt.subplots(2, 3)
    n = 0
    q = 60
    # NOTE(review): the outer loop variable `i` is overwritten by the
    # inner label-encoding loop below; the code still works because the
    # file name `s` is built before the overwrite.
    for i in range(60,150,30):
        s='../data/Replays2-'+str(i)+'.0s.csv'
        df = pd.read_csv(s)
        del df['Unnamed: 0']
        df = df.loc[:, (df != 0).any(axis=0)]
        df_pca=getPCs(df,2)
        df_pca = cluster_KMeans(df_pca, 2, True)
        # Encode cluster names as integers for the scatter color map.
        names = list(set(df_pca.Names))
        i = 0
        for e in names:
            df_pca = df_pca.replace(e, i)
            i = i + 1
        axes[0,n].scatter(x=df_pca['PC 1'], y=df_pca['PC 2'], c=df_pca['Names'], cmap='rainbow')
        axes[0,n].set_title(str(q)+'s')
        n = n + 1
        q = 60 + 30 * n
    m = 0
    q = 150
    # Second row: timestamps 150s, 180s, 210s.
    for i in range(150, 240, 30):
        s = '../data/Replays2-' + str(i) + '.0s.csv'
        df = pd.read_csv(s)
        del df['Unnamed: 0']
        df = df.loc[:, (df != 0).any(axis=0)]
        df_pca = getPCs(df, 2)
        df_pca = cluster_KMeans(df_pca, 2, True)
        names = list(set(df_pca.Names))
        i = 0
        for e in names:
            df_pca = df_pca.replace(e, i)
            i = i + 1
        axes[1, m].scatter(x=df_pca['PC 1'], y=df_pca['PC 2'], c=df_pca['Names'], cmap='rainbow')
        axes[1, m].set_title(str(q)+'s')
        m = m+1
        q = 150+30*m
    plt.show()
def project_onto_R3(df, cols):
    """3-D scatter of three columns, colored by cluster if labels exist.

    Arguments:
        df: frame; if a 'Names' column is present its values are encoded
            as integers and used as colors.
        cols: three column names to plot on the x/y/z axes.
    """
    if 'Names' in df.columns:
        # Replace each cluster name with an integer for the color map.
        names = list(set(df.Names))
        i = 0
        for e in names:
            df = df.replace(e, i)
            i = i+1
        ax = plt.axes(projection='3d')
        ax.scatter3D(df[cols[0]], df[cols[1]], df[cols[2]], c=df['Names'], cmap='rainbow')
    else:
        ax = plt.axes(projection='3d')
        ax.scatter3D(df[cols[0]], df[cols[1]], df[cols[2]],'kx')
def project_onto_R2(df, cols, plot):
    """2-D scatter of two columns, colored by cluster if labels exist.

    Arguments:
        df: frame; a 'Names' column (if present) is integer-encoded for color.
        cols: two column names for the x/y axes.
        plot: when True, call plt.show() immediately.
    """
    if 'Names' in df.columns:
        # Replace each cluster name with an integer for the color map.
        names = list(set(df.Names))
        i = 0
        for e in names:
            df = df.replace(e, i)
            i = i + 1
        plt.scatter(x=df[cols[0]], y=df[cols[1]], c=df['Names'], cmap='rainbow')
    else:
        plt.scatter(x=df[cols[0]], y=df[cols[1]])
    if plot:
        plt.show()
def linkageType(df,type):
    """Show a hierarchical-clustering dendrogram using the given linkage.

    Arguments:
        df: data frame (optionally with 'Name'/'Names' columns).
        type: scipy linkage method name ('ward', 'single', ...); note that
            this parameter shadows the builtin `type`.
    """
    # NOTE(review): dfNew (the frame without 'Name') is never used below --
    # `data` is taken from the original df, so a 'Name' column, if present,
    # would still be included in the linkage input. Confirm intent.
    dfNew, nameCol = rmName(df) #removes the name column
    if 'Names' in df.columns:
        data = df.drop('Names', axis=1)
        data = data.values
    else:
        data = df.values
    Z=linkage(data,type)
    dendrogram(Z, no_labels=True)
    plt.ylabel('Tolerance')
    plt.xlabel('Index in data')
    plt.title('Hierarchical dendogram;'+type+' linkage.')
    plt.show()
def heatMap(df):
    """Plot and return the correlation matrix of the numeric columns.

    Fixes two defects: the original `df = rmName(df)` bound the
    (frame, names) tuple returned by rmName, so `df.corr()` crashed; and
    `np.bool` is a removed NumPy alias -- the builtin `bool` is used now.
    """
    df, _ = rmName(df)
    plt.figure()
    corr = df.corr()
    sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool), cmap=plt.get_cmap('magma'),
                square=True)
    plt.show()
    return corr
def seabornHeatmap(df):
    """Show a hierarchically-clustered heat map (seaborn clustermap).

    Fixes the original `df = rmName(df)`, which bound the (frame, names)
    tuple returned by rmName instead of the frame.
    """
    df, _ = rmName(df)
    if 'Names' in df.columns:
        # Drop the label column; robust scaling ignores outliers.
        df = df.drop('Names', axis=1)
        sns.clustermap(df, robust=True)
        plt.show()
    else:
        sns.clustermap(df)
        plt.show()
def parallelCoordinates(df):
    """Parallel-coordinates plot of `df`, one line per row, colored by 'Names'."""
    #df = rmName(df)
    plt.figure()
    plt.title('Parallel Coordinates plot')
    pd.plotting.parallel_coordinates(frame=df, class_column='Names', colormap=plt.get_cmap('tab10'))
    plt.show()
def radViz(df):
    """RadViz projection of `df`, colored by the 'Names' cluster column.

    Fixes the original `df = rmName(df)`, which bound the (frame, names)
    tuple returned by rmName instead of the frame.
    """
    df, _ = rmName(df)
    plt.figure()
    plt.title('Radviz plot')
    pd.plotting.radviz(frame=df, class_column='Names', colormap=plt.get_cmap('tab10'))
    plt.show()
def parallelCoordinates_Clusters(df):
    """Parallel-coordinates plot of the per-cluster column means.

    Fixes the original `df = rmName(df)`, which bound the (frame, names)
    tuple returned by rmName, so `df['Names']` crashed.
    """
    df, _ = rmName(df)
    clusters = set(list(df['Names']))
    columns = df.columns
    first = True
    clusterMean = []
    # Stack the mean row of each cluster into a matrix.
    for e in clusters:
        cluster = df[df['Names'] == e]
        if first:
            first = False
            clusterMean = cluster[cluster.columns].mean().values
        else:
            clusterMean = np.vstack([clusterMean, cluster[cluster.columns].mean().values])
    plotDF = pd.DataFrame(clusterMean)
    plotDF['Names'] = clusters
    plt.title('Parallel Coordinates plot')
    pd.plotting.parallel_coordinates(frame=plotDF, class_column='Names', colormap=plt.get_cmap('tab10'))
    plt.grid(False)
def cluster_DBSCAN(df, eps, min_samples, keepOutliers, keepVarnames): # Handles a DataFrame
    """DBSCAN-cluster `df` and return it with a 'Names' label column.

    Arguments:
        df: data frame; optional 'Name'/'Names' columns are handled.
        eps: DBSCAN neighbourhood radius.
        min_samples: DBSCAN core-point threshold.
        keepOutliers: if False, rows labelled 'Outlier' are dropped.
        keepVarnames: if False, columns are renamed 'Var 1'..'Var n'.

    Raises:
        ValueError: when DBSCAN finds no clusters at all.
    """
    df, nameCol = rmName(df)
    # init:
    labelsArray = []
    if 'Names' in df.columns:
        data = df.drop('Names', axis=1)
        data = data.values
    else:
        data = df.values
    # Standardize features before the density-based clustering.
    X = StandardScaler().fit_transform(data)
    print('DBSCAN on ' + str(len(data[:, 1])) + ' points in ' + str(len(data[1, :])) + ' dimensions.')
    print('Clustering parameters set to eps=' + str(eps) + ', min_samples=' + str(min_samples) + '.')
    print()
    # Clustering
    db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    # Label -1 marks DBSCAN noise points; exclude it from the cluster count.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    for i in range(0,len(db.labels_)):
        if db.labels_[i]==-1:
            labelsArray.append('Outlier')
        else:
            labelsArray.append('Cluster '+str(db.labels_[i]+1))
    if n_clusters == 0:
        raise ValueError('No clusters found, change params.')
    print(str(n_clusters) + " clusters found.")
    print()
    if not keepVarnames:
        columns = ['Var %i' % i for i in range(1, len(data[1, :]) + 1)]
    else:
        if 'Names' in df.columns:
            df = df.drop('Names', axis=1)
        columns = df.columns
    dfNew=pd.DataFrame(data=data, columns=columns)
    dfNew['Names']=labelsArray
    print('#Points classified as outliers: ' + str(len(dfNew.loc[dfNew['Names'] == 'Outlier'])) + '.')
    for i in range(0, n_clusters, 1):
        print('#Points in cluster ' + str(i+1) + ': ' + str(len(dfNew.loc[dfNew['Names'] == 'Cluster '+str(i+1)]))+'.')
    dfNew['Name'] = nameCol
    if keepOutliers:
        return dfNew
    else:
        return dfNew.loc[dfNew['Names'] != 'Outlier']
def cluster_KMeans(df, k, keepVarnames): # Handles a DataFrame
    """K-Means-cluster `df` and return it with a 'Names' label column.

    Arguments:
        df: data frame; an optional 'Names' column is dropped before fitting.
        k: number of clusters to find.
        keepVarnames: if False, columns are renamed 'Var 1'..'Var n'.
    """
    # init:
    #df, nameCol = rmName(df)
    labelsArray = []
    if 'Names' in df.columns:
        data = df.drop('Names', axis=1)
        data = data.values
    else:
        data = df.values
    # Standardize features before clustering.
    X = StandardScaler().fit_transform(data)
    print('Executing K-Means clustering on ' + str(len(data[:, 0])) + ' points.')
    print('Looking for k=' + str(k) + ' clusters.')
    print()
    # Clustering
    km = KMeans(n_clusters=k, random_state=0, init = 'k-means++').fit(X)
    labels = km.labels_
    n_clusters = len(set(labels))
    print(str(n_clusters) + " clusters found.")
    # Human-readable labels 'Cluster 1'..'Cluster k'.
    for i in range(0,len(km.labels_)):
        labelsArray.append('Cluster '+str(km.labels_[i] + 1))
    if not keepVarnames:
        columns = ['Var %i' % i for i in range(1,len(data[1, :])+1)]
    else:
        if 'Names' in df.columns:
            df = df.drop('Names', axis=1)
        columns = df.columns
    dfNew = pd.DataFrame(data=data, columns=columns)
    dfNew['Names']=labelsArray
    for i in range(0, n_clusters, 1):
        print('#Points in cluster ' + str(i+1) + ': ' + str(len(dfNew.loc[dfNew['Names'] == 'Cluster '+str(i+1)]))+'.')
    #dfNew['Name'] = nameCol
    return dfNew
def elbowMethod(df, ks):
    """Plot K-Means inertia for k = 1..ks (elbow heuristic).

    Fixes the original `df = rmName(df)`, which bound the (frame, names)
    tuple returned by rmName and crashed inside KMeans.fit.
    """
    df, _ = rmName(df)
    distorsions = []
    for k in range(1, ks + 1):
        kmeans = KMeans(n_clusters=k)
        kmeans.fit(df)
        distorsions.append(kmeans.inertia_)
    fig = plt.figure(figsize=(15, 5))
    plt.plot(range(1, ks + 1), distorsions)
    plt.grid(True)
    plt.title('Elbow curve')
    plt.show()
def cluster_Hierarchical(df, k, linkageType, keepVarnames):
    """Agglomeratively cluster `df` and return it with a 'Names' column.

    Arguments:
        df: data frame; optional 'Name'/'Names' columns are handled.
        k: number of clusters.
        linkageType: sklearn linkage ('ward', 'complete', ...); note that
            this parameter shadows the module-level `linkageType` function.
        keepVarnames: if False, columns are renamed 'Var 1'..'Var n'.
    """
    df, nameCol = rmName(df)
    labelsArray = []
    if 'Names' in df.columns:
        data = df.drop('Names', axis=1)
        data = data.values
    else:
        data = df.values
    # Standardize features before clustering.
    X = StandardScaler().fit_transform(data)
    print('Executing Agglomerative Hierarchical clustering on ' + str(len(data[:, 1])) + ' points.')
    print('Looking for k=' + str(k) + ' clusters.')
    print()
    # Clustering
    ac = AgglomerativeClustering(n_clusters=k, affinity='euclidean', linkage=linkageType).fit(X)
    labels = ac.labels_
    n_clusters = len(set(labels))
    # Human-readable labels 'Cluster 1'..'Cluster k'.
    for i in range(0, len(ac.labels_)):
        labelsArray.append('Cluster ' + str(ac.labels_[i] + 1))
    if not keepVarnames:
        columns = ['Var %i' % i for i in range(1, len(data[1, :]) + 1)]
    else:
        if 'Names' in df.columns:
            df = df.drop('Names', axis=1)
        columns = df.columns
    dfNew = pd.DataFrame(data=data, columns=columns)
    dfNew['Names'] = labelsArray
    for i in range(0, n_clusters, 1):
        print('#Points in cluster ' + str(i + 1) + ': ' + str(
            len(dfNew.loc[dfNew['Names'] == 'Cluster ' + str(i + 1)])) + '.')
    dfNew['Name'] = nameCol
    return dfNew
def getPCs(df, n_components):
    """Project `df` onto its first `n_components` principal components.

    The data is standardized first; any 'Names' label column is excluded
    from the fit and copied back onto the result.
    """
    #df, nameCol = rmName(df)
    if 'Names' in df.columns:
        data = df.drop('Names', axis=1)
    else:
        data = df
    tmp = data.values
    # Standardize before PCA so all features contribute comparably.
    standard = StandardScaler()
    tmpS = standard.fit_transform(tmp)
    data = pd.DataFrame(tmpS)
    pca = PCA(n_components=n_components)
    pca.fit(data)
    columns = ['PC %i' % i for i in range(1,n_components+1)]
    df_pca = pd.DataFrame(pca.transform(data), columns=columns, index=df.index)
    if 'Names' in df.columns: # don't think this if-statement is needed..
        df_pca['Names'] = df['Names']
    #df_pca['Name'] = nameCol
    return df_pca
def clusterSparsePCA(df, n_components):
    """Project `df` onto `n_components` sparse principal components.

    Like getPCs, but uses SparsePCA; the 'Name' column is split off and
    re-attached to the result.
    """
    df, nameCol = rmName(df)
    if 'Names' in df.columns:
        data = df.drop('Names', axis=1)
    else:
        data=df
    Data_Array = data.values
    # Standardize before the sparse decomposition.
    standard = StandardScaler()
    Data_SArray = standard.fit_transform(Data_Array)
    data = pd.DataFrame(Data_SArray)
    pca = SparsePCA(n_components=n_components,normalize_components=True)
    pca.fit(data)
    columns = ['PC %i' % i for i in range(1,n_components+1)]
    df_pca = pd.DataFrame(pca.transform(data), columns=columns, index=df.index)
    if 'Names' in df.columns:
        df_pca['Names']=df['Names']
    df_pca['Name'] = nameCol
    return df_pca
def inversePCA(df):
    """Round-trip `df` through a full PCA and back to the original space.

    Prints the number of components PCA retained and returns the
    reconstructed frame with the 'Name' column re-attached.
    """
    df, nameCol = rmName(df)
    if 'Names' in df.columns:
        df = df.drop('Names', axis=1)
    pca=PCA().fit(df)
    print('Number of components required to explain 95% of all variance: '+str(pca.n_components_))
    components = pca.transform(df)
    # Map the component scores back into the original feature space.
    dfNew = pd.DataFrame(data=pca.inverse_transform(components))
    dfNew['Name'] = nameCol
    return dfNew
def explainedVariance(df):
    """Plot per-component and cumulative explained variance for a full PCA fit.

    Left subplot: bar chart of each PC's explained-variance ratio.
    Right subplot: cumulative explained variance versus component count.
    Shows the figure; returns None.
    """
    # BUG FIX: rmName returns (df, name_column) at every other call site in
    # this file; the original assigned the whole tuple to df, which makes the
    # df.columns access below raise AttributeError.
    df, nameCol = rmName(df)
    if 'Names' in df.columns:
        df = df.drop('Names', axis=1)
    pca = PCA().fit(df)
    print(np.cumsum(pca.explained_variance_ratio_))
    # Use a separate frame for plotting so the input data is not shadowed.
    var_df = pd.DataFrame({'var': pca.explained_variance_ratio_,
                           'PC': ['PC %i' % i for i in range(1,len(df.columns)+1)]})
    f, (ax1, ax2) = plt.subplots(1, 2)
    ax1.bar(var_df['PC'], var_df['var'])
    ax1.set_xlabel('PC')
    ax1.set_ylabel('Explained variance')
    ax2.plot(np.cumsum(pca.explained_variance_ratio_))
    ax2.set_xlabel('Number of components')
    ax2.set_ylabel('Cumulative explained variance')
    plt.show()
def binaryCluster(df):
    """One-hot encode the 'Names' cluster column into per-cluster dummy columns.

    Raises ValueError when the frame has no 'Names' column (not yet clustered).
    Returns a new frame: original features followed by one column per cluster.
    """
    if 'Names' not in df.columns:
        raise ValueError('Data not clustered.')
    dummies = pd.get_dummies(df['Names'])
    combined = pd.concat([df, dummies], axis=1)
    # The label column is now redundant with the dummies.
    del combined['Names']
    return combined
def pointBiserial(df, cols):
    """Point-biserial correlation between cluster dummies and every feature.

    `cols` names the dummy columns (cluster labels) to correlate against all
    remaining columns. Shows a heat map of correlations with |r| > 0.15 and
    returns the correlation frame (dummy-vs-dummy columns removed).
    """
    df = binaryCluster(df)
    if not all(elem in df.columns for elem in cols):
        # BUG FIX: str + ndarray does not concatenate; format explicitly.
        raise ValueError('Dummy variable ' + str(np.setdiff1d(cols, df.columns)) + ' not in DataFrame.')
    # Drop all-zero columns: pointbiserialr is undefined for constant input.
    df = df.loc[:, (df != 0).any(axis=0)]
    # BUG FIX: the original used chained assignment (df[col].loc[...] = ...),
    # which writes to a temporary copy in modern pandas. Casting the 0/1
    # dummies to bool is equivalent and supported.
    for col in cols:
        df[col] = df[col].astype(bool)
    corr = pd.DataFrame()
    for c in cols:
        tmpCol = []
        for e in df.columns:
            tmp = pointbiserialr(df[c].values, df[e].values)
            tmpCol.append(tmp[0])
        corr[c] = tmpCol
    corr.index = df.columns
    corr = corr.T
    # Keep only features with at least one non-trivial correlation.
    corr = corr.loc[:, (abs(corr) > 0.15).any(axis=0)]
    # BUG FIX: drop() returns a new frame; the original discarded the result.
    # errors='ignore' because the filter above may already have removed some
    # dummy columns.
    corr = corr.drop(cols, axis=1, errors='ignore')
    plt.figure()
    sns.set(font_scale=0.5)
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported spelling.
    sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool), cmap=plt.get_cmap('rainbow'), square=True, xticklabels=1)
    plt.show()
    return corr
def hopkins(X): # adapted from: https://matevzkunaver.wordpress.com/2017/06/20/hopkins-test-for-cluster-tendency/
    """Hopkins statistic for cluster tendency of the DataFrame X.

    Values near 1 suggest clusterable data, ~0.5 suggests uniform randomness.
    Stochastic: no random seed is fixed, so repeated calls differ.
    """
    d = X.shape[1]
    # d = len(vars) # columns
    n = len(X)  # rows
    m = int(0.1 * n)  # heuristic from article [1]
    # assumes X is a DataFrame (uses .values and .iloc below) -- TODO confirm
    nbrs = NearestNeighbors(n_neighbors=1).fit(X.values)
    rand_X = sample(range(0, n, 1), m)
    ujd = []  # NN distances from uniform-random points to the real data
    wjd = []  # NN distances from sampled real points to their nearest other point
    for j in range(0, m):
        u_dist, _ = nbrs.kneighbors(uniform(np.amin(X, axis=0), np.amax(X, axis=0), d).reshape(1, -1), 2,
                                    return_distance=True)
        ujd.append(u_dist[0][1])
        w_dist, _ = nbrs.kneighbors(X.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True)
        wjd.append(w_dist[0][1])
    H = sum(ujd) / (sum(ujd) + sum(wjd))
    if isnan(H):
        print(ujd, wjd)
        H = 0
    return H
def hopkins_df(df): # adapted from: https://matevzkunaver.wordpress.com/2017/06/20/hopkins-test-for-cluster-tendency/
    """Hopkins statistic for a raw feature DataFrame.

    Strips the identifier/label/index columns and all-zero columns before
    computing the statistic. Stochastic: no random seed is fixed.
    """
    # Call rmName once instead of twice as the original did.
    stripped = rmName(df)
    if stripped is not False:
        df, nameCol = stripped
    if 'Names' in df.columns:
        del df['Names']
    if 'Unnamed: 0' in df.columns:
        del df['Unnamed: 0']
    df = df.loc[:, (df != 0).any(axis=0)]
    X = df.values
    d = X.shape[1]
    # d = len(vars) # columns
    n = len(X)  # rows
    m = int(0.1 * n)  # heuristic from article [1]
    # BUG FIX: X is already an ndarray here; the original called X.values and
    # X.iloc[...], both of which raise AttributeError on a numpy array.
    nbrs = NearestNeighbors(n_neighbors=1).fit(X)
    rand_X = sample(range(0, n, 1), m)
    ujd = []  # NN distances from uniform-random points to the real data
    wjd = []  # NN distances from sampled real points to their nearest other point
    for j in range(0, m):
        u_dist, _ = nbrs.kneighbors(uniform(np.amin(X, axis=0), np.amax(X, axis=0), d).reshape(1, -1), 2,
                                    return_distance=True)
        ujd.append(u_dist[0][1])
        w_dist, _ = nbrs.kneighbors(X[rand_X[j]].reshape(1, -1), 2, return_distance=True)
        wjd.append(w_dist[0][1])
    H = sum(ujd) / (sum(ujd) + sum(wjd))
    if isnan(H):
        print(ujd, wjd)
        H = 0
    return H
def dissimtosim(df):
    """Convert a dissimilarity matrix to a similarity matrix.

    Each entry becomes (global max) - value + (global min), so the largest
    dissimilarity maps to the smallest similarity and vice versa. Index and
    columns are preserved.
    """
    maxval = np.max(df.values)
    minval = np.min(df.values)
    # Scalars broadcast over the DataFrame; no need to materialise two
    # constant DataFrames (O(n^2) Python list building) as the original did,
    # and this avoids shadowing the builtins max/min.
    return maxval - df + minval
def removedoubles(df):
    """Drop rows whose index value repeats, keeping the first occurrence."""
    duplicate_mask = df.index.duplicated(keep='first')
    return df.loc[~duplicate_mask]
def dunnindexavg(df, dissim):
    """Ratio of mean intra-cluster dissimilarity to mean inter-cluster dissimilarity.

    `df` must carry cluster labels in 'Names'; `dissim` is a square
    dissimilarity DataFrame indexed by the same match names.
    """
    cluster_labels = list(set(df['Names']))
    # Member index lists per cluster, computed once.
    members = {c: list(set(df.loc[df['Names'] == c].index)) for c in cluster_labels}
    intra_means = []
    for label in cluster_labels:
        condensed = ssd.squareform(dissim[members[label]].loc[members[label]].values)
        intra_means.append(np.average(condensed))
    inter_means = []
    # Each unordered cluster pair contributes exactly once.
    for pos, label in enumerate(cluster_labels):
        for other in cluster_labels[pos + 1:]:
            block = dissim[members[label]].loc[members[other]]
            inter_means.append(np.average(block.values))
    return np.average(intra_means) / np.average(inter_means)
def dunnindex(df, dissim):
    """Ratio of the worst (largest) mean intra-cluster dissimilarity to the
    best (smallest) mean inter-cluster dissimilarity.

    Same inputs as dunnindexavg: `df` with a 'Names' label column and a
    square dissimilarity DataFrame `dissim` over the same index.
    """
    cluster_labels = list(set(df['Names']))
    members = {c: list(set(df.loc[df['Names'] == c].index)) for c in cluster_labels}
    intra_means = []
    for label in cluster_labels:
        condensed = ssd.squareform(dissim[members[label]].loc[members[label]].values)
        intra_means.append(np.average(condensed))
    inter_means = []
    # Visit each unordered pair of clusters exactly once.
    for pos, label in enumerate(cluster_labels):
        for other in cluster_labels[pos + 1:]:
            block = dissim[members[label]].loc[members[other]]
            inter_means.append(np.average(block.values))
    return np.max(intra_means) / np.min(inter_means)
def overtime(time):
    """Accumulate Manhattan distance matrices over replay snapshots 60s..time.

    Starts from the 60s snapshot and, for every 30s step up to `time`, adds
    the per-snapshot distance matrix of matches present in every snapshot so
    far, writing each cumulative matrix to CSV. Returns the final matrix.
    """
    dffirst = pd.read_csv('../data/Replays6-600s.csv')
    matches = list(set(dffirst['Name']))
    dffirst.index = dffirst['Name']
    dffirst = removedoubles(dffirst)
    del dffirst['Name']
    del dffirst['Unnamed: 0']
    df60 = pd.read_csv('../data/Replays6-60s.csv')
    df60.index = df60['Name']
    df60 = removedoubles(df60)
    matches2 = set(df60['Name'])
    # BUG FIX: the original removed elements from `matches` while iterating
    # over it, which silently skips entries; rebuild the list instead.
    matches = [m for m in matches if m in matches2]
    df60 = df60[df60['Name'].isin(matches)]
    del df60['Name']
    del df60['Unnamed: 0']
    dissimilarity = pd.DataFrame(data=distance_matrix(df60.values, df60.values, p=1), columns=df60.index, index=df60.index)
    for t in range(90, time+30, 30):
        dftmp = pd.read_csv('../data/Replays6-'+str(t)+'s.csv')
        matches2 = set(dftmp['Name'])
        # BUG FIX: same remove-while-iterating defect as above; collect the
        # matches missing from this snapshot, then drop them in one pass.
        dropped = [m for m in matches if m not in matches2]
        dissimilarity = dissimilarity.drop(dropped, axis=0)
        dissimilarity = dissimilarity.drop(dropped, axis=1)
        matches = [m for m in matches if m in matches2]
        dftmp = dftmp[dftmp['Name'].isin(matches)]
        dftmp.index = dftmp['Name']
        dftmp = removedoubles(dftmp)
        del dftmp['Name']
        del dftmp['Unnamed: 0']
        # NOTE(review): this assumes dftmp's row order matches dissimilarity's
        # index order -- confirm the CSVs share a stable ordering.
        d = pd.DataFrame(distance_matrix(dftmp.values, dftmp.values, p=1), index=dissimilarity.index, columns=dissimilarity.columns)
        values = d.values + dissimilarity.values
        dissimilarity = pd.DataFrame(data=values, columns=dissimilarity.columns, index=dissimilarity.index)
        dissimilarity.to_csv('../data/newdissimilaritymatrixto'+str(t)+'.csv', encoding='utf-8', index=True)
        print('t='+str(t)+' completed', end='\r')
    return dissimilarity
def overtime2():
    """Accumulate Manhattan distance matrices over all snapshots 60s..600s.

    Cleaner variant of overtime(): keeps only matches present in every
    snapshot seen so far, adds each snapshot's distance matrix to the running
    total and writes each cumulative matrix to CSV. Returns the final matrix.
    """
    df60 = pd.read_csv('../data/Replays6-60s.csv')
    df60.index = df60['Name']
    df60 = removedoubles(df60)
    df60 = df60.drop(['Name', 'Unnamed: 0'], axis=1)
    matches = list(df60.index)
    dissimilarity = pd.DataFrame(distance_matrix(df60.values, df60.values, p=1),
                                 columns=df60.index, index=df60.index)
    dissimilarity.to_csv('../data/new2dissimilaritymatrixto60.csv', encoding='utf-8', index=True)
    for t in range(90, 600+30, 30):
        dftmp = pd.read_csv('../data/Replays6-'+str(t)+'s.csv')
        dftmp.index = dftmp['Name']
        dftmp = removedoubles(dftmp)
        dftmp = dftmp.drop(['Name', 'Unnamed: 0'], axis=1)
        # Intersect with this snapshot's matches, preserving order.
        matches = [m for m in matches if m in list(dftmp.index)]
        dftmp = dftmp.loc[matches]
        dissimilarity = dissimilarity[matches].loc[matches]
        dissimilarity = pd.DataFrame(data=distance_matrix(dftmp.values, dftmp.values, p=1) + dissimilarity.values,
                                     columns=dissimilarity.columns, index=dissimilarity.index)
        dissimilarity.to_csv('../data/new2dissimilaritymatrixto'+str(t)+'.csv', encoding='utf-8', index=True)
        print('t='+str(t)+' completed', end='\r')
    return dissimilarity
def makeCompatible(df, dissim):
    """Align a feature frame and a dissimilarity frame on a common match set.

    NOTE(review): mutates the caller's `dissim` (re-indexes it and deletes
    its 'Name' column) before re-binding locally -- confirm callers expect that.
    Returns the filtered (df, dissim) pair, both indexed by match name.
    """
    dissim.index = dissim['Name']
    del dissim['Name']
    df = df.loc[df['Name'].isin(dissim.index)]
    df.index = df['Name']
    df = removedoubles(df)
    df = df.drop(['Name', 'Unnamed: 0'], axis=1)
    # Keep only matches present on both sides, in dissim order.
    common = [m for m in dissim.index if m in df.index]
    dissim = dissim[common].loc[common]
    return df, dissim
def checkOptimalClustering(t, expert):
    """Scan cluster counts for snapshot t and score each feasible clustering.

    `expert` is the minimum cluster-size fraction; counts from 2 up to
    1/expert are tried, and only clusterings where every cluster holds at
    least expert*N points are scored with dunnindex. Returns the accepted
    cluster counts and their scores.
    """
    dissim = pd.read_csv('../data/new2dissimilaritymatrixto' + str(t) + '.csv')
    df = pd.read_csv('../data/Replays6-' + str(t) + 's.csv')
    df, dissim = makeCompatible(df, dissim)
    mindists = list()
    cls = list()
    # Hierarchical (Ward) clustering on the condensed dissimilarity matrix.
    Z = linkage(ssd.squareform(dissim.values), method='ward')
    for i in range(2, int(1/expert)+1):
        cl = fcluster(Z, i, criterion='maxclust')
        df['Names'] = cl
        # Accept only if every cluster meets the minimum-size constraint.
        if all(i >= expert*len(df.index) for i in [len(df.loc[df['Names'] == c].index) for c in list(set(cl))]):
            mindists.append(dunnindex(df, dissim))
            cls.append(i)
        print('t='+str(t)+': '+str(i)+'/'+str(int(1/expert))+' clusters checked.')
    return cls, mindists
def pointBiserialovertime():
    """Run the point-biserial correlation analysis for each snapshot time.

    For every time in `times`, clusters the matches into 6 groups with Ward
    linkage on the 600s cumulative dissimilarity matrix, then plots the
    point-biserial correlations of the cluster dummies against the snapshot's
    features. `cls` and `j` are kept from an earlier experiment and are
    currently unused.
    """
    cls = [4, 2, 2, 3, 2, 5, 3, 4, 6, 4, 6, 4, 7, 6, 7, 3, 6, 6]
    times = [90, 120, 210,
             240, 270, 330,
             360, 390, 420,
             450, 480, 510,
             540, 570, 600]
    '''times = list()
    cls = list()
    minds=list()
    for t in range(90, 630, 30):
        clsopti, mind = checkOptimalClustering(t, expert)
        cls.append(clsopti[mind.index(np.min(mind))])
        minds.append(np.min(mind))
        times.append(t)
    print(cls)
    print(minds)'''
    j = 0
    for i in times:
        dissim = pd.read_csv('../data/newdissimilaritymatrixto600.csv')
        dissim.index = dissim['Name']
        matches = list(set(dissim['Name']))
        del dissim['Name']
        df = pd.read_csv('../data/Replays6-' + str(i) + 's.csv')
        df = df.loc[df['Name'].isin(matches)]
        df.index = df['Name']
        df = removedoubles(df)
        del df['Name']
        del df['Unnamed: 0']
        distArray = ssd.squareform(dissim.values)
        Z = linkage(distArray, method='ward')
        cl = fcluster(Z, 6, criterion='maxclust')
        df['Names'] = cl
        pointBiserial(df, [q for q in range(1, 7)])
        j = j + 1
    plt.show()
def parallellovertime():
    """Plot standardised per-cluster feature means as parallel coordinates,
    one figure per snapshot time.

    Matches are clustered into 6 groups (Ward linkage on the 600s cumulative
    dissimilarity matrix); for each cluster, the mean of every non-zero
    feature is z-scored against the snapshot-wide mean/std and plotted.
    The first `cls` list is immediately overwritten by the second; both and
    `j` are leftovers from an earlier experiment.
    """
    cls = [4, 2, 2,
           3, 2, 5,
           3, 4, 6,
           4, 6, 4,
           7, 6, 7,
           3, 6, 6]
    cls = [2, 2, 2,
           2, 3, 2,
           2, 2, 3,
           2, 3, 3,
           3, 4, 6]
    times = [90, 120, 210,
             240, 270, 330,
             360, 390, 420,
             450, 480, 510,
             540, 570, 600]
    '''times = list()
    cls = list()
    minds=list()
    for t in range(90, 630, 30):
        clsopti, mind = checkOptimalClustering(t, expert)
        cls.append(clsopti[mind.index(np.min(mind))])
        minds.append(np.min(mind))
        times.append(t)
    print(cls)
    print(minds)'''
    j = 0
    for i in times:
        print('t = ' + str(i)+'s.', end='\r')
        dissim = pd.read_csv('../data/newdissimilaritymatrixto600.csv')
        dissim.index = dissim['Name']
        matches = list(set(dissim['Name']))
        del dissim['Name']
        df = pd.read_csv('../data/Replays6-' + str(i) + 's.csv')
        df = df.loc[df['Name'].isin(matches)]
        df.index = df['Name']
        df = removedoubles(df)
        del df['Name']
        del df['Unnamed: 0']
        distArray = ssd.squareform(dissim.values)
        Z = linkage(distArray, method='ward')
        cl = fcluster(Z, 6, criterion='maxclust')
        df['Names'] = cl
        # Snapshot-wide statistics used for z-scoring; zero entries dropped.
        means = df.mean(axis=0)
        means = means.loc[means != 0]
        sds = df.std(axis=0)
        sds = sds.loc[sds != 0]
        sizes = list()
        parallell = pd.DataFrame()
        # NOTE: the loop variable shadows the `cl` array on purpose here.
        for cl in list(set(cl)):
            sizes.append(len(df.loc[df['Names'] == cl]))
            dftmp = df.loc[df['Names'] == cl]
            dftmp = dftmp.loc[:, (dftmp != 0).any(axis=0)]
            dftmp = dftmp.mean(axis=0)
            # z-score each cluster-mean feature against the global mean/std.
            for f in dftmp.index:
                mean = means.loc[means.index == f]
                sd = sds.loc[sds.index == f]
                dftmp[f] = (dftmp.loc[dftmp.index == f]-mean)/sd
            parallell[cl] = dftmp
        print(sizes)
        parallell = parallell.T
        parallell['Names'] = [('Cluster '+str(q))+', size = '+str(sizes[q-1]) for q in range(1, 7)]
        parallell.index = parallell['Names']
        parallelCoordinates(parallell)
        plt.show()
        j = j + 1
def projectOptimalClustering(t):
    """Cluster snapshot t into 6 groups and visualise it in 3D PCA space.

    Uses Ward linkage on the 570s cumulative dissimilarity matrix, projects
    the snapshot features onto the first three principal components and
    renders them with project_onto_R3. `cls` is an unused leftover.
    """
    cls = [4, 2, 2,
           3, 2, 5,
           3, 4, 6,
           4, 6, 4,
           7, 6, 7,
           3, 6, 6]
    dissim = pd.read_csv('../data/newdissimilaritymatrixto570.csv')
    dissim.index = dissim['Name']
    matches = list(set(dissim['Name']))
    del dissim['Name']
    df = pd.read_csv('../data/Replays6-'+str(t)+'s.csv')
    df = df.loc[df['Name'].isin(matches)]
    df.index = df['Name']
    df = removedoubles(df)
    df = df.drop(['Name', 'Unnamed: 0'], axis=1)
    Z = linkage(ssd.squareform(dissim.values), method='ward')
    #cl = fcluster(Z, cls[int(t/30)-3], criterion='maxclust')
    cl = fcluster(Z, 6, criterion='maxclust')
    df['Names'] = cl
    df = getPCs(df, 3)
    project_onto_R3(df, ['PC ' + str(i) for i in range(1, 4)])
    plt.show()
| JohnSegerstedt/DATX02-19-81 | Fredrik_Hs_mapp/Methods.py | Methods.py | py | 35,432 | python | en | code | 4 | github-code | 13 |
23422879323 | from lib.coefficients import Coefficients
from utils.menu import main_menu
# Main loop: keep showing the fuel-selection menu; when the user picks a
# combustible, report its mean specific heat. Loops forever (no exit branch).
while True:
    combustible = main_menu()
    if combustible:
        coefficients = Coefficients(*combustible)
        # NOTE(review): t_gh + 273.15 converts Celsius to Kelvin, but the
        # printed label says °C -- confirm which unit is intended.
        print(f"Mean specific heat of {coefficients.combustible.name.title()} at {coefficients.t_gh + 273.15}°C is:\n"
              f"{coefficients.get_cp_flue_gas()} kJ/kgK.")
| KoteTheInnkeeper/cp_flue_gas | app.py | app.py | py | 367 | python | en | code | 0 | github-code | 13 |
2349306340 | import botsdk.util.HttpRequest
import json
from botsdk.BotModule.Adapter import Adapter
class MiraiAdapter(Adapter):
    """Adapter that talks to a mirai-api-http endpoint over HTTP."""

    def init(self, data):
        # Base URL of the HTTP service; `parameter["path"]` is appended per call.
        self.url = data["path"]

    async def get(self, parameter, **kwargs):
        """GET `parameter["path"]` with kwargs encoded as the query string."""
        pairs = ["=".join([name, kwargs[name]]) for name in kwargs]
        target = self.url + parameter["path"] + "?" + "&".join(pairs)
        response = await botsdk.util.HttpRequest.get(target)
        return json.loads(response)

    async def post(self, parameter, **kwargs):
        """POST kwargs as the request body to `parameter["path"]`."""
        response = await botsdk.util.HttpRequest.post(
            self.url + parameter["path"], kwargs)
        return json.loads(response)
| f88af65a/XyzB0ts | bot/Mirai/Adapter.py | Adapter.py | py | 603 | python | en | code | 6 | github-code | 13 |
34919425339 | import pandas as pd
import scipy.sparse as sp
import os
def create_URM_matrix(ratings_df):
    """Build the user-rating matrix (users x items) as a CSR sparse matrix."""
    rows = ratings_df["user_id"].values
    cols = ratings_df["item_id"].values
    vals = ratings_df["data"].values
    return sp.csr_matrix((vals, (rows, cols)))
def create_ICM_matrix(dataframe):
    """Build the item-content matrix (items x features) as a CSR sparse matrix."""
    rows = dataframe["item_id"].values
    cols = dataframe["feature_id"].values
    vals = dataframe["data"].values
    return sp.csr_matrix((vals, (rows, cols)))
def combine_matrices(URM: sp.csr_matrix, ICM: sp.csr_matrix):
    """Vertically stack URM on top of ICM transposed.

    Returns the stacked matrix and its transpose, both as CSR.
    """
    stacked_URM = sp.csr_matrix(sp.vstack([URM, ICM.T]))
    stacked_ICM = sp.csr_matrix(stacked_URM.T)
    return stacked_URM, stacked_ICM
def load_URM():
    """Load the interactions CSV and build a binary URM.

    presumably data == 0 marks a 'watched' interaction in the raw CSV --
    verify against the dataset description; those entries are promoted to 1
    so every interaction becomes implicit positive feedback.
    """
    interactions_df = load_data_interactions()
    # Make watched = 1
    interactions_df.loc[interactions_df['data'] == 0, "data"] = 1
    # Drop duplicates
    interactions_df.drop_duplicates(subset=['user_id', 'item_id'], inplace=True)
    URM_all = create_URM_matrix(interactions_df)
    return URM_all
def load_data():
    """Load interactions plus both ICMs and build all sparse matrices.

    Returns (URM_all, ICM_type, ICM_length, ICM_all); feature ids from the
    two ICM files are re-mapped into one shared contiguous id space so the
    matrices can be horizontally stacked.
    """
    interactions_df = load_data_interactions()
    length_df = load_data_length()
    type_df = load_data_type()
    interactions_df.drop_duplicates()
    interactions_df.loc[interactions_df['data'] == 0, "data"] = 1
    # Remove cold items
    length_df = length_df[length_df.item_id.isin(interactions_df.item_id)]
    type_df = type_df[type_df.item_id.isin(interactions_df.item_id)]
    # FEATURES
    # Build a single mapping original_feature_id -> contiguous index across
    # both feature files.
    all_features_indices = pd.concat([length_df["feature_id"], type_df["feature_id"]], ignore_index=True)
    mapped_id, original_id = pd.factorize(all_features_indices.unique())
    print("Unique features: {}".format(len(original_id)))
    features_original_ID_to_index = pd.Series(mapped_id, index=original_id)
    length_df["feature_id"] = length_df["feature_id"].map(features_original_ID_to_index)
    type_df["feature_id"] = type_df["feature_id"].map(features_original_ID_to_index)
    URM_all = create_URM_matrix(interactions_df)
    ICM_length = create_ICM_matrix(length_df)
    ICM_type = create_ICM_matrix(type_df)
    ICM_all = sp.hstack([ICM_type, ICM_length])
    return URM_all, ICM_type, ICM_length, ICM_all
def load_data_interactions():
    """Load the interactions CSV if present; return None otherwise."""
    path = "../data/interactions_and_impressions.csv"
    if not os.path.exists(path):
        print("interactions_and_impressions not found.")
        return None
    print('interactions_and_impressions found!')
    return pd.read_csv(
        path,
        sep=",",
        names=["user_id", "item_id", "impressions", "data"],
        header=0,
        dtype={"user_id": int, "item_id": int, "impressions": str, "data": int})
def load_data_length():
    """Load the item-length ICM CSV if present; return None otherwise."""
    path = "../data/data_ICM_length.csv"
    if not os.path.exists(path):
        print("data_ICM_length not found.")
        return None
    print('data_ICM_length found!')
    return pd.read_csv(path,
                       sep=",",
                       names=["item_id", "feature_id", "data"],
                       header=0,
                       dtype={"item_id": int, "feature_id": int, "data": int})
def load_data_type():
    """Load the item-type ICM CSV if present; return None otherwise."""
    if os.path.exists("../data/data_ICM_type.csv"):
        print('data_ICM_type found!')
        # BUG FIX: the original checked the relative path but then read a
        # hard-coded machine-specific absolute path; read the path it checked.
        return pd.read_csv("../data/data_ICM_type.csv",
                           sep=",",
                           names=["item_id", "feature_id", "data"],
                           header=0,
                           dtype={"item_id": int, "feature_id": int, "data": int})
    else:
        print("data_ICM_type not found.")
        return None
def load_users_for_submission():
    """Load the target-user list CSV if present; return None otherwise."""
    if os.path.exists("../data/data_target_users_test.csv"):
        print('data_target_users_test found!')
        # BUG FIX: the original checked the relative path but then read a
        # hard-coded machine-specific absolute path; read the path it checked.
        return pd.read_csv(
            "../data/data_target_users_test.csv",
            names=['user_id'],
            header=0,
            dtype={"user_id": int})
    else:
        print("data_target_users_test not found.")
        return None
def create_submission(recommender):
    """Build (user_id, top-10 recommended item ids) pairs for every target user.

    `recommender` must expose recommend(user_id_array=..., cutoff=...);
    returns a list of tuples ready for write_submission.
    """
    users_df = load_users_for_submission()
    submission = []
    for user_id in users_df["user_id"].values:
        submission.append((user_id, recommender.recommend(user_id_array=user_id, cutoff=10)))
    return submission
def write_submission(submission, file_name):
    """Write (user_id, item list) pairs to ../submissions/<file_name>.csv.

    One line per user in the competition format:
    `user_id,item1 item2 ... itemN`.
    """
    # Robustness: make sure the output directory exists before opening.
    os.makedirs("../submissions", exist_ok=True)
    with open("../submissions/" + file_name + ".csv",
              "w") as f:
        f.write("user_id,item_list\n")
        for user_id, items in submission:
            f.write(f"{user_id},{' '.join(str(item) for item in items)}\n")
| redaellimattia/RecSys-Competition-2022-Polimi | utils/data_util.py | data_util.py | py | 4,702 | python | en | code | 0 | github-code | 13 |
43142128576 | # -*- coding: utf-8 -*-
#################################################################################################
import logging
import sqlite3
import sys
import traceback
import xbmc
import xbmcgui
import xbmcplugin
import xbmcvfs
from views import Playlist, VideoNodes
from utils import window, should_stop, settings, language
#################################################################################################
log = logging.getLogger("EMBY."+__name__)
KODI = xbmc.getInfoLabel('System.BuildVersion')[:2]
#################################################################################################
def video_database():
    """Resolve the MyVideos database path for the running Kodi major version."""
    # Kodi major version -> MyVideos schema number.
    schema_by_version = {
        '13': 78,  # Gotham
        '14': 90,  # Helix
        '15': 93,  # Isengard
        '16': 99,  # Jarvis
        '17': 107, # Krypton
        '18': 109  # Leia
    }
    suffix = schema_by_version.get(KODI, "")
    return xbmc.translatePath("special://database/MyVideos%s.db"
                              % suffix).decode('utf-8')
def music_database():
    """Resolve the MyMusic database path for the running Kodi major version."""
    # Kodi major version -> MyMusic schema number.
    schema_by_version = {
        '13': 46,  # Gotham
        '14': 48,  # Helix
        '15': 52,  # Isengard
        '16': 56,  # Jarvis
        '17': 60,  # Krypton
        '18': 68   # Leia
    }
    suffix = schema_by_version.get(KODI, "")
    return xbmc.translatePath("special://database/MyMusic%s.db"
                              % suffix).decode('utf-8')
def texture_database():
    """Return the path to Kodi's cached-artwork (Textures13) database."""
    return xbmc.translatePath("special://database/Textures13.db").decode('utf-8')
def emby_database():
    """Return the path to this add-on's own emby.db mapping database."""
    return xbmc.translatePath("special://database/emby.db").decode('utf-8')
def kodi_commit():
    """Wait (up to ~10s) for a running Kodi library scan before committing.

    Returns True when it is safe to commit, False when the add-on was asked
    to stop while waiting.
    """
    # verification for the Kodi video scan
    kodi_scan = window('emby_kodiScan') == "true"
    count = 0

    while kodi_scan:
        log.info("kodi scan is running, waiting...")

        if count == 10:
            # Assume the flag is stale after 10 retries and proceed anyway.
            log.info("flag still active, but will try to commit")
            window('emby_kodiScan', clear=True)

        elif should_stop() or xbmc.Monitor().waitForAbort(1):
            log.info("commit unsuccessful. sync terminating")
            return False

        kodi_scan = window('emby_kodiScan') == "true"
        count += 1

    return True
class DatabaseConn(object):
    """SQLite connection wrapper, used as a context manager.

    Usage: with DatabaseConn() as cursor: ...
    __enter__ yields a cursor; __exit__ optionally commits and always closes.
    """

    def __init__(self, database_file="video", commit_on_close=True, timeout=120):

        """
        database_file can be custom: emby, texture, music, video, :memory: or path to the file
        commit_mode set to None to autocommit (isolation_level). See python documentation.
        """
        self.db_file = database_file
        self.commit_on_close = commit_on_close
        self.timeout = timeout

    def __enter__(self):

        # Open the connection
        self.path = self._SQL(self.db_file)
        #traceback.print_stack()

        if settings('dblock') == "true":
            # isolation_level=None puts sqlite3 in autocommit mode.
            self.conn = sqlite3.connect(self.path, isolation_level=None, timeout=self.timeout)
        else:
            self.conn = sqlite3.connect(self.path, timeout=self.timeout)

        log.info("opened: %s - %s", self.path, id(self.conn))
        self.cursor = self.conn.cursor()

        if self.db_file == "emby":
            # Ensure the add-on's own schema exists before first use.
            verify_emby_database(self.cursor)
            self.conn.commit()

        return self.cursor

    def _SQL(self, media_type):
        # Map a logical database name to its file path; anything else is
        # treated as a literal path (e.g. ":memory:").

        databases = {
            'emby': emby_database,
            'texture': texture_database,
            'music': music_database,
            'video': video_database
        }
        return databases[media_type]() if media_type in databases else self.db_file

    def __exit__(self, exc_type, exc_val, exc_tb):

        # Close the connection
        changes = self.conn.total_changes

        if exc_type is not None:
            # Errors were raised in the with statement
            # NOTE(review): changes are still committed below even after an
            # exception -- confirm this is intentional.
            log.error("Type: %s Value: %s", exc_type, exc_val)

        if self.commit_on_close == True and changes:
            log.info("number of rows updated: %s", changes)
            if self.db_file == "video":
                # Wait for any in-flight Kodi library scan first.
                kodi_commit()

            self.conn.commit()
            log.info("commit: %s", self.path)

        log.info("closing: %s - %s", self.path, id(self.conn))
        self.cursor.close()
        self.conn.close()
def verify_emby_database(cursor):
    """Create the add-on's emby.db schema if missing and migrate old tables.

    Tables: emby (item id mapping), view (library views), version.
    Also adds the group_series column to pre-existing view tables.
    """
    # Create the tables for the emby database
    # emby, view, version
    log.info("Verifying emby DB")
    cursor.execute(
        """CREATE TABLE IF NOT EXISTS emby(
        emby_id TEXT UNIQUE, media_folder TEXT, emby_type TEXT, media_type TEXT,
        kodi_id INTEGER, kodi_fileid INTEGER, kodi_pathid INTEGER, parent_id INTEGER,
        checksum INTEGER)""")
    cursor.execute(
        """CREATE TABLE IF NOT EXISTS view(
        view_id TEXT UNIQUE, view_name TEXT, media_type TEXT, kodi_tagid INTEGER, group_series TEXT)""")
    cursor.execute("CREATE TABLE IF NOT EXISTS version(idVersion TEXT)")

    # Migration: older installs lack the group_series column.
    columns = cursor.execute("SELECT * FROM view")
    if 'group_series' not in [description[0] for description in columns.description]:
        log.info("Add missing column group_series")
        cursor.execute("ALTER TABLE view ADD COLUMN group_series 'TEXT'")
def db_reset():
    """Factory-reset the add-on: wipe Kodi and emby databases after confirmation.

    Asks the user first, waits for any running sync to stop, deletes
    playlists/nodes, empties the video/music/emby databases, optionally
    clears cached artwork and stored settings, then restarts Kodi.
    """

    dialog = xbmcgui.Dialog()

    if not dialog.yesno(language(29999), language(33074)):
        return

    # first stop any db sync
    window('emby_online', value="reset")
    window('emby_shouldStop', value="true")

    count = 10
    while window('emby_dbScan') == "true":
        log.info("Sync is running, will retry: %s..." % count)
        count -= 1
        if count == 0:
            # Give up after ~10s; the user is told to retry later.
            dialog.ok(language(29999), language(33085))
            return
        xbmc.sleep(1000)

    # Clean up the playlists
    Playlist().delete_playlists()

    # Clean up the video nodes
    VideoNodes().deleteNodes()

    # Wipe the kodi databases
    log.warn("Resetting the Kodi video database.")
    with DatabaseConn('video') as cursor:
        cursor.execute('SELECT tbl_name FROM sqlite_master WHERE type="table"')
        rows = cursor.fetchall()
        for row in rows:
            tablename = row[0]
            if tablename != "version":
                cursor.execute("DELETE FROM " + tablename)

    if settings('enableMusic') == "true":
        log.warn("Resetting the Kodi music database.")
        with DatabaseConn('music') as cursor:
            cursor.execute('SELECT tbl_name FROM sqlite_master WHERE type="table"')
            rows = cursor.fetchall()
            for row in rows:
                tablename = row[0]
                if tablename != "version":
                    cursor.execute("DELETE FROM " + tablename)

    # Wipe the emby database
    log.warn("Resetting the Emby database.")
    with DatabaseConn('emby') as cursor:
        cursor.execute('SELECT tbl_name FROM sqlite_master WHERE type="table"')
        rows = cursor.fetchall()
        for row in rows:
            tablename = row[0]
            if tablename != "version":
                cursor.execute("DELETE FROM " + tablename)
        # Drop the schema as well; it is recreated on next connect.
        cursor.execute('DROP table IF EXISTS emby')
        cursor.execute('DROP table IF EXISTS view')
        cursor.execute("DROP table IF EXISTS version")

    # Offer to wipe cached thumbnails
    if dialog.yesno(language(29999), language(33086)):
        log.warn("Resetting all cached artwork")
        # Remove all existing textures first
        import artwork
        artwork.Artwork().delete_cache()

    # reset the install run flag
    settings('SyncInstallRunDone', value="false")

    # Remove emby info
    resp = dialog.yesno(language(29999), language(33087))
    if resp:
        import connectmanager
        # Delete the settings
        addondir = xbmc.translatePath(
                   "special://profile/addon_data/plugin.video.emby/").decode('utf-8')
        dataPath = "%ssettings.xml" % addondir
        xbmcvfs.delete(dataPath)
        connectmanager.ConnectManager().clear_data()

    dialog.ok(heading=language(29999), line1=language(33088))
    xbmc.executebuiltin('RestartApp')
    # Best-effort: only meaningful when invoked as a plugin with a handle.
    try:
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xbmcgui.ListItem())
    except:
        pass
| CaoZ/plugin.video.emby | resources/lib/database.py | database.py | py | 8,342 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.