id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1722156 | def test_hello():
print("test hello")
| StarcoderdataPython |
1755571 | import io
from ast import AST
from typing import Optional, TextIO, Tuple, Union
from .context import TranspilerContext
from .retokenizer import retokenize
from .transpiler import IMPL_NAME, transpile_ast, transpile_source
def source_from_filename(filename: str) -> str:
    """Read and return the entire contents of the script at *filename*.

    The file is decoded as UTF-8 — the default encoding for Python source
    files (PEP 3120) — rather than the platform locale encoding, so the
    result is stable across operating systems.
    """
    with open(filename, 'r', encoding='utf-8') as script_file:
        return script_file.read()
def validate(source: str, context: TranspilerContext) -> None:
    """Reject source that contains reserved names or tab indentation."""
    # TODO, FIXME: very dumb check -- plain substring scan, not token-aware.
    for reserved in (IMPL_NAME, context.prefix):
        if reserved in source:
            raise ValueError(f'Found reserved name/prefix {reserved} in script')
    # TODO: control with some flag ??
    if '\n\t' in source:
        raise ValueError('Tabs are forbidden! Use spaces for indentation')
def transpile(
    file: Optional[Union[str, TextIO]] = None,
    source: Optional[str] = None,
    context: Optional[TranspilerContext] = None,
) -> AST:
    """Transpile a script given as an open text file, a file name, or raw source.

    Exactly one of *file* and *source* must be provided; *context* is built
    automatically (carrying the originating filename when known) if omitted.

    Returns the transpiled AST produced by ``transpile_source``.
    """
    # BUG FIX: the original condition was ``file or source``, which only
    # checked that at least one was given (and wrongly rejected an empty
    # source string) even though the message says "not both!". Enforce
    # exactly-one with identity checks.
    assert (file is None) != (source is None), (
        'Provide either opened file (or file name) or source code for '
        'transpiling, not both!'
    )
    filename = None
    # io.TextIOBase covers TextIOWrapper as well as other text streams
    # (e.g. io.StringIO), which may not carry a ``name`` attribute.
    if isinstance(file, io.TextIOBase):
        source = file.read()
        filename = getattr(file, 'name', None)
    if source is None:
        # *file* is a path string here.
        source = source_from_filename(file)  # type: ignore
        filename = file  # type: ignore
    context = context or TranspilerContext(filename=filename)
    validate(source, context)
    return transpile_source(source, context)
__all__ = ['transpile', 'validate', 'retokenize', 'transpile_ast', 'TranspilerContext']
| StarcoderdataPython |
3219932 | import sys
sys.path.append('.')
import numpy as np
from functions.Voltages import Voltages
from numpy import sin, cos, pi
import os
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, freqz
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital Butterworth low-pass filter.

    Returns the ``(b, a)`` transfer-function coefficients for a filter of
    the given *order*, with *cutoff* (Hz) normalized against the Nyquist
    frequency of sample rate *fs*.
    """
    nyquist = fs * 0.5
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order):
    """Low-pass filter *data* with a Butterworth design (see butter_lowpass).

    Uses ``scipy.signal.lfilter`` — a causal single-pass filter — so the
    output carries the filter's phase delay (unlike ``filtfilt``).
    """
    b, a = butter_lowpass(cutoff, fs, order=order)
    y = lfilter(b, a, data)
    return y
# Observation point coordinates (metres).
x = 5e-3
y = 0.0e-3
z = 0
# Anisotropic conductivities along x/y/z (units per the Voltages model —
# TODO confirm against functions.Voltages).
sigmaXX=0.6
sigmaYY=0.083
sigmaZZ=0.083
# Electrode current amplitude (amperes).
I=4e-3
# Broadcastable grid for the single observation point.
X, Y, Z = np.meshgrid(x, y, z)
# Four current electrodes at (+/-5 mm, +/-5 mm, 0) with alternating +I/-I
# polarity; requested outputs include the potential and its derivatives.
VS1s=Voltages(X,Y,Z,-5e-3,+5e-3,+0e-3,+I,sigmaXX,sigmaYY,sigmaZZ,'V','Vx','Vy','Vxx','Vyy','Vxy')
VS2s=Voltages(X,Y,Z,+5e-3,+5e-3,+0e-3,-I,sigmaXX,sigmaYY,sigmaZZ,'V','Vx','Vy','Vxx','Vyy','Vxy')
VS3s=Voltages(X,Y,Z,-5e-3,-5e-3,+0e-3,+I,sigmaXX,sigmaYY,sigmaZZ,'V','Vx','Vy','Vxx','Vyy','Vxy')
VS4s=Voltages(X,Y,Z,+5e-3,-5e-3,+0e-3,-I,sigmaXX,sigmaYY,sigmaZZ,'V','Vx','Vy','Vxx','Vyy','Vxy')
# Superpose electrode pairs: V1 drives carrier f1, V2 drives carrier f2.
V1=VS1s['V']+VS2s['V']
V2=VS3s['V']+VS4s['V']
fig,ax=plt.subplots(num=0)
# Two stimulation frequencies 20 Hz apart (beat frequency f1-f2 = 20 Hz).
f1 = 1000+20
f2 = 1000
# Sampling rate in Hz.
fs = 1000*50*4
# Number of beat cycles to simulate.
N=4
Phase=0.
time_duration = (N+(0+Phase)/2.0)/(f1-f2)
stim_duration = (N+(0+Phase)/2.0)/(f1-f2)
dt = 1/fs
t=np.arange(0,time_duration,dt)
# Collapse the 1x1x1 meshgrid potentials to scalar amplitudes.
A1=np.squeeze(V1)
A2=np.squeeze(V2)
# Two sinusoidal carriers whose sum is an amplitude-modulated beat signal.
y1= A1*sin(2*pi*f1*t + 0)
y2= A2*sin(2*pi*f2*t + 0)
yS=y1+y2
# Low-pass the summed signal at 1 kHz — presumably to attenuate the ~1 kHz
# carriers and expose the 20 Hz beat envelope; TODO confirm intent.
cutoff=1000
order=4
yF = butter_lowpass_filter((yS), cutoff, fs, order)
ax.plot(1000*t,yF,alpha=1,linewidth=2,label='Low-Pass(Sum)')
# Strip axes/ticks for a clean presentation figure.
ax.set_frame_on(False)
plt.xticks([])
plt.yticks([])
# Beat period in milliseconds; zoom x-axis onto two of the later cycles.
Cycle=1000/(f1-f2)
plt.xlim([(N-2.5)*Cycle,(N-0.5)*Cycle])
plt.show() | StarcoderdataPython |
3230235 | <gh_stars>0
import pickle
import sys
sys.path.append("..")
from model import ECO
import paddle.fluid as fluid
# The pretrained model exceeds the 150 MB hosting threshold, so its state
# dict was pickled in two parts; load both halves and merge them back into
# a single parameter dict before restoring the network.
with open('seg0.pkl', 'rb') as f0:
    model_0 = pickle.load(f0)
with open('seg1.pkl', 'rb') as f1:
    model_1 = pickle.load(f1)
# Merge; later keys win on collision, matching the original sequential
# copy loops. (Fixes the original's unclosed file handles and discards
# the unused enumerate indices.)
model_out = {**model_0, **model_1}
with fluid.dygraph.guard():
    # Rebuild the network and restore the merged weights, then re-save in
    # paddle's native dygraph checkpoint format.
    paddle_model = ECO.ECO(num_classes=101, num_segments=24)
    paddle_model.load_dict(model_out)
    fluid.dygraph.save_dygraph(paddle_model.state_dict(), 'ECO_FULL_RGB__seg16')
print('finished')
| StarcoderdataPython |
1649654 | #!/usr/bin/env python3
import os
import psutil
import signal
import subprocess
import sys
import time
from panda import Panda
serials = Panda.list()
# NOTE(review): num_pandas is computed but never used below.
num_pandas = len(serials)
if serials:
    # If panda is found, kill boardd, if boardd is flapping, and UsbPowerMode is CDP when shutdown,
    # device has a possibility of rebooting. Also, we need control of USB so we can force UsbPowerMode to client.
    for proc in psutil.process_iter():
        if proc.name() == 'boardd':
            os.kill(proc.pid, signal.SIGKILL)
            # Give the killed process a moment to release the USB device.
            time.sleep(1)
            break
    # set usb power to client
    for serial in serials:
        panda = Panda(serial)
        panda.set_usb_power(1)
# execute system shutdown (runs whether or not a panda was found)
os.system('LD_LIBRARY_PATH="" svc power shutdown')
| StarcoderdataPython |
3349036 | <gh_stars>0
from django.contrib import admin
from django.db import models
from django.forms.widgets import ClearableFileInput # This is what ImageFields use by default, we're going to customize ours a little.
from project.persons.models import Person
class ImageWidget(ClearableFileInput):
    """File-input widget for image fields rendered with a custom template
    (image_widget.html) instead of Django's default ClearableFileInput markup."""
    template_name = "image_widget.html"
class PersonAdmin(admin.ModelAdmin):
    """Admin for Person that swaps every ImageField widget for ImageWidget."""
    formfield_overrides = {
        models.ImageField: {'widget': ImageWidget},
    }
admin.site.register(Person, PersonAdmin)
| StarcoderdataPython |
4800353 | name = 'doctest-cli'
| StarcoderdataPython |
1786074 | import os
import sys
import argparse
import shutil
#Globals
g_includedFiles = []
class COLORS:
    """ANSI escape sequences used for colored terminal output."""
    DEFAULT = '\033[0m'
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    ERROR = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

def printColor(text, color=COLORS.DEFAULT, resetColor=True):
    '''
    Prints colored text to the terminal. When resetColor is True the
    color is reset afterwards; otherwise it bleeds into following output.
    '''
    suffix = COLORS.DEFAULT if resetColor else ""
    print("{}{}{}".format(color, text, suffix))
def createMonolithicVerilog(outputFile, inputFile, sourceDirectory):
    '''
    Recursive function that generates a linked monolithic verilog file.

    Copies inputFile into outputFile line by line; every `include directive
    is replaced inline with the contents of the referenced file (resolved
    relative to sourceDirectory). The module-level list g_includedFiles acts
    as a global include guard so each file is expanded at most once.
    '''
    inline = " "  # non-empty sentinel so the loop body runs at least once
    while(inline):
        inline = inputFile.readline()  # empty string at EOF ends the loop
        if ("`include" in inline):
            # Strip the directive, any trailing "//" comment, and the quotes
            # around the filename.
            # NOTE(review): a space between the closing quote and a trailing
            # comment may survive as whitespace in the filename — verify.
            newInputFilename = inline.replace("`include","").strip().rstrip().split("//")[0].replace("\"","")
            newInputFilepath = os.path.join(sourceDirectory, newInputFilename)
            #Insert file contents if not already included
            if not (newInputFilepath in g_includedFiles):
                g_includedFiles.append(newInputFilepath)
                newInputFile = open(newInputFilepath, "r")
                createMonolithicVerilog(outputFile, newInputFile, sourceDirectory)
                newInputFile.close()
        else:
            outputFile.write(inline)
    # Separate concatenated files with a trailing newline.
    outputFile.write("\n")
    return
if __name__ == '__main__':
    # Read command line arguments
    helpdesc = '''
    help yourself
    '''
    parser = argparse.ArgumentParser(description = helpdesc)
    parser.add_argument("verilogPath", action="store", help="Filepath to top-level verilog")
    parser.add_argument("-synthesize", action="store_true", dest="synthesize", help="Include to run qflow synthesis")
    parser.add_argument("-sta", action="store_true", dest="sta", help="Include to run qflow static timing analysis")
    args = parser.parse_args()
    verilogPath = args.verilogPath
    synthesizeFlag = args.synthesize
    staFlag = args.sta
    if (not verilogPath):
        printColor("ERROR: -rtl <verilog_path> arg required", color=COLORS.ERROR)
        sys.exit()

    # Create output directory and/or remove old synthesis results. The tree
    # is qflowSynthesis/<module>/{source,synthesis,layout}, rebuilt from
    # scratch on every run.
    printColor("Generating qflow directories\n", color=COLORS.UNDERLINE)
    sourceDirectory, fileName = os.path.split(verilogPath)
    moduleName = fileName.split(".")[0].strip()
    moduleDir = os.path.join("qflowSynthesis", moduleName)
    if (not os.path.exists("qflowSynthesis")):
        os.mkdir("qflowSynthesis")
    if os.path.exists(moduleDir):
        shutil.rmtree(moduleDir)  # discard any previous synthesis run
    os.mkdir(moduleDir)
    # Create subdirectories (deduplicated from the original copy-pasted
    # if/else branches).
    for subdir in ("source", "synthesis", "layout"):
        os.mkdir(os.path.join(moduleDir, subdir))

    # Create linked monolithic verilog file for synthesis; context managers
    # guarantee the handles are closed even if expansion fails.
    printColor("Generating monolithic verilog\n", color=COLORS.UNDERLINE)
    g_includedFiles.append(verilogPath)
    monolithicFilepath = os.path.join(moduleDir, "source", fileName)
    monolithicFilename = fileName
    with open(monolithicFilepath, "w") as outputFile, open(verilogPath, "r") as inputFile:
        createMonolithicVerilog(outputFile, inputFile, sourceDirectory)

    # Run qflow steps if specified | http://opencircuitdesign.com/qflow/
    os.chdir(moduleDir)
    if(synthesizeFlag):
        printColor("Running qflow synthesis", color=COLORS.UNDERLINE)
        command = "qflow synthesize {} > {}_synth.log".format(moduleName, moduleName)
        print("    + {}".format(command))
        os.system(command)
        # Output errors to terminal
        # BUG FIX: the original grepped "synth.log" (with a no-op .format),
        # but the log written above is "<module>_synth.log".
        command = "cat {}_synth.log | grep ERROR".format(moduleName)
        printColor("\r", color=COLORS.ERROR, resetColor=False)
        os.system(command)
        printColor("", color=COLORS.DEFAULT)
    if(staFlag):
        printColor("Running qflow static timing analysis", color=COLORS.UNDERLINE)
        command = "qflow sta {} > {}_sta.log".format(moduleName, moduleName)
        print("    + {}".format(command))
        os.system(command)
        # Output errors to terminal
        command = "cat {}_sta.log | grep error".format(moduleName)
        printColor("", color=COLORS.ERROR, resetColor=False)
        os.system(command)
printColor("", color=COLORS.DEFAULT) | StarcoderdataPython |
4830317 | <reponame>joshuapatel/PewDiePie
import discord
from discord.ext import commands
class EconomyPhrases(commands.Cog):
    """Owner-managed phrase pools for the economy commands.

    Phrases live in the ``shovel`` and ``crime`` tables; each row has a
    text body and a boolean ``fate`` (True = positive outcome). The
    in-memory caches on ``bot.econ`` are refreshed after every mutation.
    Note: table names interpolated into SQL below are always one of the
    two literals "shovel"/"crime", so the f-strings are injection-safe.
    """

    def __init__(self, bot):
        self.bot = bot

    async def update_shovel(self):
        # Refresh the cached positive/negative shovel phrase pools.
        self.bot.econ["pos"] = await self.bot.pool.fetch("SELECT name, id FROM shovel WHERE fate = true")
        self.bot.econ["neg"] = await self.bot.pool.fetch("SELECT name, id FROM shovel WHERE fate = false")

    async def update_crime(self):
        # Refresh the cached positive/negative crime phrase pools.
        self.bot.econ["crime"]["pos"] = await self.bot.pool.fetch("SELECT name, id FROM crime WHERE fate = true")
        self.bot.econ["crime"]["neg"] = await self.bot.pool.fetch("SELECT name, id FROM crime WHERE fate = false")

    async def _send_phrase_not_found(self, ctx, pid):
        # Shared "unknown phrase id" reply (deduplicated from the lookup,
        # edit and delete paths, which repeated this embed verbatim).
        em = discord.Embed(color = discord.Color.dark_teal())
        em.add_field(name = "Phrase Not Found", value = f"Phrase #{pid} could not be found")
        await ctx.send(embed = em)

    @commands.group(invoke_without_command = True)
    async def phrase(self, ctx, pid: int, u: str = "shovel"):
        """Show the raw text of phrase *pid* from the shovel (default) or crime table."""
        if "crime" in u:
            table = "crime"
        else:
            table = "shovel"
        # BUG FIX (idiom): record presence checks below use ``is None``
        # instead of the original ``== None``.
        pcheck = await self.bot.pool.fetchrow(f"SELECT name, fate FROM {table} WHERE id = $1", pid)
        if pcheck is None:
            await self._send_phrase_not_found(ctx, pid)
            return
        fate = pcheck["fate"]
        p = pcheck["name"]
        # Green embed for positive-fate phrases, red for negative.
        if fate:
            em = discord.Embed(color = discord.Color.green())
        else:
            em = discord.Embed(color = discord.Color.red())
        em.add_field(name = "Raw Phrase", value = p)
        em.set_footer(text = f"Phrase #{pid}")
        await ctx.send(embed = em)

    @phrase.command()
    @commands.is_owner()
    async def add(self, ctx, fate: bool, *, phrase: str):
        """Add a phrase. A "<-- ADD CRIME -->" prefix routes it to the crime table."""
        if phrase.startswith("<-- ADD CRIME -->"):
            phrase = phrase.replace("<-- ADD CRIME -->", "")
            table = "crime"
        else:
            table = "shovel"
        await self.bot.pool.execute(f"INSERT INTO {table} VALUES ($1, $2)", phrase, fate)
        pid = await self.bot.pool.fetchval(f"SELECT id FROM {table} WHERE name = $1 AND fate = $2", phrase, fate)
        if fate:
            em = discord.Embed(color = discord.Color.green())
        else:
            em = discord.Embed(color = discord.Color.red())
        em.add_field(name = "Added Phrase", value = f"The phrase has been added to the {table} command. Fate: {fate}")
        em.set_footer(text = f"Phrase #{pid}")
        await ctx.send(embed = em)
        # Keep the in-memory pools in sync with the table we touched.
        if table == "shovel":
            await self.update_shovel()
        else:
            await self.update_crime()

    @phrase.command()
    @commands.is_owner()
    async def edit(self, ctx, pid: int, *, phrase: str):
        """Replace the text of phrase *pid*. A "<-- EDIT CRIME -->" prefix targets the crime table."""
        if phrase.startswith("<-- EDIT CRIME -->"):
            phrase = phrase.replace("<-- EDIT CRIME -->", "")
            table = "crime"
        else:
            table = "shovel"
        pcheck = await self.bot.pool.fetchrow(f"SELECT * FROM {table} WHERE id = $1", pid)
        if pcheck is None:
            await self._send_phrase_not_found(ctx, pid)
            return
        await self.bot.pool.execute(f"UPDATE {table} SET name = $1 WHERE id = $2", phrase, pid)
        em = discord.Embed(color = discord.Color.dark_red())
        em.add_field(name = "Phrase Updated", value = f"Phrase #{pid} has been updated")
        await ctx.send(embed = em)
        if table == "shovel":
            await self.update_shovel()
        else:
            await self.update_crime()

    @phrase.command(aliases = ["remove"])
    @commands.is_owner()
    async def delete(self, ctx, pid: int, crime: bool = False):
        """Delete phrase *pid* from the shovel table, or the crime table when *crime* is True."""
        if crime:
            table = "crime"
        else:
            table = "shovel"
        pcheck = await self.bot.pool.fetchrow(f"SELECT * FROM {table} WHERE id = $1", pid)
        if pcheck is None:
            await self._send_phrase_not_found(ctx, pid)
            return
        await self.bot.pool.execute(f"DELETE FROM {table} WHERE id = $1", pid)
        em = discord.Embed(color = discord.Color.dark_red())
        em.add_field(name = "Phrase Removed", value = f"Phrase #{pid} has been removed")
        await ctx.send(embed = em)
        if table == "shovel":
            await self.update_shovel()
        else:
            await self.update_crime()

    # crimephrase and its subcommands are thin wrappers that delegate to
    # the phrase group with the crime table selected.
    @commands.group(invoke_without_command = True)
    async def crimephrase(self, ctx, pid: int):
        await ctx.invoke(self.bot.get_command("phrase"), pid = pid, u = "crime")

    @crimephrase.command(name = "add")
    @commands.is_owner()
    async def crime_add(self, ctx, fate: bool, *, phrase: str):
        phrase = "<-- ADD CRIME -->" + phrase
        await ctx.invoke(self.bot.get_command("phrase add"), fate = fate, phrase = phrase)

    @crimephrase.command(name = "edit")
    @commands.is_owner()
    async def crime_edit(self, ctx, pid: int, *, phrase: str):
        phrase = "<-- EDIT CRIME -->" + phrase
        await ctx.invoke(self.bot.get_command("phrase edit"), pid = pid, phrase = phrase)

    @crimephrase.command(name = "delete", aliases = ["remove"])
    @commands.is_owner()
    async def crime_delete(self, ctx, pid: int):
        await ctx.invoke(self.bot.get_command("phrase delete"), pid = pid, crime = True)
def setup(bot):
bot.add_cog(EconomyPhrases(bot)) | StarcoderdataPython |
3263049 | <filename>algs4/prim_mst.py
"""
* Execution: python prim_mst.py filename.txt
* Data files: https://algs4.cs.princeton.edu/43mst/tinyEWG.txt
* https://algs4.cs.princeton.edu/43mst/mediumEWG.txt
* https://algs4.cs.princeton.edu/43mst/largeEWG.txt
*
* Compute a minimum spanning forest using a lazy version of Prim's
* algorithm.
*
* % python prim_mst.py tinyEWG.txt
* 0-7 0.16000
* 1-7 0.19000
* 0-2 0.26000
* 2-3 0.17000
* 5-7 0.28000
* 4-5 0.35000
* 6-2 0.40000
* 1.81000
*
* % python prim_mst.py mediumEWG.txt
* 0-225 0.02383
* 49-225 0.03314
* 44-49 0.02107
* 44-204 0.01774
* 49-97 0.03121
* 202-204 0.04207
* 176-202 0.04299
* 176-191 0.02089
* 68-176 0.04396
* 58-68 0.04795
* 10.46351
*
* % python prim_mst.py largeEWG.txt
* ...
* 647.66307
*
"""
from algs4.edge_weighted_graph import EdgeWeightedGraph
from algs4.index_min_pq import IndexMinPQ
class PrimMST:
    """Minimum spanning forest via the eager version of Prim's algorithm.

    Uses an indexed min-priority queue keyed on distTo[] — the weight of
    the lightest edge connecting each vertex to the growing tree. Running
    Prim from every unvisited vertex yields a spanning *forest* on
    disconnected graphs.
    """

    def __init__(self, g):
        # edgeTo[v]: lightest known edge connecting v to the tree.
        self.edgeTo = [None for _ in range(g.V)]
        # distTo[v]: weight of that edge; infinity until v is reached.
        self.distTo = [float("inf") for _ in range(g.V)]
        self.marked = [False for _ in range(g.V)]
        self.pq = IndexMinPQ(g.V)
        for v in range(g.V):
            if not self.marked[v]:
                self.prim(g, v)

    def prim(self, g, s):
        """Grow the tree containing source vertex s."""
        self.distTo[s] = 0
        self.pq.insert(s, self.distTo[s])
        while not self.pq.is_empty():
            v = self.pq.del_min()
            self.visit(g, v)

    def visit(self, g, v):
        """Add v to the tree and relax its incident edges."""
        self.marked[v] = True
        for e in g.adj[v]:
            w = e.other(v)
            if self.marked[w]:
                continue  # w already in the tree; edge would form a cycle
            if e.weight < self.distTo[w]:
                # Found a cheaper connection for w; record it and update
                # (or add) its priority-queue entry.
                self.distTo[w] = e.weight
                self.edgeTo[w] = e
                if self.pq.contains(w):
                    self.pq.decrease_key(w, self.distTo[w])
                else:
                    self.pq.insert(w, self.distTo[w])

    def edges(self):
        """Return the MST/forest edges (tree roots have no incoming edge).

        BUG FIX (idiom): compares with ``is not None`` instead of the
        original ``!= None`` identity-vs-equality mix-up.
        """
        return [e for e in self.edgeTo if e is not None]

    def weight(self):
        """Return the total weight of the spanning forest."""
        return sum(e.weight for e in self.edges())
if __name__ == "__main__":
import sys
g = EdgeWeightedGraph(file=open(sys.argv[1]))
mst = PrimMST(g)
for e in mst.edges():
print(e)
print("%.5f" % mst.weight())
| StarcoderdataPython |
4835338 | #!/usr/bin/env python
"""
This class reads sqlalchemy schema metadata in order to construct
joins for an arbitrary query.
Review all the foreign key links.
"""
__author__ = "<NAME> <<EMAIL>>"
__revision__ = "$Revision: 1.11 $"
class LinkObj(object):
    """Encapsulates a single foreign-key link between two table columns.

    Attributes mirror a sqlalchemy ForeignKey: ``ltable.lcolumn``
    references ``rtable.rcolumn``. Instances compare — and now hash — by
    ``name`` only, so they can be stored in sets (see CLinkObj.linklist).
    """

    def __init__(self, sqlalchemyfk=None):
        """Initialize from a sqlalchemy ForeignKey, or empty when None."""
        if sqlalchemyfk:
            self.name = sqlalchemyfk.name
            self.lcolumn = sqlalchemyfk.parent.name
            self.rcolumn = sqlalchemyfk.column.name
            self.ltable = sqlalchemyfk.parent.table.name
            self.rtable = sqlalchemyfk.column.table.name
            self.fkey = sqlalchemyfk
        else:
            self.name = ""
            self.lcolumn = ""
            self.rcolumn = ""
            self.ltable = ""
            self.rtable = ""

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.name == other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ makes the class
        # unhashable in Python 3, which breaks set membership — e.g.
        # CLinkObj.linklist.add(link) raised TypeError. Hash by the same
        # key __eq__ compares.
        return hash(self.name)

    def set(self, name, ltable, rtable, lcolumn, rcolumn):
        """Populate the link explicitly from table/column names."""
        self.name = name
        self.ltable = ltable
        self.rtable = rtable
        self.lcolumn = lcolumn
        self.rcolumn = rcolumn
class CLinkObj(object):
    """A composite link between exactly two tables.

    Aggregates one or more foreign-key/LinkObj links that all join the
    same ``ltable -> rtable`` pair; the parallel ``lcolumn``/``rcolumn``
    lists hold the column pairs. Instances compare — and now hash — by
    ``name`` only.
    """

    def __init__(self, foreignkeys=None, name=None):
        """Initialize CLinkObj with name.

        ltable/rtable are single table names; lcolumn/rcolumn are the
        parallel column lists built from *foreignkeys*.
        """
        self.name = name
        self.lcolumn = []
        self.rcolumn = []
        self.ltable = None
        self.rtable = None
        self.linklist = set()
        self.fks = foreignkeys
        if foreignkeys is not None:  # idiom fix: was ``!= None``
            for fks in foreignkeys:
                link = LinkObj(fks)
                if self.ltable is None:  # first link fixes the table pair
                    self.ltable = link.ltable
                    self.rtable = link.rtable
                if self.ltable != link.ltable or self.rtable != link.rtable:
                    raise Exception("""conflict on links, different direction
                    or more than three table involving.""")
                self.lcolumn.append(link.lcolumn)
                self.rcolumn.append(link.rcolumn)
                self.linklist.add(link)
        # Heuristic fields used by the join planner elsewhere.
        self.weight = 0
        self.indexed = 1

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.name == other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # BUG FIX: __eq__ without __hash__ made instances unhashable in
        # Python 3; hash by the same key __eq__ compares.
        return hash(self.name)

    def set(self, name, links):
        """Populate the composite link from an iterable of LinkObjs."""
        self.name = name
        for link in links:
            # NOTE(review): assigning before comparing means the conflict
            # check below can never fire (dead code, preserved from the
            # original); __init__ uses the correct first-link pattern.
            self.ltable = link.ltable
            self.rtable = link.rtable
            if self.ltable != link.ltable or self.rtable != link.rtable:
                raise Exception("""conflict on links, different direction
                or more than three table involving.""")
            self.lcolumn.append(link.lcolumn)
            self.rcolumn.append(link.rcolumn)
        self.linklist = links
| StarcoderdataPython |
3321847 | <gh_stars>0
class Reply:
    """A reply message carrying an auto-assigned numeric id.

    ``count_id`` is a class-level counter shared by all instances; each
    construction increments it and stamps the new value on the instance.
    Getter/setter methods are kept for interface compatibility with
    existing callers.
    """

    count_id = 0  # class-wide counter (was the odd two-digit literal ``00``)

    def __init__(self, reply):
        Reply.count_id += 1
        self.__reply_id = Reply.count_id
        self.__reply = reply

    def get_reply(self):
        """Return the reply text."""
        return self.__reply

    def get_reply_id(self):
        """Return this reply's numeric id."""
        return self.__reply_id

    def set_reply(self, reply):
        """Replace the reply text."""
        self.__reply = reply

    def set_reply_id(self, reply_id):
        """Override the auto-assigned id (does not touch the counter)."""
        self.__reply_id = reply_id
| StarcoderdataPython |
3214812 | # Signals that fires when a user logs in and logs out
from django.contrib.auth import user_logged_in, user_logged_out
from django.dispatch import receiver
from .models import LoggedInUser
@receiver(user_logged_in)
def on_user_logged_in(sender, request, **kwargs):
    # Record (or reuse) the LoggedInUser row for this user and remember the
    # current session key so the active session can be identified later.
    logged_in_user_instance, _ = LoggedInUser.objects.get_or_create(
        user=kwargs.get("user")
    )
    logged_in_user_instance.session_key = request.session.session_key
    logged_in_user_instance.save()
@receiver(user_logged_out)
def on_user_logged_out(sender, **kwargs):
    # Drop the tracking row: the user no longer has a live session.
    LoggedInUser.objects.filter(user=kwargs.get("user")).delete()
| StarcoderdataPython |
67633 | <gh_stars>1-10
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from statsmodels.tsa.ar_model import AR
import statsmodels.api as sm
from time import time
class diff_integ:
    def __init__(self, seasons):
        """
        Differentiation and Integration Module
        This class is needed to bring series to stationarity
        and perform inverse operation.
        Parameters
        ----------
        Seasons : list of int
            List of lags for differentiation.
        Returns
        -------
        self : object
        Examples
        --------
        >>> import arimafd as oa
        >>> dif = oa.diff_integ([1])
        >>> my_array=[1,2,3,4,5]
        >>> dif.fit_transform(my_array)
        array([1, 1, 1, 1])
        >>> dif.transform(6)
        1
        >>> dif.inverse_transform(1)
        7.0
        """
        # Comments: 1) The algorithm is not optimized, in terms of adding new
        # element in dictionary instead adding new dictionary
        self.seasons = seasons

    def fit_transform(self, data, return_diff=True):
        """
        Fit the model and transform data according to the given training data.
        Parameters
        ----------
        data : array-like, shape (n_samples,)
            Training data, where n_samples is the number of samples
        return_diff, optional (default=True)
            Returns the differentiated array
        Returns
        -------
        If return_diff = True: data_new : array-like, shape (n_samples - sum_seasons,)
            where sum_seasons is sum of all lags
        """
        self.data = np.array(data)
        data = np.array(data)
        # NOTE(review): on too-short input this only prints and returns None
        # instead of raising — callers must check for that.
        if (len(data)-sum(self.seasons) <= sum(self.seasons)) or (len(self.seasons) < 1):
            print('Error: too small lengths of the initial array')
        else:
            # Per-lag bookkeeping, keyed by differencing stage i:
            #   Minuend[i] - Subtrahend[i] = Difference[i]
            # additional_term[i] stores the last value needed to invert stage i.
            self.Minuend = {}
            self.Difference = {}
            self.Subtrahend = {}
            self.Sum_insstead_Minuend = {}
            self.additional_term = {}
            # process of differentiation: stage 0 differences the raw series
            # at lag seasons[0] ...
            self.Minuend[0] = data[self.seasons[0]:]
            self.Subtrahend[0] = data[:-self.seasons[0]]
            self.Difference[0] = self.Minuend[0]-self.Subtrahend[0]
            self.additional_term[0] = data[-self.seasons[0]]
            # ... and each later stage differences the previous stage's output.
            for i in range(1, len(self.seasons)):
                self.Minuend[i] = self.Difference[i-1][self.seasons[i]:]
                self.Subtrahend[i] = self.Difference[i-1][:-self.seasons[i]]
                self.Difference[i] = self.Minuend[i]-self.Subtrahend[i]
                self.additional_term[i] = self.Difference[i-1][-self.seasons[i]]
            if return_diff:
                return self.Difference[len(self.seasons)-1]

    def transform(self, point):
        """
        Differentiation to the series data that were
        in method fit_transform and plus all the points that
        were in this method.
        Parameters
        ----------
        point : float
            Add new point to self.data
        Returns
        -------
        Array-like, shape (n_samples + n*n_points - sum_seasons,)
        """
        # Refits on data + point and returns the newest differenced value.
        return self.fit_transform(np.append(self.data, point), return_diff=True)[-1]

    def inverse_fit_transform0(self):
        """
        Return inital data for check class
        """
        # Undo the differencing stage by stage; j tracks how many trailing
        # elements of each Subtrahend must be skipped at this depth.
        self.Sum_insstead_Minuend[len(self.seasons)] = self.Difference[len(self.seasons)-1]
        j = 0
        for i in range(len(self.seasons)-1, -1, -1):
            self.Sum_insstead_Minuend[i] = self.Sum_insstead_Minuend[i+1]+self.Subtrahend[i][sum(self.seasons[::-1][:j]):]
            j += 1
        return self.Sum_insstead_Minuend[0]

    def inverse_transform(self, new_value):
        """
        Return last element after integration.
        (Forecasting value in initial dimension)
        Parameters
        ----------
        new_value : float
            New value in differentiated series
        Returns
        -------
        Integrated value, float
        """
        self.new_value = new_value
        # Integrate stage by stage by adding back each stage's stored
        # additional_term, from the deepest difference up to the raw scale.
        self.Sum_insstead_Minuend[len(self.seasons)] = self.new_value
        for i in range(len(self.seasons)-1, -1, -1):
            self.Sum_insstead_Minuend[i] = self.Sum_insstead_Minuend[i+1] + self.additional_term[i]
        new_value1 = float(self.Sum_insstead_Minuend[0])
        # Trick: refit here on the extended series so callers don't have to
        # run fit_transform again themselves (translated from Russian).
        self.fit_transform(np.append(self.data, new_value1), return_diff=False)
        return new_value1
| StarcoderdataPython |
99366 | import logging
import discord
from discord.ext import commands
class Errors(commands.Cog, name="Error handler"):
def __init__(self, bot):
self.bot = bot
self.logger = logging.getLogger(__name__)
@commands.Cog.listener()
async def on_ready(self):
self.logger.info("I'm ready!")
@commands.Cog.listener()
async def on_command_error(self, ctx, err):
if isinstance(err, commands.ConversionError):
await ctx.send(err)
elif isinstance(err, commands.MissingRequiredArgument):
await ctx.send(f"Missing required argument: `{err.param}`")
elif isinstance(err, commands.CommandInvokeError):
await ctx.send(err)
elif isinstance(err, commands.BadArgument):
await ctx.send(err)
elif isinstance(err, commands.ArgumentParsingError):
await ctx.send(err)
elif isinstance(err, commands.PrivateMessageOnly):
await ctx.send("This command can only be used in PMs.")
elif isinstance(err, commands.NoPrivateMessage):
await ctx.send("This command can only be used in Guilds.")
elif isinstance(err, commands.MissingPermissions):
perms = ", ".join(
f"`{perm.replace('_', ' ').title()}`" for perm in err.missing_perms
)
await ctx.send(f"You're missing the permissions: {perms}")
elif isinstance(err, commands.BotMissingPermissions):
perms = ", ".join(
f"`{perm.replace('_', ' ').title()}`" for perm in err.missing_perms
)
await ctx.send(f"I'm missing the permissions: {perms}")
elif isinstance(err, commands.DisabledCommand):
await ctx.send(f"`{ctx.command.qualified_name}` is currently disabled.")
elif isinstance(err, discord.HTTPException):
await ctx.send(
"An error occurred while I was trying to execute a task. Are you sure I have the correct permissions?"
)
else:
self.logger.error(err)
def setup(bot):
    # discord.py extension entry point: register the cog when the
    # extension is loaded.
    bot.add_cog(Errors(bot))
| StarcoderdataPython |
1717936 | <filename>spinoffs/oryx/oryx/experimental/nn/combinator.py
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Contains combinator layers."""
from jax import random
from oryx.core import state
from oryx.experimental.nn import base
__all__ = [
'Serial',
]
class Serial(base.Layer):
  """Layer that executes a sequence of child layers."""

  @classmethod
  def initialize(cls, init_key, *args):
    """Initializes Serial Layer.
    Args:
      init_key: Random key.
      *args: Contains input specs and layer_inits.
    Returns:
      Tuple with the output spec and the LayerParams.
    """
    # The trailing argument is the list of child layer initializers; the
    # leading arguments are the input specs fed to the first child.
    in_specs, layer_inits = args[:-1], args[-1]
    layers = state.init(list(layer_inits), name='layers')(init_key, *in_specs)
    return base.LayerParams(tuple(layers))

  @classmethod
  def spec(cls, *args):
    # Same argument convention as initialize; returns the output spec of
    # the chained layer_inits applied to the input specs.
    in_specs, layer_inits = args[:-1], args[-1]
    return state.spec(list(layer_inits))(*in_specs)

  @property
  def state(self):
    # Aggregate state is the tuple of the children's states.
    return tuple(l.state for l in self.params)

  def _call(self, *args, rng=None, **kwargs):
    """Applies the serial sequence of layers to the input x.
    Args:
      *args: inputs to the Serial call.
      rng: an optional PRNGKey that will be threaded through the layers.
      **kwargs: keyword arguments to be passed to the layers.
    Returns:
      The result of applying a sequence of layers to args.
    """
    return self._call_and_update(*args, rng=rng, **kwargs)[0]

  def _update(self, *args, rng=None, **kwargs):
    return self._call_and_update(*args, rng=rng, **kwargs)[1]

  def _call_and_update(self, *args, rng=None, **kwargs):
    """Returns a Serial object with updated layer states."""
    layers_out = []
    for layer in self.params:
      # Each child may return a bare value or a tuple; normalize to a
      # tuple so it can be splatted into the next child.
      if not isinstance(args, tuple):
        args = (args,)
      # Split the key so each child sees an independent PRNG stream.
      if rng is not None:
        rng, subrng = random.split(rng)
      else:
        subrng = None
      args, new_layer = state.call_and_update(layer, *args, rng=subrng,
                                              **kwargs)  # pylint: disable=assignment-from-no-return
      layers_out.append(new_layer)
    return args, self.replace(params=tuple(layers_out))

  def flatten(self):
    """Converts the Layer to a tuple suitable for PyTree."""
    # Children's leaves become this node's leaves; classes/aux data are
    # kept in the static portion so unflatten can rebuild each child.
    children_cls = tuple(l.__class__ for l in self.params)
    xs, children_data = zip(*tuple(l.flatten() for l in self.params))
    data = (children_cls, children_data, self.name)
    return xs, data

  @classmethod
  def unflatten(cls, data, xs):
    """Reconstruct the Layer from the PyTree tuple."""
    children_cls, children_data, name = data[0], data[1], data[2]
    layers = tuple(c.unflatten(d, x) for c, x, d in
                   zip(children_cls, xs, children_data))
    # __new__/__init__ split mirrors how JAX rebuilds pytrees without
    # re-running layer initialization.
    layer = object.__new__(cls)
    layer_params = base.LayerParams(layers)
    layer.__init__(layer_params, name=name)
    return layer

  def __str__(self):
    """String representation of the Layer."""
    return ' >> '.join(map(str, self.params))
| StarcoderdataPython |
3336826 | import pygame
import os
from save import is_disabled
pygame.init()
class GUI:
def __init__(self):
self.project_path = os.path.join(os.path.dirname(__file__), "images")
self.board_img = pygame.image.load(os.path.join(self.project_path, "board.png"))
self.figures_images = self.load_figures_images()
self.bg_images = self.load_bg_images()
self.screen_images = self.load_screen_images()
self.screen = pygame.display.set_mode((600, 600))
self.show_menu(is_disabled(True), is_disabled(False))
pygame.display.update()
def show_board(self, board):
self.screen.blit(self.board_img, (0, 0))
for row in board:
for area in row:
if area.checker is not None:
self.screen.blit(self.figures_images[area.checker.image_id], (area.posx, area.posy))
def load_figures_images(self):
figures_images = []
fig = ['_king.png', '_queen.png', '_rook.png', '_knight.png', '_bishop.png', '_pawn.png']
for checker in fig:
for color in 'BW':
figures_images.append(pygame.image.load(os.path.join(self.project_path, color + checker)))
figures_images.append('dummy')
return figures_images
def load_bg_images(self):
bg_names = ["gray_circle.png", "W_background.png", "B_background.png"]
bg_images = []
for image_name in bg_names:
bg_images.append(pygame.image.load(os.path.join(self.project_path, image_name)))
return bg_images
def load_screen_images(self):
image_names = ["menu.png", "W_win.png", "B_win.png", "classic.png", "disabled_button.png"]
screen_images = []
for image_name in image_names:
screen_images.append(pygame.image.load(os.path.join(self.project_path, image_name)))
return screen_images
def show_menu(self, disabled_bot, disabled_player):
self.screen.blit(self.screen_images[3], (221, 316))
self.screen.blit(self.screen_images[0], (0, 0))
if not disabled_bot:
self.screen.blit(self.screen_images[4], (403, 444))
if not disabled_player:
self.screen.blit(self.screen_images[4], (17, 446))
pygame.display.update()
def won(self, color):
if color == 1:
self.screen.blit(self.screen_images[1], (0, 0))
else:
self.screen.blit(self.screen_images[2], (0, 0))
pygame.display.update()
def show_figure(self, pos, image_id):
self.screen.blit(self.figures_images[image_id], pos)
def what_must_update(self, pos, board, size):
upd_areas = []
vertices = [(pos[0], pos[1]), (pos[0] + 75, pos[1]), (pos[0], pos[1] + 75), (pos[0] + 75, pos[1] + 75)]
if size == 'big':
add_vertices = [(pos[0] + 37.5, pos[1] - 37.5), (pos[0] + 37.5, pos[1] + 37.5 + 75),
(pos[0] - 37.5, pos[1] + 37.5), (pos[0] + 37.5 + 75, pos[1] - 37.5)]
vertices += add_vertices
for row in range(8):
for column in range(8):
for ver in vertices:
area = board[row][column]
if area.posx <= ver[0] <= area.posx + 75 and area.posy <= ver[1] <= area.posy + 75:
upd_areas.append((row, column))
return upd_areas
def show_possible_moves(self, positions, board):
for pos in positions:
area = board[pos[0]][pos[1]]
self.screen.blit(self.bg_images[0], (area.posx, area.posy))
def update(self, positions, board):
for pos in positions:
area = board[pos[0]][pos[1]]
self.screen.blit(self.bg_images[area.bg_color()], (area.posx, area.posy))
if area.checker is not None:
self.screen.blit(self.figures_images[area.checker.create_image_id()], (area.posx, area.posy))
def normal_animation(self, start_pos, new_pos, board):
addx = (new_pos.posx - start_pos.posx) / 60
addy = (new_pos.posy - start_pos.posy) / 60
x, y = start_pos.posx, start_pos.posy
clock = pygame.time.Clock()
self.show_board(board)
while new_pos.posy + addy != y or new_pos.posx + addx != x:
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
self.update(self.what_must_update((x, y), board, 'small'), board)
self.screen.blit(self.bg_images[start_pos.bg_color()], (start_pos.posx, start_pos.posy))
self.show_figure((x, y), start_pos.checker.create_image_id())
pygame.display.update()
x += addx
y += addy
clock.tick(120)
def castling_animation(self, start_pos, new_pos, board):
    """Animate castling: the king slides from start_pos to new_pos while the
    rook slides in from its corner on the same rank.
    """
    k_addx = (new_pos.posx - start_pos.posx) / 60  # king's step per frame
    k_x, k_y = start_pos.posx, start_pos.posy
    # pick the rook's rank (y) and the background-colour lookup for that rank
    if new_pos.posy == 0:
        r_y = 0
        bg_color = [1, 2]
    else:
        bg_color = [2, 1]
        r_y = 525
    if new_pos.posx == 150:
        # king lands on x=150: rook starts at x=0 and travels right
        bg_id = 0
        bg_x = r_x = 0
        r_addx = (150 - 0) / 40
    else:
        # otherwise: rook starts at x=525 and travels left to x=450
        bg_id = 1
        bg_x = r_x = 525
        r_addx = (450 - 525) / 30
    # NOTE(review): the rook advances in 40 or 30 frames while the loop below
    # terminates on the king's 60-step float position only — presumably the
    # rook overdraws past its target square; confirm against the rendering.
    clock = pygame.time.Clock()
    self.show_board(board)
    while new_pos.posx + k_addx != k_x:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()
        # repaint squares under both moving sprites, then redraw everything
        self.update(self.what_must_update((r_x, r_y), board, 'small'), board)
        self.update(self.what_must_update((k_x, k_y), board, 'small'), board)
        self.screen.blit(self.bg_images[start_pos.bg_color()], (start_pos.posx, start_pos.posy))
        self.screen.blit(self.bg_images[bg_color[bg_id]], (bg_x, r_y))
        self.show_figure((r_x, r_y), start_pos.checker.color + 9)
        self.show_figure((k_x, k_y), start_pos.checker.create_image_id())
        pygame.display.update()
        k_x += k_addx
        r_x += r_addx
        clock.tick(120)
def kind_of_animation(self, start_pos, new_pos, kind_of_animation, board):
    """Dispatch to the castling animation or the default sliding animation."""
    if kind_of_animation == 'Castling':
        self.castling_animation(start_pos, new_pos, board)
        return
    self.normal_animation(start_pos, new_pos, board)
def update_screen(self):
    """Flush all pending drawing operations to the display."""
    # FIX: the original final line had dataset-extraction residue
    # ("| StarcoderdataPython |") fused onto it, which is a syntax error.
    pygame.display.update()
import stocklab
stocklab.bundle(__file__)
# --- StarcoderdataPython snippet boundary ---
#pragma repy
# Repy (Seattle testbed) sandbox program.  Repy's dialect derives from
# Python 2 — hence the print statement — so this snippet does not parse
# under Python 3 and must be run by the Repy interpreter.
def foo():
    print 'OK!'

# Repy invokes the file with callfunc == 'initialize' at startup.
if callfunc=='initialize':
    settimer(0.5, foo, ())  # schedule foo() to run once after 0.5 s
    exitall()  # NOTE(review): exits right away — presumably before the timer fires; confirm intended
# --- StarcoderdataPython snippet boundary ---
# <reponame>tlalexander/stitchEm
from vs import *
from camera import *
| StarcoderdataPython |
# <reponame>CHH3213/two_loggers <filename>omni_diff_rl/scripts/maddpg-master/experiments/double_ind-v1.py
# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
Doubld escape environment with discrete action space
"""
from __future__ import absolute_import, division, print_function
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import sys
import os
import math
import numpy as np
from numpy import pi
from numpy import random
import time
from multi_discrete import MultiDiscrete
import rospy
import tf
from std_srvs.srv import Empty
from gazebo_msgs.srv import SetModelState, GetModelState
from gazebo_msgs.msg import ModelState, ModelStates
from geometry_msgs.msg import Pose, Twist
class DoubleEscape:
    """Gazebo/ROS pursuit environment with two 'logger' robots.

    logger0 is the evader and logger1 the pursuer (see _get_dist /
    _compute_reward); an episode terminates when the robots come within
    0.5 m of each other.  The interface loosely follows gym (reset/step)
    but drives Gazebo through ROS services and topics.
    """

    def __init__(self):
        self.env_type = 'discrete'
        self.name = 'double_escape_discrete'
        rospy.init_node(self.name, anonymous=True, log_level=rospy.DEBUG)
        # env properties
        self.rate = rospy.Rate(1000)
        self.max_episode_steps = 1000
        # self.observation_space_shape = (2, 6) # {r1, r2, s}: x, y, x_d, y_d, th, th_d
        self.observation_space = []
        self.action_space_shape = []
        # self.action_reservoir = np.array([[1.5, pi/3], [1.5, -pi/3], [-1.5, pi/3], [-1.5, -pi/3]])
        self.action_n = np.array([[1.5, pi / 3], [1.5, -pi / 3], [-1.5, pi / 3], [-1.5, -pi / 3]])
        self.action_reservoir = []
        # one Discrete(5) action space and one Box(8) observation space per robot
        for i in range(2):
            total_action_space = []
            u_action_space = spaces.Discrete(5)  # 5
            total_action_space.append(u_action_space)
            if len(total_action_space) > 1:
                # act_space = spaces.Tuple(total_action_space)
                act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
                self.action_reservoir.append(act_space)
            else:
                self.action_reservoir.append(total_action_space[0])
            self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(8,), dtype=np.float32))
        # robot properties
        self.model_states = ModelStates()
        # self.obs = np.zeros(self.observation_space_shape)
        # self.prev_obs = np.zeros(self.observation_space_shape)
        self.obs = [[0 for i in range(8)], [0 for i in range(8)]]
        self.prev_obs = [[0 for i in range(8)], [0 for i in range(8)]]
        # self.obs = np.zeros((2, 8))
        # self.prev_obs = np.zeros((2, 8))
        self.status = ['deactivated']*2
        self.world_name = rospy.get_param('/world_name')
        self.exit_width = rospy.get_param('/exit_width')
        # services
        self.reset_world_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)
        self.unpause_physics_proxy = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self.pause_physics_proxy = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self.set_model_state_proxy = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        self.get_model_state_proxy = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        # topic publisher
        # self.cmd_vel0_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
        self.cmd_vel0_pub = rospy.Publisher("/logger0/cmd_vel", Twist, queue_size=1)
        self.cmd_vel1_pub = rospy.Publisher("/logger1/cmd_vel", Twist, queue_size=1)
        # topic subscriber
        rospy.Subscriber("/gazebo/model_states", ModelStates, self._model_states_callback)

    def pausePhysics(self):
        """Pause the Gazebo physics engine (blocks until the service is up)."""
        rospy.wait_for_service("/gazebo/pause_physics")
        try:
            self.pause_physics_proxy()
        except rospy.ServiceException as e:
            rospy.logerr("/gazebo/pause_physics service call failed")

    def unpausePhysics(self):
        """Resume the Gazebo physics engine."""
        rospy.wait_for_service("/gazebo/unpause_physics")
        try:
            self.unpause_physics_proxy()
        except rospy.ServiceException as e:
            rospy.logerr("/gazebo/unpause_physics service call failed")

    def resetWorld(self):
        """Reset the Gazebo world (model poses) without restarting the sim."""
        rospy.wait_for_service("/gazebo/reset_world")
        try:
            self.reset_world_proxy()
        except rospy.ServiceException as e:
            rospy.logerr("/gazebo/reset_world service call failed")

    def setModelState(self, model_state):
        """Push one ModelState (pose/twist of a named model) into Gazebo."""
        rospy.wait_for_service('/gazebo/set_model_state')
        try:
            self.set_model_state_proxy(model_state)
        except rospy.ServiceException as e:
            rospy.logerr("Service call failed: {}".format(e))

    def reset(self, init_pose1=None, init_pose2=None):
        """
        Reset environment
        Usage:
            obs = env.reset()
        """
        rospy.logdebug("\nStart Environment Reset")
        # set init pose
        self.resetWorld()
        self.obs = self._set_pose(init_pose1, init_pose2)
        # self.prev_obs = self.obs.copy()
        self.prev_obs = self.obs
        self.step_counter = 0
        # self.y = np.array([obs[0,1], obs[1,1]])
        # self.prev_y = self.y.copy()
        rospy.logerr("\nEnvironment Reset!!!\n")
        # print('self.obs', self.obs)
        return self.obs

    def step(self, action_n):
        """
        obs, rew, done, info = env.step(action_indices)

        action_n holds one action per robot, forwarded verbatim to
        _take_action as (linear.x, angular.z) pairs.
        """
        # print('action_indices',action_indices)
        # assert 0 <= action_indices[0] < self.action_reservoir.shape[0]
        # assert 0 <= action_indices[1] < self.action_reservoir.shape[0]
        # assert 0 <= action_indices[0] < 4
        # assert 0 <= action_indices[1] < 4
        # print('action_indices',self.action_reservoir.shape[0]) #self.action_reservoir.shape[0]=4
        rospy.logdebug("\nStart environment step")
        # actions = np.zeros((2, 2))
        actions = []
        done_n = []
        for i in range(2):
            actions.append(action_n[i])
            # print("action", action_n)
        # update status
        reward, done = self._compute_reward()
        # self.prev_obs = self.obs.copy() # make sure this happened after reward computing
        # done_n ends up holding a single shared done flag (consumed via any(d))
        done_n.append(done)
        # NOTE(review): no copy is taken here, so prev_obs and obs alias the
        # same lists; _get_observation below mutates them in place, meaning
        # the value returned at the end is the post-step observation.
        self.prev_obs = self.obs # make sure this happened after reward computing
        info = self.status
        self._get_observation()
        self._take_action(actions)
        self.step_counter += 1
        if self.step_counter >= self.max_episode_steps:
            rospy.logwarn("Step: {}, \nMax step reached...".format(self.step_counter))
        rospy.logdebug("\nEnd environment step\n")
        return self.prev_obs, reward, done_n, info

    def _set_pose(self, pose1 = None, pose2 = None):
        """
        Set double_logger with a random or given pose.

        Each pose is a (3,) array [x, y, theta]; when None, a pose is
        sampled uniformly subject to a wall-clearance rejection test.
        """
        rospy.logdebug("\nStart setting model pose")
        logger1 = ModelState()
        logger2 = ModelState()
        logger1.model_name = "logger0"
        logger2.model_name = "logger1"
        # logger_pose.reference_frame = "world"
        logger1.pose.position.z = 0.09
        logger2.pose.position.z = 0.09
        if pose1 is None: # random pose
            x1 = random.uniform(-4, 4)
            y1 = random.uniform(-4, 4)
            th1 = random.uniform(-pi, pi)
            # resample heading until the robot's 2 m offset point stays in bounds
            while any([
                np.abs(x1 + 2*np.sin(th1)) > 4.8,
                np.abs(y1 - 2*np.cos(th1)) > 4.8
            ]):
                th1 = random.uniform(-pi, pi)
            quat1 = tf.transformations.quaternion_from_euler(0, 0, th1) # quaternion
            rospy.logdebug("Set model1 pose @ {}".format((x1, y1, th1)))
        if pose2 is None:
            x2 = random.uniform(-4, 4)
            y2 = random.uniform(-4, 4)
            th2 = random.uniform(-pi, pi)
            while any([
                np.abs(x2 + 2*np.sin(th2)) > 4.8,
                np.abs(y2 - 2*np.cos(th2)) > 4.8
            ]):
                th2 = random.uniform(-pi, pi)
            quat2 = tf.transformations.quaternion_from_euler(0, 0, th2)
            rospy.logdebug("Set model2 pose @ {}".format((x2, y2, th2)))
        else: # inialize accordingly
            assert pose1.shape == (3,)
            assert pose1[0] <= 4.5
            assert pose1[1] <= 4.5
            assert -pi<= pose1[2] <= pi # theta within [-pi,pi]
            # NOTE(review): `pose` below is undefined — almost certainly meant
            # `pose1`; as written this branch raises NameError.
            assert np.abs(pose1[0] + 2*np.sin(pose[2])) <= 4.8
            assert np.abs(pose1[1] - 2*np.cos(pose[2])) <= 4.8
            assert pose2.shape == (3,)
            assert pose2[0] <= 4.5
            assert pose2[1] <= 4.5
            assert -pi<= pose2[2]<= pi # theta within [-pi,pi]
            assert np.abs(pose2[0] + 2*np.sin(pose2[2])) <= 4.8
            assert np.abs(pose2[1] - 2*np.cos(pose2[2])) <= 4.8
            x1 = pose1[0]
            y1 = pose1[1]
            th1 = pose1[2]
            x2 = pose2[0]
            y2 = pose2[1]
            th2 = pose2[2]
            quat1 = tf.transformations.quaternion_from_euler(0, 0, th1)
            quat2 = tf.transformations.quaternion_from_euler(0, 0, th2)
        rospy.logdebug("Set model pose1 @ {}".format(logger1.pose))
        rospy.logdebug("Set model pose2 @ {}".format(logger2.pose))
        logger1.pose.position.x = x1
        logger1.pose.position.y = y1
        logger1.pose.orientation.z = quat1[2]
        logger1.pose.orientation.w = quat1[3]
        logger2.pose.position.x = x2
        logger2.pose.position.y = y2
        logger2.pose.orientation.z = quat2[2]
        logger2.pose.orientation.w = quat2[3]
        # set pose until on spot: zero the velocities, place both models,
        # then read back one observation before pausing physics again
        self.unpausePhysics()
        zero_vel = np.zeros((2,2))
        self._take_action(zero_vel)
        self.setModelState(model_state=logger1)
        self.setModelState(model_state=logger2)
        self._take_action(zero_vel)
        self._get_observation()
        self.pausePhysics()
        rospy.logdebug("\nEnd setting model pose")
        return self.obs

    def _get_dist(self):
        """Refresh self.obs in place from the latest Gazebo model states and
        return the planar distance between the two robots."""
        id_logger0 = self.model_states.name.index("logger0") # evader
        id_logger1 = self.model_states.name.index("logger1") # pursuer
        logger_pose0 = self.model_states.pose[id_logger0]
        logger_twist0 = self.model_states.twist[id_logger0]
        logger_pose1 = self.model_states.pose[id_logger1]
        logger_twist1 = self.model_states.twist[id_logger1]
        quat0 = [
            logger_pose0.orientation.x,
            logger_pose0.orientation.y,
            logger_pose0.orientation.z,
            logger_pose0.orientation.w
        ]
        quat1 = [
            logger_pose1.orientation.x,
            logger_pose1.orientation.y,
            logger_pose1.orientation.z,
            logger_pose1.orientation.w
        ]
        euler0 = tf.transformations.euler_from_quaternion(quat0)
        euler1 = tf.transformations.euler_from_quaternion(quat1)
        # self.obs = np.array([[0 for i in range(8)] ,[0 for i in range(8)] ])
        # self.obs = [[0 for i in range(8)] ,[0 for i in range(8)]]
        # print('dsfds', self.obs)
        # per-robot observation layout: [x, y, vx, vy, yaw, yaw_rate, 0, 0]
        self.obs[0][0] = logger_pose0.position.x
        self.obs[0][1] = logger_pose0.position.y
        self.obs[0][2] = logger_twist0.linear.x
        self.obs[0][3] = logger_twist0.linear.y
        self.obs[0][4] = euler0[2]
        self.obs[0][5] = logger_twist0.angular.z
        self.obs[0][6] = 0
        self.obs[0][7] = 0
        self.obs[1][0] = logger_pose1.position.x
        self.obs[1][1] = logger_pose1.position.y
        self.obs[1][2] = logger_twist1.linear.x
        self.obs[1][3] = logger_twist1.linear.y
        self.obs[1][4] = euler1[2]
        self.obs[1][5] = logger_twist1.angular.z
        self.obs[1][6] = 0
        self.obs[1][7] = 0
        pos = [0,0]
        pos[0] = self.obs[0][0] - self.obs[1][0]
        pos[1] = self.obs[0][1] - self.obs[1][1]
        dist = np.sqrt(np.sum(np.square(pos)))
        # print("dist", dist)
        return dist

    def _get_observation(self):
        """
        Get observation of double_logger's state
        Args:
        Returns:
            obs: array([...pose+vel0...,pose+vell...pose+vel1...])

        Note: the observation itself is written into self.obs by _get_dist;
        this method additionally updates self.status from the distance.
        """
        rospy.logdebug("\nStart getting observation")
        dist = self._get_dist()
        # within 0.5 m the pursuer has caught the evader
        if dist <= 0.5:
            self.status[0] = "trapped"
            self.status[1] = "catch it"
        else:
            self.status[0] = "gaming"
            self.status[1] = "gaming"
        rospy.logdebug("\nEnd getting observation")

    def _take_action(self, actions):
        """
        Publish cmd_vel according to an action index
        Args:
            i_act: array([ia0, ia1]) — each entry is (linear.x, angular.z)
        Returns:
        """
        rospy.logdebug("\nStart Taking Action")
        cmd_vel0 = Twist()
        # print('fgh', actions)
        cmd_vel0.linear.x = actions[0][0]
        cmd_vel0.angular.z = actions[0][1]
        cmd_vel1 = Twist()
        cmd_vel1.linear.x = actions[1][0]  # points toward the robot's front; linear.y would point left and is usually 0
        cmd_vel1.angular.z = actions[1][1]  # angular velocity
        # republish for 50 ticks so the command is applied while physics runs
        self.unpausePhysics()
        for _ in range(50):
            self.cmd_vel0_pub.publish(cmd_vel0)
            self.cmd_vel1_pub.publish(cmd_vel1)
            self.rate.sleep()
        rospy.logdebug("cmd_vel0: {} \ncmd_vel1: {}".format(cmd_vel0, cmd_vel1))
        self.pausePhysics()
        rospy.logdebug("\nEnd Taking Action\n")

    def _compute_reward(self):
        """
        Compute reward and done based on current status
        Return:
            reward: np.zeros(2) — [evader reward, pursuer reward]
            done
        """
        rospy.logdebug("\nStart Computing Reward")
        dist = self._get_dist()
        reward, done = np.zeros(2), False
        if any([
            'trapped' in self.status,
            'catch it' in self.status
        ]):
            # capture: evader penalised, pursuer rewarded, episode ends
            reward[0] = -100
            reward[1] = +100
            done = True
        else:
            # shaping: evader gains with distance, pursuer loses with distance
            reward[0] += 0.1 * dist
            reward[1] -= +0.1 * dist
        rospy.logdebug("\nEnd Computing Reward\n")
        return reward, done

    def _model_states_callback(self, data):
        # cache the latest /gazebo/model_states message for _get_dist
        self.model_states = data
if __name__ == "__main__":
env = DoubleEscape()
num_steps = env.max_episode_steps
obs = env.reset()
ep, st = 0, 0
o = env.reset()
for t in range(num_steps):
a = [np.random.randint(0, 4, size=2) for i in range(2)]
# print('sd', a)
# gaz# a = [1, 3]
o, r, d, i = env.step(a)
st += 1
rospy.loginfo("\n-\nepisode: {}, step: {} \nobs: {}, act: {}, reward: {}, done: {}, info: {}".format(ep+1, st, o, a, r, d, i))
if any(d):
ep += 1
st = 0
obs = env.reset()
# --- StarcoderdataPython snippet boundary ---
# <reponame>gzy403999903/seahub
import os
import json
from django.core.urlresolvers import reverse
from seaserv import seafile_api
from seahub.test_utils import BaseTestCase
from tests.common.utils import randstring
class RepoTrashTest(BaseTestCase):
    """API tests for the 'api-v2.1-repo-trash' endpoint: listing and
    cleaning a library's trash."""

    def setUp(self):
        # fixture library owned by self.user, containing one file and one folder
        self.user_name = self.user.username
        self.admin_name = self.admin.username
        self.repo_id = self.repo.id
        self.repo_name = self.repo.repo_name
        self.file_path = self.file
        self.file_name = os.path.basename(self.file_path)
        self.folder_path = self.folder
        self.folder_name = os.path.basename(self.folder.rstrip('/'))
        self.url = reverse('api-v2.1-repo-trash', args=[self.repo_id])

    def tearDown(self):
        self.remove_repo()
        self.remove_group()

    def test_can_get(self):
        # delete a file first, so the trash has one (file) entry
        seafile_api.del_file(self.repo_id, '/',
                self.file_name, self.user_name)
        self.login_as(self.user)
        resp = self.client.get(self.url)
        self.assertEqual(200, resp.status_code)
        json_resp = json.loads(resp.content)
        assert json_resp['data'][0]['obj_name'] == self.file_name
        assert json_resp['data'][0]['is_dir'] == False

    def test_can_not_get_with_invalid_repo_permission(self):
        # admin does not own the library, so listing its trash is forbidden
        self.login_as(self.admin)
        resp = self.client.get(self.url)
        self.assertEqual(403, resp.status_code)

    def test_can_not_get_with_invalid_path_parameter(self):
        invalid_path = randstring(6)
        self.login_as(self.admin)
        resp = self.client.get(self.url + '?path=%s' % invalid_path)
        self.assertEqual(404, resp.status_code)

    def test_can_clean_library_trash(self):
        # delete a file first
        seafile_api.del_file(self.repo_id, '/',
                self.file_name, self.user_name)
        self.login_as(self.user)
        # get trash item count
        resp = self.client.get(self.url)
        json_resp = json.loads(resp.content)
        assert len(json_resp['data']) > 0
        # clean library trash
        resp = self.client.delete(self.url)
        self.assertEqual(200, resp.status_code)
        # get trash item count again
        resp = self.client.get(self.url)
        json_resp = json.loads(resp.content)
        assert len(json_resp['data']) == 0

    def test_can_not_clean_with_invalid_user_permission(self):
        self.login_as(self.admin)
        resp = self.client.delete(self.url)
        self.assertEqual(403, resp.status_code)
# --- StarcoderdataPython snippet boundary ---
# Crie um programa onde o usuário possa digitar vários valores numéricos e
# cadastre-os em uma lista. Caso o número já exista lá dentro, ele não será
# adicionado. No final, serão exibidos todos os valores únicos digitados,
# em ordem crescente.
valor = []
while True:
    v = int(input('Digite um valor: '))
    if v not in valor:
        valor.append(v)
        print('Valor adcionado!!')
    else:
        print('Valor duplicado!')
    p = str(input('Quer continuar? [S/N]')).upper()
    # BUG FIX: the original `if p in 'N'` also matched the empty string
    # (pressing Enter alone ended the loop); compare for equality instead.
    if p == 'N':
        break
# show the unique values in ascending order
valor.sort()
print(f'Você digitou os numeros {valor}')
import os
import numpy as np
import subprocess
from sklearn.metrics import f1_score, accuracy_score
from utils import *
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
data_dir = "../data/inference_data/"
mode = "unlabeled" # real, fake, or unlabeled
pretrained_model_name = 'saved_model_240_8_32_0.05_1_50_0_0.0001_100_156_2_True_True_fitted_objects.h5'
print(f"Loading inference data from {os.path.join(data_dir,mode)}")
print(f"Loading pretrained model {pretrained_model_name}")
# preprocess the files
processed_data = preprocess_from_ray_parallel_inference(data_dir, mode, use_parallel=True)
processed_data = sorted(processed_data, key = lambda x: len(x[0]))
# Load trained model
discriminator = Discriminator_Model(load_pretrained=True, saved_model_name=pretrained_model_name, real_test_mode=False)
# Do inference
if mode == 'unlabeled':
# Visualize the preprocessed data
plot_spectrogram(processed_data[0], path='visualize_inference_spectrogram.png')
print("The probability of the clip being real is: {:.2%}".format(
discriminator.predict_labels(processed_data, raw_prob=True, batch_size=20)[0][0]))
else:
features = [x[0] for x in processed_data]
labels = [x[1] for x in processed_data]
preds = discriminator.predict_labels(features, threshold=0.5, batch_size=1)
print(f"Accuracy on data set: {accuracy_score(labels, preds)}")
all_filenames = sorted(os.listdir(os.path.join(data_dir, mode)))
if mode == 'real':
# True Positive Examples
ind_tp = np.equal((preds + labels).astype(int), 2)
ind_tp = np.argwhere(ind_tp == True).reshape(-1, )
tp_filenames = [all_filenames[i] for i in ind_tp]
print(f'correctly predicted filenames: {tp_filenames}')
# False Negative Examples
ind_fn = np.greater(labels, preds)
ind_fn = np.argwhere(ind_fn == True).reshape(-1, )
fn_filenames = [all_filenames[i] for i in ind_fn]
print(f'real clips classified as fake: {fn_filenames}')
elif mode == 'fake':
# True Negative Examples
ind_tn = np.equal((preds + labels).astype(int), 0)
ind_tn = np.argwhere(ind_tn == True).reshape(-1, )
tn_filenames = [all_filenames[i] for i in ind_tn]
print(f'correctly predicted filenames: {tn_filenames}')
# False Positive Examples
ind_fp = np.greater(preds, labels)
ind_fp = np.argwhere(ind_fp == True).reshape(-1, )
fp_filenames = [all_filenames[i] for i in ind_fp]
print(f'fake clips classified as real: {fp_filenames}')
# --- StarcoderdataPython snippet boundary ---
# -*- coding: utf-8 -*-
import logging
import datetime
from flask import request, render_template
from flask_jwt_extended import (
create_access_token,
decode_token
)
from jwt.exceptions import DecodeError
from flasgger import swag_from
from http import HTTPStatus
from pathlib import Path
from sqlalchemy.orm.exc import NoResultFound
from vantage6.common import logger_name
from vantage6.server import db
from vantage6.server.resource import (
ServicesResources
)
module_name = logger_name(__name__)
log = logging.getLogger(module_name)
def setup(api, api_base, services):
    """Register the password-recovery endpoints under <api_base>/<module>.

    Both resources accept POST only and receive `services` as constructor
    keyword arguments.
    """
    path = "/".join([api_base, module_name])
    log.info(f'Setting up "{path}" and subdirectories')

    endpoints = (
        (ResetPassword, '/reset', "reset_password"),
        (RecoverPassword, '/lost', 'recover_password'),
    )
    for resource, suffix, endpoint_name in endpoints:
        api.add_resource(
            resource,
            path + suffix,
            endpoint=endpoint_name,
            methods=('POST',),
            resource_class_kwargs=services
        )
# ------------------------------------------------------------------------------
# Resources / API's
# ------------------------------------------------------------------------------
class ResetPassword(ServicesResources):
    """user can use recover token to reset their password."""

    @swag_from(str(Path(r"swagger/post_reset_password.yaml")),
               endpoint='reset_password')
    def post(self):
        """Set a new password for the user identified by a recovery token."""
        # retrieve user based on email or username
        body = request.get_json()
        reset_token = body.get("reset_token")
        password = body.get("password")
        if not reset_token or not password:
            return {"msg": "reset token and/or password is missing!"}, \
                HTTPStatus.BAD_REQUEST

        # obtain user id from the (signed) recovery token
        try:
            user_id = decode_token(reset_token)['identity'].get('id')
        except DecodeError:
            return {"msg": "Invalid recovery token!"}, HTTPStatus.BAD_REQUEST

        log.debug(user_id)
        # NOTE(review): if the id no longer exists, db.User.get presumably
        # returns None and set_password below raises — confirm and handle.
        user = db.User.get(user_id)

        # set password
        user.set_password(password)
        user.save()

        log.info(f"Successfull password reset for '{user.username}'")
        return {"msg": "password successfully been reset!"}, \
            HTTPStatus.OK
class RecoverPassword(ServicesResources):
    """send a mail containing a recover token"""

    @swag_from(str(Path(r"swagger/post_recover_password.yaml")),
               endpoint='recover_password')
    def post(self):
        """Email a one-hour password-reset token to a user identified by
        username or email."""
        # default return string — deliberately identical whether or not the
        # account exists, so the endpoint does not reveal which users exist
        ret = {"msg": "If the username or email is our database you "
                      "will soon receive an email"}

        # obtain username/email from request'
        body = request.get_json()
        username = body.get("username")
        email = body.get("email")
        if not (email or username):
            return {"msg": "No username or email provided!"}, \
                HTTPStatus.BAD_REQUEST

        # find user in the database, if not here we stop!
        try:
            if username:
                user = db.User.get_by_username(username)
            else:
                user = db.User.get_by_email(email)
        except NoResultFound:
            # we do not tell them.... But we won't continue either
            return ret

        log.info(f"Password reset requested for '{user.username}'")

        # generate a token that can reset their password (valid for 1 hour)
        expires = datetime.timedelta(hours=1)
        reset_token = create_access_token(
            {"id": str(user.id)}, expires_delta=expires
        )

        self.mail.send_email(
            "password reset",
            sender="<EMAIL>",  # '<EMAIL>' is dataset anonymisation residue
            recipients=[user.email],
            text_body=render_template("mail/reset_password_token.txt",
                                      token=reset_token),
            html_body=render_template("mail/reset_password_token.html",
                                      token=reset_token)
        )

        return ret
| StarcoderdataPython |
# <reponame>gokudomatic/cobiv
__all__=["NodeDb"] | StarcoderdataPython |
# <filename>pwa_store_backend/pwas/migrations/0047_remove_pwa_manifest_json.py
# Generated by Django 3.2.6 on 2021-08-28 14:17
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: drops the `manifest_json`
    # field from the `pwa` model.

    dependencies = [
        ('pwas', '0046_alter_pwa_slug'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='pwa',
            name='manifest_json',
        ),
    ]
# --- StarcoderdataPython snippet boundary ---
#------------------
# Author @<NAME>
# Prediction
#-------------------
from tensorflow.keras.models import load_model
from mycvlibrary import config
from collections import deque
import numpy as np
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to our input video")
ap.add_argument("-o", "--output", required=True, help="path to our output video")
ap.add_argument("-s", "--size", type=int, default=128, help="size of queue for averaging")
ap.add_argument("-d", "--display", type=int, default=-1,help="whether or not output frame should be displayed to screen")
# BUG FIX: the arguments were declared but never parsed, so every later
# `args[...]` lookup raised NameError.
args = vars(ap.parse_args())

print("loading model and label binarizer...")
model = load_model(config.MODEL_PATH)

# predictions queue: rolling window used to temporally smooth per-frame scores
Q = deque(maxlen=args["size"])

print("processing video...")
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

while True:
    # read next frame
    (grabbed, frame) = vs.read()
    # when reached end of the stream
    if not grabbed:
        break
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    output = frame.copy()
    # model expects 224x224 RGB float input
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = cv2.resize(frame, (224, 224))
    frame = frame.astype("float32")
    preds = model.predict(np.expand_dims(frame, axis=0))[0]
    Q.append(preds)
    # average the prediction window, pick the highest-scoring class
    results = np.array(Q).mean(axis=0)
    i = np.argmax(results)
    label = config.CLASSES[i]
    text = "activity: {}".format(label)
    cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_PLAIN, 1.25, (0, 0, 255), 5)
    # lazily open the writer once the frame size is known
    if writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)
    writer.write(output)
    if args["display"] > 0:
        cv2.imshow("Output", output)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

print("cleaning up ...")
# ROBUSTNESS FIX: writer stays None when the input video yields no frames;
# calling release() on it unconditionally raised AttributeError.
if writer is not None:
    writer.release()
vs.release()
# --- StarcoderdataPython snippet boundary ---
import itertools
import numpy as np
import networkx as nx
import vocab
def coref_score(instance, property_id):
    """Feature: the coreference scores of the subject and object mentions."""
    subject = instance.subject_entity
    obj = instance.object_entity
    return [subject["coref_score"], obj["coref_score"]]
def el_score(instance, property_id):
    """Feature: the entity-linker scores of the subject and object mentions."""
    subject = instance.subject_entity
    obj = instance.object_entity
    return [subject["el_score"], obj["el_score"]]
def _entity_linker_types_from_mention(entity):
    """Indicator vector (float32) over the global vocab.types list: 1.0 for
    each type the mention carries, 0.0 otherwise."""
    mention_types = entity["types"]
    indicator = np.zeros(len(vocab.types), np.float32)
    for index, type_name in enumerate(vocab.types):
        if type_name in mention_types:
            indicator[index] = 1.0
    return indicator
def entity_linker_types(instance, property_id):
    """Feature: subject and object type-indicator vectors, concatenated."""
    subject_vec = _entity_linker_types_from_mention(instance.subject_entity)
    object_vec = _entity_linker_types_from_mention(instance.object_entity)
    return np.concatenate([subject_vec, object_vec])
def wikidata_predicates(instance, property_id):
    """Placeholder feature: wikidata predicates are not extracted here."""
    return None
def text_score(instance, property_id):
    """Feature: the text model's score for the given property, as a 1-element list.

    FIX: the original return line had dataset residue
    ("| StarcoderdataPython |") fused onto it, which is a syntax error.
    """
    return [instance.text_instance.scores[property_id]]
import smtplib
import os
from dotenv import load_dotenv
# Build an invitation email from a template, substitute the placeholders,
# and send it through Gmail's SSL SMTP endpoint.
# '<EMAIL>' occurrences are dataset anonymisation residue of real addresses.
my_mail = 'From: <EMAIL>'
friend_mail = 'To: <EMAIL>'
subject_mail = 'Subject: Приглашение'
mail_text = '''\n\n Привет, %friend_name%! %my_name% приглашает тебя на сайт %website%!
%website% — это новая версия онлайн-курса по программированию.
Изучаем Python и не только. Решаем задачи. Получаем ревью от преподавателя.
Как будет проходить ваше обучение на %website%?
→ Попрактикуешься на реальных кейсах.
Задачи от тимлидов со стажем от 10 лет в программировании.
→ Будешь учиться без стресса и бессонных ночей.
Задачи не «сгорят» и не уйдут к другому. Занимайся в удобное время и ровно
столько, сколько можешь.
→ Подготовишь крепкое резюме.
Все проекты — они же решение наших задачек — можно разместить на твоём GitHub.
Работодатели такое оценят.
Регистрируйся → %website%
На модули, которые еще не вышли, можно подписаться и получить уведомление
о релизе сразу на имейл.'''
my_name = 'Евгений'
friend_name = 'Алексей'
web_site = 'dvmn.org'
# fill in the %...% placeholders
mail_text = mail_text.replace('%website%', web_site)
mail_text = mail_text.replace('%friend_name%', friend_name)
mail_text = mail_text.replace('%my_name%', my_name)
mail_to_send = '\n'.join([my_mail, friend_mail, subject_mail, mail_text])
mail_to_send = mail_to_send.encode("UTF-8")
email_from = '<EMAIL>'
email_to = '<EMAIL>'
server = smtplib.SMTP_SSL('smtp.gmail.com:465')
# credentials come from a .env file
load_dotenv()
log_in = os.getenv('LOGIN')
# BUG FIX: the original `os.<PASSWORD>('PASSWORD')` was credential-scrubbing
# damage and is not valid Python; restore the os.getenv call.
password = os.getenv('PASSWORD')
server.login(log_in, password)
server.sendmail(email_from, email_to, mail_to_send)
server.quit()
# --- StarcoderdataPython snippet boundary ---
# <gh_stars>10-100
from search_api import *
__version__ = "1.0.0"
__author__ = "<NAME> (@MattDMo)"
__all__ = ["articleAPI"]
if __name__ == "__main__":
print("This module cannot be run on its own. Please use by running ",
"\"from NYTimesArticleAPI import articleAPI\"")
exit(0)
# --- StarcoderdataPython snippet boundary ---
# <gh_stars>0
' These seem to be accurate, but maybe I readed the barrel wrong. '
from __future__ import annotations
import random
from hijackedrole.game.stats import StatsBase
class DumbStats():
    '10:KILL.LEVEL.LOOT.GOTO 10'

    def __init__(self, initMaxHP: int = 5, initMaxSP: int = 5,
                 ATT: int = None, DEF: int = None, SPE: int = None,
                 EAA: int = None, EDE: int = None, ACC: int = None,
                 EVA: int = None, asList: list = None):
        """Build a stat block.

        Either pass `asList` with exactly 11 values in the order
        [MaxHP, HP, MaxSP, SP, ATT, DEF, SPE, EAA, EDE, ACC, EVA],
        or let HP/SP default to the init maxima and any combat stat not
        given be rolled uniformly in [5, 10].

        Raises:
            IndexError: if `asList` is given but does not hold 11 entries.
        """
        if asList:
            if len(asList) != len(self):
                raise IndexError(f'asList must have exactly {str(len(self))} entries')
            (self.MaxHP, self.HP, self.MaxSP, self.SP, self.ATT, self.DEF,
             self.SPE, self.EAA, self.EDE, self.ACC, self.EVA) = asList
        else:
            self.MaxHP = self.HP = initMaxHP
            self.MaxSP = self.SP = initMaxSP
            self.ATT = ATT if ATT else random.randint(5, 10)
            self.DEF = DEF if DEF else random.randint(5, 10)
            self.SPE = SPE if SPE else random.randint(5, 10)
            self.EAA = EAA if EAA else random.randint(5, 10)
            self.EDE = EDE if EDE else random.randint(5, 10)
            self.ACC = ACC if ACC else random.randint(5, 10)
            self.EVA = EVA if EVA else random.randint(5, 10)
        # BUG FIX: was `super.__init__()` (a call on the `super` type itself),
        # which raised TypeError on every construction.
        super().__init__()

    def __list__(self):
        """All 11 stats, in the canonical asList order."""
        return [self.MaxHP, self.HP, self.MaxSP, self.SP, self.ATT,
                self.DEF, self.SPE, self.EAA, self.EDE, self.ACC, self.EVA]

    def __dic__(self) -> dict:
        """All 11 stats keyed by their names."""
        return {'MaxHP': self.MaxHP, 'HP': self.HP, 'MaxSP': self.MaxSP,
                'SP': self.SP, 'ATT': self.ATT, 'DEF': self.DEF, 'SPE': self.SPE,
                'EAA': self.EAA, 'EDE': self.EDE, 'ACC': self.ACC, 'EVA': self.EVA}

    def __add__(self, other: DumbStats) -> DumbStats:
        """Element-wise sum of all 11 stats.

        BUG FIX: the original built a 9-entry list (HP and SP were missing),
        which could never satisfy the 11-entry check in __init__.
        """
        return DumbStats(asList=[a + b for a, b in zip(self.__list__(), other.__list__())])

    def __sub__(self, other: DumbStats) -> DumbStats:
        """Element-wise difference of all 11 stats.

        BUG FIX: same 9- vs 11-entry mismatch as __add__.
        """
        return DumbStats(asList=[a - b for a, b in zip(self.__list__(), other.__list__())])

    def __neg__(self) -> DumbStats:
        return DumbStats(asList=[-v for v in self.__list__()])

    def __inv__(self) -> DumbStats:
        return DumbStats(asList=[~v for v in self.__list__()])

    # `__inv__` is not a recognised special method name; alias it so that
    # `~stats` actually works while keeping the old name for callers.
    __invert__ = __inv__

    def __abs__(self) -> DumbStats:
        return DumbStats(asList=[abs(v) for v in self.__list__()])

    def __len__(self) -> int:
        # number of stats carried; must match the asList length
        return 11

    def __str__(self) -> str:
        # BUG FIX: the MAP/SPP rows previously printed MaxHP/HP instead of
        # the stamina values MaxSP/SP.
        return(f'MPP:\t{str(self.MaxHP)}\tHOW MUCH CAN YOU BE PUNCHED BEFORE YOU DIE. (IN GENERAL)\n' +
               f'HPP:\t{str(self.HP)}\tHOW MUCH CAN YOU BE PUNCHED BEFORE YOU DIE. (NOW)\n' +
               f'MAP:\t{str(self.MaxSP)}\tHOW MUCH CAN YOU PUNCH BEFORE YOU CAN\'T. (IN GENERAL)\n' +
               f'SPP:\t{str(self.SP)}\tHOW MUCH CAN YOU PUNCH BEFORE YOU CAN\'T. (NOW)\n' +
               f'ATT:\t{str(self.ATT)}\tHOW STRONG YOU ATTACK OTHER PEOPLE.\n' +
               f'DEF:\t{str(self.DEF)}\tHOW MUCH YOU DEFEND WHEN ATTACKED.\n' +
               f'SPE:\t{str(self.SPE)}\tHOW FAST YOU MOVE; WHAT DID YOU EXPECT?\n' +
               f'EAA:\t{str(self.EAA)}\tHOW WELL YOU ATTACK OTHER PEOPLE.\n' +
               f'EDE:\t{str(self.EDE)}\tHOW WELL YOU DEFFEND FROM OTHER PEOPLE.\n' +
               f'ACC:\t{str(self.ACC)}\tHOW WELL YOU TRACK ENEMY MOVEMENTS.\n' +
               f'EVA:\t{str(self.EVA)}\tHOW FAST/WELL YOU REACT TO ENEMY MOVEMENTS.')
# --- StarcoderdataPython snippet boundary ---
# <reponame>MHeasell/hearts-server
import unittest
from hearts.services.player import PlayerService, PlayerStateError
class TestPlayerService(unittest.TestCase):
    """Unit tests for PlayerService: creation, duplicate handling, and
    lookup by id / name."""

    def setUp(self):
        # fresh service per test so no state leaks between tests
        self.svc = PlayerService()

    def test_get_player_not_found(self):
        data = self.svc.get_player(1234)
        self.assertIsNone(data)

    def test_create_player(self):
        player_id = self.svc.create_player("Joe", "password")
        player = self.svc.get_player(player_id)
        expected = {
            "id": player_id,
            "name": "Joe"
        }
        self.assertEqual(expected, player)

    def test_create_player_duplicate(self):
        # creating a player whose name already exists must raise
        self.svc.create_player("Jimbob", "password")
        self.svc.create_player("Billy", "password")
        try:
            self.svc.create_player("Jimbob", "asdf")
            self.fail("Expected to throw")
        except PlayerStateError:
            pass # test passed

    def test_create_player_duplicate_ids(self):
        """
        Test that we don't skip ID numbers
        when failing to create a player
        due to duplicate name.
        """
        first_id = self.svc.create_player("Joe", "asdf")
        try:
            self.svc.create_player("Joe", "asdf")
        except PlayerStateError:
            pass
        second_id = self.svc.create_player("Charlie", "asdf")
        self.assertEqual(first_id + 1, second_id)

    def test_get_id(self):
        player_id = self.svc.create_player("Steve", "asdf")
        fetched_id = self.svc.get_player_id("Steve")
        self.assertEqual(player_id, fetched_id)

    def test_get_by_name(self):
        player_id = self.svc.create_player("Jimmy", "password")
        player = self.svc.get_player_by_name("Jimmy")
        expected = {
            "id": player_id,
            "name": "Jimmy"
        }
        self.assertEqual(expected, player)

    def test_ids_not_equal(self):
        # two distinct players must get distinct ids
        j_id = self.svc.create_player("Jimmy", "password")
        b_id = self.svc.create_player("Bob", "password")
        self.assertNotEqual(b_id, j_id)
        jimmy = self.svc.get_player(j_id)
        bob = self.svc.get_player(b_id)
        self.assertEqual("Jimmy", jimmy["name"])
        self.assertEqual("Bob", bob["name"])
if __name__ == '__main__':
    # allow running this test module directly, outside a test runner
    unittest.main()
# --- StarcoderdataPython snippet boundary ---
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_loader, get_info
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
import logging
# Verbose log-line format: timestamp, file:line, function name, then message.
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
# Module-level logger; DEBUG so every message in this module is emitted.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def mtask_forone_grad(val_loader, model, criterion, task_name, args, test_vis=False):
    """Measure input-gradient vulnerability for the tasks in `task_name`.

    For each validation batch the losses of the selected tasks are summed and
    back-propagated to the *input image*; the L2 norm of that input gradient is
    accumulated.  Returns the average input-gradient norm per batch (larger
    norm ~ more attackable).

    Args:
        val_loader: yields (input, target_dict) batches.
        model: multi-task network returning a dict of per-task outputs.
        criterion: dict of loss functions; see NOTE below on the first entry.
        task_name: iterable of task keys whose losses are summed for the grad.
        args: needs .debug (caps the loop at ~200 batches when set).
        test_vis: also report clean segmentation accuracy
                  (assumes a 'segmentsemantic' head -- TODO confirm).

    Returns:
        float: mean L2 norm of the input gradient over the processed batches.
    """
    grad_sum = 0
    cnt = 0
    model.eval()  # inference-mode batchnorm/dropout; grads still flow to input
    score = AverageMeter()
    print('task to be calculated gradients', task_name)
    for i, (input, target) in enumerate(val_loader):
        if torch.cuda.is_available():
            input = input.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()
        # requires_grad=True so the backward pass reaches the input image.
        input_var = torch.autograd.Variable(input, requires_grad=True)
        output = model(input_var)
        first_loss = None
        loss_dict = {}
        for c_name, criterion_fun in criterion.items():
            if first_loss is None:
                first_loss = c_name
                # NOTE(review): the first criterion entry is called with the
                # full output/target dicts -- apparently a combined loss.
                # Confirm against the criterion construction site.
                loss_dict[c_name] = criterion_fun(output, target)
            else:
                loss_dict[c_name] = criterion_fun(output[c_name], target[c_name])
        # Sum only the losses of the tasks under analysis, then backprop once.
        grad_total_loss = None
        for each in task_name:
            if grad_total_loss is None:
                grad_total_loss = loss_dict[each]
            else:
                grad_total_loss = grad_total_loss + loss_dict[each]
        grad_total_loss.backward()
        if test_vis:
            from learning.utils_learn import accuracy
            score.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
        data_grad = input_var.grad
        np_data_grad = data_grad.cpu().numpy()
        L2_grad_norm = np.linalg.norm(np_data_grad)
        grad_sum += L2_grad_norm
        # count processed batches for the final average
        cnt += 1
        if args.debug:
            if cnt>200:
                break
    if test_vis:
        print('Clean Acc for Seg: {}'.format(score.avg))
    print('Vulnerability in Grad Norm')
    print("average grad for task {} :".format(task_name), grad_sum * 1.0 /cnt)
    return grad_sum * 1.0 /cnt
from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city
from learning.utils_learn import accuracy
def mtask_forone_advacc(val_loader, model, criterion, task_name, args, info, epoch=0, writer=None,
                        comet=None, test_flag=False, test_vis=False, norm='Linf'):
    """Evaluate robustness of `model` under a PGD attack on the tasks in `task_name`.

    For every validation batch an adversarial image is generated (Linf or L2 PGD
    maximizing only the `task_name` losses), then the losses of *all* criteria
    are measured on the perturbed input.  Segmentation additionally reports
    pixel accuracy and mIoU.

    Args:
        val_loader: yields (input, target_dict, mask_dict) batches.
        model: multi-task network returning a dict of per-task outputs.
        criterion: dict task-name -> loss(output, target, mask).
        task_name: non-empty collection of tasks the attacker maximizes.
        args: needs .classes, .dataset, .epsilon, .steps, .step_size, .debug.
        info: dataset normalization info for the attack / visualization.
        epoch: global step used for TensorBoard scalars.
        writer: TensorBoard SummaryWriter (required when test_flag is False).
        comet: optional comet-ml experiment.
        test_flag: when True, skip loggers and return a dict of losses/metrics.
        test_vis: when True, also report clean segmentation accuracy.
        norm: 'Linf' or 'l2' attack norm.

    Returns:
        When test_flag: dict of per-task average losses; the 'segmentsemantic'
        entry is a sub-dict {'iou', 'loss', 'seg_acc'}.  Otherwise None.
    """
    assert len(task_name) > 0
    avg_losses = {}
    num_classes = args.classes
    hist = np.zeros((num_classes, num_classes))  # confusion counts for mIoU
    for c_name, criterion_fun in criterion.items():
        avg_losses[c_name] = AverageMeter()
    seg_accuracy = AverageMeter()
    seg_clean_accuracy = AverageMeter()
    model.eval()  # essential: batchnorm/dropout must run in inference mode
    print("using norm type", norm)

    for i, (input, target, mask) in enumerate(val_loader):
        if test_vis:
            # Clean-input segmentation accuracy, for reference.
            clean_output = model(Variable(input.cuda(), requires_grad=False))
            seg_clean_accuracy.update(
                accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
                input.size(0))

        if args.steps == 0 or args.step_size == 0:
            # Zero-step attack degenerates to the clean image.
            # NOTE(review): this mutates `args` for all later callers -- confirm intended.
            args.epsilon = 0

        if norm == 'Linf':
            if args.dataset == 'taskonomy':
                adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name,
                                           args.epsilon, args.steps, args.dataset,
                                           args.step_size, info, args, using_noise=True)
            elif args.dataset == 'cityscape':
                adv_img = PGD_attack_mtask_city(input, target, mask, model, criterion, task_name,
                                                args.epsilon, args.steps, args.dataset,
                                                args.step_size, info, args, using_noise=True)
            else:
                # FIX: previously fell through leaving `adv_img` undefined (NameError).
                raise ValueError("unsupported dataset for Linf attack: {}".format(args.dataset))
        elif norm == 'l2':
            adv_img = PGD_attack_mtask_L2(input, target, mask, model, criterion, task_name,
                                          args.epsilon, args.steps, args.dataset,
                                          args.step_size)
        else:
            # FIX: previously fell through leaving `adv_img` undefined (NameError).
            raise ValueError("unsupported norm type: {}".format(norm))

        image_var = adv_img.data
        if torch.cuda.is_available():
            image_var = image_var.cuda()
            for keys, m in mask.items():
                mask[keys] = m.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()

        # Forward on the adversarial image; no graph needed for evaluation.
        with torch.no_grad():
            output = model(image_var)

        # Accumulate every task's loss on the adversarial input.
        for c_name, criterion_fun in criterion.items():
            this_loss = criterion_fun(output[c_name].float(), target[c_name], mask[c_name])
            avg_losses[c_name].update(this_loss.data.item(), input.size(0))

        if 'segmentsemantic' in criterion:
            # FIX: this accumulation appeared twice per batch in the original,
            # doubling the work (reported metrics were unchanged since both the
            # running average and IoU are ratios).
            seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()),
                                input.size(0))
            class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
            target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() \
                else target['segmentsemantic'].data.numpy()
            class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() \
                else class_prediction.data.numpy()
            hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)

        if i % 500 == 0:
            # Periodic visualization of clean/adversarial images and predictions.
            # NOTE(review): assumes a 'segmentsemantic' output head exists -- confirm.
            class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
            decoded_target = decode_segmap(
                target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available()
                else target['segmentsemantic'][0][0].data.numpy(),
                args.dataset)
            decoded_target = np.moveaxis(decoded_target, 2, 0)
            decoded_class_prediction = decode_segmap(
                class_prediction[0].cpu().data.numpy() if torch.cuda.is_available()
                else class_prediction[0].data.numpy(), args.dataset)
            decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
            if not test_flag:
                writer.add_image('Val/image clean ', back_transform(input, info)[0])
                writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
                writer.add_image('Val/image gt for adv ', decoded_target)
                writer.add_image('Val/image adv prediction ', decoded_class_prediction)

        if args.debug:
            if i > 1:
                break

    if test_vis:
        print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))

    # Human-readable summaries (attacked vs. untouched tasks).
    str_attack_result = ''
    str_not_attacked_task_result = ''
    for keys, loss_term in criterion.items():
        if keys in task_name:
            str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                keys, loss=avg_losses[keys])
        else:
            str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                keys, loss=avg_losses[keys])

    if not test_flag:
        for keys, _ in criterion.items():
            if keys in task_name:
                writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
                if comet is not None:
                    comet.log_metric('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
            else:
                # FIX: pass `epoch` as the global step (it was omitted, so these
                # scalars logged against an auto-incremented step, unlike the
                # attacked branch and unlike mtask_test_all).
                writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
                if comet is not None:
                    comet.log_metric('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)

    mIoU = None
    if 'segmentsemantic' in criterion:  # FIX: condition was duplicated with `or`
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        mIoU = round(np.nanmean(ious), 2)
        str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
        str_attack_result += ' Segment ===> mAP {}\n'.format(mIoU)
        if comet is not None:
            comet.log_metric('segmentsemantic Attacked IOU', mIoU)
            # FIX: log the scalar average, not the AverageMeter object itself.
            comet.log_metric('segmentsemantic Attacked Score', seg_accuracy.avg)

    # NOTE(review): `str_attack_result` is built but never printed -- preserved.
    print('clean task')
    print(str_not_attacked_task_result)

    if test_flag:
        dict_losses = {}
        for key, loss_term in criterion.items():
            dict_losses[key] = avg_losses[key].avg
        if 'segmentsemantic' in criterion:
            dict_losses['segmentsemantic'] = {'iou': mIoU,
                                              'loss': avg_losses['segmentsemantic'].avg,
                                              'seg_acc': seg_accuracy.avg}
        print("These losses are returned", dict_losses)
        return dict_losses
def mtask_test_all(val_loader, model, criterion, task_name, all_task_name_list, args, info, writer=None, epoch=0,
                   test_flag=False, test_vis=False):
    """Attack `task_name`, evaluate all tasks, and collect gradient geometry.

    Besides the adversarial evaluation (as in mtask_forone_advacc), this also
    accumulates, per batch:
      * matrix_cos_all -- upper triangle: inner products of the L2-normalized
        per-task input gradients (pairwise cosine similarity); diagonal: cosine
        of each task's gradient with the joint-loss input gradient.
      * grad_norm_list_all -- per-task input-gradient L2 norms, summed.
      * grad_norm_joint_all -- joint-loss input-gradient L2 norm, summed.
    NOTE(review): these accumulators are returned un-normalized (not divided by
    the batch count) -- callers must average if needed; confirm downstream use.

    task name: is not sorted here, so can be rigorously define the sequence of tasks
    all_task_name_list: make the task under attack first.
    NOTE: test_flag is for the case when we are testing for multiple models, need to return something to be able to plot and analyse
    """
    assert len(task_name) > 0
    avg_losses = {}
    num_classes = args.classes
    hist = np.zeros((num_classes, num_classes))  # confusion counts for mIoU
    num_of_tasks = len(all_task_name_list)
    for c_name, criterion_fun in criterion.items():
        avg_losses[c_name] = AverageMeter()
    seg_accuracy = AverageMeter()
    seg_clean_accuracy = AverageMeter()
    # Per-batch cosine matrix and its running sum over all batches.
    matrix_cos_all = np.zeros((num_of_tasks, num_of_tasks))
    matrix_cos = np.zeros((num_of_tasks, num_of_tasks))
    grad_norm_list_all = np.zeros((num_of_tasks))
    grad_norm_list = np.zeros((num_of_tasks))
    grad_norm_joint_all = 0
    model.eval()  # this is super important for correct including the batchnorm
    for i, (input, target, mask) in enumerate(val_loader):
        if test_vis:
            # Clean segmentation accuracy for reference.
            clean_output = model(Variable(input.cuda(), requires_grad=False))
            seg_clean_accuracy.update(
                accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
                input.size(0))
        # Linf PGD attack on the selected tasks only.
        adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
                                   args.dataset,
                                   args.step_size, info, args, using_noise=True)
        image_var = adv_img.data
        grad_list = []  # per-task normalized input gradients for this batch
        if torch.cuda.is_available():
            for keys, tar in mask.items():
                mask[keys] = tar.cuda()
            input = input.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()
        total_grad = None
        # One forward/backward per task: input gradient of that task's loss.
        for jj, each in enumerate(all_task_name_list):
            input_var = torch.autograd.Variable(input, requires_grad=True)
            output = model(input_var)
            loss_task = criterion[each](output[each], target[each], mask[each])
            loss_task.backward()
            grad = input_var.grad.cpu().numpy()
            grad_norm_list[jj] = np.linalg.norm(grad)
            grad_normalized = grad / np.linalg.norm(grad)
            grad_list.append(grad_normalized)
        # One more forward/backward for the joint (summed) loss.
        input_var = torch.autograd.Variable(input, requires_grad=True)
        output = model(input_var)
        total_loss = 0
        for jj, each in enumerate(all_task_name_list):
            total_loss = total_loss + criterion[each](output[each], target[each], mask[each])
        total_loss.backward()
        total_grad = input_var.grad.cpu().numpy()
        grad_norm_joint_all += np.linalg.norm(total_grad)
        total_grad = total_grad / np.linalg.norm(
            total_grad)  # TODO: this is crucial for preventing GPU memory leak,
        # Upper triangle: task-vs-task cosine; diagonal: task-vs-joint cosine.
        for row in range(num_of_tasks):
            for column in range(num_of_tasks):
                if row < column:
                    matrix_cos[row, column] = np.sum(np.multiply(grad_list[row], grad_list[column]))
                elif row == column:
                    matrix_cos[row, row] = np.sum(np.multiply(grad_list[row], total_grad))
        matrix_cos_all = matrix_cos_all + matrix_cos
        grad_norm_list_all = grad_norm_list_all + grad_norm_list
        # Evaluate all task losses on the adversarial image (no graph needed).
        with torch.no_grad():
            output = model(image_var)
            for c_name, criterion_fun in criterion.items():
                avg_losses[c_name].update(criterion_fun(output[c_name], target[c_name], mask[c_name]).data.item(), input.size(0))
            if 'segmentsemantic' in criterion.keys():
                # this is accuracy for segmentation
                seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()),
                                    input.size(0))
                class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
                target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target[
                    'segmentsemantic'].data.numpy()
                class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
                hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
            if i % 500 == 0:
                # Periodic visualization.
                # NOTE(review): assumes a 'segmentsemantic' head exists -- confirm.
                class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
                decoded_target = decode_segmap(
                    target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available() else
                    target['segmentsemantic'][0][0].data.numpy(),
                    args.dataset)
                decoded_target = np.moveaxis(decoded_target, 2, 0)
                decoded_class_prediction = decode_segmap(
                    class_prediction[0].cpu().data.numpy() if torch.cuda.is_available() else class_prediction[
                        0].data.numpy(), args.dataset)
                decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
                if not test_flag:
                    writer.add_image('Val/image clean ', back_transform(input, info)[0])
                    writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
                    writer.add_image('Val/image gt for adv ', decoded_target)
                    writer.add_image('Val/image adv prediction ', decoded_class_prediction)
        if args.debug:
            if i > 1:
                break
    if test_vis:
        print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))
    # Human-readable summaries (attacked vs. untouched tasks).
    str_attack_result = ''
    str_not_attacked_task_result = ''
    for keys, loss_term in criterion.items():
        if keys in task_name:
            str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys,
                                                                                              loss=avg_losses[keys])
        else:
            str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys,
                                                                                                             loss=
                                                                                                             avg_losses[
                                                                                                                 keys])
    # Tensorboard logger
    if not test_flag:
        for keys, _ in criterion.items():
            if keys in task_name:
                writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
            else:
                writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg,
                                  epoch)
    if 'segmentsemantic' in criterion.keys():
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        mIoU = round(np.nanmean(ious), 2)
        str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
        str_attack_result += ' Segment ===> mAP {}\n'.format(mIoU)
    print('clean task')
    print(str_not_attacked_task_result)
    if test_flag:
        dict_losses = {}
        for key, loss_term in criterion.items():
            dict_losses[key] = avg_losses[key].avg
        if 'segmentsemantic' in criterion.keys():
            dict_losses['segmentsemantic'] = {'iou': mIoU,
                                              'loss': avg_losses['segmentsemantic'].avg,
                                              'seg_acc': seg_accuracy.avg}
        print("These losses are returned", dict_losses)
        # Compute the dictionary of losses that we want. Desired: {'segmentsemantic:[mIoU, cel],'keypoints2d':acc,'}
        return dict_losses, matrix_cos_all, grad_norm_joint_all, grad_norm_list_all
        # the matrix, here, task under attack is the first
| StarcoderdataPython |
import logging

# Configure the root logger once at import time; DEBUG keeps all messages visible.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)

# Package version string.
__version__ = '0.8.3'
| StarcoderdataPython |
3242952 | <filename>submissions/make_submissions_lgbm_gs.py
import os
# For reading, visualizing, and preprocessing data
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from pytorch_toolbelt.utils import fs
from sklearn.metrics import make_scorer
from sklearn.model_selection import GroupKFold, RandomizedSearchCV, GridSearchCV
from sklearn.preprocessing import StandardScaler
from alaska2 import get_holdout, INPUT_IMAGE_KEY, get_test_dataset
from alaska2.metric import alaska_weighted_auc
from alaska2.submissions import parse_classifier_probas, sigmoid, parse_array, parse_and_softmax, get_x_y_for_stacking
from submissions.eval_tta import get_predictions_csv
from submissions.make_submissions_averaging import compute_checksum_v2
import lightgbm as lgb
def wauc_metric(y_true, y_pred):
    """LightGBM-style eval tuple: (name, weighted-AUC value, higher-is-better)."""
    return ("wauc", alaska_weighted_auc(y_true, y_pred), True)
def main():
    """Fit a LightGBM stacker over per-model holdout predictions and write a submission.

    Runs a randomized hyper-parameter search (GroupKFold by image id, scored with
    the competition's weighted AUC) over LGBMClassifier, then predicts the test
    set.

    Side effects: writes `lgbm_gs_<best_score>_<checksum>.csv` next to this file
    and `lgbm-random-grid-search-results-01.csv` in the working directory.
    """
    output_dir = os.path.dirname(__file__)

    # Checkpoints whose holdout/test predictions are stacked (b6/b7 runs, folds 0-3).
    experiments = [
        "G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        "G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        "G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        "G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
        "K_Jul17_17_09_nr_rgb_tf_efficientnet_b6_ns_mish_fold0_local_rank_0_fp16",
        "J_Jul19_20_10_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16",
        "H_Jul11_16_37_nr_rgb_tf_efficientnet_b7_ns_mish_fold2_local_rank_0_fp16",
        "K_Jul18_16_41_nr_rgb_tf_efficientnet_b6_ns_mish_fold3_local_rank_0_fp16",
    ]
    holdout_predictions = get_predictions_csv(experiments, "cauc", "holdout", "d4")
    test_predictions = get_predictions_csv(experiments, "cauc", "test", "d4")
    checksum = compute_checksum_v2(experiments)

    holdout_ds = get_holdout("", features=[INPUT_IMAGE_KEY])
    image_ids = [fs.id_from_fname(x) for x in holdout_ds.images]
    quality_h = F.one_hot(torch.tensor(holdout_ds.quality).long(), 3).numpy().astype(np.float32)

    test_ds = get_test_dataset("", features=[INPUT_IMAGE_KEY])
    quality_t = F.one_hot(torch.tensor(test_ds.quality).long(), 3).numpy().astype(np.float32)

    with_logits = True
    x, y = get_x_y_for_stacking(holdout_predictions, with_logits=with_logits, tta_logits=with_logits)
    # Collapse the multi-class target into binary cover (0) vs. stego (1).
    y = (y > 0).astype(int)
    print(x.shape, y.shape)

    x_test, _ = get_x_y_for_stacking(test_predictions, with_logits=with_logits, tta_logits=with_logits)
    print(x_test.shape)

    # Standardize features (fit on holdout, apply to test).
    # FIX: removed the dead `if False:` PCA branch, which referenced an
    # unimported `PCA` name, and flattened the always-true `if True:` guards.
    sc = StandardScaler()
    x = sc.fit_transform(x)
    x_test = sc.transform(x_test)

    # Append one-hot JPEG quality as extra stacker features.
    x = np.column_stack([x, quality_h])
    x_test = np.column_stack([x_test, quality_t])

    # Group folds by image id so predictions for one image never straddle folds.
    group_kfold = GroupKFold(n_splits=5)

    params = {
        "boosting_type": ["gbdt", "dart", "rf", "goss"],
        "num_leaves": [16, 32, 64, 128],
        "reg_alpha": [0, 0.01, 0.1, 0.5],
        "reg_lambda": [0, 0.01, 0.1, 0.5],
        "learning_rate": [0.001, 0.01, 0.1, 0.5],
        "n_estimators": [32, 64, 126, 512],
        "max_depth": [2, 4, 8],
        "min_child_samples": [20, 40, 80, 100],
    }

    lgb_estimator = lgb.LGBMClassifier(objective="binary", silent=True)

    random_search = RandomizedSearchCV(
        lgb_estimator,
        param_distributions=params,
        scoring=make_scorer(alaska_weighted_auc, greater_is_better=True, needs_proba=True),
        n_jobs=3,
        n_iter=50,
        cv=group_kfold.split(x, y, groups=image_ids),
        verbose=2,
        random_state=42,
    )

    # Here we go
    random_search.fit(x, y)

    # Probability of the positive (stego) class becomes the submission score.
    test_pred = random_search.predict_proba(x_test)[:, 1]
    print(test_pred)

    submit_fname = os.path.join(output_dir, f"lgbm_gs_{random_search.best_score_:.4f}_{checksum}.csv")
    df = pd.read_csv(test_predictions[0]).rename(columns={"image_id": "Id"})
    df["Label"] = test_pred
    df[["Id", "Label"]].to_csv(submit_fname, index=False)

    print("\n All results:")
    print(random_search.cv_results_)
    print("\n Best estimator:")
    print(random_search.best_estimator_)
    print(random_search.best_score_)
    print("\n Best hyperparameters:")
    print(random_search.best_params_)

    results = pd.DataFrame(random_search.cv_results_)
    results.to_csv("lgbm-random-grid-search-results-01.csv", index=False)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1624867 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# @Author: SHLLL
# @Email: <EMAIL>
# @Date: 2018-03-22 21:21:05
# @Last Modified by: SHLLL
# @Last Modified time: 2018-04-23 00:11:28
# @License: MIT LICENSE
import re
class Parser(object):
    """Extracts news paragraphs and candidate article links from parsed HTML trees."""

    def __init__(self, para_url_reg, urls_queue, allow_domin=None):
        """Initialize the Parser.

        Arguments:
            para_url_reg {string} -- regex source matched against candidate hrefs.
            urls_queue -- queue object that collected URLs will be fed into.

        Keyword Arguments:
            allow_domin {string} -- allowed url domain; currently accepted but
                unused (default: {None}).
        """
        self._urls_queue = urls_queue
        self._para_url_pat = re.compile(para_url_reg)

    def _get_news_item(self, url, title, date, news):
        """Bundle one article's fields into a dict.

        NOTE(review): the key "datee" looks like a typo for "date" but is kept
        byte-for-byte since downstream consumers may rely on it.
        """
        return {"url": url, "title": title, "datee": date, "news": news}

    def _get_news_para(self, root, xpath):
        """Join every text node matched by `xpath`, one per line.

        Non-breaking spaces are stripped and single quotes normalized to
        double quotes in each fragment.
        """
        lines = [str(node).replace('\xa0', '').replace("'", '"') + "\n"
                 for node in root.xpath(xpath)]
        return "".join(lines)

    def _get_page_links(self, root):
        """Collect href attribute values that match the configured URL pattern."""
        # TODO(SHLLL): 优化url的过滤流程即不只保留新闻url
        # (TODO: improve URL filtering so it keeps more than just news URLs.)
        matching = (href for href in root.xpath("//*[@href]/@href")
                    if self._para_url_pat.match(href))
        return [str(href).replace('\n', '') for href in matching]
| StarcoderdataPython |
53971 | <filename>base_ppo_agent.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ray import tune
from ray.rllib.agents.ppo import PPOTrainer
from ray.tune import grid_search
import hfo_py
from soccer_env.high_action_soccer_env import HighActionSoccerEnv
def on_episode_end(info):
    """Ray Tune callback: record 1 if the episode ended with a goal, else 0."""
    episode = info["episode"]
    # 'status' comes from the HFO environment's final info dict; averaging this
    # 0/1 metric over episodes yields the goal rate.
    episode.custom_metrics["goal_rate"] = int(episode.last_info_for()['status'] == hfo_py.GOAL)
# Stop training once either budget is exhausted: total env steps, or the mean
# episode reward target is reached.
stop = {
    "timesteps_total": 100000,
    "episode_reward_mean": 0.89
}

results = tune.run(PPOTrainer, config={
    "env": HighActionSoccerEnv,
    "lr": 0.001,
    "num_workers": 1,
    "env_config": {
        "server_config": {
            "defense_npcs": 1,
        },
        # FIX: key was " feature_set" (with a leading space), so the setting
        # was silently ignored by the environment.
        "feature_set": hfo_py.LOW_LEVEL_FEATURE_SET,
    },
    # "lr": grid_search([1e-2, 1e-4, 1e-6]),  # try different lrs
    "callbacks": {
        "on_episode_end": on_episode_end,
    },
    "framework": 'torch'
}, stop=stop)  # "log_level": "INFO" for verbose
| StarcoderdataPython |
3293736 | <gh_stars>1-10
from __future__ import print_function
import os
import yaml
from click.testing import CliRunner
from dagster import seven
from dagster.api.launch_scheduled_execution import sync_launch_scheduled_execution
from dagster.cli.pipeline import execute_list_command, pipeline_list_command
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.scheduler import ScheduledExecutionSuccess
from dagster.core.test_utils import environ
from dagster.utils import file_relative_path, script_relative_path
def no_print(_):
    """Printer callback that discards its argument (silences command output)."""
    return None
def test_list_command():
    """Smoke-test `pipeline list` via both the python API and the click CLI."""
    runner = CliRunner()
    # Direct API call; `no_print` suppresses the command's console output.
    execute_list_command(
        {
            'repository_yaml': script_relative_path('../repository.yaml'),
            'python_file': None,
            'module_name': None,
            'fn_name': None,
        },
        no_print,
    )
    # Same repository, invoked through the CLI entry point.
    result = runner.invoke(
        pipeline_list_command, ['-w', script_relative_path('../repository.yaml')]
    )
    assert result.exit_code == 0
def test_schedules():
    """Launch each legacy-repository schedule once against a filesystem scheduler."""
    with seven.TemporaryDirectory() as temp_dir:
        # DAGSTER_HOME must point at an instance dir containing dagster.yaml.
        with environ({'DAGSTER_HOME': temp_dir}):
            # Configure the instance to use FilesystemTestScheduler rooted in
            # the temp dir.
            with open(os.path.join(temp_dir, 'dagster.yaml'), 'w') as fd:
                yaml.dump(
                    {
                        'scheduler': {
                            'module': 'dagster.utils.test',
                            'class': 'FilesystemTestScheduler',
                            'config': {'base_dir': temp_dir},
                        }
                    },
                    fd,
                    default_flow_style=False,
                )

            recon_repo = ReconstructableRepository.from_legacy_repository_yaml(
                file_relative_path(__file__, '../repository.yaml')
            )

            for schedule_name in [
                'many_events_every_min',
                'pandas_hello_world_hourly',
            ]:
                schedule = recon_repo.get_reconstructable_schedule(schedule_name)
                result = sync_launch_scheduled_execution(schedule.get_origin())
                # Every scheduled launch must report success.
                assert isinstance(result, ScheduledExecutionSuccess)
| StarcoderdataPython |
98885 | #!/usr/bin/python2.7
import os
import re
import sys
import shutil
import fnmatch
import argparse
import tempfile
import subprocess
# Directory containing this script; the bundled tool defaults live beside it.
CWD = os.path.dirname(os.path.realpath(__file__))
SMALI_DEFAULT = os.path.join(CWD, 'smali.jar')
BAKSMALI_DEFAULT = os.path.join(CWD, 'baksmali.jar')
ZIPALIGN_DEFAULT = os.path.join(CWD, 'zipalign')
# Glob patterns identifying odex files to process.
ODEX_FILES = ['*.odex']
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Recursivly deodex and zipalign APK/JAR files in place.')
parser.add_argument("system_dir")
parser.add_argument('--smali', dest='smali', action='store', default=SMALI_DEFAULT,
help='The smali JAR file to use')
parser.add_argument('--baksmali', dest='baksmali', action='store', default=BAKSMALI_DEFAULT,
help='The baksmali JAR file to use')
parser.add_argument('--zipalign', dest='zipalign', action='store', default=ZIPALIGN_DEFAULT,
help='The zipalign tool to use')
args = parser.parse_args()
# transform glob patterns to regular expressions
odex_file_re = r'|'.join([fnmatch.translate(x) for x in ODEX_FILES])
# Check to see if system directory contains a framework directory
framework_dir = os.path.join(args.system_dir, 'framework/arm')
if not os.path.isdir(framework_dir):
sys.stderr.write("Invalid system directory. Directory must contain framework files.\n")
exit(1)
tempdir = tempfile.mkdtemp()
for root, dirs, files in os.walk(args.system_dir):
files = [f for f in files if re.match(odex_file_re, f)]
files = [os.path.join(root, f) for f in files]
for odex_file in files:
fullname=os.path.basename(odex_file)
basename = '.'.join(fullname.split('.')[:-1])
dirname=os.path.dirname(odex_file) + '/'
dirname1 = '/'.join(dirname.split('/')[:-2]) + '/'
dirname2 = '/'.join(dirname.split('/')[:-3]) + '/'
print "Treating %s @ %s" % (basename, dirname2)
if os.path.isfile(dirname + basename + '.apk'):
archive_file = dirname + basename + '.apk'
elif os.path.isfile(dirname1 + basename + '.apk'):
archive_file = dirname1 + basename + '.apk'
elif os.path.isfile(dirname2 + basename + '.apk'):
archive_file = dirname2 + basename + '.apk'
elif os.path.isfile(dirname + basename + '.jar'):
archive_file = dirname + basename + '.jar'
basename = basename + '.jar'
elif os.path.isfile(dirname1 + basename + '.jar'):
archive_file = dirname1 + basename + '.jar'
basename = basename + '.jar'
elif os.path.isfile(dirname2 + basename + '.jar'):
archive_file = dirname2 + basename + '.jar'
basename = basename + '.jar'
else:
sys.stderr.write("Skipping. Could not find archive file for odex: %s\n" % basename)
continue
print "Deodexing %s --> %s" % (odex_file, archive_file)
smali_file = os.path.join(tempdir, "classes.smali")
dex_file = os.path.join(tempdir, "classes.dex")
zip_file = os.path.join(tempdir, "package.zip")
# baksmali
subprocess.check_call(['java', '-Xmx512m', '-jar', args.baksmali, '-d', framework_dir, '-d', dirname, '-e', '/' + archive_file, '-c', 'boot.oat:' + basename, '-o', smali_file, '-x', odex_file])
# smali
subprocess.check_call(['java', '-Xmx512m', '-jar', args.smali, smali_file, '-o', dex_file])
shutil.rmtree(smali_file)
shutil.copy(archive_file, zip_file)
subprocess.check_call(['zip', '-q', '-j', zip_file, dex_file])
os.remove(dex_file)
subprocess.check_call(['zipalign', '-f', '-v', '4', zip_file, archive_file])
os.remove(zip_file)
os.remove(odex_file)
shutil.rmtree(tempdir)
| StarcoderdataPython |
3366367 | """SGTR algorithm for iteratively solving Ax=b over multiple A's and b's."""
import numpy as np
from numpy.linalg import norm as Norm
import pandas as pd
from .optimizer import Optimizer
from .ridge import Ridge
from .group_loss_function import GroupLossFunction
from .pde_loss_function import PDELossFunction
class SGTR:
"""Class containing logic for the SGTR algorithm."""
    def __init__(self,
                 point_optimizer: Optimizer = Ridge(lambda_ = 1e-5),
                 loss_func: GroupLossFunction = PDELossFunction(),
                 threshold_func: callable = Norm,
                 num_tols: int = 50,
                 normalize_by: int = 2):
        """Initialize components of the SGTR algorithm.

        Keyword arguments:
        point_optimizer -- the solver for a single Ax=b problem (Ridge in SGTR)
        loss_func -- The loss function used for grading prospective solutions.
        threshold_func -- the function used for thresholding
        num_tols -- number of threshold tolerances to try for iterative thresh.
        normalize_by -- the norm by which to normalize the cols in As and bs

        NOTE(review): the default Ridge()/PDELossFunction() instances are
        created once at class-definition time and shared by every SGTR built
        with defaults; fine if those objects are stateless -- confirm.
        """
        # Bound callables are stored, not the objects themselves.
        self.point_optimizer = point_optimizer.optimize
        self.loss_func = loss_func.score
        self.threshold_func = threshold_func
        self.num_tols = num_tols
        self.normalize = normalize_by
def format_inputs(self, As, bs):
"""Format As and bs to list of ndarrays.
Keyword arguments:
As -- list of As
bs -- list of bs
Returns:
As -- list of As as a list of ndarrays
bs -- list of bs as a list of ndarrays
"""
As = [self.convert_to_ndarray(A.copy()) for A in As]
bs = [self.convert_to_ndarray(b.copy()) for b in bs]
return As, bs
def convert_to_ndarray(self, array_like):
"""Convert an ndarray-like object to an ndarray.
Keyword arguments:
array_like -- an ndarray-like object
Returns:
ndarray -- object converted to ndarray
"""
if type(array_like) == pd.DataFrame:
return array_like.values
else:
try:
return np.asarray(array_like)
except Exception as err:
print("Exception on convering data:")
print(err)
raise(Exception("can't convert data to numpy array!"))
def compute_norms(self, As, bs):
"""Compute the norms of As and bs. As list is computed column-wise.
Keyword argument:
As -- list of As
bs -- list of bs
Returns:
As_norms -- list of As norms
bs_norms -- list of bs norms
The norm computed is based on the attribute self.normalize. Note that
As_norms is computed by taking all As, stacking them, and then
computing the norm of each column.
"""
m = len(As) # m is the number of individual optimizations to run
# in SINDy-BVP, m is the number of spatial positions
n, d = As[0].shape # d is the number of candidate functions
# and n is the number of trials
# Initialize an empty vector to hold the norm of each candidate
# function. the norm is evaluated over ALL spatial positions for
# each candidate function.
As_norms = np.zeros(d)
for i in range(d):
data = np.hstack([A[:, i] for A in As])
As_norms[i] = Norm(data, self.normalize)
# Now normalize the bs
bs_norms = [m*Norm(b, self.normalize) for b in bs]
return As_norms, bs_norms
def normalize_data(self, As, bs, As_norms, bs_norms):
"""Normalize the data in As and bs by norms As_norms, bs_norms.
Keyword arguments:
As -- list of As
bs -- list of bs
As_norms -- list of As norms
bs_norms -- list of bs norms
Returns:
normalized_As -- As normalized by the As_norms
normalized_bs -- bs normalized by the bs_norms
"""
normalized_As = [A.copy() for A in As]
normalized_bs = [b.copy() for b in bs]
for i in range(len(As)):
normalized_As[i] = As[i].dot(np.diag(As_norms**-1))
normalized_bs[i] = bs[i]/bs_norms[i]
return normalized_As, normalized_bs
    def compute_tolerances(self, As, bs):
        """Compute the range of tolerances to use for iterative thresholding.

        Keyword arguments:
        As -- list of As
        bs -- list of bs

        Returns:
        tols -- num_tols thresholds: 0 followed by log-spaced values
            between the smallest and largest nonzero ridge-solution norms
            (the largest rung itself is dropped).
        """
        # Compute the range of tolerances to use for thresholding
        opt = self.point_optimizer  # Use shortcut for optimizer
        # Compute the solution x for each group using ridge regression
        x_ridges = [opt(A, b) for (A, b) in zip(As, bs)]
        # Stack the solutions into matrix, so that each column contains
        # the coefficient vector for a single candidate function, where
        # each row is a single spatial point.
        x_ridge = np.hstack(x_ridges)
        # Get the norm for each of the candidate function coefficient vectors
        xr_norms = [Norm(x_ridge[j, :]) for j in range(x_ridge.shape[0])]
        # Determine the maximum of these norms
        max_tol = np.max(xr_norms)
        # Smallest *nonzero* norm. NOTE(review): this raises ValueError if
        # every coefficient norm is exactly zero -- confirm upstream
        # guarantees at least one nonzero coefficient.
        min_tol = np.min([x for x in xr_norms if x != 0])
        # Log-spaced ladder of thresholds between the min and max norms.
        tolerance_space = np.linspace(np.log(min_tol), np.log(max_tol),
                                      self.num_tols)
        # Prepend 0 (no thresholding) and drop the top rung, keeping
        # len(tols) == num_tols.
        tols = [0]+[np.exp(alpha) for alpha in tolerance_space][:-1]
        return tols
    def optimize(self, As, bs):
        """Execute SGTR algorithm.

        Inputs:
        As -- list of As
        bs -- list of bs

        Returns:
        xs -- all prospective solutions produced by iter. thresh.
        tols -- tolerances used for iterative thresholding
        losses -- the losses computed by loss function (typ. PDE Loss Fn)

        NOTE(review): tolerances whose thresholding raises are skipped, so
        xs/losses can end up shorter than tols and indices then no longer
        line up between the three returned lists -- confirm intended.
        """
        if len(As) != len(bs):
            raise Exception('Number of Xs and ys mismatch')
        As, bs = self.format_inputs(As, bs)
        # Seeds numpy's *global* RNG for reproducibility; affects callers too.
        np.random.seed(0)
        # NOTE(review): isinstance(True, int) is also True, so a bool
        # normalize flag would enable normalization here -- confirm intended.
        if isinstance(self.normalize, int):
            As_norms, bs_norms = self.compute_norms(As, bs)
            As, bs = self.normalize_data(As, bs, As_norms, bs_norms)
        tols = self.compute_tolerances(As, bs)
        # Run thresholded regression once per tolerance, recording each
        # candidate solution and its loss.
        xs = []
        losses = []
        for i, tol in enumerate(tols):
            try:
                x = self.iterative_thresholding(As, bs, tol=tol)
                xs.append(x)
                loss = self.loss_func(As, bs, x)
                losses.append(loss)
            except Exception as exc:
                # Best-effort: report and move on to the next tolerance.
                print(exc)
                pass
        # Undo normalization so coefficients are in the original units.
        if isinstance(self.normalize, int):
            xs = self.scale_solutions(As, bs, xs, As_norms, bs_norms)
        return xs, tols, losses
    def iterative_thresholding(self, As, bs, tol: float, maxit: int = 10):
        """Sequentially-thresholded regression at a single tolerance.

        Keyword arguments:
        As -- list of As for Ax=b
        bs -- list of bs for Ax=b
        tol -- the tolerance to use for thresholding
        maxit -- the maximum number of times to iteratively threshold

        Returns:
        W -- (d, m) coefficient matrix: one column per group, one row per
            candidate function; thresholded rows are zeroed out.
        """
        # Short aliases for the point-wise optimizer and the row-scoring
        # (thresholding) function.
        opt = self.point_optimizer
        tfunc = self.threshold_func
        # Define n, d, m
        n, d = As[0].shape  # n is num of trials. d is num of candidate funcs
        m = len(As)  # m is the number of spatial positions.
        # Initial estimate: solve each group independently, stack as columns.
        W = np.hstack([opt(A, b) for [A, b] in zip(As, bs)])
        num_relevant = As[0].shape[1]  # assume all candidate functions matter
        # Select indices of candidate functions exceeding the threshold
        # as graded by the thresholding function
        biginds = [i for i in range(d) if tfunc(W[i, :]) > tol]
        # Execute iterative thresholding
        for j in range(maxit):
            # Figure out which items to cut out
            smallinds = [i for i in range(d) if Norm(W[i, :]) < tol]
            new_biginds = [i for i in range(d) if i not in smallinds]
            # If nothing changes then stop.
            # NOTE(review): rebinding the loop variable j forces the final
            # least-squares branch below on THIS pass, but does not end the
            # for-loop -- range() reassigns j on the next iteration.
            if num_relevant == len(new_biginds):
                j = maxit-1
            else:
                num_relevant = len(new_biginds)
            # Also make sure we didn't just lose all the coefficients
            if len(new_biginds) == 0:
                if j == 0:
                    print("Tolerance too high - all coefficients thresholded.")
                break
            biginds = new_biginds
            # Otherwise get a new guess
            for i in smallinds:
                # zero out thresholded rows (one entry per group)
                W[i, :] = np.zeros(len(As))
            if j != maxit - 1:
                # Refit each group using only the surviving candidates.
                for i in range(m):
                    x = opt(As[i][:, biginds], bs[i])
                    x = x.reshape(len(biginds))
                    W[biginds, i] = x
            else:
                # Get final least squares estimate (plain lstsq, no ridge)
                for i in range(m):
                    r = len(biginds)
                    W[biginds, i] = np.linalg.lstsq(As[i][:, biginds],
                                                    bs[i],
                                                    rcond=None)[0].reshape(r)
        return W
def scale_solutions(self, As, bs, xs, As_norms, bs_norms):
"""Scale solutions back based on norms.
Keyword arguments:
As -- list of As
bs -- list of bs
xs -- list of prospective solutions from iterative thresholding
As_norms -- norm of As
bs_norms -- norm of bs
Returns:
xs -- re-scaled solutions based on As and bs norms.
"""
for x in xs:
for i in range(x.shape[0]):
for j in range(x.shape[1]):
x[i, j] = x[i, j]*bs_norms[j]/(As_norms[i])
return xs
| StarcoderdataPython |
170754 | num = int(input("Digite um número Natural: "))
# Collect every divisor of num (1 and num itself included).
list_div = []
for c in range(1, num + 1):
    if num % c == 0:
        list_div.append(c)
# Bug fix: the original `print(list)` printed the *builtin* list class
# ("<class 'list'>") -- it was leftover debug output for a commented-out
# variable named `list`. Removed along with the dead commented-out code.
print('='*40)
print(f'{num} possui {len(list_div)} divisores!\n'
      f'Os dividores de {num} são: {list_div}')
# Exactly two divisors (1 and num) means num is prime.
if len(list_div) == 2:
    print(f'{num} é primo')
| StarcoderdataPython |
11123 | """Use translation table to translate coding sequence to protein."""
from Bio.Data import CodonTable # type: ignore
from Bio.Seq import Seq # type: ignore
def translate_cds(cds: str, translation_table: str) -> str:
    """Translate a DNA coding sequence (CDS) into a protein sequence.

    :param cds: str: DNA coding sequence; embedded whitespace is ignored
    :param translation_table: str: name of a translation table defined in
        Bio.Data.CodonTable.ambiguous_dna_by_name
    :return: str: protein sequence, truncated at the first stop codon
    """
    codon_table = CodonTable.ambiguous_dna_by_name[translation_table]
    cleaned = "".join(cds.split())  # strip all whitespace from the CDS
    protein_seq = Seq(cleaned).translate(codon_table, cds=True, to_stop=True)
    return str(protein_seq)
| StarcoderdataPython |
161409 | <gh_stars>1-10
#! /usr/bin/python
import redis
import base64
class RedisBackend:
    """Redis-backed store mapping samples/species to ids and features.

    Key layout in the configured redis db:
      next.sample.id / next.species.id -- integer id counters
      samples / species                -- lists of known ids
      samples.hmap / species.hmap      -- name -> id hashes
      samples:<id>:features            -- per-sample feature hash
      species:<id>:samples             -- set of sample ids per species
    """
    _r_sample_id = 'next.sample.id'
    _r_samples = 'samples'
    _r_samples_hmap = 'samples.hmap'
    _r_samples_features = 'samples:%s:features'
    _r_species_id = 'next.species.id'
    _r_species = 'species'
    _r_species_hmap = 'species.hmap'
    _r_species_samples = 'species:%s:samples'
    def __init__(self, config=None):
        """Connect to redis; config may override 'host', 'port' and 'db'."""
        # None instead of a mutable {} default (a shared dict default would
        # be one object reused across every instantiation).
        config = config or {}
        host = config.get('host', 'localhost')
        port = config.get('port', 6379)
        db = config.get('db', 14)
        self.pool = redis.ConnectionPool(host=host, port=port, db=db)
        self.r_cnn = redis.Redis(connection_pool=self.pool)
    def get_sample_id(self, file_path):
        """Return the id for file_path, allocating a new one if unseen."""
        # The hash field is the base64 of the path, keeping keys opaque.
        tmp = base64.b64encode(file_path)
        if self.r_cnn.hexists(self._r_samples_hmap, tmp):
            return self.r_cnn.hget(self._r_samples_hmap, tmp)
        sample_id = self.r_cnn.incr(self._r_sample_id)
        # hset returns 1 only when a new field was created.
        if self.r_cnn.hset(self._r_samples_hmap, tmp, sample_id) == 1:
            self.r_cnn.lpush(self._r_samples, sample_id)
            return sample_id
        return None
    def set_features(self, sample_id, features):
        """Store the feature mapping for sample_id via one pipeline."""
        with self.r_cnn.pipeline() as pipe:
            # dict.items() works on Python 2 and 3 (iteritems() is py2-only).
            for k, v in features.items():
                pipe.hset(self._r_samples_features % (sample_id,), k, v)
            pipe.execute()
    def get_features(self, sample_id):
        """Return the stored feature hash for sample_id."""
        return self.r_cnn.hgetall(self._r_samples_features % (sample_id,))
    def get_species_id(self, species_name):
        """Return the id for species_name, allocating a new one if unseen."""
        if self.r_cnn.hexists(self._r_species_hmap, species_name):
            return self.r_cnn.hget(self._r_species_hmap, species_name)
        species_id = self.r_cnn.incr(self._r_species_id)
        if self.r_cnn.hset(self._r_species_hmap, species_name, species_id) == 1:
            self.r_cnn.lpush(self._r_species, species_id)
            return species_id
        return None
    def get_species_name(self, species_id):
        """Reverse lookup: species name for species_id, or "" if unknown."""
        for (k, v) in self.r_cnn.hgetall(self._r_species_hmap).items():
            if v == str(species_id):
                return k
        return ""
    def get_species(self):
        """Return all known species ids."""
        return self.r_cnn.lrange(self._r_species, 0, -1)
    def add_samples(self, species_id, sample_id):
        """Associate sample_id with species_id."""
        return self.r_cnn.sadd(self._r_species_samples % (species_id,), sample_id)
    def get_samples(self, species_id):
        """Return the set of sample ids recorded for species_id."""
        return self.r_cnn.smembers(self._r_species_samples % (species_id,))
| StarcoderdataPython |
167185 | <gh_stars>1-10
#coding=utf-8
from time import time
from urlparse import urlparse
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from mezzanine.core.models import Displayable, Ownable
from mezzanine.generic.models import Rating
from mezzanine.generic.fields import RatingField, CommentsField
class Link(Displayable, Ownable):
    """A canteen dish/entry that users can rate and comment on.

    Publishing fields come from Displayable, the owner from Ownable.
    """
    # Canteen choices as (stored value, Chinese display name) pairs;
    # e.g. 'hc' = halal canteen, 'ec' = canteen no. 2, 'other' = unsorted.
    c=(('hc','清真餐厅'),('yc','一餐厅'),('ec','二餐厅'),('sc','三餐厅'),('jby','聚博园'),('other','未分类'))
    canteen=models.CharField(max_length=20,choices=c,default='ec')
    # Translated from the original Chinese comment: "this is not actually
    # needed; keep it to avoid hassle (e.g. migrations) -- just never show
    # it in the UI".
    link = models.URLField(blank=True)
    rating = RatingField()
    comments = CommentsField()
    solved = models.BooleanField(default=False)
    @models.permalink
    def get_absolute_url(self):
        # Resolved by url name 'link_detail' using this object's slug.
        return ("link_detail", (), {"slug": self.slug})
    @property
    def domain(self):
        # Network-location part of self.link ('' when link is empty).
        return urlparse(self.link).netloc
class Profile(models.Model):
    """Per-user profile: optional website/bio plus a cached karma score."""
    user = models.OneToOneField("auth.User")
    website = models.URLField(blank=True)
    bio = models.TextField(blank=True)
    # Maintained by the post_save Rating signal handler below; not editable.
    karma = models.IntegerField(default=0, editable=False)
    def __unicode__(self):
        return "%s (%s)" % (self.user, self.karma)
@receiver(post_save, sender=Rating)
def karma(sender, **kwargs):
    """
    Each time a rating is saved, check its value and modify the
    profile karma for the related object's user accordingly.
    Since ratings are either +1/-1, if a rating is being edited,
    we can assume that the existing rating is in the other direction,
    so we multiply the karma modifier by 2.
    """
    rating = kwargs["instance"]
    value = int(rating.value)
    # Edited rating: flipping -1 -> +1 (or vice versa) moves karma by 2.
    if not kwargs["created"]:
        value *= 2
    content_object = rating.content_object
    # Rating your own content never changes karma.
    if rating.user != content_object.user:
        queryset = Profile.objects.filter(user=content_object.user)
        # F() performs the increment in the database, avoiding a
        # read-modify-write race on the karma counter.
        queryset.update(karma=models.F("karma") + value)
| StarcoderdataPython |
3258801 | import unittest
from decimal import Decimal
from toshi.utils import parse_int
class TestParseInt(unittest.TestCase):
    """Unit tests for toshi.utils.parse_int.

    Per these cases, parse_int accepts decimal/hex strings, bytes,
    unicode, floats and Decimals, truncates fractional parts, and
    returns None for unparseable input (including decimal strings with
    leading zeros, None, and non-numeric objects).
    """
    def test_parse_int_string(self):
        self.assertEqual(parse_int("12345"), 12345)
    def test_parse_negative_int_string(self):
        self.assertEqual(parse_int("-12345"), -12345)
    def test_parse_zero_int_string(self):
        self.assertEqual(parse_int("0"), 0)
    def test_parse_int_no_leading_zeros_string(self):
        self.assertEqual(parse_int("0123"), None)
    def test_parse_float_string(self):
        self.assertEqual(parse_int("12345.567678"), 12345)
    def test_parse_hex_string(self):
        self.assertEqual(parse_int("0x12345"), 74565)
    def test_parse_float(self):
        self.assertEqual(parse_int(12345.45675), 12345)
    def test_parse_decimal(self):
        self.assertEqual(parse_int(Decimal("12345.6787")), 12345)
    def test_parse_none(self):
        self.assertEqual(parse_int(None), None)
    def test_parse_misc(self):
        self.assertEqual(parse_int({}), None)
    def test_parse_bytes(self):
        self.assertEqual(parse_int(b"12345"), 12345)
    def test_parse_unicode(self):
        self.assertEqual(parse_int(u'12345'), 12345)
| StarcoderdataPython |
1774919 | <filename>dojo/unittests/test_sslyze_parser.py
from django.test import TestCase
from dojo.tools.sslyze.parser import SslyzeXmlParser
from dojo.models import Test
class TestSslyzeXMLParser(TestCase):
    """Tests for SslyzeXmlParser finding counts on fixture reports."""
    def test_parse_without_file_has_no_findings(self):
        parser = SslyzeXmlParser(None, Test())
        self.assertEqual(0, len(parser.items))
    def test_parse_file_with_one_target_has_one_vuln(self):
        # 'with' closes the fixture file even if parsing raises; the
        # original opened the file and never closed it (handle leak).
        with open("dojo/unittests/scans/sslyze/report_one_target_one_vuln.xml") as testfile:
            parser = SslyzeXmlParser(testfile, Test())
        self.assertEqual(1, len(parser.items))
    def test_parse_file_with_one_target_has_three_vuln(self):
        with open("dojo/unittests/scans/sslyze/report_one_target_three_vuln.xml") as testfile:
            parser = SslyzeXmlParser(testfile, Test())
        self.assertEqual(3, len(parser.items))
    def test_parse_file_with_two_target_has_many_vuln(self):
        with open("dojo/unittests/scans/sslyze/report_two_target_many_vuln.xml") as testfile:
            parser = SslyzeXmlParser(testfile, Test())
        self.assertEqual(7, len(parser.items))
| StarcoderdataPython |
11304 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import util
from fabric.api import *
from fabric.state import output
from fabric.colors import *
from base import BaseTask
from helper.print_helper import task_puts
class CollectConfig(BaseTask):
    """
    Fabric task that dumps the running configuration of a network device
    and stores it under tmp/config/<host>.txt.
    """
    name = "collect"
    def run_task(self, *args, **kwargs):
        """Collect the config for env.host, skipping unreachable hosts."""
        host_config = env.inventory.get_variables(env.host)
        hostname = host_config['ssh_host']
        # Quick TCP probe of port 22 (1s timeout) before attempting SSH.
        if not util.tcping(hostname, 22, 1):
            task_puts("host {0} does not exist. skip...".format(hostname))
            return
        config = self.get_config(hostname, host_config['ssh_user'], host_config['ssh_pass'], host_config['exec_pass'], host_config['type'])
        self.write_config(env.host, config)
    def get_config(self, hostname, ssh_user, ssh_pass, exec_pass, os_type):
        """Run the per-OS dump script and return its captured output.

        NOTE(review): exec_pass is accepted but never used, and ssh_pass
        appears on the local command line where other processes can see
        it -- consider passing it via environment or a config file.
        """
        script_name = "dump-config-cisco-{0}.sh".format(os_type)
        config = local(os.path.dirname(os.path.abspath(__file__)) + "/../bin/{0} {1} {2} {3}".format(script_name, ssh_user, hostname, ssh_pass), capture = True)
        return config
    def write_config(self, hostname, config):
        """Write the captured config to tmp/config/<hostname>.txt."""
        output_dir = os.path.dirname(os.path.abspath(__file__)) + "/../tmp/config"
        local("mkdir -p {0}".format(output_dir))
        # 'with' guarantees the file is closed (the original leaked the
        # handle) and avoids shadowing the builtin name 'file'.
        with open("{0}/{1}.txt".format(output_dir, hostname), 'w') as config_file:
            config_file.write(str(config))
collect = CollectConfig()
| StarcoderdataPython |
3253929 | <gh_stars>1000+
# main.py
# Importing module1 runs its top-level code which, per the comments below,
# registers a callable in sys.modules under the name "test".
import module1
# even though "test" was added to sys.modules by module1 (not loaded from
# a real module file), a plain import finds it there and binds it here
import test
print(test())
# don't do this! It's a bad hack to illustrate how import looks
# in sys.modules for the symbol we are importing
| StarcoderdataPython |
20479 | import numpy as np
import chainer.functions as F
from chainer import Variable
def neural_stack(V, s, d, u, v):
    """One differentiable-stack step: pop with strength u, push v with d.

    Arguments (chainer Variables; leading dim is the batch, per the
    driver script below):
    V -- (batch, t, elem) value memory of everything pushed so far
    s -- (batch, t) strengths of the existing entries
    d -- (batch, 1) push strength for the new element
    u -- (batch, 1) pop strength
    v -- (batch, elem) element to push

    Returns the grown memory V, the updated strengths s, and a read
    vector r (a strength-weighted mix of the topmost entries).
    NOTE(review): appears to implement the neural stack of Grefenstette
    et al. (2015), "Learning to Transduce with Unbounded Memory" -- confirm.
    """
    # Strength update: the new entry gets strength d; the pop strength u
    # is consumed from the newest existing entries downwards (reversed).
    s_new = d
    for t in reversed(xrange(s.shape[1])):
        x = s[:, t].reshape(-1, 1) - u
        s_new = F.concat((s_new, F.maximum(Variable(np.zeros_like(x.data)), x)))
        u = F.maximum(Variable(np.zeros_like(x.data)), -x)
    # s_new was built newest-first; flip back into stack order.
    s = F.fliplr(s_new)
    # Append the pushed element to the value memory.
    V = F.concat((V, F.expand_dims(v, 1)))
    # Read pass: accumulate values from the top until one unit of
    # strength ("read budget" ur) has been consumed per batch row.
    r = Variable(np.zeros_like(v.data))
    ur = Variable(np.ones_like(u.data))
    for t in reversed(xrange(s_new.shape[1])):
        w = F.minimum(s[:, t].reshape(-1, 1), ur)
        r += V[:, t] * F.broadcast_to(w, V[:, t].shape)
        x = ur - s[:, t].reshape(-1, 1)
        ur = F.maximum(Variable(np.zeros_like(x.data)), x)
    return V, s, r
# Demo (Python 2): drive the stack through four push/pop rounds and
# print the resulting memory, strengths and read vector.
batch_size = 3
stack_element_size = 2
# Start with a single all-zero entry of zero strength.
V = Variable(np.zeros((batch_size, 1, stack_element_size)))
s = Variable(np.zeros((batch_size, 1)))
# Round 1: push ones with strength 0.4, pop nothing.
d = Variable(np.ones((batch_size, 1)) * 0.4)
u = Variable(np.ones((batch_size, 1)) * 0.)
v = Variable(np.ones((batch_size, stack_element_size)))
V, s, r = neural_stack(V, s, d, u, v)
# Round 2: push twos with strength 0.8, pop nothing.
d = Variable(np.ones((batch_size, 1)) * 0.8)
u = Variable(np.ones((batch_size, 1)) * 0.)
v = Variable(np.ones((batch_size, stack_element_size)) * 2.)
V, s, r = neural_stack(V, s, d, u, v)
# Round 3: pop 0.9, then push threes with strength 0.9.
d = Variable(np.ones((batch_size, 1)) * 0.9)
u = Variable(np.ones((batch_size, 1)) * 0.9)
v = Variable(np.ones((batch_size, stack_element_size)) * 3.)
V, s, r = neural_stack(V, s, d, u, v)
# Round 4: pop 0.1, then push threes with strength 0.1.
d = Variable(np.ones((batch_size, 1)) * 0.1)
u = Variable(np.ones((batch_size, 1)) * 0.1)
v = Variable(np.ones((batch_size, stack_element_size)) * 3.)
V, s, r = neural_stack(V, s, d, u, v)
print V.data
print s.data
print r.data
| StarcoderdataPython |
179988 | # coding: utf-8
from ac_engine.actions.trends import AbstractTrends
from django.conf import settings
class Trends(AbstractTrends):
    """Allegro-specific trends action wiring in its own data processor."""
    # Hint phrases to exclude, taken from Django-style settings.
    EXCLUSION_SET = settings.HINT_EXCLUSION_SET
    @property
    def data_processor_class(self):
        # Imported lazily inside the property -- presumably to avoid a
        # circular import between the engine and this plugin; verify.
        from ac_engine_allegro.data.data_processor import DataProcessor
        return DataProcessor
    @property
    def get_api_name(self):
        # Identifier under which this action's API is registered.
        return 'allegro_trends'
| StarcoderdataPython |
3355882 | <reponame>vt-dev-team/vt-randomName<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'init.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Generated UI class for the init dialog (pyuic5 from init.ui).

    NOTE: produced by the PyQt5 UI code generator; manual edits
    (including these comments) are lost when pyuic5 is re-run.
    """
    def setupUi(self, MainWindow):
        # Build the widget tree and apply the styling from Designer.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(624, 370)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setStyleSheet("#MainWindow {\n"
"background-color: #ffffff;\n"
"}\n"
"QLineEdit {\n"
"border: none;\n"
"border-bottom: 1px solid #2b2c2d;\n"
"}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Line edits: class name, default text and interval inputs.
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(370, 120, 211, 31))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(14)
        self.lineEdit.setFont(font)
        self.lineEdit.setStyleSheet("border-color: #2b2c2d;\n"
"padding: 3px;")
        self.lineEdit.setObjectName("lineEdit")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(370, 30, 221, 31))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(22)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(370, 70, 231, 21))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(10)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        # Confirm button (green).
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(370, 280, 91, 31))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(37)
        self.pushButton.setFont(font)
        self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.pushButton.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        self.pushButton.setAutoFillBackground(False)
        self.pushButton.setStyleSheet(" padding: 6px 20px;\n"
" background-color: #ffffff;\n"
" color: #606266;\n"
" border: 1px solid #dcdfe6;\n"
" border-color: #dcdfe6;\n"
" font-size: 14px;\n"
" font-weight: 300;\n"
"color: #fff;\n"
"background-color: #47af50;\n"
"border-color: #47af50;")
        self.pushButton.setFlat(False)
        self.pushButton.setObjectName("pushButton")
        # Cancel button (grey).
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(490, 280, 91, 31))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(14)
        font.setBold(False)
        font.setWeight(37)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.pushButton_2.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        self.pushButton_2.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.pushButton_2.setStyleSheet(" padding: 6px 20px;\n"
" background-color: #ffffff;\n"
" color: #606266;\n"
" border: 1px solid #dcdfe6;\n"
" border-color: #dcdfe6;\n"
" font-size: 14px;\n"
" font-weight: 300;\n"
"background-color:#d1d1d2;\n"
"border-color: #d1d1d2;")
        self.pushButton_2.setObjectName("pushButton_2")
        # NOTE(review): hand-added line in generated code; Ui_MainWindow
        # defines no close() itself, so this only works when the class is
        # mixed into a widget that provides close(). Expect pyuic5 to drop
        # this line on regeneration -- confirm.
        self.pushButton_2.clicked.connect(self.close)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_2.setGeometry(QtCore.QRect(370, 170, 211, 31))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(14)
        self.lineEdit_2.setFont(font)
        self.lineEdit_2.setStyleSheet("border-color: #2b2c2d;\n"
"padding: 3px;")
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_3.setGeometry(QtCore.QRect(370, 220, 211, 31))
        font = QtGui.QFont()
        font.setFamily("Microsoft YaHei UI Light")
        font.setPointSize(14)
        self.lineEdit_3.setFont(font)
        self.lineEdit_3.setStyleSheet("border-color: #2b2c2d;\n"
"padding: 3px;")
        self.lineEdit_3.setText("")
        self.lineEdit_3.setObjectName("lineEdit_3")
        # Decorative background image from the bg_rc resource module.
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(30, 0, 331, 321))
        self.label_3.setText("")
        self.label_3.setPixmap(QtGui.QPixmap(":/bg/start.png"))
        self.label_3.setObjectName("label_3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Apply all translatable (Chinese) UI strings.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "初始化程序"))
        self.lineEdit.setPlaceholderText(_translate("MainWindow", "班级"))
        self.label.setText(_translate("MainWindow", "初始化程序"))
        self.label_2.setText(_translate("MainWindow", "轻松几步,即可搞定!"))
        self.pushButton.setText(_translate("MainWindow", "确定"))
        self.pushButton_2.setText(_translate("MainWindow", "取消"))
        self.lineEdit_2.setPlaceholderText(_translate("MainWindow", "默认文字"))
        self.lineEdit_3.setPlaceholderText(_translate("MainWindow", "间隔时间"))
import bg_rc
| StarcoderdataPython |
1646498 | import numpy as np
import pytest
from lagom.core.transform import Clip
from lagom.core.transform import Centralize
from lagom.core.transform import Normalize
from lagom.core.transform import Standardize
from lagom.core.transform import ExpFactorCumSum
from lagom.core.transform import RunningMeanStd
from lagom.core.transform import RankTransform
class TestTransform(object):
    """Unit tests for the lagom.core.transform helpers.

    Each helper is exercised with scalars, numpy scalars, tuple/list/
    ndarray vectors, and the error cases it is expected to reject
    (multi-dimensional arrays, and scalars where vectors are required).
    """
    def test_clip(self):
        """Clip bounds scalars and vectors into [a_min, a_max]."""
        clip = Clip()
        # Test scalar
        assert clip(x=2, a_min=0, a_max=1) == 1
        assert clip(x=0.5, a_min=0, a_max=1) == 0.5
        assert clip(x=-1, a_min=0, a_max=1) == 0
        # Test numpy scalar
        assert clip(x=np.array(2), a_min=0, a_max=1) == 1
        assert clip(x=np.array(0.5), a_min=0, a_max=1) == 0.5
        assert clip(x=np.array(-1), a_min=0, a_max=1) == 0
        #
        # Test vector
        #
        def _test_vec(x):
            assert np.alltrue(clip(x=x, a_min=2, a_max=3) == [2, 2, 3, 3])
        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)
        # List
        b = [1, 2, 3, 4]
        _test_vec(b)
        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)
        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            clip(x=d, a_min=2, a_max=3)
    def test_centralize(self):
        """Centralize subtracts the (given or computed) mean."""
        centralize = Centralize()
        # Test scalar
        assert centralize(x=1) == 1
        assert centralize(x=0) == 0
        assert centralize(x=2) == 2
        assert centralize(x=1, mean=1) == 1
        assert centralize(x=0, mean=1) == 0
        assert centralize(x=2, mean=1) == 2
        # Test numpy scalar
        assert centralize(x=np.array(1)) == 1
        assert centralize(x=np.array(0)) == 0
        assert centralize(x=np.array(2)) == 2
        assert centralize(x=np.array(1), mean=1) == 1
        assert centralize(x=np.array(0), mean=1) == 0
        assert centralize(x=np.array(2), mean=1) == 2
        #
        # Test vector
        #
        def _test_vec(x):
            assert np.alltrue(centralize(x=x) == [-1.5, -0.5, 0.5, 1.5])
            assert np.alltrue(centralize(x=x, mean=1) == [0, 1, 2, 3])
        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)
        # List
        b = [1, 2, 3, 4]
        _test_vec(b)
        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)
        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            centralize(x=d)
    def test_normalize(self):
        """Normalize maps values into [0, 1] (eps guards zero ranges)."""
        normalize = Normalize(eps=1.1920929e-07)
        # Test scalar
        assert normalize(x=-1) == 0
        assert normalize(x=0.5) == 0.5
        assert normalize(x=2) == 1
        assert normalize(x=-1, min_val=0, max_val=1) == 0
        assert normalize(x=0.5, min_val=0, max_val=1) == 0.5
        assert normalize(x=2, min_val=0, max_val=1) == 1
        # Test numpy scalar
        assert normalize(x=np.array(-1)) == 0
        assert normalize(x=np.array(0.5)) == 0.5
        assert normalize(x=np.array(2)) == 1
        assert normalize(x=np.array(-1), min_val=0, max_val=1) == 0
        assert normalize(x=np.array(0.5), min_val=0, max_val=1) == 0.5
        assert normalize(x=np.array(2), min_val=0, max_val=1) == 1
        #
        # Test vector
        #
        def _test_vec(x):
            assert np.allclose(normalize(x=x),
                               [0. , 0.33333332, 0.66666664, 0.99999996])
            assert np.allclose(normalize(x=x, min_val=0, max_val=1),
                               [0.99999988, 1.99999976, 2.99999964, 3.99999952])
        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)
        # List
        b = [1, 2, 3, 4]
        _test_vec(b)
        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)
        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            normalize(x=d)
    def test_standardize(self):
        """Standardize rescales to zero mean and unit std."""
        standardize = Standardize(eps=1.1920929e-07)
        # Test scalar
        assert standardize(x=-1) == -1
        assert standardize(x=0) == 0
        assert standardize(x=1) == 1
        assert standardize(x=-1, mean=0, std=1) == -1
        assert standardize(x=0, mean=0, std=1) == 0
        assert standardize(x=1, mean=0, std=1) == 1
        # Test numpy scalar
        assert standardize(x=np.array(-1)) == -1
        assert standardize(x=np.array(0)) == 0
        assert standardize(x=np.array(1)) == 1
        assert standardize(x=np.array(-1), mean=0, std=1) == -1
        assert standardize(x=np.array(0), mean=0, std=1) == 0
        assert standardize(x=np.array(1), mean=0, std=1) == 1
        #
        # Test vector
        #
        def _test_vec(x):
            assert np.allclose(standardize(x=x),
                               [-1.34164064, -0.44721355, 0.44721355, 1.34164064])
            assert np.allclose(standardize(x=x, mean=0, std=1),
                               [0.99999988, 1.99999976, 2.99999964, 3.99999952])
        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)
        # List
        b = [1, 2, 3, 4]
        _test_vec(b)
        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)
        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            standardize(x=d)
    def test_expfactorcumsum(self):
        """Discounted cumulative sum with factor alpha=0.1."""
        expfactorcumsum = ExpFactorCumSum(alpha=0.1)
        #
        # Test vector
        #
        def _test_vec(x):
            assert np.allclose(expfactorcumsum(x=x),
                               [1.23, 2.3, 3.0])
        # Tuple
        a = (1, 2, 3)
        _test_vec(a)
        # List
        b = [1, 2, 3]
        _test_vec(b)
        # ndarray
        c = np.array([1, 2, 3])
        _test_vec(c)
        #
        # Test exceptions
        #
        # Scalar is not allowed
        with pytest.raises(AssertionError):
            expfactorcumsum(x=1)
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3]])
        with pytest.raises(ValueError):
            expfactorcumsum(x=d)
    def test_runningmeanstd(self):
        """Running moments match numpy's mean/std, fed item-wise or batched."""
        def _test_moments(runningmeanstd, x):
            assert np.allclose(runningmeanstd.mu, np.mean(x))
            assert np.allclose(runningmeanstd.sigma, np.std(x))
        a = [1, 2, 3, 4]
        # Scalar
        runningmeanstd = RunningMeanStd()
        [runningmeanstd(i) for i in a]
        _test_moments(runningmeanstd=runningmeanstd, x=a)
        # Vector
        runningmeanstd = RunningMeanStd()
        runningmeanstd(a)
        _test_moments(runningmeanstd=runningmeanstd, x=a)
        # n-dim array: moments are tracked per column (axis 0)
        b = np.array([[1, 10, 100], [2, 20, 200], [3, 30, 300], [4, 40, 400]])
        runningmeanstd = RunningMeanStd()
        runningmeanstd(b)
        assert np.allclose(runningmeanstd.mu, b.mean(0))
        assert np.allclose(runningmeanstd.sigma, b.std(0))
    def test_rank_transform(self):
        """Ranks are 0-based; centered=True maps them into [-0.5, 0.5]."""
        rank_transform = RankTransform()
        # List
        a = [3, 14, 1]
        assert np.allclose(rank_transform(a, centered=False), [1, 2, 0])
        assert np.allclose(rank_transform(a), [0, 0.5, -0.5])
        # ndarray
        b = np.array([3, 14, 1])
        assert np.allclose(rank_transform(b, centered=False), [1, 2, 0])
        assert np.allclose(rank_transform(b), [0, 0.5, -0.5])
        #
        # Test exceptions
        #
        # Scalar is not allowed
        with pytest.raises(AssertionError):
            rank_transform(5)
        # ndarray more than 1-dim is not allowed
        c = np.array([[3, 14, 1]])
        with pytest.raises(ValueError):
rank_transform(c) | StarcoderdataPython |
4804579 | <reponame>yoomoney/yookassa-sdk-python
# -*- coding: utf-8 -*-
from yookassa.client import ApiClient
from yookassa.domain.common.http_verb import HttpVerb
class Settings:
    """Accessor for the YooKassa shop-settings endpoint ('/me')."""
    base_path = '/me'
    def __init__(self):
        # Each instance owns its own configured API client.
        self.client = ApiClient()
    @classmethod
    def get_account_settings(cls, params=None):
        """
        Shop Info

        :param params: (dict | None) Search parameters.
            Currently only {'on_behalf_of': account_id} is available.
            (Translated from the original Russian docstring.)
        :return: dict
        """
        instance = cls()
        path = cls.base_path
        response = instance.client.request(HttpVerb.GET, path, params)
        return response
| StarcoderdataPython |
132915 | # Under MIT License, see LICENSE.txt
from Model.DataObject.BaseDataObject import catch_format_error
from Model.DataObject.AccessorData.BaseDataAccessor import BaseDataAccessor
__author__ = 'RoboCupULaval'
class PlayInfoAcc(BaseDataAccessor):
    """Accessor validating 'play info' payloads (packet type id 1005).

    Runtime assertion messages are in French and are kept verbatim.
    """
    def __init__(self, data_in):
        super().__init__(data_in)
        self._format_data()
    @catch_format_error
    def _check_obligatory_data(self):
        # The payload must be a dict whose keys are a subset of the four
        # known string keys, each mapping to a dict or a bool.
        assert isinstance(self.data, dict), \
            "data: {} n'est pas un dictionnaire.".format(type(self.data))
        keys = self.data.keys()
        for key in keys:
            assert isinstance(key, str), \
                "data[{}]: {} la clé n'a pas le format attendu (str)".format(key, type(key))
            assert key in {'referee', 'referee_team', 'auto_play', 'auto_flag'}, \
                "data[{}] n'est pas une clé validee".format(key)
            assert isinstance(self.data[key], dict) or isinstance(self.data[key], bool), \
                "data[{}]: {} n'a pas le format attendu (dict | bool)".format(key, type(self.data[key]))
    @catch_format_error
    def _check_optional_data(self):
        # No optional fields for this packet type.
        pass
    @staticmethod
    def get_default_data_dict():
        return dict()
    @staticmethod
    def get_type():
        # Wire identifier for this packet kind.
        return 1005
| StarcoderdataPython |
3392835 | __author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '1.0'
__license__ = 'MIT'
# Step (1): Setup the environment
import numpy as np
from sklearn import datasets
from thb.datascience.ibm.KNNClassifier import KNNClassifier
# load the iris data set
iris = datasets.load_iris()
# Step (2): Define the Iris sample (four feature measurements) that we
# will classify in a second
iris_sample = np.array([4.8, 2.5, 5.3, 2.4])
# Step (3): Instantiate the custom KNN classifier and classify the sample
knn = KNNClassifier()
knn.k = 10  # number of neighbours consulted by the custom classifier
# perform the classification against the full labelled iris data
predicted_class = knn.classify(iris_sample, iris.data, iris.target)
print('The predicted classification: %.1f' % predicted_class)
print('This means the Iris sample is an "iris-%s"' % iris.target_names[int(predicted_class)])
| StarcoderdataPython |
3387791 | <gh_stars>0
from threading import Event
from mgmt.steps_base import Step, Context
from mgmt_utils import log
from com.ic_interface import Direction
class BackToOriginStep(Step):
    """Wait for the UI's back-to-origin trigger, then jog backwards."""
    def __init__(self, context: Context):
        super(BackToOriginStep, self).__init__(context)
    def run(self):
        # NOTE(review): log message carries a stale class name
        # ('WaitForBackToOriginStep') -- probably copied from a rename.
        log.debug('WaitForBackToOriginStep started')
        self.event = Event()
        # The UI fires event.set once when back-to-origin is requested.
        self.context.ui_interface.register_back_to_origin_once(self.event.set)
        log.info('Waiting for BackToOriginStep callback')
        self.event.wait()
        if not self.is_canceled:
            log.info('BackToOriginStep callback received')
            self.context.ic_interface.drive_jog(50, Direction.Backward)
    def cancel(self):
        super(BackToOriginStep, self).cancel()
        # NOTE(review): self.event only exists once run() has started, so
        # cancel() before run() raises AttributeError. Also each access to
        # self.event.set yields a new bound-method object -- the registry
        # must compare callbacks by equality, not identity; confirm.
        self.context.ui_interface.unregister_back_to_origin(self.event.set)
        self.event.set()
class WaitForInitStep(Step):
    """Block until the UI requests initialisation (or the step is canceled)."""
    def __init__(self, context: Context):
        super(WaitForInitStep, self).__init__(context)
    def run(self):
        log.debug('WaitForInitStep started')
        self.event = Event()
        self.context.ui_interface.register_init_once(self.set_event)
        log.info('Waiting for init callback')
        self.event.wait()
        if not self.is_canceled:
            log.info('Init callback received')
    def set_event(self):
        # Callback target used for both register and unregister calls.
        self.event.set()
    def cancel(self):
        super(WaitForInitStep, self).cancel()
        # Unblock run() and drop the pending UI callback registration.
        self.context.ui_interface.unregister_init_once(self.set_event)
        self.event.set()
class InitStep(Step):
    """Run the controller init sequence and report completion to the UI."""
    def __init__(self, context: Context):
        super(InitStep, self).__init__(context)
    def run(self):
        log.debug('Init Tele started')
        self.context.ic_interface.init_tele()
        # Tell the UI init is done, then zero the tracked position.
        self.context.ui_interface.send_end_init()
        self.context.reset_position()
        log.debug('Init Tele end')
| StarcoderdataPython |
3241565 | <reponame>perfeelab/weichigong
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from kazoo.client import KazooClient
__name__ = "weichigong"
__version__ = '1.0.3'
__author__ = 'dashixiong'
__author_email__ = '<EMAIL>'
class zconfig:
    """ZooKeeper-backed config store namespaced by application and env.

    All values live under znodes of the form /<app>/<env>/<path>.
    """
    def __init__(self, zkHosts, app, env):
        self.app = app
        self.env = env
        # Connect immediately; the session stays open for the object's life.
        self.client = KazooClient(hosts=zkHosts)
        self.client.start()
    def getPath(self, path):
        # Absolute znode path for a config key.
        return os.path.join('/', self.app, self.env, path)
    def set(self, path, value):
        # Create the znode (and parents) if needed, then store the value.
        fullPath = self.getPath(path)
        self.client.ensure_path(fullPath)
        self.client.set(fullPath, value)
    def get(self, path):
        # client.get returns (data, stat); decode the data as UTF-8 text.
        fullPath = self.getPath(path)
        return self.client.get(fullPath)[0].decode('utf-8')
| StarcoderdataPython |
3357080 | <reponame>Juliet-Chunli/cnss
'''
Module for the NetworkAgent class that can be subclassed by agents.
@author: <NAME> <<EMAIL>>
'''
from SimPy import Simulation as Sim
import networkx as nx
import random
SEED = 123456789
ADD_EDGE = "add edge"
REMOVE_EDGE = "remove edge"
ADD_NODE = "add node"
REMOVE_NODE = "remove node"
class NetworkAgent(Sim.Process):
'''NetworkAgent class that can be subclassed by agents. '''
#class variables, shared between all instances of this class
r = random.Random(SEED)
TIMESTEP_DEFAULT = 1.0
def __init__(self, state, initialiser, stateVector=[], name='network_process', **stateParameters):
Sim.Process.__init__(self, name)
self.state = state
self.stateVector = stateVector
self.stateParameters = stateParameters
self.initialize(*initialiser)
def initialize(self, id, sim, globalTopo, globalParams):
''' this gets called automatically '''
self.id = id
self.sim = sim
self.globalTopology = globalTopo
self.globalSharedParameters = globalParams
def getAllNodes(self):
return self.globalTopology.nodes()
def getAllAgents(self, state=None):
neighs = self.getAllNodes()
if state is not None:
return [self.globalTopology.node[n]['agent'] for n in neighs
if self.globalTopology.node[n]['agent'].state == state]
else:
return [self.globalTopology.node[n]['agent'] for n in neighs]
def getNeighbouringAgents(self, state=None):
''' returns list of neighbours, but as agents, not nodes.
so e.g. one can set result[0].state = INFECTED '''
neighs = self.globalTopology.neighbors(self.id)
if state is not None:
return [self.globalTopology.node[n]['agent'] for n in neighs
if self.globalTopology.node[n]['agent'].state == state]
else:
return [self.globalTopology.node[n]['agent'] for n in neighs]
def getNeighbouringAgentsIter(self, state=None):
'''same as getNeighbouringAgents, but returns generator expression,
not list.
'''
neighs = self.globalTopology.neighbors(self.id)
if state is not None:
return (self.globalTopology.node[n]['agent'] for n in neighs
if self.globalTopology.node[n]['agent'].state == state)
else:
return (self.globalTopology.node[n]['agent'] for n in neighs)
def getNeighbouringNodes(self):
''' returns list of neighbours as nodes.
Call self.getAgent() on one of them to get the agent.'''
return self.globalTopology.neighbors(self.id)
def getAgent(self, id):
'''returns agent of specified ID.'''
return self.globalTopology.node[id]['agent']
def addNewNode(self, state):
#add & activate new agent
return self.sim.addNewNode(state)
#add a random edge
#u = NetworkAgent.r.choice(self.globalTopology.nodes())
#self.globalTopology.add_edge(u, id)
def die(self):
self.removeNode(self.id)
def removeNode(self, id):
# cancel ? self.getAgent(id)
self.globalTopology.remove_node(id)
def removeEdge(self, node1, node2):
self.globalTopology.remove_edge(self.id, self.currentSupernodeID)
self.logTopoChange(REMOVE_EDGE, node1, node2)
def addEdge(self, node1, node2):
self.globalTopology.add_edge(self.id, self.currentSupernodeID)
self.logTopoChange(ADD_EDGE, node1, node2)
def logTopoChange(self, action, node, node2=None):
#TODO: test, add this to netlogger...
print action, node, node2
| StarcoderdataPython |
1712820 | from django.test import TestCase
from django.core.urlresolvers import reverse
from .models import Task
from .common import is_chance, delete_all_tasks
class TaskTestCase(TestCase):
    """Integration tests for the tasks app views and helpers."""

    def tst_views_test1(self):
        """Object count in the DB under the different response outcomes.

        Deliberately not named ``test_*`` so the runner does not pick it
        up on its own; it is driven repeatedly by test_views_test1().
        """
        total = Task.objects.all().count()
        response = self.client.get(reverse('tasks_test1'))
        total2 = Task.objects.all().count()
        print(response.status_code, total2)
        # A 200 response is expected to add two Task rows, any other
        # status exactly one.
        if response.status_code == 200:
            self.assertEqual(total2, total + 2)
        else:
            self.assertEqual(total2, total + 1)

    def test_views_test1(self):
        """Run tst_views_test1() five times."""
        # BUG FIX (idiom): the original used a list comprehension purely
        # for its side effects, building a throwaway list of Nones.
        for _ in range(5):
            self.tst_views_test1()

    def test_is_chance(self):
        """Test of the probability function's edge cases."""
        self.assertTrue(is_chance(100))
        self.assertFalse(is_chance())
        self.assertFalse(is_chance(''))
        self.assertFalse(is_chance(1000))

    def test_delete_all_tasks(self):
        # delete_all_tasks() is expected to return None.
        self.assertEqual(None, delete_all_tasks())
| StarcoderdataPython |
1690768 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import datetime
import yaml
from mynt.exceptions import ConfigurationException
from mynt.fs import Directory
from mynt.utils import get_logger, normpath, URL
logger = get_logger('mynt')
class Configuration(dict):
    """Dict of site configuration parsed from a YAML string."""

    def __init__(self, string):
        super().__init__()
        try:
            self.update(yaml.safe_load(string))
        except yaml.YAMLError:
            raise ConfigurationException(
                'Configuration contains unsupported YAML')
        except:
            # Best-effort fallback: an empty document presumably parses to
            # None, making update() raise, which is treated as "no config".
            # NOTE(review): the bare except also hides unexpected errors —
            # consider narrowing to TypeError/ValueError.
            logger.debug('.. configuration file is empty')
            pass
class Data:
    """Bundle of parsed items together with their archive and tag indexes."""

    def __init__(self, items, archives, tags):
        self.items = items
        self.archives = archives
        self.tags = tags

    def __iter__(self):
        # Iterating the bundle iterates its items.
        return iter(self.items)
class Item(dict):
    """A dict of item metadata that stringifies to its source path."""

    def __init__(self, source, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Kept private (name-mangled); exposed only through str().
        self.__source = source

    def __str__(self):
        return self.__source
class Tag:
    """A named tag with its URL, item count, items, and per-tag archives."""

    def __init__(self, name, url, count, items, archives):
        self.name = name
        self.url = url
        self.count = count
        self.items = items
        self.archives = archives

    def __iter__(self):
        # Iterating the tag iterates its items.
        return iter(self.items)
class Container:
    """A named collection of content items with lazily built pages."""

    def __init__(self, name, source, configuration):
        self._pages = None
        self.name = name
        self.path = source
        self.configuration = {} if configuration is None else configuration
        self.data = Data([], OrderedDict(), OrderedDict())

    def _get_pages(self):
        """Return (layout, context, url) triples for items that set a layout."""
        return [(item['layout'], {'item': item}, item['url'])
                for item in self.items
                if item['layout'] is not None]

    def add(self, item):
        """Append an item to the container."""
        self.items.append(item)

    def archive(self):
        # Hook for subclasses; the base container does not archive.
        pass

    def sort(self):
        # Hook for subclasses; the base container does not sort.
        pass

    def tag(self):
        # Hook for subclasses; the base container does not tag.
        pass

    @property
    def archives(self):
        return self.data.archives

    @property
    def items(self):
        return self.data.items

    @property
    def pages(self):
        # Built on first access and cached for the container's lifetime.
        if self._pages is None:
            self._pages = self._get_pages()
        return self._pages

    @property
    def tags(self):
        return self.data.tags
class Items(Container):
    """Container of dated, taggable items (archives by year/month, tags)."""

    # Maps a user-facing order keyword to list.sort()'s reverse flag.
    _sort_order = {
        'asc': False,
        'desc': True}

    def __init__(self, name, source, configuration):
        super().__init__(name, source, configuration)
        self.path = Directory(normpath(source.path, '_containers', self.name))

    def _archive(self, items, archive):
        """Group *items* into archive[year]['months'][month] buckets."""
        for item in items:
            timestamp = datetime.utcfromtimestamp(item['timestamp'])
            year, month = timestamp.strftime('%Y %B').split()
            if year not in archive:
                months = OrderedDict({month: [item]})
                url = URL.from_format(self.configuration['archives_url'], year)
                archive[year] = {'months': months, 'url': url, 'year': year}
            elif month not in archive[year]['months']:
                archive[year]['months'][month] = [item]
            else:
                archive[year]['months'][month].append(item)

    def _get_pages(self):
        """Extend the base item pages with archive and tag pages."""
        pages = super()._get_pages()
        if self.configuration['archive_layout'] and self.archives:
            for archive in self.archives.values():
                pages.append((
                    self.configuration['archive_layout'],
                    {'archive': archive},
                    archive['url']))
        if self.configuration['tag_layout'] and self.tags:
            for tag in self.tags.values():
                pages.append((
                    self.configuration['tag_layout'],
                    {'tag': tag},
                    tag.url))
        return pages

    def _relate(self):
        """Link each item to its neighbours via 'prev'/'next' (None at ends)."""
        for i, item in enumerate(self.items):
            if i:
                item['prev'] = self.items[i-1]
            else:
                item['prev'] = None
            try:
                item['next'] = self.items[i+1]
            except IndexError:
                item['next'] = None

    def _sort(self, container, key, order='asc'):
        """Sort *container* in place by *key* (dict key or attribute).

        Unknown order keywords fall back to ascending; string keys compare
        case-insensitively.
        """
        reverse = self._sort_order.get(order.lower(), False)

        def sort(item):
            try:
                attribute = item.get(key, item)
            except AttributeError:
                # Not a dict: fall back to attribute access.
                attribute = getattr(item, key, item)
            if isinstance(attribute, str):
                return attribute.lower()
            return attribute

        container.sort(key=sort, reverse=reverse)

    def archive(self):
        """Build the archive index for all items and for each tag."""
        self._archive(self.items, self.archives)
        for tag in self.tags.values():
            self._archive(tag.items, tag.archives)

    def sort(self):
        """Sort items per configuration and re-link prev/next pointers."""
        key = self.configuration['sort']
        order = self.configuration['order']
        self._sort(self.items, key, order)
        self._relate()

    def tag(self):
        """Rebuild self.tags as Tag objects ordered by count desc, name asc."""
        tags = []
        for item in self.items:
            item['tags'].sort(key=str.lower)
            for tag in item['tags']:
                if tag not in self.tags:
                    self.tags[tag] = []
                self.tags[tag].append(item)
        for name, items in self.tags.items():
            url = URL.from_format(self.configuration['tags_url'], name)
            tags.append(Tag(name, url, len(items), items, OrderedDict()))
        # Two stable sorts: name first, then count, yields count-desc with
        # name-asc as the tie-breaker.
        self._sort(tags, 'name')
        self._sort(tags, 'count', 'desc')
        self.tags.clear()
        for tag in tags:
            self.tags[tag.name] = tag
class Posts(Items):
    """The built-in 'posts' container, configured from site-level settings."""

    def __init__(self, source, site):
        super().__init__('posts', source, self._get_configuration(site))
        self.path = Directory(normpath(source.path, '_posts'))

    def _get_configuration(self, site):
        """Map container configuration keys to their site-level setting values."""
        keys = {
            'archives_url': 'archives_url',
            'archive_layout': 'archive_layout',
            'order': 'posts_order',
            'sort': 'posts_sort',
            'tags_url': 'tags_url',
            'tag_layout': 'tag_layout',
            'url': 'posts_url'}
        return {name: site.get(setting) for name, setting in keys.items()}
| StarcoderdataPython |
3382735 | import random
class Knight:
    """A knight marching along a road toward the castle at its end."""

    # Class-level defaults (overwritten per instance in __init__).
    position = 0
    road = None
    team = 0
    gs = None

    def __init__(self, _road, _team, _gs):
        self.road = _road
        self.team = _team
        self.gs = _gs

    def tick(self):
        """Advance one step; resolve arrival once the road's end is reached."""
        self.position += 1
        if self.position >= self.road.length:
            self.process_road_end()

    def process_road_end(self):
        """Resolve arrival: reinforce a friendly castle or attack a hostile one.

        A hostile castle loses a defender with 50% probability; if its
        garrison drops below zero it is captured by this knight's team.
        """
        if self.road.castle.team == self.team:
            self.road.castle.knights += 1
        elif random.random() >= 0.5:
            self.road.castle.knights -= 1
            if self.road.castle.knights < 0:
                self.road.castle.team = self.team
        # BUG FIX: the original referenced the undefined global name `gs`
        # (NameError at runtime); the game-state handle lives on self.
        # NOTE(review): the knight is removed on every arrival — confirm
        # that is the intended lifecycle.
        self.gs.kill_knight(self)
| StarcoderdataPython |
1705064 | import logging
from config import iot23_attacks_dir, iot23_data_dir
from src.iot23 import iot23_metadata, data_cleanup, get_data_sample
from src.helpers.log_helper import add_logger
from src.helpers.data_helper import prepare_data
# Add Logger
add_logger(file_name='02_prepare_data.log')
logging.warning("!!! This step takes about 20 min to complete !!!")

# Prepare data: split the raw attack captures into train/test data files.
source_files_dir = iot23_attacks_dir
output_files_dir = iot23_data_dir
# Each sample caps the number of rows taken from every dataset file.
data_samples = [
    # get_data_sample(dataset_name='S16', rows_per_dataset_file=10_000), # ~ 10 min
    get_data_sample(dataset_name='S04', rows_per_dataset_file=5_000_000), # ~ 10 min
    get_data_sample(dataset_name='S16', rows_per_dataset_file=5_000_000), # ~ 10 min
]
prepare_data(source_files_dir,
             output_files_dir,
             iot23_metadata["file_header"],
             data_cleanup,
             test_size=0.2,
             data_samples=data_samples,
             overwrite=True)

print('Step 02: The end.')
# NOTE(review): quit() is meant for interactive sessions; sys.exit() is the
# conventional script exit. Left unchanged here.
quit()
| StarcoderdataPython |
21917 | <gh_stars>1-10
import numpy as np
import os
import argparse
import tqdm
import pandas as pd
import SimpleITK as sitk
from medpy import metric
def get_args():
    """Parse CLI arguments and derive the CSV output path.

    The csv file is saved next to the results folder: a trailing '/' is
    stripped before appending '.csv'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_path', type=str, default='./results/abus_roi/0108_dice_1/')
    args = parser.parse_args()

    # save csv file to the current folder
    trimmed = args.file_path[:-1] if args.file_path[-1] == '/' else args.file_path
    args.save = trimmed + '.csv'
    return args
def main():
    """Compute overlap/distance metrics for every case folder and dump a CSV.

    Each subfolder of args.file_path must contain gt.nii.gz and
    pred.nii.gz; per-case Dice, Jaccard, HD, HD95 and ASD are collected,
    summarised to stdout, and written to args.save.
    """
    args = get_args()

    # One list of per-case scores per metric, in fixed column order.
    scores = {'dsc': [], 'jc': [], 'hd': [], 'hd95': [], 'asd': []}
    spacing = (0.4, 0.4, 0.4)

    filenames = os.listdir(args.file_path)
    for filename in tqdm.tqdm(filenames):
        gt_img = sitk.ReadImage(os.path.join(args.file_path, filename+'/gt.nii.gz'))
        gt_volume = sitk.GetArrayFromImage(gt_img)
        pre_img = sitk.ReadImage(os.path.join(args.file_path, filename+'/pred.nii.gz'))
        pre_volume = sitk.GetArrayFromImage(pre_img)

        scores['dsc'].append(metric.binary.dc(pre_volume, gt_volume))
        scores['jc'].append(metric.binary.jc(pre_volume, gt_volume))
        scores['hd'].append(metric.binary.hd(pre_volume, gt_volume, voxelspacing=spacing))
        scores['hd95'].append(metric.binary.hd95(pre_volume, gt_volume, voxelspacing=spacing))
        scores['asd'].append(metric.binary.asd(pre_volume, gt_volume, voxelspacing=spacing))

    df = pd.DataFrame()
    df['name'] = filenames
    for column, values in scores.items():
        df[column] = np.array(values)

    print(df.describe())
    df.to_csv(args.save)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1789381 | import torch
import warnings
from binding_prediction.protein import ProteinSequence
from binding_prediction.utils import onehot
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import tensorflow as tf
class LanguageModel(object):
    """Base class for protein-sequence embedding models.

    Subclasses implement __call__ to map a sequence to an embedding.
    """

    def __init__(self, path, device='cuda'):
        super().__init__()
        self.path = path
        self.device = device

    def __call__(self, x):
        # Abstract hook: concrete models override this.
        pass
class Elmo(LanguageModel):
    """ELMo-style protein language model loaded from a saved Keras model.

    Embeddings are the hidden states of the saved model's 'LSTM2' layer.
    """
    # requires a GPU in order to test
    def __init__(self, path, trainable=False, device='cuda',
                 per_process_gpu_memory_fraction=0.2):
        super(Elmo, self).__init__(path, device)
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=per_process_gpu_memory_fraction)
        # NOTE(review): the session is created with the GPU-memory cap but
        # never registered as the Keras backend session — confirm intent.
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        m = tf.keras.models.load_model(path)
        layer = 'LSTM2'
        # Expose the intermediate LSTM2 activations as the model output.
        self.model = tf.keras.models.Model(inputs=[m.input],
                                           outputs=[m.get_layer(layer).output],
                                           trainable=trainable)

    def __call__(self, x):
        """Return the squeezed LSTM2 embedding for sequence *x*."""
        prot = ProteinSequence(x)
        embed = self.model.predict(prot.onehot).squeeze()
        # BUG FIX: the embedding was computed but never returned (the
        # original implicitly returned None, unlike sibling OneHot).
        return embed
class OneHot(LanguageModel):
    """Trivial "language model" that one-hot encodes amino-acid codes."""

    def __init__(self, path, device='cuda'):
        super(OneHot, self).__init__(path, device)
        # Alphabet of amino-acid letter codes; index order defines each
        # code's one-hot position.
        self.tla_codes = ["A", "R", "N", "D", "B", "C", "E", "Q", "Z", "G",
                          "H", "I", "L", "K", "M", "F", "P", "S", "T", "W",
                          "Y", "V"]
        self.num_words = len(self.tla_codes)

    def __call__(self, x):
        """Return a float tensor of one-hot rows, one per code in *x*."""
        vectors = [onehot(self.tla_codes.index(code), self.num_words)
                   for code in x]
        return torch.Tensor(vectors).to(self.device)
| StarcoderdataPython |
19204 | import torch
import hcat.lib.functional
from hcat.lib.functional import IntensityCellReject
from hcat.backends.backend import Backend
from hcat.models.r_unet import embed_model as RUnet
from hcat.train.transforms import median_filter, erosion
import hcat.lib.utils
from hcat.lib.utils import graceful_exit
import os.path
import wget
from typing import Dict, Optional
class SpatialEmbedding(Backend):
    """Spatial-embedding segmentation backend built on a recurrent U-Net."""

    def __init__(self,
                 sigma: Optional[torch.Tensor] = torch.tensor([0.02, 0.02, 0.02]),
                 device: Optional[str] = 'cuda',
                 model_loc: Optional[str] = None,
                 postprocessing: Optional[bool] = True,
                 scale: Optional[int] = 25,
                 figure: Optional[str] = None,
                 archetecture: Optional[RUnet] = RUnet):
        """
        Initialize Spatial embedding Algorithm.

        :param sigma: torch.Tensor[sigma_x, sigma_y, sigma_z] values for gaussian probability estimation.
        :param device: String value for torch device by which to run segmentation backbone on.
        :param model_loc: Path to trained model files.
        :param postprocessing: If True, apply intensity-based cell rejection to candidate masks.
        :param scale: scale factor based on max diameter of object
        :param figure: filename and path of diagnostic figure which may be rendered
        :param archetecture: model class used when loading weights.
        """
        super(SpatialEmbedding, self).__init__()

        self.url = 'https://github.com/buswinka/hcat/blob/master/modelfiles/spatial_embedding.trch?raw=true'
        # self.url = None
        self.scale = torch.tensor(scale)
        self.device = device
        self.sigma = sigma.to(device)
        self.postprocessing = postprocessing
        self.figure = figure

        # Weights come from the pinned release URL when set, else from disk.
        if self.url:
            self.model = self._model_loader_url(self.url, archetecture, device)
        else:
            self.model = self._model_loader_path(model_loc, archetecture, device)

        # TorchScript the pure-tensor stages; centroid estimation stays eager.
        self.vector_to_embedding = torch.jit.script(
            hcat.lib.functional.VectorToEmbedding(scale=self.scale).requires_grad_(False).eval())
        self.embedding_to_probability = torch.jit.script(
            hcat.lib.functional.EmbeddingToProbability(scale=self.scale).requires_grad_(False).eval())
        self.estimate_centroids = hcat.lib.functional.EstimateCentroids(scale=self.scale).requires_grad_(False)

        self.filter = median_filter(kernel_targets=3, rate=1, device=device)
        self.binary_erosion = erosion(device=device)
        self.intensity_rejection = IntensityCellReject()
        self.nms = hcat.lib.functional.nms().requires_grad_(False)

        # Intermediate results cached (on CPU) by forward() for inspection.
        self.centroids = None
        self.vec = None
        self.embed = None
        self.prob = None

    @graceful_exit('\x1b[1;31;40m' + 'ERROR: Spatial Embedding Failed. Aborting...' + '\x1b[0m')
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Inputs an image and outputs a probability mask of everything seen in the image.

        .. note::
           Call the module as a function to execute this method (similar to torch.nn.module).

        .. warning:
           Will not raise an error upon failure, instead returns None and prints to standard out

        Example:
        >>> from hcat.backends.spatial_embedding import SpatialEmbedding
        >>> import torch
        >>> backend = SpatialEmbedding()
        >>> image = torch.load('path/to/my/image.trch')
        >>> assert image.ndim == 5 # Shape should be [B, C, X, Y, Z]
        >>> masks = backend(image)

        :param image: [B, C=4, X, Y, Z] input image
        :return: [B, 1, X, Y, Z] output segmentation mask where each pixel value is a cell id (0 is background)
        """
        # Input contract: 5-D, single channel, values normalised to [-1, 1].
        assert image.ndim == 5
        assert image.shape[1] == 1
        assert image.min() >= -1
        assert image.max() <= 1

        # image = self.filter(image.to(self.device))
        image = image.to(self.device)
        b, c, x, y, z = image.shape

        # Optional early reject of unusable images: return zero candidates.
        if self.image_reject and self._is_image_bad(image):
            return torch.zeros((b, 0, x, y, z), device=self.device)

        # Evaluate Neural Network Model
        out: torch.Tensor = self.model(image)

        # Assign Outputs: last channel is the probability map, first three
        # channels are the embedding vector field.
        probability_map = out[:, [-1], ...]
        out = out[:, 0:3:1, ...]

        self.prob = probability_map.cpu()
        self.vec = out.cpu()

        out: torch.Tensor = self.vector_to_embedding(out)
        self.embed = out.cpu()

        centroids: Dict[str, torch.Tensor] = self.estimate_centroids(out, probability_map)
        self.centroids = centroids

        out: torch.Tensor = self.embedding_to_probability(out, centroids, self.sigma)

        # Reject cell masks that overlap or meet min Myo7a criteria
        if self.postprocessing:
            out: torch.Tensor = self.intensity_rejection(out, image)

        # print(centroids.shape, out.shape)
        if out.numel() == 0:
            return torch.zeros((b, 0, x, y, z), device=self.device)

        # Non-maximum suppression over candidate masks at IoU 0.5.
        ind = self.nms(out, 0.5)
        out = out[:, ind, ...]

        # Take probabilities and generate masks!
        # Pixels whose backbone probability is below 0.8 are zeroed in every
        # candidate mask.
        probability_map = probability_map.lt(0.8).squeeze(1)

        for i in range(out.shape[1]):
            out[:, i, ...][probability_map] = 0

        self.zero_grad()

        return out

    def load(self, model_loc: str) -> None:
        """
        Initializes model weights from a url or filepath.

        Example:
            >>> from hcat.backends.spatial_embedding import SpatialEmbedding
            >>> backend = SpatialEmbedding()
            >>>
            >>> url = 'https://www.model_location.com/model.trch'
            >>> backend.load(url) # Works with url
            >>>
            >>> model_path = 'path/to/my/model.trch'
            >>> backend.load(model_path) # Also works with path

        :param model_loc: url or filepath
        :return: None
        """
        if self._is_url(model_loc):
            return self._model_loader_url(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
        else:
            return self._model_loader_path(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
| StarcoderdataPython |
1752393 | import psutil
import time
import math
import gitlab
import os
from argparse import ArgumentParser
import configparser
MODULE_NAME = "ptl: Automatically log time in GitLab issue tracker for COMP23311 at UoM."
__version__ = "0.1.0"
def print_config(token, project_id, issue_id):
    """Pretty-print the current ptl settings to stdout."""
    lines = [
        "--:CONFIG:--",
        "🎫 TOKEN:" + token,
        "🆔 PROJECT-ID:" + project_id,
        "🆔 ISSUE-ID:" + issue_id,
    ]
    print("\n".join(lines))
def record_time(token, project_id, issue_id, ide="eclipse"):
    """Wait for the running IDE process to exit, then log the elapsed time
    (rounded up to whole hours) on the given GitLab issue.

    :param token: GitLab private token used for authentication.
    :param project_id: numeric GitLab project id.
    :param issue_id: numeric issue id within the project.
    :param ide: substring matched against process names (default "eclipse").
    """
    eclipse_id = -1
    # Find the pid of the (last) running process whose name contains `ide`.
    for proc in psutil.process_iter():
        try:
            # Get process name & pid from process object.
            proc_name = proc.name()
            proc_id = proc.pid
            if ide in proc_name:
                eclipse_id = proc_id
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Processes can vanish or deny access mid-iteration; skip them.
            pass
    if eclipse_id != -1:
        # Poll once a second until the IDE process exits.
        print("⏱️ Recording elapsed worktime for " + ide)
        start_time = time.time()
        while psutil.pid_exists(eclipse_id):
            time.sleep(1)
        end_time = time.time()
        # Convert seconds to hours, rounding up — any non-empty session is
        # billed as at least one hour.
        elapsed_time = (end_time - start_time) / 3600
        elapsed_time = (math.ceil(elapsed_time))
        # private token or personal token authentication
        gl = gitlab.Gitlab('https://gitlab.cs.man.ac.uk', private_token=token)
        # Make an API request and authenticate in order to add issue.
        gl.auth()
        project = gl.projects.get(project_id)
        issue = project.issues.get(issue_id)
        issue.add_spent_time(str(elapsed_time)+'h')
        print("⏱️ " + str(elapsed_time) + "h of time recorded.")
    else:
        print("❌ IDE not running yet!")
def set_config(token, project_id, issue_id, config):
    """Store the three settings in config['SETTINGS'], persist config.ini,
    and return the (mutated) config object."""
    settings = config['SETTINGS']
    settings['token'] = token
    settings['project_id'] = project_id
    settings['issue_id'] = issue_id
    with open('config.ini', 'w') as configfile:
        config.write(configfile)
    return config
def main():
    """CLI entry point: load (or bootstrap) config.ini, parse the command
    line, then show config, persist changed settings, or start timing."""
    config = configparser.ConfigParser()
    config.read('config.ini')
    # First run: only the implicit DEFAULT section exists, so create
    # SETTINGS with placeholder values.
    if len(config) == 1 and 'DEFAULT' in config:
        config['SETTINGS'] = {}
        config = set_config("0", "0", "0", config)

    parser = ArgumentParser(description=MODULE_NAME)
    parser.add_argument('-c', '--config',
                        action="store_true", dest="config", default=False,
                        help="Shows config of current ptl settings")
    parser.add_argument('-t', '--token',
                        type=str, dest="token", default=config['SETTINGS']['token'],
                        help="Sets private token for GitLab user")
    parser.add_argument('-p', '--projectid',
                        type=str, dest="project_id", default=config['SETTINGS']['project_id'],
                        help="Sets project id for a GitLab repository")
    parser.add_argument('-i', '--issueid',
                        type=str, dest="issue_id", default=config['SETTINGS']['issue_id'],
                        help="Sets issue id for an issue in a GitLab repository")
    parser.add_argument('-s', '--start',
                        action="store_true", dest="time", default=False,
                        help="Start timing IDE open time.")
    args = parser.parse_args()

    if args.config:
        print_config(args.token, args.project_id, args.issue_id)
        return

    saved = config['SETTINGS']
    settings_changed = (args.token != saved['token']
                        or args.project_id != saved['project_id']
                        or args.issue_id != saved['issue_id'])
    if settings_changed:
        set_config(args.token, args.project_id, args.issue_id, config)
    elif args.time:
        # Timing only starts when no setting changed on this invocation
        # (same precedence as the original elif chain).
        record_time(args.token, int(args.project_id), int(args.issue_id))
main()
| StarcoderdataPython |
1623374 | <reponame>vicdashkov/ClickHouse
import time
import pytest
import requests
from tempfile import NamedTemporaryFile
from helpers.hdfs_api import HDFSApi
import os
from helpers.cluster import ClickHouseCluster
import subprocess
# Directory of this test module (useful for locating fixture/config files).
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

# Single-node ClickHouse cluster with an attached HDFS service.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_hdfs=True, config_dir="configs", main_configs=['configs/log_conf.xml'])
@pytest.fixture(scope="module")
def started_cluster():
    """Start the ClickHouse+HDFS cluster for this module, always shutting
    it down afterwards."""
    try:
        cluster.start()
        yield cluster
    except Exception as ex:
        print(ex)
        # BUG FIX: re-raise with a bare `raise` instead of `raise ex` so the
        # original traceback is preserved.
        raise
    finally:
        cluster.shutdown()
def test_read_write_storage(started_cluster):
    """An ENGINE=HDFS table should see data written directly to HDFS."""
    api = HDFSApi("root")
    api.write_data("/simple_storage", "1\tMark\t72.53\n")

    assert api.read_data("/simple_storage") == "1\tMark\t72.53\n"

    node1.query("create table SimpleHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/simple_storage', 'TSV')")
    assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n"
def test_read_write_table(started_cluster):
    """The hdfs() table function should read back data written to HDFS."""
    api = HDFSApi("root")
    data = "1\tSerialize\t555.222\n2\tData\t777.333\n"
    api.write_data("/simple_table_function", data)

    assert api.read_data("/simple_table_function") == data

    assert node1.query("select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')") == data
| StarcoderdataPython |
3239020 | from datetime import datetime
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from config import Config
from flask import Flask, session
from flask_cors import CORS
from flask_login import LoginManager, current_user
from flask_mail import Mail
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_talisman import Talisman
from flask_wtf.csrf import CSRFProtect
from redis import Redis
import rq
from sqlalchemy import MetaData
import boto3
from botocore.client import Config as AZConfig
# Naming convention for constraints/indexes so migrations get deterministic
# names across databases.
convention = {
    "ix": 'ix_%(column_0_label)s',
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
}

metadata = MetaData(naming_convention=convention)

# Flask extension singletons; each is bound to the app in create_app().
db = SQLAlchemy(metadata=metadata)
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = None
mail = Mail()
cors = CORS()
csrf = CSRFProtect()
talisman = Talisman()

# boto3 client configuration pinning the region and SigV4 signing.
my_config = AZConfig(
    region_name = 'us-east-2',
    signature_version = 's3v4',
)

s3 = boto3.client('s3',
                  aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
                  aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),
                  config=my_config
                  )

bucket = os.environ.get('AWS_BUCKET')

# Content-Security-Policy template.
# NOTE(review): defined here but create_app() passes
# content_security_policy=None to talisman.init_app — confirm whether csp
# should actually be wired in.
csp = {
    'default-src': [
        '\'self\'',
        '\'unsafe-inline\'',
        '*.getbootsrap.com/*',
        '*.bootstrapcdn.com/*',
        '*.jquery.com/*',
        '*.cloudflare.com/ajax/libs/*'
    ]
}
def create_app(config_class=Config):
    """Application factory: build and configure the Lurnby Flask app.

    Wires config, the Redis/RQ task queue, request hooks, extensions,
    blueprints, and (outside debug/testing) e-mail and file logging.

    :param config_class: configuration object for app.config.from_object.
    :return: the configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('lurnby-tasks', connection=app.redis)

    @app.before_request
    def before_request_func():
        # Track the user's last activity on every authenticated request.
        if current_user.is_authenticated:
            current_user.last_active = datetime.utcnow()
            db.session.commit()

    # Add a variable into the app that can be used in all routes and blueprints
    # This one is so that I can have a now variable that automatically updates the copyright notice at the bottom.
    @app.context_processor
    def inject():
        if os.environ.get('DEV'):
            staging = True
        else:
            staging = False
        return {'now': datetime.utcnow(), 'staging': staging}

    cors.init_app(app, resources={r"/app/api/*": {"origins": "*"}})
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    talisman.init_app(app, content_security_policy=None)
    csrf.init_app(app)

    # Blueprints are imported lazily here to avoid circular imports with
    # the extension singletons defined at module level.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp, url_prefix='/app')

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/app/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp, url_prefix='/app')

    from app.settings import bp as settings_bp
    app.register_blueprint(settings_bp, url_prefix='/app')

    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')
    # The JSON API is exempt from CSRF protection.
    csrf.exempt(api_bp)

    from app.experiments import bp as experiments_bp
    app.register_blueprint(experiments_bp, url_prefix='/app')

    from app.content import bp as content_bp
    app.register_blueprint(content_bp, url_prefix='/app')

    from app.dotcom import bp as dotcom_bp
    app.register_blueprint(dotcom_bp)

    # NOTE(review): inside this factory __name__ is the package name, never
    # "__main__", so this branch looks like dead code — confirm upstream.
    if __name__ == "__main__":
        app.run(ssl_context=('cert.pem', 'key.pem'))

    # OAuth 2 client setup
    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            # E-mail error reports to the admins when SMTP is configured.
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'],
                          app.config['MAIL_PORT']),
                fromaddr='<EMAIL>',
                toaddrs=app.config['ADMINS'], subject='Lurnby Failure',
                credentials=auth, secure=secure
            )
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # INFO-level logging either to stdout (e.g. Heroku) or to a
        # rotating file under logs/.
        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/lurnby.log',
                                               maxBytes=10240, backupCount=10)
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Lurnby')

    return app
from app import models # noqa : E402, F401
| StarcoderdataPython |
3268621 | <filename>visualization.py
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import wandb
from torchvision import transforms
from MODELS.model_resnet import *
from custom_dataset import DatasetISIC2018
from gradcam import GradCAM, GradCAMpp
from gradcam.utils import visualize_cam
# Command-line interface for the visualization script; parsed at import
# time because this module is a script, not a library.
parser = argparse.ArgumentParser(description='PyTorch ResNet+CBAM ISIC2018 Visualization')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--vis-prefix', type=str, default=None,
                    help='prefix to save plots e.g. "baseline" or "SAM-1"')
parser.add_argument('--run-name', type=str, default='noname run', help='run name on the W&B service')
parser.add_argument('--is-server', type=int, choices=[0, 1], default=1)
parser.add_argument("--tags", nargs='+', default=['default-tag'])
parser.add_argument('--cuda-device', type=int, default=0)
parser.add_argument('--batch-size', default=1, type=int,
                    metavar='N', help='mini-batch size (default: 1)')
args = parser.parse_args()
# When true, plots are additionally logged to Weights & Biases.
is_server = args.is_server == 1
# make and save Grad-CAM plot (original image, mask, Grad-CAM, Grad-CAM++)
def make_plot_and_save(input_img, img_name, no_norm_image, segm, model, train_or_val, epoch=None, vis_prefix=None):
    """Render a 2x6 diagnostic grid for one image and optionally save/log it.

    Top/bottom rows show the original image, ground-truth mask, Grad-CAM,
    Grad-CAM++, and four SAM attention maps (absolute [0, 1] scale on top,
    relative per-image scale below). Saved under vis/<vis_prefix>/... when
    vis_prefix is given and logged to W&B when running on the server.
    """
    global is_server
    # get Grad-CAM results and prepare them to show on the plot
    target_layer = model.layer4
    gradcam = GradCAM(model, target_layer=target_layer)
    gradcam_pp = GradCAMpp(model, target_layer=target_layer)
    # sam_output shapes:
    # [1, 1, 56, 56]x3 , [1, 1, 28, 28]x4 [1, 1, 14, 14]x6 , [1, 1, 7, 7]x3
    mask, no_norm_mask, logit, sam_output = gradcam(input_img)
    sam1_show = torch.squeeze(sam_output[0].cpu()).detach().numpy()
    sam4_show = torch.squeeze(sam_output[3].cpu()).detach().numpy()
    sam8_show = torch.squeeze(sam_output[7].cpu()).detach().numpy()
    sam14_show = torch.squeeze(sam_output[13].cpu()).detach().numpy()
    heatmap, result = visualize_cam(mask, no_norm_image)
    # Channel-first tensors become channel-last arrays for imshow.
    result_show = np.moveaxis(torch.squeeze(result).detach().numpy(), 0, -1)
    mask_pp, no_norm_mask_pp, logit_pp, sam_output_pp = gradcam_pp(input_img)
    heatmap_pp, result_pp = visualize_cam(mask_pp, no_norm_image)
    result_pp_show = np.moveaxis(torch.squeeze(result_pp).detach().numpy(), 0, -1)
    # prepare mask and original image to show on the plot
    segm_show = torch.squeeze(segm.cpu()).detach().numpy()
    segm_show = np.moveaxis(segm_show, 0, 2)
    input_show = np.moveaxis(torch.squeeze(no_norm_image).detach().numpy(), 0, -1)
    # draw and save the plot
    plt.close('all')
    fig, axs = plt.subplots(nrows=2, ncols=6, figsize=(24, 9))
    plt.suptitle(f'{train_or_val}-Image: {img_name}')
    axs[1][0].imshow(segm_show)
    axs[1][0].set_title('Mask')
    axs[0][0].imshow(input_show)
    axs[0][0].set_title('Original Image')
    axs[0][1].imshow(result_show)
    axs[0][1].set_title('Grad-CAM')
    axs[1][1].imshow(result_pp_show)
    axs[1][1].set_title('Grad-CAM++')
    axs[1][2].imshow(sam1_show, cmap='gray')
    axs[1][2].set_title('SAM-1 relative')
    axs[0][2].imshow(sam1_show, vmin=0., vmax=1., cmap='gray')
    axs[0][2].set_title('SAM-1 absolute')
    axs[1][3].imshow(sam4_show, cmap='gray')
    axs[1][3].set_title('SAM-4 relative')
    axs[0][3].imshow(sam4_show, vmin=0., vmax=1., cmap='gray')
    axs[0][3].set_title('SAM-4 absolute')
    axs[1][4].imshow(sam8_show, cmap='gray')
    axs[1][4].set_title('SAM-8 relative')
    axs[0][4].imshow(sam8_show, vmin=0., vmax=1., cmap='gray')
    axs[0][4].set_title('SAM-8 absolute')
    axs[1][5].imshow(sam14_show, cmap='gray')
    axs[1][5].set_title('SAM-14 relative')
    axs[0][5].imshow(sam14_show, vmin=0., vmax=1., cmap='gray')
    axs[0][5].set_title('SAM-14 absolute')
    plt.show()
    if vis_prefix is not None:
        plt.savefig(f'vis/{vis_prefix}/{train_or_val}/{img_name}.png', bbox_inches='tight')
    if is_server:
        # Log with the epoch as the W&B step when available.
        if epoch is not None:
            wandb.log({f'{train_or_val}/{img_name}': fig}, step=epoch)
        else:
            wandb.log({f'{train_or_val}/{img_name}': fig})
def main():
    """Render Grad-CAM / Grad-CAM++ / SAM visualisations for a trained CBAM
    ResNet-50 on the ISIC2018 train and val sets.

    Relies on the module-level ``args`` (CLI options) and ``is_server`` flag;
    when running on the server, figures are also logged to wandb.
    Returns -1 if the checkpoint given by ``args.resume`` does not exist.
    """
    global args, is_server
    if is_server:
        wandb.login()
    config = dict(
        vis_prefix=args.vis_prefix,
        resume=args.resume,
    )
    if is_server:
        wandb.init(config=config, project="vol.4", name=args.run_name, tags=args.tags)
    # define constants
    # vis_prefix = 'baseline'
    CLASS_AMOUNT = 5
    DEPTH = 50
    root_dir = 'data/'
    # resume = "checkpoints/baseline_checkpoint.pth"
    traindir = os.path.join(root_dir, 'train')
    train_labels = os.path.join(root_dir, 'train', 'images_onehot_train.txt')
    valdir = os.path.join(root_dir, 'val')
    val_labels = os.path.join(root_dir, 'val', 'images_onehot_val.txt')
    # define the model
    model = ResidualNet('ImageNet', DEPTH, CLASS_AMOUNT, 'CBAM')
    if is_server:
        model = model.cuda(args.cuda_device)
    # # load the checkpoint
    if os.path.isfile(args.resume):
        print(f"=> loading checkpoint '{args.resume}'")
        # map_location='cpu' so the checkpoint loads even without a GPU
        checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        print(f"=> loaded checkpoint '{args.resume}'")
        print(f"epoch = {checkpoint['epoch']}")
    else:
        print(f"=> no checkpoint found at '{args.resume}'")
        return -1
    # define datasets and data loaders
    size0 = 224
    segm_dir = "images/256ISIC2018_Task1_Training_GroundTruth/"
    train_dataset = DatasetISIC2018(
        train_labels,
        traindir,
        segm_dir,
        size0,
        False,  # perform flips
        False,  # perform random resized crop with size = 224
        transforms.CenterCrop(size0)
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=False,
        pin_memory=True
    )
    val_dataset = DatasetISIC2018(
        val_labels,
        valdir,
        segm_dir,
        size0,
        False,
        False,
        transforms.CenterCrop(size0)
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        pin_memory=True
    )
    # create directories to save plots
    if args.vis_prefix is not None:
        if not os.path.exists(f'vis/{args.vis_prefix}'):
            os.mkdir(f'vis/{args.vis_prefix}')
        if not os.path.exists(f'vis/{args.vis_prefix}/train'):
            os.mkdir(f'vis/{args.vis_prefix}/train')
        if not os.path.exists(f'vis/{args.vis_prefix}/val'):
            os.mkdir(f'vis/{args.vis_prefix}/val')
    for i, dictionary in enumerate(train_loader):
        input_img = dictionary['image']
        # only the first name of the batch is used — presumably batch_size == 1; TODO confirm
        img_name = dictionary['name'][0]
        no_norm_image = dictionary['no_norm_image']
        segm = dictionary['segm']
        if is_server:
            input_img = input_img.cuda(args.cuda_device)
        make_plot_and_save(input_img, img_name, no_norm_image, segm, model, 'train', vis_prefix=args.vis_prefix)
    # NOTE(review): this early return makes the val loop below unreachable —
    # looks like leftover debugging; confirm whether val plots are wanted.
    return
    for i, dictionary in enumerate(val_loader):
        input_img = dictionary['image']
        img_name = dictionary['name'][0]
        no_norm_image = dictionary['no_norm_image']
        segm = dictionary['segm']
        if is_server:
            input_img = input_img.cuda(args.cuda_device)
        make_plot_and_save(input_img, img_name, no_norm_image, segm, model, 'val', vis_prefix=args.vis_prefix)
if __name__ == '__main__':
    # Script entry point; ``args``/``is_server`` are expected to be set at import time.
    main()
| StarcoderdataPython |
1718173 | <gh_stars>0
from django.shortcuts import render
def sensor_view(request):
    """Render the sensor page with a hard-coded reading of '99'."""
    context = {'sensor': '99'}
    return render(request, "sensor.html", context)
1703587 | """
usage: /Users/eisenham/Documents/ssbdev/crds/crds/rowdiff.py
[-h] [--ignore-fields IGNORE_FIELDS] [--fields FIELDS]
[--mode-fields MODE_FIELDS] [-v] [--verbosity VERBOSITY] [-V] [-J] [-H]
[--stats] [--profile PROFILE] [--pdb]
tableA tableB
Perform FITS table difference by rows
positional arguments:
tableA First table to compare
tableB Second table to compare
optional arguments:
-h, --help show this help message and exit
--ignore-fields IGNORE_FIELDS
List of fields to ignore
--fields FIELDS List of fields to compare
--mode-fields MODE_FIELDS
List of fields to do a mode compare
-v, --verbose Set log verbosity to True, nominal debug level.
--verbosity VERBOSITY
Set log verbosity to a specific level: 0..100.
-V, --version Print the software version and exit.
-J, --jwst Force observatory to JWST for determining header conventions.
-H, --hst Force observatory to HST for determining header conventions.
--stats Track and print timing statistics.
--profile PROFILE Output profile stats to the specified file.
--pdb Run under pdb.
Perform FITS table difference by rows
Input:
fits_a, fits_b: Paths or HDUList objects of the
two FITS files to compare.
fields: List of fields to compare on.
ignore-fields: List of fields to ignore.
mode-fields: List of fields that define modes to compare
Note: The parameters 'fields' and 'ignore-fields' are mutually exclusive.
An error will be raised if both are specified.
Output:
object variables:
diffs: tuple of the differences for each table extension found.
This is either None for no differences, or is again a
tuple consisting of:
- If mode-fields is specified, the tuple is described by
modediff
- Otherwise the tuple is described by rowdiff
stdout: Human readable report.
----------
TEST CASES
----------
>>> from crds.tests import test_config
>>> old_state = test_config.setup()
>>> from crds.rowdiff import RowDiffScript
Only should work with Table extensions
>>> case = RowDiffScript(argv="rowdiff.py data/hst_acs_biasfile_0001.fits data/hst_acs_biasfile_0002.fits")
>>> case.run()
<BLANKLINE>
Basic functionality: No differences
>>> case = RowDiffScript(argv="rowdiff.py data/test-source.fits data/test-source.fits")
>>> case.run()
HDU extension #1 contains no differences
Row change
>>> case = RowDiffScript(argv="rowdiff.py data/test-source.fits data/test-change-row1-valueLeft.fits")
>>> case.run() # doctest: +ELLIPSIS
Row differences for HDU extension #1
<BLANKLINE>
Summary:
a rows 1-1 differ from b rows 1-1
<BLANKLINE>
Row difference, unified diff format:
--- Table A
<BLANKLINE>
+++ Table B
<BLANKLINE>
@@ -1,5 +1,5 @@
<BLANKLINE>
'yes', 'yes', 2988, -2779.03..., 'coquille'
-'yes', 'no', 5748, 6357.97..., 'ferly'
+'yes', 'no', -1, 6357.97..., 'ferly'
'yes', 'maybe', 9735, -9132.53..., 'misreliance'
'no', 'yes', 425, -2689.26..., 'ogeed'
'no', 'no', 8989, 9870.02..., 'readmittance'
<BLANKLINE>
Row removal
>>> case = RowDiffScript(argv="rowdiff.py data/test-source.fits data/test-single-modes.fits")
>>> case.run() # doctest: +ELLIPSIS
Row differences for HDU extension #1
<BLANKLINE>
Summary:
Remove from a rows 1-3
Remove from a rows 5-7
<BLANKLINE>
Row difference, unified diff format:
--- Table A
<BLANKLINE>
+++ Table B
<BLANKLINE>
@@ -1,9 +1,3 @@
<BLANKLINE>
'yes', 'yes', 2988, -2779.03..., 'coquille'
-'yes', 'no', 5748, 6357.97..., 'ferly'
-'yes', 'maybe', 9735, -9132.5..., 'misreliance'
-'no', 'yes', 425, -2689.26..., 'ogeed'
'no', 'no', 8989, 9870.025..., 'readmittance'
-'no', 'maybe', 3537, -8615.03..., 'anacatadidymus'
-'maybe', 'yes', 1763, -2442.96..., 'monochromat'
-'maybe', 'no', 8023, 4665.56..., 'ranarium'
'maybe', 'maybe', 7347, 1705.58..., 'Dode'
<BLANKLINE>
Row addition
>>> case = RowDiffScript(argv="rowdiff.py data/test-single-modes.fits data/test-source.fits")
>>> case.run() # doctest: +ELLIPSIS
Row differences for HDU extension #1
<BLANKLINE>
Summary:
Add to b rows 1-3
Add to b rows 5-7
<BLANKLINE>
Row difference, unified diff format:
--- Table A
<BLANKLINE>
+++ Table B
<BLANKLINE>
@@ -1,3 +1,9 @@
<BLANKLINE>
'yes', 'yes', 2988, -2779.03..., 'coquille'
+'yes', 'no', 5748, 6357.97..., 'ferly'
+'yes', 'maybe', 9735, -9132.53..., 'misreliance'
+'no', 'yes', 425, -2689.26..., 'ogeed'
'no', 'no', 8989, 9870.02..., 'readmittance'
+'no', 'maybe', 3537, -8615.03..., 'anacatadidymus'
+'maybe', 'yes', 1763, -2442.96..., 'monochromat'
+'maybe', 'no', 8023, 4665.56..., 'ranarium'
'maybe', 'maybe', 7347, 1705.58..., 'Dode'
<BLANKLINE>
Test of switch ignore-fields
>>> case = RowDiffScript(argv="rowdiff.py --ignore-fields=valueleft data/test-source.fits data/test-change-row1-valueLeft.fits")
>>> case.run()
HDU extension #1 contains no differences
>>> case = RowDiffScript(argv="rowdiff.py --ignore-fields=modeup,modedown data/test-source.fits data/test-change-row1-valueLeft.fits")
>>> case.run() # doctest: +ELLIPSIS
Row differences for HDU extension #1
<BLANKLINE>
Summary:
a rows 1-1 differ from b rows 1-1
<BLANKLINE>
Row difference, unified diff format:
--- Table A
<BLANKLINE>
+++ Table B
<BLANKLINE>
@@ -1,5 +1,5 @@
<BLANKLINE>
2988, -2779.03..., 'coquille'
-5748, 6357.97..., 'ferly'
+-1, 6357.97..., 'ferly'
9735, -9132.53..., 'misreliance'
425, -2689.26..., 'ogeed'
8989, 9870.02..., 'readmittance'
<BLANKLINE>
Test of switch fields
>>> case = RowDiffScript(argv="rowdiff.py --fields=modeup data/test-source.fits data/test-change-row1-valueLeft.fits")
>>> case.run()
HDU extension #1 contains no differences
>>> case = RowDiffScript(argv="rowdiff.py --fields=valueleft data/test-source.fits data/test-change-row1-valueLeft.fits")
>>> case.run()
Row differences for HDU extension #1
<BLANKLINE>
Summary:
a rows 1-1 differ from b rows 1-1
<BLANKLINE>
Row difference, unified diff format:
--- Table A
<BLANKLINE>
+++ Table B
<BLANKLINE>
@@ -1,5 +1,5 @@
<BLANKLINE>
2988
-5748
+-1
9735
425
8989
<BLANKLINE>
Mode test: no differences
>>> case = RowDiffScript(argv="rowdiff.py --mode-fields=modeup,modedown data/test-source.fits data/test-source.fits")
>>> case.run()
Difference for HDU extension #1
<BLANKLINE>
Table A has all modes.
<BLANKLINE>
Table B has all modes.
<BLANKLINE>
Table A and B share all modes.
<BLANKLINE>
All common modes are equivalent.
<BLANKLINE>
Mode test: No mode changes, but change in rows selected
>>> case = RowDiffScript(argv="rowdiff.py --mode-fields=modeup,modedown data/test-source.fits data/test-change-row1-valueLeft.fits")
>>> case.run() # doctest: +ELLIPSIS
Difference for HDU extension #1
<BLANKLINE>
Table A has all modes.
<BLANKLINE>
Table B has all modes.
<BLANKLINE>
Table A and B share all modes.
<BLANKLINE>
Common mode changes:
If there were duplicate modes, the following may be nonsensical.
<BLANKLINE>
Changed Modes:
From Table A:
modeup modedown valueleft valueright wordage
------ -------- --------- ---------- -------
yes no 5748 6357.97... ferly
<BLANKLINE>
To Table B:
modeup modedown valueleft valueright wordage
------ -------- --------- ---------- -------
yes no -1 6357.97... ferly
<BLANKLINE>
Mode test: removed modes
>>> case = RowDiffScript(argv="rowdiff.py --mode-fields=modeup,modedown data/test-source.fits data/test-alternate-modes.fits")
>>> case.run()
Difference for HDU extension #1
<BLANKLINE>
Table A has all modes.
<BLANKLINE>
Table B changes:
<BLANKLINE>
Missing Modes:
modeup modedown
------ --------
maybe maybe
no no
yes yes
<BLANKLINE>
Table A to B changes:
<BLANKLINE>
Missing Modes:
modeup modedown
------ --------
maybe maybe
no no
yes yes
<BLANKLINE>
All common modes are equivalent.
<BLANKLINE>
Mode test: duplicate modes
>>> case = RowDiffScript(argv="rowdiff.py --mode-fields=modeup,modedown data/test-source.fits data/test-duplicate-mode.fits")
>>> case.run() # doctest: +ELLIPSIS
Difference for HDU extension #1
<BLANKLINE>
Table A has all modes.
<BLANKLINE>
Table B changes:
<BLANKLINE>
Duplicated Modes:
modeup modedown
------ --------
no maybe
<BLANKLINE>
Table A to B changes:
<BLANKLINE>
Duplicated Modes:
modeup modedown
------ --------
no maybe
<BLANKLINE>
Common mode changes:
If there were duplicate modes, the following may be nonsensical.
<BLANKLINE>
Changed Modes:
From Table A:
modeup modedown valueleft valueright wordage
------ -------- --------- ---------- -------
no yes 425 -2689.26... ogeed
<BLANKLINE>
To Table B:
modeup modedown valueleft valueright wordage
------ -------- --------- ---------- -------
no yes -1 -2689.26... ogeed
<BLANKLINE>
CLEANUP
>>> test_config.cleanup(old_state)
"""
def main():
    """Execute this module's doctest suite and return its result."""
    from crds.tests import test_rowdiff, tstmod
    result = tstmod(test_rowdiff)
    return result
if __name__ == "__main__":
print(main())
| StarcoderdataPython |
3225082 | """Test script for ftplib module."""
# Modified by <NAME>' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import StringIO
from unittest import TestCase
from test import test_support
from test.test_support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
# (RETR_DATA is ~12 KB so transfers span multiple reads)
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
    """Data-channel (DTP) handler: accumulates received bytes on the owning
    control handler and acknowledges transfer completion when closed."""

    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        # baseclass is the DummyFTPHandler that opened this data connection
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''

    def handle_read(self):
        # Stash everything on the control handler so tests can inspect it.
        self.baseclass.last_received_data += self.recv(1024)

    def handle_close(self):
        # Real FTP servers send 226 on the control channel after a transfer.
        self.baseclass.push('226 transfer complete')
        self.close()
class DummyFTPHandler(asynchat.async_chat):
    """Control-channel handler implementing just enough of the FTP protocol
    (RFC 959 plus EPRT/EPSV extensions) for the client tests.

    Incoming commands are dispatched to ``cmd_<name>`` methods; unknown
    commands get a 550 reply.
    """

    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        self.set_terminator("\r\n")
        self.in_buffer = []
        self.dtp = None
        self.last_received_cmd = None
        self.last_received_data = ''
        # Tests may set this to force a canned reply before normal handling.
        self.next_response = ''
        self.push('220 welcome')

    def collect_incoming_data(self, data):
        self.in_buffer.append(data)

    def found_terminator(self):
        # A complete command line has arrived; parse verb and argument.
        line = ''.join(self.in_buffer)
        self.in_buffer = []
        if self.next_response:
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        if space != -1:
            arg = line[space + 1:]
        else:
            arg = ""
        if hasattr(self, 'cmd_' + cmd):
            # Dispatch to the matching cmd_* handler.
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        else:
            self.push('550 command "%s" not understood.' %cmd)

    def handle_error(self):
        # Let test failures surface instead of being swallowed by asyncore.
        raise

    def push(self, data):
        # All control replies are CRLF-terminated per the FTP protocol.
        asynchat.async_chat.push(self, data + '\r\n')

    def cmd_port(self, arg):
        # Active mode: client gave us h1,h2,h3,h4,p1,p2 — connect back to it.
        addr = map(int, arg.split(','))
        ip = '%d.%d.%d.%d' %tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        s = socket.create_connection((ip, port), timeout=2)
        self.dtp = DummyDTPHandler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_pasv(self, arg):
        # Passive mode: listen on an ephemeral port and tell the client.
        sock = socket.socket()
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(2)
        ip, port = sock.getsockname()[:2]
        ip = ip.replace('.', ',')
        p1, p2 = divmod(port, 256)
        self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
        conn, addr = sock.accept()
        self.dtp = DummyDTPHandler(conn, baseclass=self)

    def cmd_eprt(self, arg):
        # Extended active mode (RFC 2428): "|af|ip|port|".
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=2)
        self.dtp = DummyDTPHandler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_epsv(self, arg):
        # Extended passive mode (RFC 2428), IPv6 only in this dummy.
        sock = socket.socket(socket.AF_INET6)
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(2)
        port = sock.getsockname()[1]
        self.push('229 entering extended passive mode (|||%d|)' %port)
        conn, addr = sock.accept()
        self.dtp = DummyDTPHandler(conn, baseclass=self)

    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)

    def cmd_user(self, arg):
        self.push('331 username ok')

    def cmd_pass(self, arg):
        self.push('230 password ok')

    def cmd_acct(self, arg):
        self.push('230 acct ok')

    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')

    def cmd_rnto(self, arg):
        self.push('250 rnto ok')

    def cmd_dele(self, arg):
        self.push('250 dele ok')

    def cmd_cwd(self, arg):
        self.push('250 cwd ok')

    def cmd_size(self, arg):
        self.push('250 1000')

    def cmd_mkd(self, arg):
        self.push('257 "%s"' %arg)

    def cmd_rmd(self, arg):
        self.push('250 rmd ok')

    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')

    def cmd_type(self, arg):
        self.push('200 type ok')

    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()

    def cmd_stor(self, arg):
        self.push('125 stor ok')

    def cmd_retr(self, arg):
        # Send the canned payload over the data channel, then close it.
        self.push('125 retr ok')
        self.dtp.push(RETR_DATA)
        self.dtp.close_when_done()

    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()

    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Single-connection dummy FTP server running the asyncore poll loop in
    its own thread; the accepted connection is handed to ``handler``."""

    handler = DummyFTPHandler

    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        # Serialises access to the asyncore loop between threads.
        self.active_lock = threading.Lock()
        self.host, self.port = self.socket.getsockname()[:2]

    def start(self):
        assert not self.active
        # Event used to block until the loop thread has actually started.
        self.__flag = threading.Event()
        threading.Thread.start(self)
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)

    def stop(self):
        assert self.active
        # Clearing the flag lets run() exit; join() waits for cleanup.
        self.active = False
        self.join()

    def handle_accept(self):
        conn, addr = self.accept()
        # NOTE: rebinds the class attribute name to the handler *instance*.
        self.handler = self.handler(conn)
        self.close()

    def handle_connect(self):
        self.close()
    handle_read = handle_connect

    def writable(self):
        return 0

    def handle_error(self):
        raise
class TestFTPClass(TestCase):
    """Exercise ftplib.FTP against the in-process dummy IPv4 server."""

    def setUp(self):
        self.server = DummyFTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP(timeout=2)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_getwelcome(self):
        self.assertEqual(self.client.getwelcome(), '220 welcome')

    def test_sanitize(self):
        # sanitize() must mask password/pass arguments in log output.
        self.assertEqual(self.client.sanitize('foo'), repr('foo'))
        self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
        self.assertEqual(self.client.sanitize('PASS 1<PASSWORD>'), repr('PASS *****'))

    def test_exceptions(self):
        # Reply codes map onto the ftplib exception hierarchy: 4xx temp,
        # 5xx perm, anything else proto.
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
        self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')

    def test_all_errors(self):
        # Every exception type should be caught by the all_errors tuple.
        exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
                      ftplib.error_proto, ftplib.Error, IOError, EOFError)
        for x in exceptions:
            try:
                raise x('exception not included in all_errors set')
            except ftplib.all_errors:
                pass

    def test_set_pasv(self):
        # passive mode is supposed to be enabled by default
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(True)
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(False)
        self.assertFalse(self.client.passiveserver)

    def test_voidcmd(self):
        # voidcmd accepts only 2xx replies.
        self.client.voidcmd('echo 200')
        self.client.voidcmd('echo 299')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')

    def test_login(self):
        self.client.login()

    def test_acct(self):
        self.client.acct('passwd')

    def test_rename(self):
        self.client.rename('a', 'b')
        # An unexpected reply to RNFR must raise error_reply.
        self.server.handler.next_response = '200'
        self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')

    def test_delete(self):
        self.client.delete('foo')
        self.server.handler.next_response = '199'
        self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')

    def test_size(self):
        self.client.size('foo')

    def test_mkd(self):
        dir = self.client.mkd('/foo')
        self.assertEqual(dir, '/foo')

    def test_rmd(self):
        self.client.rmd('foo')

    def test_pwd(self):
        dir = self.client.pwd()
        self.assertEqual(dir, 'pwd ok')

    def test_quit(self):
        self.assertEqual(self.client.quit(), '221 quit ok')
        # Ensure the connection gets closed; sock attribute should be None
        self.assertEqual(self.client.sock, None)

    def test_retrbinary(self):
        received = []
        self.client.retrbinary('retr', received.append)
        self.assertEqual(''.join(received), RETR_DATA)

    def test_retrlines(self):
        received = []
        self.client.retrlines('retr', received.append)
        self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))

    def test_storbinary(self):
        f = StringIO.StringIO(RETR_DATA)
        self.client.storbinary('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)

    def test_storlines(self):
        f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
        self.client.storlines('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)

    def test_nlst(self):
        self.client.nlst()
        self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])

    def test_dir(self):
        l = []
        self.client.dir(lambda x: l.append(x))
        self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))

    def test_makeport(self):
        self.client.makeport()
        # IPv4 is in use, just make sure send_eprt has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'port')

    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 2)
        conn.close()
        # IPv4 is in use, just make sure send_epsv has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
    """Same client checks over IPv6: EPRT/EPSV must be used instead of
    PORT/PASV, and transfers must work in both active and passive modes."""

    def setUp(self):
        self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_af(self):
        self.assertEqual(self.client.af, socket.AF_INET6)

    def test_makeport(self):
        self.client.makeport()
        self.assertEqual(self.server.handler.last_received_cmd, 'eprt')

    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 2)
        conn.close()
        self.assertEqual(self.server.handler.last_received_cmd, 'epsv')

    def test_transfer(self):
        def retr():
            # Download the canned payload and verify it arrived intact.
            received = []
            self.client.retrbinary('retr', received.append)
            self.assertEqual(''.join(received), RETR_DATA)
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
class TestTimeouts(TestCase):
    """Check how FTP() propagates timeouts to the underlying socket, using a
    minimal one-shot server thread synchronised via an Event."""

    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        # Point the FTP class at our ad-hoc server port.
        ftplib.FTP.port = self.port

    def tearDown(self):
        self.evt.wait()

    def server(self, evt, serv):
        # This method sets the evt 3 times:
        #  1) when the connection is ready to be accepted.
        #  2) when it is safe for the caller to close the connection
        #  3) when we have closed the socket
        serv.listen(5)
        # (1) Signal the caller that we are ready to accept the connection.
        evt.set()
        try:
            conn, addr = serv.accept()
        except socket.timeout:
            pass
        else:
            conn.send("1 Hola mundo\n")
            # (2) Signal the caller that it is safe to close the socket.
            evt.set()
            conn.close()
        finally:
            serv.close()
            # (3) Signal the caller that we are done.
            evt.set()

    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assert_(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP("localhost")
        finally:
            # Always restore the global default for other tests.
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutNone(self):
        # no timeout -- do not use global socket timeout
        self.assert_(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP("localhost", timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(ftp.sock.gettimeout() is None)
        self.evt.wait()
        ftp.close()

    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutConnect(self):
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDifferentOrder(self):
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDirectAccess(self):
        # Setting the attribute directly should also be honoured by connect().
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
def test_main():
    """Run the FTP client test suites, adding the IPv6 suite only when an
    IPv6 socket can actually be bound on this host."""
    tests = [TestFTPClass, TestTimeouts]
    if socket.has_ipv6:
        try:
            # Probe: can we bind an AF_INET6 server here at all?
            DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        except socket.error:
            pass
        else:
            tests.append(TestIPv6Environment)
    thread_info = test_support.threading_setup()
    try:
        test_support.run_unittest(*tests)
    finally:
        # Ensure stray threads are cleaned up even on failure.
        test_support.threading_cleanup(*thread_info)

if __name__ == '__main__':
    test_main()
| StarcoderdataPython |
3334261 | <reponame>BCNI/VisualDiscriminationTask
#!/usr/bin/env/python
# whisker/__init__.py
import logging
# Library convention: attach a NullHandler so importing applications decide
# where (if anywhere) this package's log records go.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# http://eric.themoritzfamily.com/learning-python-logging.html
# http://stackoverflow.com/questions/12296214/python-logging-with-a-library-namespaced-packages # noqa
1718633 | <reponame>pubkraal/Advent<gh_stars>0
#!/usr/bin/env python3
import sys
def fuel_need(num):
    """Fuel needed for a module of mass ``num``: mass/3 rounded down,
    minus 2, clamped at zero (AoC 2019 day 1, part 1)."""
    fuel = int(num / 3) - 2
    return fuel if fuel > 0 else 0
def reduced_fuel_need(num):
    """Total fuel for mass ``num`` including the fuel's own fuel, iterated
    until the marginal requirement drops to zero (AoC 2019 day 1, part 2).

    Replaces the original arbitrary 100-iteration cap with a loop that
    terminates exactly when no more fuel is needed; equivalent for all
    realistic inputs since each step divides the mass by ~3.
    """
    total = 0
    step = fuel_need(num)
    while step > 0:
        total += step
        step = fuel_need(step)
    return total
def main(input_file):
    """Read one mass per line from ``input_file`` and print the part 1 and
    part 2 fuel totals."""
    # Sanity-check part 1 against the worked examples from the puzzle text.
    assert fuel_need(12) == 2
    assert fuel_need(14) == 2
    assert fuel_need(1969) == 654
    assert fuel_need(100756) == 33583
    with open(input_file, 'r') as rd:
        all_fuels = [fuel_need(int(line)) for line in rd.readlines()]
    print("Fuel need:", sum(all_fuels))
    # Part 2: each fuel mass itself needs fuel, recursively.
    additional_fuels = [reduced_fuel_need(x) for x in all_fuels]
    print("Fuel need w/ fuel:", sum(all_fuels) + sum(additional_fuels))
if __name__ == "__main__":
main(sys.argv[1])
| StarcoderdataPython |
3206165 | <reponame>CarlosMart626/dj_blog
from django.conf.urls import url
from functools import wraps
from django.utils.decorators import available_attrs
from django.views.decorators.cache import cache_page
from djcms_blog.settings import DJCMS_BLOG_CACHE_TIME
from djcms_blog import settings
from . import views
def cache_for_anonim(timeout):
    """View decorator: cache responses for ``timeout`` seconds, but only for
    non-staff users, so editors always see fresh (uncached) content."""
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if request.user.is_staff:
                # Staff bypass the cache entirely.
                return (view_func)(request, *args, **kwargs)
            else:
                return cache_page(timeout)(view_func)(request, *args, **kwargs)
        return _wrapped_view
    return decorator
urlpatterns = []
# When a default blog is configured, it is served directly from the root
# slug pattern; otherwise each blog lives under "<slug>/index/".
if settings.DEFAULT_BLOG_ID:
    urlpatterns.append(
        url(
            r"^(?P<blog_slug>[\w-]+|)$",
            cache_for_anonim(DJCMS_BLOG_CACHE_TIME)(views.DefaultBlogView.as_view()),
            name="blog-main"
        )
    )
else:
    urlpatterns.append(
        url(
            r"^(?P<blog_slug>[\w-]+)/index/$",
            cache_for_anonim(DJCMS_BLOG_CACHE_TIME)(views.BlogView.as_view()),
            name="blog-main"
        )
    )
urlpatterns += [
    # Author, tag and post pages are cached for anonymous visitors only.
    url(
        r"^author/(?P<author_slug>[\w-]+)/$",
        cache_for_anonim(DJCMS_BLOG_CACHE_TIME)(views.AutorView.as_view()),
        name="author-main",
    ),
    url(
        r"^(?P<blog_slug>[\w-]+)/tag/(?P<tag_slug>[\w-]+)/$",
        cache_for_anonim(DJCMS_BLOG_CACHE_TIME)(views.TagView.as_view()),
        name="tag-main",
    ),
    url(
        r"^(?P<blog_slug>[\w-]+)/(?P<post_slug>[\w-]+)/$",
        cache_for_anonim(DJCMS_BLOG_CACHE_TIME)(views.PostView.as_view()),
        name="post-detail",
    ),
    # Drafts are never cached so authors always preview the latest content.
    url(
        r"^draft/(?P<blog_slug>[\w-]+)/(?P<post_slug>[\w-]+)/$",
        views.PostDraftView.as_view(),
        name="draft-post-detail",
    ),
    url(
        r'^unpublish-all/$',
        views.delete_published_posts,
        name="unpublish-all",
    ),
]
| StarcoderdataPython |
1751942 | from simple_smartsheet.models import Sheet
class TestSheet:
    def test_dataframe(self, mocked_sheet: Sheet) -> None:
        """as_dataframe() should expose sheet rows/columns as a pandas frame
        (``mocked_sheet`` is a pytest fixture providing a 3-row Sheet)."""
        df = mocked_sheet.as_dataframe()
        assert len(df) == 3
        assert df.loc[0]["Full Name"] == "<NAME>"
        assert df.loc[1]["Email address"] == "<EMAIL>"
        assert df.loc[2]["Company"] == "ACME"
        # Multi-value cells come back as a collection; compare as a set.
        assert set(df.loc[2]["Maintains"]) == {"napalm", "netmiko", "nornir"}
| StarcoderdataPython |
3243677 | import json
from pprint import pprint
from flask import jsonify
from flask import Flask, request
from pathlib import Path
import subprocess
import os
from botcommands.youtube_dlp import get_meta, get_mp4
from flask import send_file
from yt_dlp.utils import DownloadError
app = Flask(__name__)
@app.route("/")
def hello_world():
# send_msg("Hello worldddd")
return "<p>Hello, World!</p>"
@app.route('/ytv')
def ytv():
    """Download a YouTube video as MP4 and stream it back to the client.

    Expects a ``url`` query parameter; on download failure the error is
    returned as JSON instead of a file.
    """
    if request.args.get('url'):
        url = request.args.get('url')
        try:
            payload = get_mp4(url)
            # print(payload)
        except DownloadError as e:
            payload = {'Error': str(e)}
            return jsonify(payload)
        try:
            # presumably payload['file'] is the downloaded mp4 path — verify against get_mp4
            return send_file(payload['file'],
                             attachment_filename='v.mp4')
        except Exception as e:
            # send_file failed (e.g. missing 'file' key): fall back to JSON.
            return jsonify(payload)
    else:
        return '<p>Wat?</p>'
def send_msg(msg):
    """Send ``msg`` to the Keybase chat between 'pastorhudson' and the bot
    (from the KEYBASE_BOTNAME env var) via the local keybase CLI chat API.

    Fix: the original debug print showed 'keybase' while the executed
    command actually ran './keybase' — the argv is now built once and both
    printed and executed, so the log matches reality.
    """
    you = 'pastorhudson'
    them = os.environ.get('KEYBASE_BOTNAME')
    payload = {"method": "send", "params": {
        "options": {"channel": {"name": f"{you},{them}"}, "message": {"body": msg}}}}
    cmd = ['./keybase', '--home', './webhookbot', 'chat', 'api', '-m', json.dumps(payload)]
    print(payload)
    print(cmd)  # debug: the exact command about to run
    subprocess.run(args=cmd)
if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get('PORT', 5000))
    # NOTE(review): binds loopback only; use host='0.0.0.0' to expose externally.
    app.run(host='127.0.0.1', port=port)
| StarcoderdataPython |
3388217 | <filename>GPIO/NixieTube.py
# coding=utf-8
import sys
sys.path.append('..')
reload(sys)
sys.setdefaultencoding('utf8')
import time
import RPi.GPIO as GPIO
# 共阳4位数字管
class Yang4():
# 显示位数
p1 = 1
p2 = 2
p3 = 3
p4 = 4
# 显示状态
a = 5
b = 6
c = 7
d = 8
e = 9
f = 10
g = 11
dp = 12
positionPoints = []
numberPoints = []
# 初始化并设置控制针脚
# 针脚连接顺序:位置1-4,数字a-dp
def __init__(self, p1, p2, p3, p4, a, b, c, d, e, f, g, dp):
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.p4 = p4
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
self.f = f
self.g = g
self.dp = dp
self.positionPoints = [p1, p2, p3, p4]
self.numberPoints = [a, b, c, d, e, f, g, dp]
# Board模式
GPIO.setmode(GPIO.BOARD)
# 关闭提示
GPIO.setwarnings(False)
for item in self.positionPoints+self.numberPoints:
GPIO.setup(item, GPIO.OUT)
# 输入一个字符串
def Display(self, str8bit):
self.__DisplayCode(str8bit)
# 筛选并控制显示各位置
def __DisplayCode(self, str8bit):
# 当前位置
index = -1
for i in range(0, len(str8bit)):
if index > 8:
return
arg = str(str8bit[i])
if arg == '.' and index % 2 != 0:
index = index + 1
elif arg != '.' and index % 2 != 1:
index = index + 1
index = index + 1
self.__ResetPosition()
self.__ResetNumber()
self.__DisplayNumberSwitch(arg)
GPIO.output(self.positionPoints[index//2], 1)
time.sleep(0.002)
def __ResetPosition(self):
for item in self.positionPoints:
GPIO.output(item, 0)
def __ResetNumber(self):
for item in self.numberPoints:
GPIO.output(item, 1)
def __DisplayNumberSwitch(self, arg):
# print('arg='+str(arg))
if arg == '.':
self.__Display_DOT()
# 上方小圈用小o,下方小圈用中文句号
elif arg == 'o':
self.__Display_TopCircle()
elif arg == '。':
self.__Display_DownCircle()
# -----------------------------
elif arg == '0':
self.__Display_0()
elif arg == '1':
self.__Display_1()
elif arg == '2':
self.__Display_2()
elif arg == '3':
self.__Display_3()
elif arg == '4':
self.__Display_4()
elif arg == '5':
self.__Display_5()
elif arg == '6':
self.__Display_6()
elif arg == '7':
self.__Display_7()
elif arg == '8':
self.__Display_8()
elif arg == '9':
self.__Display_9()
# -----------------------------
elif arg == 'A':
self.__Display_A()
elif arg == 'B':
self.__Display_B()
elif arg == 'C':
self.__Display_C()
elif arg == 'D':
self.__Display_D()
elif arg == 'd':
self.__Display_d()
elif arg == 'E':
self.__Display_E()
elif arg == 'F':
self.__Display_F()
elif arg == 'G':
self.__Display_G()
elif arg == 'H':
self.__Display_H()
elif arg == 'I':
self.__Display_I()
elif arg == 'J':
self.__Display_J()
elif arg == 'L':
self.__Display_L()
elif arg == 'O':
self.__Display_O()
elif arg == 'P':
self.__Display_P()
elif arg == 'S':
self.__Display_S()
elif arg == 'U':
self.__Display_U()
elif arg == 'V':
self.__Display_V()
else:
None
def __Display_DOT(self):
GPIO.output(self.dp, 0)
def __Display_TopCircle(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.g, 0)
GPIO.output(self.f, 0)
def __Display_DownCircle(self):
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.g, 0)
# -----------------------------
def __Display_0(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
def __Display_1(self):
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
def __Display_2(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.g, 0)
def __Display_3(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.g, 0)
def __Display_4(self):
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
def __Display_5(self):
GPIO.output(self.a, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
def __Display_6(self):
GPIO.output(self.a, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
def __Display_7(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
def __Display_8(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
def __Display_9(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
# -----------------------------
def __Display_A(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
    def __Display_B(self):
        # 'B' reuses the same segment pattern as the digit 8.
        self.__Display_8()
def __Display_C(self):
GPIO.output(self.a, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
def __Display_d(self):
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.g, 0)
def __Display_D(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
def __Display_E(self):
GPIO.output(self.a, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
def __Display_F(self):
GPIO.output(self.a, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
    def __Display_G(self):
        # 'G' reuses the same segment pattern as the digit 6.
        self.__Display_6()
def __Display_H(self):
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
    def __Display_I(self):
        # 'I' reuses the same segment pattern as the digit 1.
        self.__Display_1()
def __Display_J(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
def __Display_L(self):
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
    def __Display_O(self):
        # 'O' reuses the same segment pattern as the digit 0.
        self.__Display_0()
def __Display_P(self):
GPIO.output(self.a, 0)
GPIO.output(self.b, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
GPIO.output(self.g, 0)
    def __Display_S(self):
        # 'S' reuses the same segment pattern as the digit 5.
        self.__Display_5()
def __Display_U(self):
GPIO.output(self.b, 0)
GPIO.output(self.c, 0)
GPIO.output(self.d, 0)
GPIO.output(self.e, 0)
GPIO.output(self.f, 0)
    def __Display_V(self):
        # 'V' reuses the same segment pattern as the letter U.
        self.__Display_U()
| StarcoderdataPython |
3225774 | """
----------------------------------
<NAME>
AM: 2011030054
email: <EMAIL>
----------------------------------
"""
import pickle
import crypto_1
import random
from collections import namedtuple
"""
-----------------------------------------
Helpfull function
-----------------------------------------
"""
#Check if 2 numbers are coprime
def coprime(a, b):
    """Return True when `a` and `b` share no common divisor other than 1.

    Uses Euclid's algorithm (O(log min(a, b))) instead of the previous
    O(min(a, b)) trial-division scan.  Also handles a/b == 0 correctly
    (0 is only coprime with 1), which the old scan got wrong.
    """
    from math import gcd
    return gcd(a, b) == 1
"""
-----------------------------------------
Keys Generation
-----------------------------------------
"""
#Generates the valus n,e,d for public,private keys of RSA
def generateRSAkeys(length):
    """Generate RSA values (n, e, d) for a modulus of exactly `length` bits.

    Returns:
        (n, e, d) on success; (None, None, None) on any failure (with a
        message printed).  Public key is (n, e), private key is (n, d).
    """
    from math import gcd
    # cannot run for length < 4
    if length < 4:
        print("\nLength must be >= 4!")
        return None, None, None
    # with the given length, n must lie in [min_n, max_n]
    min_n = 1 << (length - 1)
    max_n = (1 << length) - 1
    # upper and lower bounds for the prime-factor search (around sqrt(n))
    upper = 1 << (length // 2 + 1)
    lower = 1 << (length // 2 - 1)
    # collect all primes in [2, upper] by trial division
    primes = [2]
    for num in range(3, upper + 1, 2):
        for p in primes:
            if num % p == 0:
                break
        else:
            primes.append(num)
    # drop primes too small to be useful factors
    while primes and primes[0] < lower:
        del primes[0]
    # randomly pick primes p, q whose product fits the requested length
    while primes:
        p = random.choice(primes)
        primes.remove(p)
        q_candidates = [q for q in primes if min_n <= p * q <= max_n]
        if q_candidates:
            q = random.choice(q_candidates)
            break
    else:
        print("\nNo p,q can be found!")
        return None, None, None
    n = p * q
    phi_n = (p - 1) * (q - 1)
    # choose the smallest e with 1 < e < phi(n) and gcd(e, phi(n)) == 1
    for e in range(3, phi_n):
        if gcd(e, phi_n) == 1:
            break
    else:
        print("\nNo e can be found!")
        # BUG FIX: this path previously returned a 2-tuple (None, None),
        # breaking callers that unpack three values.
        return None, None, None
    # d is the modular inverse of e mod phi(n); phi(n) is even (p, q are
    # not both 2), so d must be odd — hence the step of 2.
    for d in range(3, phi_n, 2):
        if d * e % phi_n == 1:
            break
    else:
        print("\nNo d can be found!")
        return None, None, None
    # public key is (n, e); private key is (n, d)
    return n, e, d
"""
-----------------------------------------
Save Keys to Disk
-----------------------------------------
"""
#Save both keys on disk, private is encrypted with the given key
def savePair(n, e, d, encryptionKey):
    """Persist the public key [n, e] and the AES-encrypted private key
    [n, d] together in the file "keys.pair".
    """
    publicKey = [n, e]
    privateKey = [n, d]
    # encrypt the private key before it touches the disk
    aes_128 = crypto_1.AES()
    privateKey = aes_128.AES_ECBmodeEncryption(privateKey, encryptionKey)
    # BUG FIX: pickle requires a binary-mode file handle under Python 3;
    # a context manager also guarantees the file is closed on error.
    with open("keys.pair", "wb") as key_file:
        pickle.dump(publicKey, key_file)
        pickle.dump(privateKey, key_file)
#Retrieves both keys, private is decrypted with the given key
def retrievePair(encryptionKey):
    """Load both keys from "keys.pair"; the private key is decrypted
    with `encryptionKey` before being returned.

    Returns:
        (publicKey, privateKey) where each is a 2-item list [n, exponent].
    """
    # BUG FIX: pickle requires a binary-mode file handle under Python 3.
    with open("keys.pair", "rb") as key_file:
        publicKey = pickle.load(key_file)
        privateKey = pickle.load(key_file)
    # decrypt the private key before returning it
    aes_128 = crypto_1.AES()
    privateKey = aes_128.AES_ECBmodeDecryption(privateKey, encryptionKey)
    privateKey = [privateKey[0], privateKey[1]]
    return publicKey, privateKey
#Save public key
def savePublic(n, e):
    """Persist the public key [n, e] (unencrypted) to "key.pub"."""
    publicKey = [n, e]
    # BUG FIX: pickle requires a binary-mode file handle under Python 3.
    with open("key.pub", "wb") as key_file:
        pickle.dump(publicKey, key_file)
#Retrieve public key
def retrievePublic():
    """Load and return the public key [n, e] from "key.pub"."""
    # BUG FIX: pickle requires a binary-mode file handle under Python 3.
    with open("key.pub", "rb") as key_file:
        publicKey = pickle.load(key_file)
    return publicKey
#Save private key and encrypt it
def savePrivate(n, d, encryptionKey):
    """Persist the private key [n, d], AES-encrypted with `encryptionKey`,
    to the file "key.sec".
    """
    privateKey = [n, d]
    aes_128 = crypto_1.AES()
    privateKey = aes_128.AES_ECBmodeEncryption(privateKey, encryptionKey)
    # BUG FIX: pickle requires a binary-mode file handle under Python 3.
    with open("key.sec", "wb") as key_file:
        pickle.dump(privateKey, key_file)
#Retrive private key and decrypt it
def retrievePrivate(encryptionKey):
    """Load the private key from "key.sec" and decrypt it with
    `encryptionKey`; returns the 2-item list [n, d].
    """
    # BUG FIX: pickle requires a binary-mode file handle under Python 3.
    with open("key.sec", "rb") as key_file:
        privateKey = pickle.load(key_file)
    aes_128 = crypto_1.AES()
    privateKey = aes_128.AES_ECBmodeDecryption(privateKey, encryptionKey)
    privateKey = [privateKey[0], privateKey[1]]
    return privateKey
| StarcoderdataPython |
3383955 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Create custom shapes for pyprototypr
"""
# lib
import math
# third party
from reportlab.platypus import Paragraph
from reportlab.lib.styles import ParagraphStyle
# local
from pyprototypr.utils import tools
from pyprototypr.base import BaseShape, BaseCanvas, UNITS, COLORS, PAGES
DEBUG = False
class Value:
    """Wraps the list of possible values for a single card attribute."""
    def __init__(self, **kwargs):
        # Raw per-card values; the list index is the card ID.
        self.datalist = kwargs.get('datalist', [])
        # Card IDs of which the affected card is a member.
        self.members = []
    def __call__(self, cid):
        """Return the datalist entry for card number `cid`, or None."""
        try:
            return self.datalist[cid]
        except (ValueError, TypeError, IndexError):
            return None
class Query:
    """Query to select an element or a value for a card attribute.

    `query` holds 4-item entries (value-source, operator, target, joiner);
    calling the instance with a card ID evaluates every entry, combines
    the booleans, and returns `result` on a match else `alternate`.
    """
    def __init__(self, **kwargs):
        self.query = kwargs.get('query', [])
        self.result = kwargs.get('result', None)  # returned on a true match
        self.alternate = kwargs.get('alternate', None)  # returned otherwise
        self.members = []  # card IDs, of which affected card is a member
    def __call__(self, cid):
        """Process the query, for a given card 'ID' in the dataset."""
        #raise NotImplementedError
        result = None
        results = []
        for _query in self.query:
            if DEBUG: print(("shapes_54 _query", len(_query), '::', _query))
            if _query and len(_query) >= 4:
                # _query[0] is indexed by card ID (presumably a Value's
                # datalist — TODO confirm); compare against the target,
                # then remember the boolean joiner for the combine step.
                result = tools.comparer(
                    val=_query[0][cid], operator=_query[1], target=_query[2])
                results.append(result)
                results.append(_query[3])
        # compare across all
        result = tools.boolean_join(results)
        #print "shapes_61 cid %s Results %s" % (cid, results)
        if result is not None:
            if result:
                return self.result
            else:
                return self.alternate
        else:
            tools.feedback('Query "%s" is incorrectly constructed.' %
                           self.query)
class ImageShape(BaseShape):
    """
    Show an image on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Show an image on a given canvas.

        Placement: cx/cy centre the image (width and height are then
        mandatory); otherwise x/y give the lower-left corner.  All
        positions are shifted by the page margins plus off_x/off_y.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        img = None
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        height = self.unit(self.height, skip_none=True)
        width = self.unit(self.width, skip_none=True)
        if self.cx and self.cy and width and height:
            x = self.unit(self.cx) - width / 2.0 + delta_x
            y = self.unit(self.cy) - height / 2.0 + delta_y
        elif self.cx and self.cy and not(width or height):
            tools.feedback(
                'Must supply width and height for use with cx and cy',
                stop=True)
        else:
            x = self.unit(self.x) + delta_x
            y = self.unit(self.y) + delta_y
        # draw
        img = self.load_image(self.source)
        if img:
            # assumes 1 pt == 1 pixel ?
            cnv.drawImage(img, x=x, y=y, width=width, height=height,
                          mask='auto')
        # text: label at the vertical centre, title below, heading above
        xc = x + width / 2.0
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            self.draw_multi_string(cnv, xc, y + height / 2.0, self.label)
        if self.title:
            cnv.setFont(self.font_face, self.title_size)
            cnv.setFillColor(self.stroke_title)
            self.draw_multi_string(cnv, xc, y - cnv._leading, self.title)
        if self.heading:
            cnv.setFont(self.font_face, self.heading_size)
            cnv.setFillColor(self.stroke_heading)
            self.draw_multi_string(cnv, xc, y + height + cnv._leading,
                                   self.heading)
class LineShape(BaseShape):
    """
    Draw a line on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a line on a given canvas.

        The end point comes either from length+angle (polar form) or
        from x_1/y_1 (falling back to x+width / y+height).  row/col,
        when non-negative, shift the line by whole cell heights/widths.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x = self.unit(self.x) + delta_x
        y = self.unit(self.y) + delta_y
        height = self.unit(self.height)
        width = self.unit(self.width)
        #print "shapes_126 line", ID, ':margin:', uni, self.margin_left
        if self.length:
            # polar form: project the end point out from the start point
            length = self.unit(self.length)
            angle = math.radians(self.angle)
            x_1 = x + (length * math.cos(angle))
            y_1 = y + (length * math.sin(angle))
        else:
            if self.x_1:
                x_1 = self.unit(self.x_1) + delta_x
            else:
                x_1 = x + width
            if self.y_1:
                y_1 = self.unit(self.y_1) + delta_y
            else:
                y_1 = y + height
        # NOTE(review): the end point subtracts the margin again below
        # while the start point keeps it — looks asymmetric; confirm.
        if self.row is not None and self.row >= 0:
            y = y + self.row * height
            y_1 = y_1 + self.row * height - margin_bottom
        if self.col is not None and self.col >= 0:
            x = x + self.col * width
            x_1 = x_1 + self.col * width - margin_left
        if DEBUG:
            print((self.row, self.col, "=", x, x_1, ":", y, y_1))
        # canvas
        self.set_canvas_props()
        # draw line
        pth = cnv.beginPath()
        pth.moveTo(x, y)
        pth.lineTo(x_1, y_1)
        cnv.drawPath(pth, stroke=1, fill=1)
class RhombusShape(BaseShape):
    """
    Draw a rhombus on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a rhombus (diamond) on a given canvas.

        cx/cy centre the shape; otherwise x/y give the lower-left
        corner of its bounding box.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        height = self.unit(self.height)
        width = self.unit(self.width)
        if self.cx and self.cy:
            # NOTE(review): y uses "+ height / 2.0" where "-" would be
            # expected for a centred shape (same pattern as RectShape) —
            # confirm intent.
            x = self.unit(self.cx) - width / 2.0 + delta_x
            y = self.unit(self.cy) + height / 2.0 + delta_y
        else:
            x = self.unit(self.x) + delta_x
            y = self.unit(self.y) + delta_y
        # canvas
        self.set_canvas_props()
        fill = 0 if self.transparent else 1
        # draw: start at the left-hand vertex, then top, right, bottom
        x_s, y_s = x, y + height / 2.0
        pth = cnv.beginPath()
        pth.moveTo(x_s, y_s)
        pth.lineTo(x_s + width / 2.0, y_s + height / 2.0)
        pth.lineTo(x_s + width, y_s)
        pth.lineTo(x_s + width / 2.0, y_s - height / 2.0)
        pth.lineTo(x_s, y_s)
        pth.close()
        cnv.drawPath(pth, stroke=1, fill=fill)
        # text
        if self.label:
            x_c = x + width / 2.0
            y_c = y + height / 2.0
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            self.draw_multi_string(cnv, x_c, y_c, self.label)
class RectShape(BaseShape):
    """
    Draw a rectangle on a given canvas.
    """
    def __init__(self, _object=None, canvas=None, **kwargs):
        super(RectShape, self).__init__(_object=_object, canvas=canvas,
                                        **kwargs)
        # overrides: normalise a cx/cy centre spec to an x/y corner spec
        if self.cx and self.cy:
            self.x = self.cx - self.width / 2.0
            self.y = self.cy - self.height / 2.0
        self.kwargs = kwargs
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a rectangle on a given canvas.

        Position priority: row/col grid cell, then cx/cy centre, then
        x/y lower-left corner.  Optionally rounds corners, draws trim
        ("grid") marks at the page edges, tiles or stretches a pattern
        image, and writes a centred label.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        #print "shapes_246", self.height, self.width
        height = self.unit(self.height)
        width = self.unit(self.width)
        #print "shapes_249", height, width
        #print "shapes_246 rect", ID, ':margin:', uni, self.margin_left
        if self.row is not None and self.col is not None:
            x = self.col * width + delta_x
            y = self.row * height + delta_y
        elif self.cx and self.cy:
            # NOTE(review): y uses "+ height / 2.0" where "-" would be
            # expected for a centred shape — confirm intent.
            x = self.unit(self.cx) - width / 2.0 + delta_x
            y = self.unit(self.cy) + height / 2.0 + delta_y
        else:
            x = self.unit(self.x) + delta_x
            y = self.unit(self.y) + delta_y
        # canvas
        self.set_canvas_props()
        fill = 0 if self.transparent else 1
        # draw
        if self.rounding:
            # explicit corner radius
            rounding = self.unit(self.rounding)
            cnv.roundRect(
                x, y, width, height, rounding, stroke=1, fill=fill)
        elif self.rounded:
            # implicit corner radius: 8% of the width
            _rounding = width * 0.08
            cnv.roundRect(
                x, y, width, height, _rounding, stroke=1, fill=fill)
        else:
            cnv.rect(
                x, y, width, height, stroke=1, fill=fill)
        # grid marks: short trim lines at all four page edges, aligned
        # with the rectangle's sides
        self.set_canvas_props(stroke=self.grid_color,
                              stroke_width=self.grid_stroke_width)
        if self.grid_marks:
            deltag = self.unit(self.grid_length)
            pth = cnv.beginPath()
            gx, gy = 0, y  # left-side
            pth.moveTo(gx, gy)
            pth.lineTo(deltag, gy)
            pth.moveTo(0, gy + height)
            pth.lineTo(deltag, gy + height)
            gx, gy = x, self.pagesize[1]  # top-side
            pth.moveTo(gx, gy)
            pth.lineTo(gx, gy - deltag)
            pth.moveTo(gx + width, gy)
            pth.lineTo(gx + width, gy - deltag)
            gx, gy = self.pagesize[0], y  # right-side
            pth.moveTo(gx, gy)
            pth.lineTo(gx - deltag, gy)
            pth.moveTo(gx, gy + height)
            pth.lineTo(gx - deltag, gy + height)
            gx, gy = x, 0  # bottom-side
            pth.moveTo(gx, gy)
            pth.lineTo(gx, gy + deltag)
            pth.moveTo(gx + width, gy)
            pth.lineTo(gx + width, gy + deltag)
            # done
            cnv.drawPath(pth, stroke=1, fill=1)
        # pattern
        img = self.load_image(self.pattern)
        if img:
            #print "shapes_355", type(img._image), img._image.size
            iwidth = img._image.size[0]
            iheight = img._image.size[1]
            # repeat?
            if self.repeat:
                cnv.drawImage(img, x=x, y=y, width=iwidth, height=iheight,
                              mask='auto')
            else:
                # stretch
                # TODO - work out how to (a) fill and (b) cut off -- mask?
                # assume DPI = 300? 72pt = 1" = 300px -see
                # http://two.pairlist.net/pipermail/reportlab-users/2006-January/004670.html
                #w, h = yourImage.size
                #yourImage.crop((0, 30, w, h-30)).save(...)
                cnv.drawImage(img, x=x, y=y, width=width, height=height,
                              mask='auto')
        # text: centred label
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            x_t = x + width / 2.0
            y_t = y + height / 2.0
            self.draw_multi_string(cnv, x_t, y_t, self.label)
class ShapeShape(BaseShape):
    """
    Draw an irregular polygon, based on a set of points, on a given canvas.
    """
    def __init__(self, _object=None, canvas=None, **kwargs):
        super(ShapeShape, self).__init__(_object=_object, canvas=canvas,
                                         **kwargs)
        # overrides: accept left/bottom as aliases for x/y
        self.x = kwargs.get('x', kwargs.get('left', 0))
        self.y = kwargs.get('y', kwargs.get('bottom', 0))
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw an irregular polygon on a given canvas.

        `points` is either a sequence of (x, y) pairs or a string such
        as "1,2 3,4 4.5,8.8"; all coordinates are relative to
        (self.x, self.y).
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset: fold the shape's own origin into the deltas
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        x = self.unit(self.x)
        y = self.unit(self.y)
        delta_x = off_x + margin_left + x
        delta_y = off_y + margin_bottom + y
        # canvas
        self.set_canvas_props()
        fill = 0 if self.transparent else 1
        # draw
        if isinstance(self.points, str):
            # SPLIT STRING e.g. "1,2 3,4 4.5,8.8"
            _points = self.points.split(' ')
            points = [_point.split(',') for _point in _points]
        else:
            points = self.points
        if points and len(points) > 0:
            pth = cnv.beginPath()
            for key, vertex in enumerate(points):
                _x0, _y0 = float(vertex[0]), float(vertex[1])
                # convert to using units
                x = self.unit(_x0) + delta_x
                y = self.unit(_y0) + delta_y
                if key == 0:
                    pth.moveTo(x, y)
                pth.lineTo(x, y)
            pth.close()
            cnv.drawPath(pth, stroke=1, fill=fill)
class ArcShape(BaseShape):
    """
    Arc on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw arc on a given canvas.

        The arc is inscribed in the box (x, y)-(x_1, y_1); missing
        corner values default to x/y plus self.default_length.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x_1 = self.unit(self.x) + delta_x
        y_1 = self.unit(self.y) + delta_y
        # NOTE: these defaults mutate self.x_1 / self.y_1 in place
        if not self.x_1:
            self.x_1 = self.x + self.default_length
        if not self.y_1:
            self.y_1 = self.y + self.default_length
        x_2 = self.unit(self.x_1) + delta_x
        y_2 = self.unit(self.y_1) + delta_y
        # canvas
        self.set_canvas_props()
        #draw
        cnv.arc(x_1, y_1, x_2, y_2)
class BezierShape(BaseShape):
    """
    A Bezier curve on a given canvas.
    A Bezier curve is specified by four control points:
        (x1,y1), (x2,y2), (x3,y3), (x4,y4).
    The curve starts at (x1,y1) and ends at (x4,y4) and the line segment
    from (x1,y1) to (x2,y2) and the line segment from (x3,y3) to (x4,y4)
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw Bezier curve on a given canvas.

        Control points come from (x, y), (x_1, y_1), (x_2, y_2) and
        (x_3, y_3); missing x_1/y_1 default to x/y + default_length.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x_1 = self.unit(self.x) + delta_x
        y_1 = self.unit(self.y) + delta_y
        # NOTE: these defaults mutate self.x_1 / self.y_1 in place
        if not self.x_1:
            self.x_1 = self.x + self.default_length
        if not self.y_1:
            self.y_1 = self.y + self.default_length
        x_2 = self.unit(self.x_1) + delta_x
        y_2 = self.unit(self.y_1) + delta_y
        x_3 = self.unit(self.x_2) + delta_x
        y_3 = self.unit(self.y_2) + delta_y
        x_4 = self.unit(self.x_3) + delta_x
        y_4 = self.unit(self.y_3) + delta_y
        # canvas
        self.set_canvas_props()
        #draw
        cnv.bezier(x_1, y_1, x_2, y_2, x_3, y_3, x_4, y_4)
class PolygonShape(BaseShape):
    """
    A regular polygon on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a regular polygon centred on (x, y).

        Size comes from `radius` directly, or is derived from the side
        length via the circumradius formula R = s / (2*sin(pi/n)).
        Optionally adds label/title/heading text and a centre dot.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x = self.unit(self.x) + delta_x
        y = self.unit(self.y) + delta_y
        # calc - assumes x and y are the centre
        if self.radius:
            radius = self.unit(self.radius)
        else:
            side = self.unit(self.side)
            sides = int(self.sides)
            #180 degrees is math.pi radians
            radius = side / (2.0 * math.sin(math.pi / sides))
        vertices = tools.polygon_vertices(
            self.sides, radius, self.rotate, (x, y))
        if not vertices or len(vertices) == 0:
            return
        # canvas
        self.set_canvas_props()
        fill = 0 if self.transparent else 1
        # draw
        pth = cnv.beginPath()
        pth.moveTo(*vertices[0])
        for vertex in vertices:
            pth.lineTo(*vertex)
        pth.close()
        cnv.drawPath(pth, stroke=1, fill=fill)
        # text: label at the centre, title below, heading above
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            self.draw_multi_string(cnv, x, y, self.label)
        if self.title:
            cnv.setFont(self.font_face, self.title_size)
            cnv.setFillColor(self.stroke_title)
            self.draw_multi_string(cnv, x, y - 1.4 * radius, self.title)
        if self.heading:
            cnv.setFont(self.font_face, self.heading_size)
            cnv.setFillColor(self.stroke_heading)
            self.draw_multi_string(cnv, x, y + 1.3 * radius, self.heading)
        # dot
        if self.dot_size:
            dot_size = self.unit(self.dot_size)
            cnv.setFillColor(self.dot_color)
            cnv.setStrokeColor(self.dot_color)
            cnv.circle(x, y, dot_size, stroke=1, fill=1)
class PolylineShape(BaseShape):
    """
    A multi-part line on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a polyline on a given canvas.

        `points` may be a tuple-string parsed by tools.tuple_split, or
        an actual sequence of (x, y) pairs.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        points = tools.tuple_split(self.points)
        if not points:
            points = self.points
        if not points or len(points) == 0:
            tools.feedback("No points to draw or points are incorrect!")
            return
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # canvas
        self.set_canvas_props()
        fill = 0 if self.transparent else 1
        # draw
        pth = cnv.beginPath()
        for key, vertex in enumerate(points):
            x, y = vertex
            # convert to using units
            x = self.unit(x) + delta_x
            y = self.unit(y) + delta_y
            if key == 0:
                pth.moveTo(x, y)
            pth.lineTo(x, y)
        cnv.drawPath(pth, stroke=1, fill=fill)
class HexShape(BaseShape):
    """
    A hexagon on a given canvas.
    See: http://powerfield-software.com/?p=851
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a horizontal (pointy-sided) hexagon on a given canvas.

        Position priority: row/col grid placement (card grid when
        is_cards is set, staggered hex grid otherwise), then cx/cy
        centre, then x/y as the lower-left corner of the bounding box.
        """
        is_cards = kwargs.get('is_cards', False)
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # Calculate half_height and half_side from side
        side = self.unit(self.side)
        half_height = side * math.sqrt(3) / 2.0
        half_side = side / 2.0
        # Get coords for leftmost point
        #          __
        # x,y .. /    \
        #        \ __ /
        if self.row is not None and self.col is not None and is_cards:
            x = self.col * 2.0 * side + delta_x
            # BUG FIX: the y offset previously (and wrongly) added
            # delta_x instead of delta_y.
            y = half_height + self.row * 2.0 * half_height + delta_y
        elif self.row is not None and self.col is not None:
            # staggered hex grid: odd columns are shifted up a half-row
            x = self.col * (half_side + side) + delta_x
            y = half_height + half_height * self.row * 2.0 + (self.col % 2.0) \
                * half_height + delta_y
        elif self.cx and self.cy:
            # cx and cy are at the centre of the hex
            x_d = self.unit(self.cx)
            y_d = self.unit(self.cy)
            x = x_d - half_side - side / 2.0 + delta_x
            y = y_d + delta_y
        else:
            # x and y are at the bottom-left corner of the box around the hex
            x = self.unit(self.x) + delta_x
            y = self.unit(self.y) + delta_y + half_height
        # hex centre
        x_d = x + half_side + side / 2.0
        y_d = y
        if DEBUG:
            print(("592", x, y, half_height, half_side, side))
        # canvas
        self.set_canvas_props()
        # draw horizontal hexagon (clockwise)
        pth = cnv.beginPath()
        pth.moveTo(x, y)
        pth.lineTo(x + half_side, y + half_height)
        pth.lineTo(x + half_side + side, y + half_height)
        pth.lineTo(x + half_side + side + half_side, y)
        pth.lineTo(x + half_side + side, y - half_height)
        pth.lineTo(x + half_side, y - half_height)
        pth.close()
        cnv.drawPath(pth, stroke=1, fill=1)
        # centre dot
        if self.dot_size:
            dot_size = self.unit(self.dot_size)
            cnv.setFillColor(self.dot_color)
            cnv.setStrokeColor(self.dot_color)
            cnv.circle(x_d, y_d, dot_size, stroke=1, fill=1)
        if DEBUG:
            cnv.setStrokeColorRGB(0, 0, 0)
            cnv.drawCentredString(x - 10, y, '%s.%s' % (self.row, self.col))
        # text
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            self.draw_multi_string(cnv, x_d, y_d, self.label)
class StarShape(BaseShape):
    """
    A star on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a star centred on (x, y) with the given radius.

        Vertices are placed every 2/5 of a turn, starting straight up.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x = self.unit(self.x) + delta_x
        y = self.unit(self.y) + delta_y
        # calc - assumes x and y are the centre
        radius = self.unit(self.radius)
        # canvas
        self.set_canvas_props()
        # draw
        pth = cnv.beginPath()
        pth.moveTo(x, y + radius)
        # NOTE(review): the step angle is hard-coded for a 5-point star
        # (2/5 turn) although self.vertices is configurable; for
        # vertices != 5 the outline will not close correctly — confirm.
        angle = (2 * math.pi) * 2.0 / 5.0
        start_angle = math.pi / 2.0
        if DEBUG: print(('648 star self.vertices', self.vertices))
        for vertex in range(self.vertices - 1):
            next_angle = angle * (vertex + 1) + start_angle
            x_1 = x + radius * math.cos(next_angle)
            y_1 = y + radius * math.sin(next_angle)
            pth.lineTo(x_1, y_1)
        pth.close()
        cnv.drawPath(pth, stroke=1, fill=1)
        # text: label at the centre, title below, heading above
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            self.draw_multi_string(cnv, x, y, self.label)
        if self.title:
            cnv.setFont(self.font_face, self.title_size)
            cnv.setFillColor(self.stroke_title)
            self.draw_multi_string(cnv, x, y - 1.4 * radius, self.title)
        if self.heading:
            cnv.setFont(self.font_face, self.heading_size)
            cnv.setFillColor(self.stroke_heading)
            self.draw_multi_string(cnv, x, y + 1.3 * radius, self.heading)
class TextShape(BaseShape):
    """
    Text on a given canvas.
    """
    def __init__(self, _object=None, canvas=None, **kwargs):
        super(TextShape, self).__init__(_object=_object, canvas=canvas,
                                        **kwargs)
    def __call__(self, *args, **kwargs):
        """do something when I'm called"""
        if DEBUG: print("shapes_679: calling TextShape...")
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw text on a given canvas.

        With `wrap` set, the text is laid out as a ReportLab Paragraph
        inside a width-by-height box; otherwise it is drawn as a plain
        (multi-line) string at (x, y).
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x_t = self.unit(self.x) + delta_x
        y_t = self.unit(self.y) + delta_y
        # NOTE(review): width/height stay unbound when unset yet are used
        # in the wrap branch below — confirm callers always set them.
        if self.height:
            height = self.unit(self.height)
        if self.width:
            width = self.unit(self.width)
        # canvas
        self.set_canvas_props(cnv)
        # text
        _text = self.textify(ID)
        if self.wrap:
            _style = ParagraphStyle(name="sc")
            _style.textColor = self.stroke
            _style.borderColor = self.outline_color
            _style.borderWidth = self.outline_width
            _style.alignment = self.to_alignment()
            _style.fontSize = self.font_size
            _style.fontName = self.font_face
            _style.leading = self.leading
            """
            leading=12,
            leftIndent=0,
            rightIndent=0,
            firstLineIndent=0,
            spaceBefore=0,
            spaceAfter=0,
            bulletFontName='Times-Roman',
            bulletFontSize=10,
            bulletIndent=0,
            backColor=None,
            borderPadding= 0,
            borderRadius= None,
            allowWidows= 1,
            allowOrphans= 0,
            textTransform=None,  # 'uppercase' | 'lowercase' | None
            endDots=None,
            splitLongWords=1,
            """
            para = Paragraph(_text, style=_style)
            para.wrapOn(cnv, width, height)
            para.drawOn(cnv, x_t, y_t)
        else:
            cnv.setFillColor(self.stroke)
            self.draw_multi_string(cnv, x_t, y_t, _text)
class CircleShape(BaseShape):
    """
    Circle on a given canvas.
    """
    def __init__(self, _object=None, canvas=None, **kwargs):
        super(CircleShape, self).__init__(_object=_object, canvas=canvas,
                                          **kwargs)
        # overrides: normalise a cx/cy centre spec to the x/y corner of
        # the bounding square, and derive width/height from the radius
        if self.cx and self.cy:
            self.x = self.cx - self.radius
            self.y = self.cy - self.radius
            self.width = 2.0 * self.radius
            self.height = 2.0 * self.radius
        self.kwargs = kwargs
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw circle on a given canvas.

        Centre priority: row/col grid cell, then cx/cy, then x/y as
        the lower-left corner of the bounding square.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        radius = self.unit(self.radius)
        if self.row is not None and self.col is not None:
            x_c = self.col * 2.0 * radius + radius + delta_x
            y_c = self.row * 2.0 * radius + radius + delta_y
            #print "shapes_735", self.col, self.row, "::", x_c, y_c
        elif self.cx and self.cy:
            x_c = self.unit(self.cx) + delta_x
            y_c = self.unit(self.cy) + delta_y
        else:
            x_c = self.unit(self.x) + delta_x + radius
            y_c = self.unit(self.y) + delta_y + radius
        # canvas
        self.set_canvas_props()
        # draw
        cnv.circle(x_c, y_c, radius, stroke=1, fill=1)
        # text: centred label
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            self.draw_multi_string(cnv, x_c, y_c, self.label)
class EllipseShape(BaseShape):
    """
    Ellipse on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw ellipse on a given canvas.

        The ellipse is inscribed in the box (x, y)-(x_1, y_1); missing
        corner values default to x/y plus self.default_length.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x_1 = self.unit(self.x) + delta_x
        y_1 = self.unit(self.y) + delta_y
        # NOTE: these defaults mutate self.x_1 / self.y_1 in place
        if not self.x_1:
            self.x_1 = self.x + self.default_length
        if not self.y_1:
            self.y_1 = self.y + self.default_length
        x_2 = self.unit(self.x_1) + delta_x
        y_2 = self.unit(self.y_1) + delta_y
        # canvas
        self.set_canvas_props()
        #draw
        cnv.ellipse(x_1, y_1, x_2, y_2, stroke=1, fill=1)
        # text: label at the centre of the bounding box
        if self.label:
            cnv.setFont(self.font_face, self.label_size)
            cnv.setFillColor(self.stroke_label)
            x_c = (x_2 - x_1) / 2.0 + x_1
            y_c = (y_2 - y_1) / 2.0 + y_1
            self.draw_multi_string(cnv, x_c, y_c, self.label)
class GridShape(BaseShape):
    """
    A grid on a given canvas.
    """
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a grid of `rows` x `cols` cells starting at (x, y).

        `width`/`height` size each cell; `size`, if set, forces square
        cells of that size.
        """
        cnv = cnv.canvas if cnv else self.canvas.canvas
        # offset
        margin_left = self.unit(self.margin_left)
        margin_bottom = self.unit(self.margin_bottom)
        off_x = self.unit(off_x)
        off_y = self.unit(off_y)
        delta_x = off_x + margin_left
        delta_y = off_y + margin_bottom
        # convert to using units
        x = self.unit(self.x) + delta_x
        y = self.unit(self.y) + delta_y
        height = self.unit(self.height)  # of each grid item
        width = self.unit(self.width)  # of each grid item
        if self.size:  # square grid
            size = self.unit(self.size)
            height, width = size, size
        # y_cols: y positions of the horizontal lines;
        # x_cols: x positions of the vertical lines
        y_cols, x_cols = [], []
        for y_col in range(0, self.rows + 1):
            y_cols.append(y + y_col*height)
        for x_col in range(0, self.cols + 1):
            x_cols.append(x + x_col*width)
        # canvas
        self.set_canvas_props()
        # draw
        cnv.grid(x_cols, y_cols)  # , stroke=1, fill=1)
class CommonShape(BaseShape):
    """
    Attributes common to, or used by, multiple shapes
    """
    def __init__(self, _object=None, canvas=None, **kwargs):
        super(CommonShape, self).__init__(_object=_object, canvas=canvas,
                                          **kwargs)
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Not applicable."""
        # This shape only carries shared attribute defaults; drawing it
        # is a user error, reported via the project's feedback channel.
        tools.feedback('This shape cannot be drawn.')
### deck/card ================================================================
class CardShape(BaseShape):
    """
    Card attributes: the card's outline shape plus the elements drawn on it.
    """
    def __init__(self, _object=None, canvas=None, **kwargs):
        super(CardShape, self).__init__(_object=_object, canvas=canvas,
                                        **kwargs)
        self.elements = []  # container for objects which get added to the card
        self.height = kwargs.get('height', 8.8)
        self.width = kwargs.get('width', 6.3)
        # width/height are consumed here; stop them reaching child shapes
        self.kwargs.pop('width', None)
        self.kwargs.pop('height', None)
    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw an element on a given canvas (not supported for a card)."""
        raise NotImplementedError
    def draw_card(self, cnv, row, col, cid):
        """Draw card number `cid` at grid position (`row`, `col`) on `cnv`.

        First draws the card outline (rectangle, circle or hexagon) and
        then every element attached to the card; elements are either
        plain shapes, or callables (Query) that resolve to shapes.
        """
        # draw outline
        label = "ID:%s" % cid if self.show_id else ''
        if self.shape == 'rectangle':
            outline = RectShape(label=label,
                                height=self.height, width=self.width,
                                canvas=cnv, col=col, row=row, **self.kwargs)
            outline.draw()
        elif self.shape == 'circle':
            self.height = self.radius * 2.0
            self.width = self.radius * 2.0
            self.kwargs['radius'] = self.radius
            outline = CircleShape(label=label,
                                  canvas=cnv, col=col, row=row, **self.kwargs)
            outline.draw()
        elif self.shape == 'hexagon':
            self.height = self.side * math.sqrt(3.0)
            self.width = self.side * 2.0
            self.kwargs['side'] = self.side
            outline = HexShape(label=label,
                               canvas=cnv, col=col, row=row, **self.kwargs)
            outline.draw(is_cards=True)
        else:
            tools.feedback('Unable to draw a %s-shaped card.' % self.shape,
                           stop=True)
        # draw the card's elements, offset into this card's grid cell
        flat_elements = tools.flatten(self.elements)
        for flat_ele in flat_elements:
            members = flat_ele.members or self.members
            try:  # - normal element
                iid = members.index(cid + 1)
                flat_ele.draw(
                    cnv=cnv,
                    off_x=col*self.width,
                    off_y=row*self.height,
                    ID=iid)
            except AttributeError:
                # query ... get a new element ... or not!?
                new_ele = flat_ele(cid=self.shape_id)  # uses __call__ on Query
                if new_ele:
                    flat_new_eles = tools.flatten(new_ele)
                    for flat_new_ele in flat_new_eles:
                        members = flat_new_ele.members or self.members
                        iid = members.index(cid + 1)
                        flat_new_ele.draw(
                            cnv=cnv,
                            off_x=col*self.width,
                            off_y=row*self.height,
                            ID=iid)
            except ValueError:
                tools.feedback('Unable to draw card #%s' % (cid + 1))
            except Exception as err:
                # BUG FIX: the message previously lacked its closing ")"
                tools.feedback('Unable to draw card #%s (Error: %s)' % ((cid + 1), err))
class DeckShape(BaseShape):
    """Placeholder for the deck design; list of CardShapes and Shapes."""

    def __init__(self, _object=None, canvas=None, **kwargs):
        super(DeckShape, self).__init__(_object=_object, canvas=canvas,
                                        **kwargs)
        # page (size converted from points into user units)
        self.page_width = self.pagesize[0] / self.units
        self.page_height = self.pagesize[1] / self.units
        # cards
        self.deck = []  # container for CardShape objects
        self.cards = kwargs.get('cards', 9)  # default total number of cards
        self.height = kwargs.get('height', 8.8)  # OVERWRITE
        self.width = kwargs.get('width', 6.3)  # OVERWRITE
        self.sequence = kwargs.get('sequence', [])  # e.g. "1-2" or "1-5,8,10"
        self.template = kwargs.get('template', None)
        # user provided-rows and -columns
        self.card_rows = kwargs.get('rows', None)
        self.card_cols = kwargs.get('cols', kwargs.get('columns', None))
        # data file
        self.data_file = kwargs.get('data', None)
        self.data_cols = kwargs.get('data_cols', None)
        self.data_rows = kwargs.get('data_rows', None)
        self.data_header = kwargs.get('data_header', True)
        # GO!
        self.create(self.cards)

    def create(self, cards):
        """Create a new deck, based on number of `cards`"""
        if DEBUG:
            print(("Cards are: %s" % self.sequence))
        self.deck = []
        for card in range(0, cards):
            _card = CardShape(**self.kwargs)
            _card.shape_id = card  # internal card number (0-based)
            self.deck.append(_card)

    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Lay out the whole deck on the canvas, starting new pages as
        needed once a page's row/column grid is full."""
        cnv = cnv if cnv else self.canvas
        # user-defined rows and cols
        max_rows = self.card_rows
        max_cols = self.card_cols
        # calculate rows/cols based on page size and margins
        if not max_rows:
            row_space = \
                float(self.page_height) - self.margin_bottom - self.margin_top
            max_rows = int(row_space / float(self.height))
        if not max_cols:
            col_space = \
                float(self.page_width) - self.margin_left - self.margin_right
            max_cols = int(col_space / float(self.width))
        row, col = 0, 0
        # generate cards
        for key, card in enumerate(self.deck):
            card.draw_card(cnv, row=row, col=col, cid=card.shape_id)
            col += 1
            if col >= max_cols:
                col = 0
                row += 1
            if row >= max_rows:
                row, col = 0, 0
                # page full: emit it, unless this was the last card
                if key + 1 != len(self.deck):
                    cnv.canvas.showPage()

    def get(self, cid):
        """Return a card based on the internal ID"""
        for card in self.deck:
            if card.shape_id == cid:
                return card
        return None

    def count(self):
        """Return number of cards in the deck"""
        return len(self.deck)
### repeats ===================================================================
class RepeatShape(BaseShape):
    """Draw a Shape multiple times, laid out on a rows x cols grid."""

    def __init__(self, _object=None, canvas=None, **kwargs):
        super(RepeatShape, self).__init__(_object=_object, canvas=canvas,
                                          **kwargs)
        # UPDATE SELF WITH COMMON
        # copy every attribute of `common` that differs from the BaseCanvas
        # default, i.e. was explicitly set by the user
        if self.common:
            attrs = vars(self.common)
            for attr in list(attrs.keys()):
                if attr not in ['canvas', 'common', 'stylesheet'] and \
                        attr[0] != '_':
                    common_attr = getattr(self.common, attr)
                    base_attr = getattr(BaseCanvas(), attr)
                    if common_attr != base_attr:
                        setattr(self, attr, common_attr)
        self._object = _object  # incoming Shape object
        # repeat
        self.rows = kwargs.get('rows', 1)
        self.cols = kwargs.get('cols', kwargs.get('columns', 1))
        self.repeat = kwargs.get('repeat', None)
        self.offset_across = self.offset_across or self.offset
        self.offset_down = self.offset_down or self.offset
        self.gap_across = self.gap_across or self.gap
        self.gap_down = self.gap_down or self.gap
        if self.repeat:
            # NOTE(review): `self.gap_across` is assigned twice in this
            # unpacking and `offset_across` never is - the field order of
            # the comma-separated `repeat` string looks wrong; confirm the
            # intended layout before relying on it.
            self.repeat_across, self.repeat_down, \
                self.gap_down, self.gap_across, \
                self.gap_across, self.offset_down = \
                self.repeat.split(',')
        else:
            self.across = kwargs.get('across', self.cols)
            self.down = kwargs.get('down', self.rows)
            # `across`/`down` may be an int count or an explicit list of
            # 1-based cell numbers; ints are expanded to the full range
            try:
                self.down = list(range(1, self.down + 1))
            except TypeError:
                pass
            try:
                self.across = list(range(1, self.across + 1))
            except TypeError:
                pass

    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        # NOTE(review): debug prints left in place
        print(("1046", self.offset_across, self.offset_down))
        print(("1047", self.gap_across, self.gap_down))
        for col in range(self.cols):
            for row in range(self.rows):
                # only draw cells whose 1-based position is enabled
                if ((col+1) in self.across) and ((row+1) in self.down):
                    off_x = col*self.width + \
                        col*(self.offset_across - self.margin_left)
                    off_y = row*self.height + \
                        row*(self.offset_down - self.margin_bottom)
                    flat_elements = tools.flatten(self._object)
                    for flat_ele in flat_elements:
                        try:  # normal element
                            flat_ele.draw(off_x=off_x, off_y=off_y,
                                          ID=self.shape_id)
                        except AttributeError:
                            # a Query: calling it may yield new element(s)
                            new_ele = flat_ele(cid=self.shape_id)
                            if new_ele:
                                flat_new_eles = tools.flatten(new_ele)
                                for flat_new_ele in flat_new_eles:
                                    flat_new_ele.draw(off_x=off_x, off_y=off_y,
                                                      ID=self.shape_id)
### other ====================================================================
class ConnectShape(BaseShape):
    """
    Connect two shapes (Rectangle), based on a position, on a given canvas.

    Quadrants are measured from the centre of the "from" shape:

       Q4 | Q1
       -------
       Q3 | Q2
    """

    def __init__(self, _object=None, canvas=None, **kwargs):
        super(ConnectShape, self).__init__(_object=_object, canvas=canvas,
                                           **kwargs)
        # overrides
        self.kwargs = kwargs
        self.shape_from = kwargs.get('shape_from', None)
        self.shape_to = kwargs.get('shape_to', None)

    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw a connection (line) between two shapes on given canvas."""
        cnv = cnv
        ID = ID
        # style
        style = self.style or 'direct'
        # shapes and position - default style
        # shape_from/shape_to may be a bare shape or a (shape, position)
        # tuple; defaults: leave bottom-centre, arrive top-centre
        try:
            shape_from, shape_from_position = self.shape_from  # tuple form
        except:
            shape_from, shape_from_position = self.shape_from, 'BC'
        try:
            shape_to, shape_to_position = self.shape_to  # tuple form
        except:
            shape_to, shape_to_position = self.shape_to, 'TC'
        # props
        edge_from = shape_from.get_edges()
        edge_to = shape_to.get_edges()
        x_f, y_f = self.key_positions(shape_from, shape_from_position)
        x_t, y_t = self.key_positions(shape_to, shape_to_position)
        xc_f, yc_f = self.shape_from.get_center()
        xc_t, yc_t = self.shape_to.get_center()
        # x,y: use fixed/supplied; or by "name"; or by default; or by "smart"
        if style == 'path':
            # build a polyline that dog-legs between the two shapes,
            # chosen by the relative position (aligned, or quadrant Q1-Q4)
            points = []
            if xc_f == xc_t and yc_f > yc_t:  # above
                points = [
                    self.key_positions(shape_from, 'BC'),
                    self.key_positions(shape_to, 'TC')
                ]
            if xc_f == xc_t and yc_f < yc_t:  # below
                points = [
                    self.key_positions(shape_from, 'TC'),
                    self.key_positions(shape_to, 'BC')
                ]
            if xc_f > xc_t and yc_f == yc_t:  # left
                points = [
                    self.key_positions(shape_from, 'LC'),
                    self.key_positions(shape_to, 'RC')
                ]
            if xc_f < xc_t and yc_f == yc_t:  # right
                points = [
                    self.key_positions(shape_from, 'RC'),
                    self.key_positions(shape_to, 'LC')
                ]
            if xc_f < xc_t and yc_f < yc_t:  # Q1
                print("Q1")
                if edge_from['right'] < edge_to['left']:
                    if edge_from['top'] < edge_to['bottom']:
                        # no overlap: route half-way through the vertical gap
                        print((" A", edge_from['top'], edge_to['bottom']))
                        delta = (edge_to['bottom'] - edge_from['top']) / 2.0
                        points = [
                            self.key_positions(shape_from, 'TC'),
                            (xc_f, edge_from['top'] + delta),
                            (xc_t, edge_from['top'] + delta),
                            self.key_positions(shape_to, 'BC')
                        ]
                    elif edge_from['top'] > edge_to['bottom']:
                        # vertical overlap: go up then across to the side
                        print((" B", edge_from['top'], edge_to['bottom']))
                        points = [
                            self.key_positions(shape_from, 'TC'),
                            (xc_f, yc_t),
                            self.key_positions(shape_to, 'LC')
                        ]
                    else:
                        pass
                else:
                    print((" C", edge_from['top'], edge_to['bottom']))
                    points = [
                        self.key_positions(shape_from, 'TC'),
                        (xc_f, yc_t),
                        self.key_positions(shape_to, 'LC')
                    ]
            if xc_f < xc_t and yc_f > yc_t:  # Q2
                # NOTE(review): Q2 routing not implemented (debug print only)
                print("Q2")
            if xc_f > xc_t and yc_f > yc_t:  # Q3
                # NOTE(review): Q3 routing not implemented (debug print only)
                print("Q3")
            if xc_f > xc_t and yc_f < yc_t:  # Q4
                print("Q4")
                if edge_from['left'] < edge_to['right']:
                    if edge_from['top'] < edge_to['bottom']:
                        print((" A", edge_from['top'], edge_to['bottom']))
                        delta = (edge_to['bottom'] - edge_from['top']) / 2.0
                        points = [
                            self.key_positions(shape_from, 'TC'),
                            (xc_f, edge_from['top'] + delta),
                            (xc_t, edge_from['top'] + delta),
                            self.key_positions(shape_to, 'BC')
                        ]
                    elif edge_from['top'] > edge_to['bottom']:
                        print((" B", edge_from['top'], edge_to['bottom']))
                        points = [
                            self.key_positions(shape_from, 'TC'),
                            (xc_f, yc_t),
                            self.key_positions(shape_to, 'RC')
                        ]
                    else:
                        pass
                else:
                    print((" C", edge_from['top'], edge_to['bottom']))
                    points = [
                        self.key_positions(shape_from, 'TC'),
                        (xc_f, yc_t),
                        self.key_positions(shape_to, 'RC')
                    ]
            if xc_f == xc_t and yc_f == yc_t:  # same!
                return
            self.kwargs['points'] = points
            plin = PolylineShape(None, cnv, **self.kwargs)
            plin.draw(ID=ID)
        elif style == 'direct':  # straight line
            self.kwargs['x'] = x_f
            self.kwargs['y'] = y_f
            self.kwargs['x1'] = x_t
            self.kwargs['y1'] = y_t
            lin = LineShape(None, cnv, **self.kwargs)
            lin.draw(ID=ID)
        else:
            tools.feedback('Style "%s" is not known.' % style)

    def key_positions(self, _shape, location=None):
        """Calculate a dictionary of key positions around a Rectangle.

        T,B,L,R,C = Top, Bottom, Left, Right, Center
        """
        top = _shape.y + _shape.height
        btm = _shape.y
        mid_horizontal = _shape.x + _shape.width / 2.0
        mid_vertical = _shape.y + _shape.height / 2.0
        left = _shape.x
        right = _shape.x + _shape.width
        _positions = {
            'TL': (left, top),
            'TC': (mid_horizontal, top),
            'TR': (right, top),
            'BL': (left, btm),
            'BC': (mid_horizontal, btm),
            'BR': (right, btm),
            'LC': (left, mid_vertical),
            'RC': (right, mid_vertical),
        }
        if location:
            # unknown location names yield an empty tuple
            return _positions.get(location, ())
        else:
            return _positions
class FooterShape(BaseShape):
    """
    Footer for a page.

    Draws centred text halfway down the bottom margin; the text defaults
    to "Page <ID>".
    """

    def __init__(self, _object=None, canvas=None, **kwargs):
        super(FooterShape, self).__init__(_object=_object, canvas=canvas,
                                          **kwargs)
        # overrides: centre horizontally, halfway into the bottom margin
        page_width = self.pagesize[0]
        self.kwargs['x'] = self.kwargs.get('x', page_width / 2.0)
        self.kwargs['y'] = self.margin_bottom / 2.0

    def draw(self, cnv=None, off_x=0, off_y=0, ID=None, **kwargs):
        """Draw footer on a given canvas page.

        `ID` is the page number used for the default "Page N" text.
        """
        cnv = cnv if cnv else self.canvas
        if not self.kwargs.get('text'):
            self.kwargs['text'] = 'Page %s' % ID
        # BUG FIX: pass the stored kwargs through as keyword arguments;
        # previously they were passed as a single `kwargs=` dict, so
        # TextShape never received `x`, `y` or `text` (other shapes in
        # this file consistently use **self.kwargs).
        text = TextShape(_object=None, canvas=cnv, **self.kwargs)
        text.draw()
| StarcoderdataPython |
3276584 | <filename>script.module.nanscrapers/lib/nanscrapers/scraperplugins/hubmovie.py
import re
import requests
import xbmc
import urllib
from ..scraper import Scraper
from ..common import clean_title,clean_search
session = requests.Session()
User_Agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12H143 Safari/600.1.4'
class hubmovie(Scraper):
    """Scraper for hubmovie.cc.

    Searches the site for a movie or episode page, then harvests the
    outbound host links and estimates their quality.
    NOTE: Python 2 syntax (`except Exception, argument`).
    """
    domains = ['http://hubmovie.cc']
    name = "Hubmovie"
    sources = []

    def __init__(self):
        self.base_link = 'http://hubmovie.cc'
        self.sources = []

    def scrape_movie(self, title, year, imdb, debrid=False):
        """Return sources for a movie matched by cleaned title and year."""
        try:
            search_id = clean_search(title.lower())
            start_url = '%s/pages/search/%s' %(self.base_link,search_id.replace(' ','%20'))
            headers = {'accept':'*/*','accept-encoding':'gzip, deflate, br','accept-language':'en-US,en;q=0.8','content-type':'text/html; charset=utf-8',
                       'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',
                       'origin':self.base_link,'referer':self.base_link,'x-requested-with':'XMLHttpRequest'}
            # priming request: establishes session cookies before searching
            response = session.get(self.base_link,headers=headers,timeout=5)
            html = requests.get(start_url,headers=headers,timeout=5).content
            page = html.split('<div id="movies_cont">')[1]
            Regex = re.compile('href="(.+?)".+?<h1>(.+?)</h1>.+?class="poster_tag">(.+?)</li>',re.DOTALL).findall(page)
            for item_url,name,date in Regex:
                if clean_title(title).lower() == clean_title(name).lower():
                    if year in date:
                        # NOTE(review): replaces every '.' in the href with the
                        # base URL - only safe if the link has no other dots
                        movie_link = item_url.replace('.',self.base_link)
                        self.get_source(movie_link)
            return self.sources
        except Exception, argument:
            # scraping is best-effort: swallow errors, return what we have
            return self.sources

    def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
        """Return sources for one episode of a show matched by cleaned title."""
        try:
            search_id = clean_search(title.lower())
            start_url = '%s/pages/search/%s' %(self.base_link,search_id.replace(' ','%20'))
            headers = {'accept':'*/*','accept-encoding':'gzip, deflate, br','accept-language':'en-US,en;q=0.8','content-type':'text/html; charset=utf-8',
                       'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',
                       'origin':self.base_link,'referer':self.base_link,'x-requested-with':'XMLHttpRequest'}
            response = session.get(self.base_link,headers=headers,timeout=5)
            html = requests.get(start_url,headers=headers,timeout=5).content
            page = html.split('<div id="movies_cont">')[1]
            Regex = re.compile('href="(.+?)".+?<h1>(.+?)</h1>',re.DOTALL).findall(page)
            for item_url,name in Regex:
                if clean_title(title).lower() == clean_title(name).lower():
                    movie_link = item_url.replace('.',self.base_link)
                    # episode pages hang off the show URL
                    movie_link = movie_link + '/season-%s-episode-%s' %(season,episode)
                    self.get_source(movie_link)
            return self.sources
        except Exception, argument:
            return self.sources

    def get_source(self,movie_link):
        """Scrape `movie_link` and append host links to self.sources."""
        try:
            headers = {'accept':'*/*','accept-encoding':'gzip, deflate, br','accept-language':'en-US,en;q=0.8','content-type':'text/html; charset=utf-8',
                       'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',
                       'origin':self.base_link,'referer':self.base_link,'x-requested-with':'XMLHttpRequest'}
            response = session.get(self.base_link,headers=headers,timeout=5)
            html = requests.get(movie_link,headers=headers,timeout=5).content
            sources = re.compile('<div class="link_go".+?href="(.+?)"',re.DOTALL).findall(html)
            for link in sources:
                if 'openload' in link:
                    # openload pages advertise the resolution in the meta
                    # description tag; use it to grade quality
                    headers = {'User_Agent':User_Agent}
                    get_res=requests.get(link,headers=headers,timeout=5).content
                    rez = re.compile('description" content="(.+?)"',re.DOTALL).findall(get_res)[0]
                    if '1080p' in rez:
                        qual = '1080p'
                    elif '720p' in rez:
                        qual='720p'
                    else:
                        qual='DVD'
                else: qual = 'DVD'
                # host label = bare domain name, e.g. "Openload"
                host = link.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                self.sources.append({'source': host,'quality': qual,'scraper': self.name,'url': link,'direct': False})
        except:
            pass
| StarcoderdataPython |
3247106 | """
@name: Modules/House/Lighting/__init__.py
@author: <NAME>
@contact: <EMAIL>
@copyright: (c) 2011-2020 by <NAME>
@note: Created on May 1, 2011
@license: MIT License
@summary: This module handles the lights component of the lighting system.
"""
__updated__ = '2020-02-16'
__version_info__ = (20, 1, 20)
__version__ = '.'.join(map(str, __version_info__))
# Names of the lighting sub-modules handled by this package.
MODULES = [
    'Buttons',
    'Controllers',
    'Lights',
    'Outlets'
    ]
# Section name used in the lighting configuration file.
CONFIG_NAME = 'lighting'
class LightingInformation:
    """
    ==> PyHouse.House.Lighting.xxx as in the def below

    Each attribute starts as None and is later replaced with the matching
    *Information object (Buttons, Controllers, Lights, Outlets).
    """

    def __init__(self):
        for category in ('Buttons', 'Controllers', 'Lights', 'Outlets'):
            setattr(self, category, None)
        self._Apis = {}
class LightingClass:
    """Top-level lighting API placeholder (no behaviour yet)."""

    def __init__(self):
        pass
# ## END DBK
| StarcoderdataPython |
3334245 | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class Shun(ModuleData, XLineBase):
    """txircd module implementing SHUN x-lines: matching users stay
    connected but may only use a whitelisted set of commands.
    NOTE: Python 2 code (`basestring`, `itervalues`).
    """
    implements(IPlugin, IModuleData)

    name = "Shun"
    lineType = "SHUN"

    def actions(self):
        # re-check lines on connect and ident/host changes, filter
        # commands, restrict /SHUN to opers, expose stats, sync on burst
        return [ ("welcome", 10, self.checkLines),
                ("changeident", 10, self.checkIdentChange),
                ("changehost", 10, self.checkHostChange),
                ("commandpermission", 50, self.blockShunned),
                ("commandpermission-SHUN", 10, self.restrictToOper),
                ("statsruntype-shuns", 10, self.generateInfo),
                ("burst", 10, self.burstLines) ]

    def userCommands(self):
        return [ ("SHUN", 1, UserShun(self)) ]

    def serverCommands(self):
        return [ ("ADDLINE", 1, ServerAddShun(self)),
                ("DELLINE", 1, ServerDelShun(self)) ]

    def load(self):
        self.initializeLineStorage()

    def verifyConfig(self, config):
        """Validate the optional shun_commands configuration list."""
        if "shun_commands" in config:
            if not isinstance(config["shun_commands"], list):
                raise ConfigValidationError("shun_commands", "value must be a list")
            for command in config["shun_commands"]:
                if not isinstance(command, basestring):
                    raise ConfigValidationError("shun_commands", "\"{}\" is not a valid command".format(command))

    def checkUserMatch(self, user, mask, data):
        """True if ident@(display host | real host | IP) matches `mask`."""
        banMask = self.normalizeMask(mask)
        userMask = ircLower("{}@{}".format(user.ident, user.host()))
        if fnmatchcase(userMask, banMask):
            return True
        userMask = ircLower("{}@{}".format(user.ident, user.realHost))
        if fnmatchcase(userMask, banMask):
            return True
        userMask = ircLower("{}@{}".format(user.ident, user.ip))
        if fnmatchcase(userMask, banMask):
            return True
        return False

    def checkLines(self, user):
        """Set or clear the per-user "shunned" cache flag."""
        if self.matchUser(user) is not None:
            user.cache["shunned"] = True
            self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a shun", user=user)
        elif "shunned" in user.cache:
            del user.cache["shunned"]

    def checkIdentChange(self, user, oldIdent, fromServer):
        self.checkLines(user)

    def checkHostChange(self, user, hostType, oldHost, fromServer):
        # only re-check users local to this server
        if user.uuid[:3] == self.ircd.serverID:
            self.checkLines(user)

    def blockShunned(self, user, command, data):
        """Deny (False) commands outside the allowed list for shunned
        users; None leaves the decision to other permission handlers."""
        if "shunned" not in user.cache:
            return None
        if command not in self.ircd.config.get("shun_commands", ["JOIN", "PART", "QUIT", "PING", "PONG"]):
            return False
        return None

    def restrictToOper(self, user, data):
        """Only opers holding the command-shun permission may use /SHUN."""
        if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-shun", users=[user]):
            user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
            return False
        return None

    def onShunUpdate(self):
        """Re-evaluate every connected user after the line list changes."""
        for user in self.ircd.users.itervalues():
            self.checkLines(user)
class UserShun(Command):
    """IRC /SHUN command handler: add (mask + duration + reason) or
    remove (mask only) a shun line."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, user, params, prefix, tags):
        """Validate args; 1 param = remove, 3+ params = add (2 is invalid)."""
        if len(params) < 1 or len(params) == 2:
            user.sendSingleError("ShunParams", irc.ERR_NEEDMOREPARAMS, "SHUN", "Not enough parameters")
            return None
        shunmask = params[0]
        # a nickname is translated to that user's ident@host mask
        if shunmask in self.module.ircd.userNicks:
            targetUser = self.module.ircd.users[self.module.ircd.userNicks[shunmask]]
            shunmask = "{}@{}".format(targetUser.ident, targetUser.host())
        else:
            # bare host masks get a wildcard ident
            if "@" not in shunmask:
                shunmask = "*@{}".format(shunmask)
        if len(params) == 1:
            return {
                "mask": shunmask
            }
        return {
            "mask": shunmask,
            "duration": durationToSeconds(params[1]),
            "reason": " ".join(params[2:])
        }

    def execute(self, user, data):
        """Apply the parsed command; always returns True (handled)."""
        shunmask = data["mask"]
        if "reason" in data:
            # presence of a reason means "add"
            if not self.module.addLine(shunmask, now(), data["duration"], user.hostmask(), data["reason"]):
                user.sendMessage("NOTICE", "*** Shun for {} is already set.".format(shunmask))
                return True
            self.module.onShunUpdate()
            # duration 0 means the shun never expires
            if data["duration"] > 0:
                user.sendMessage("NOTICE", "*** Timed shun for {} has been set, to expire in {} seconds.".format(shunmask, data["duration"]))
            else:
                user.sendMessage("NOTICE", "*** Permanent shun for {} has been set.".format(shunmask))
            return True
        if not self.module.delLine(shunmask):
            user.sendMessage("NOTICE", "*** Shun for {} doesn't exist.".format(shunmask))
            return True
        user.sendMessage("NOTICE", "*** Shun for {} has been removed.".format(shunmask))
        self.module.onShunUpdate()
        return True
class ServerAddShun(Command):
    """Server-to-server ADDLINE handler for shun lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # parameter handling is shared across all x-line types
        return self.module.handleServerAddParams(server, params, prefix, tags)

    def execute(self, server, data):
        succeeded = self.module.executeServerAddCommand(server, data)
        # re-evaluate which connected users are now shunned
        self.module.onShunUpdate()
        return succeeded
class ServerDelShun(Command):
    """Server-to-server DELLINE handler for shun lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # parameter handling is shared across all x-line types
        return self.module.handleServerDelParams(server, params, prefix, tags)

    def execute(self, server, data):
        succeeded = self.module.executeServerDelCommand(server, data)
        # re-evaluate which connected users are still shunned
        self.module.onShunUpdate()
        return succeeded
shunModule = Shun() | StarcoderdataPython |
86395 | <reponame>juliensimon/optimum-graphcore
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from argparse import ArgumentError, ArgumentParser
from pathlib import Path
from typing import Optional, Sequence, Union
from datasets import Dataset, DatasetDict, concatenate_datasets, load_dataset
from joblib import Parallel, delayed
def prepare_dataset(dataset_name: str, cache_dir: Path, data_dir: Path, data_files: Sequence[str]) -> Dataset:
    """Load one worker's shard of the dataset from `data_files`.

    NOTE(review): `data_dir` is accepted but unused here; the caller in
    `main` passes it anyway - confirm before removing the parameter.
    """
    raw_dataset = load_dataset(
        dataset_name,
        data_files=data_files,
        cache_dir=cache_dir,
    )
    return raw_dataset
def main(
    num_workers: int,
    dataset_name: str,
    data_dir: Union[Path, str],
    cache_dir: Union[Path, str],
    output_dataset_name: Optional[str] = None,
    remove_intermediate_datasets_from_cache: bool = True,
    max_number_of_files: int = -1,
) -> Dataset:
    """Generate the dataset from tfrecord files in parallel and save it.

    The files under `data_dir` are split between `num_workers` processes;
    each loads its share, then the per-worker datasets are concatenated
    split-by-split and saved under `cache_dir`.

    Raises:
        ValueError: if there are more workers than input files.
    """
    data_files = list(map(str, Path(data_dir).glob("*.tfrecord")))
    if max_number_of_files > 0:
        data_files = data_files[:max_number_of_files]
    if num_workers > len(data_files):
        raise ValueError(f"there are more workers ({num_workers}) than the number of files ({len(data_files)})")
    num_data_file_per_worker = len(data_files) // num_workers
    data_files_per_worker = [
        data_files[i * num_data_file_per_worker : (i + 1) * num_data_file_per_worker] for i in range(num_workers)
    ]
    remaining_files = len(data_files) % num_workers
    if remaining_files > 0:
        # Dispatching the remaining files to different workers
        for i in range(1, remaining_files + 1):
            data_files_per_worker[-i].append(data_files[-i])
    formatted_filenames = "\n".join(data_files)
    print(f"Found {len(data_files)} files:\n{formatted_filenames}")
    print(f"Number of files per worker ~ {num_data_file_per_worker}")
    print(f"Generating the dataset with {num_workers} workers...")
    start = time.time()
    sub_datasets = Parallel(n_jobs=num_workers)(
        delayed(prepare_dataset)(dataset_name, cache_dir, data_dir, data_files) for data_files in data_files_per_worker
    )

    # merge the per-worker DatasetDicts back into one, split by split
    final_datasets = DatasetDict()
    split_names = sub_datasets[0].keys()
    for split_name in split_names:
        final_datasets[split_name] = concatenate_datasets(
            [dataset_dict[split_name] for dataset_dict in sub_datasets], split=split_name
        )
    end = time.time()
    print(f"Dataset generation completed after {end - start}s")

    if output_dataset_name is None:
        final_dataset_filename = Path(cache_dir) / dataset_name.replace("/", "_")
    else:
        final_dataset_filename = Path(cache_dir) / output_dataset_name
    final_datasets.save_to_disk(final_dataset_filename)

    if remove_intermediate_datasets_from_cache:
        print("*** Cleaning up intermediate dataset cache files ***")
        for dataset in sub_datasets:
            for (_, cache_files) in dataset.cache_files.items():
                for cache_file in cache_files:
                    filename = cache_file.get("filename")
                    if filename is None:
                        continue
                    # BUG FIX: report the file actually being removed
                    # (previously printed the literal text "(unknown)")
                    print(f"Removing {filename}")
                    Path(filename).unlink()
        print("Done!")
    print(f"Dataset saved at {final_dataset_filename}")
    return final_datasets
def get_args():
    """Build and evaluate the command-line parser for this tool."""
    arg_parser = ArgumentParser(description="Utility tool to enable multiprocessing during dataset generation")
    arg_parser.add_argument("--num_workers", required=True, type=int, help="The number of workers to use.")
    arg_parser.add_argument(
        "--dataset_name", required=True, type=str, help="The name of the dataset, or the path to the dataset script."
    )
    arg_parser.add_argument(
        "--data_dir", required=True, type=Path, help="The path to the directory containing the dataset files."
    )
    arg_parser.add_argument("--cache_dir", default=None, type=Path, help="The path to the cache directory.")
    arg_parser.add_argument(
        "--output_dataset_name",
        default=None,
        type=str,
        help="The resulting dataset folder name, --dataset_name is used if this field is None",
    )
    arg_parser.add_argument(
        "--keep_intermediate_datasets",
        default=False,
        type=bool,
        help="Whether to keep intermediate dataset cache files or not.",
    )
    arg_parser.add_argument("--max_number_of_files", default=-1, type=int, help="The maximum number of files to process.")
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = get_args()
    # fall back to the Hugging Face datasets cache when no dir was given
    if args.cache_dir is None:
        args.cache_dir = os.environ.get("HF_DATASETS_CACHE", None)
    if args.cache_dir is None:
        # BUG FIX: argparse.ArgumentError requires (argument, message);
        # the previous single-argument call raised TypeError instead.
        raise ArgumentError(
            None,
            "You must either specify a cache_dir or set the HF_DATASETS_CACHE environment variable"
        )
    print(f"Cache dir: {args.cache_dir}")
    main(
        args.num_workers,
        args.dataset_name,
        args.data_dir,
        args.cache_dir,
        args.output_dataset_name,
        remove_intermediate_datasets_from_cache=not args.keep_intermediate_datasets,
        max_number_of_files=args.max_number_of_files,
    )
| StarcoderdataPython |
123921 | __author__ = 'Claudio'
"""Demonstrate how to use Python’s list comprehension syntax to produce
the list [0, 2, 6, 12, 20, 30, 42, 56, 72, 90].
"""
def demonstration_list_comprehension():
    """Demonstrate list comprehension syntax by producing
    [0, 2, 6, 12, 20, 30, 42, 56, 72, 90], i.e. n*(n+1) for n in 0..9.
    """
    return [position * value for position, value in enumerate(range(1, 11))]
| StarcoderdataPython |
1654090 | <filename>peach/database/proxy.py
from peach.utils import load_resource_class
def load_db_proxy(db_conf):
    """Instantiate the DB proxy class named in ``db_conf['proxy']``,
    forwarding the whole config to its ``build`` factory."""
    proxy_cls = load_resource_class(db_conf['proxy'])
    return proxy_cls.build(**db_conf)
class DBProxy(object):
    """Abstract interface for database back-ends.

    Concrete proxies must override every method below; ``build`` is the
    factory used by ``load_db_proxy``.
    """

    @classmethod
    def build(cls, **kwargs):
        """Construct a proxy instance from configuration keyword args."""
        raise NotImplementedError

    def add(self, model, doc):
        """Insert a single document for `model`."""
        raise NotImplementedError

    def upsert(self, model, *docs):
        """Insert or update one or more documents."""
        raise NotImplementedError

    def delete(self, model, *doc_ids):
        """Delete documents by their ids."""
        raise NotImplementedError

    def count(self, model, condition, **kwargs):
        """Count documents matching `condition`."""
        raise NotImplementedError

    def find(self, model, condition, skip=0, limit=0, sort=None, **kwargs):
        """Query documents matching `condition`, with paging and sorting."""
        raise NotImplementedError

    def by_attr(self, model, attr, value, exact=True, many=True, skip=0, limit=0, sort=None):
        """Look up documents where `attr` matches `value` (exact or fuzzy)."""
        raise NotImplementedError

    def by_id(self, model, id):
        """Look up a single document by its id."""
        raise NotImplementedError
| StarcoderdataPython |
3221465 | from rubrix.client.sdk.users.models import User
from rubrix.server.security.model import User as ServerUser
def test_users_schema(helpers):
    """The client-side and server-side User schemas must be identical
    once field descriptions are stripped."""
    client_schema = helpers.remove_description(User.schema())
    server_schema = helpers.remove_description(ServerUser.schema())
    assert client_schema == server_schema
| StarcoderdataPython |
4801496 | <reponame>tho-wa/virushack
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 15:18:46 2020
@author: LB
response = requests.get(
'https://hystreet.com/api/locations/',
params={},
headers={'X-API-Token': '<KEY>'},
)
json_response = response.json()
| StarcoderdataPython |
189528 | from datetime import datetime, timedelta
from airflow import DAG
# Arguments applied to every task in the DAG.
default_args = {
    "owner": "airflow",
    "email_on_failure": False,
    "email_on_retry": False,
    "email": "<EMAIL>",
    "retries": 1,  # retry each failed task once
    "retry_delay": timedelta(minutes=5),  # wait 5 minutes between retries
}

# Daily pipeline skeleton; no tasks have been added to the DAG yet.
with DAG(
    "forex_data_pipeline",
    start_date=datetime(2021, 1, 1),
    schedule_interval="@daily",
    default_args=default_args,
    catchup=False,  # do not back-fill runs before deployment
) as dag:
    None  # placeholder body - task definitions go here
| StarcoderdataPython |
3380029 | """
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
    def levelOrder(self, root: 'Node') -> List[List[int]]:
        """Breadth-first traversal of an n-ary tree: one list of node
        values per depth level, top to bottom."""
        if root is None:
            return []
        levels = []
        frontier = [root]
        while frontier:
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                next_frontier.extend(node.children)
            frontier = next_frontier
        return levels
3329505 | # Exemplo de dado com função
def dadosPessoais(nome, idade, cidade):
    """Print a one-line personal summary (name, age, city) in Portuguese."""
    mensagem = "Seu nome é {}, você tem {} anos e mora em {}.".format(
        nome, idade, cidade)
    print(mensagem)


# demonstrate both positional and keyword argument call styles
dadosPessoais("José", 30, "Maceió")
dadosPessoais(nome="Joaquim", idade=80, cidade="Alagoas")
4820270 | <reponame>AlexanderIbrahim1/threebodyparah2
import os
import sys
import pathlib
import subprocess
class KernelGeneratorInfo:
    """Holds the parameters for one invocation of the makekernel.x code."""

    def __init__(self, power1, power2, csvfile, kerneldir):
        # only powers 0..6 are accepted
        assert 0 <= power1 <= 6
        assert 0 <= power2 <= 6
        # input parameters for the external executable
        self.executable = os.path.join("src", "makekernel.x")
        self.power1 = power1
        self.power2 = power2
        self.csvfile = csvfile
        # output kernel file: <kerneldir>/<csv stem>.kernel
        stem = pathlib.Path(csvfile).stem
        self.kerneltemplate = os.path.join(kerneldir, "{0}.kernel".format(stem))

    def call_makekernel(self):
        """Invoke ./makekernel.x via the shell to create the kernel file."""
        target = self.kerneltemplate
        cmd = "{} {} {} {} {}".format(self.executable, self.csvfile, target,
                                      self.power1, self.power2)
        print("Creating {}".format(target))
        subprocess.call(cmd, shell=True)
def make_kernel(prefix):
    """Generate ../kernels/<prefix>.kernel from ../csvfiles/<prefix>.csv."""
    csv_path = os.path.join("..", "csvfiles", prefix + ".csv")
    kernel_dir = os.path.join("..", "kernels")
    # first power doesn't actually matter, energies are empirically
    # determined in that direction; second power should be 5
    generator = KernelGeneratorInfo(3, 5, csv_path, kernel_dir)
    generator.call_makekernel()
if __name__ == '__main__':
    # default kernel name when none is given on the command line
    if len(sys.argv) == 1:
        make_kernel('threebodypes')
    else:
        kernelname = sys.argv[1]
        make_kernel(kernelname)
| StarcoderdataPython |
3298346 | <filename>chemex/experiments/cpmg/fast/back_calculation.py
from numpy.linalg import matrix_power
from scipy.linalg import expm
from ....bases.two_states.fast import P_180Y
from ....caching import lru_cache
from .liouvillian import compute_iy_eq, compute_liouvillians, get_iy
@lru_cache()
def make_calc_observable(time_t2=0.0, ppm_to_rads=1.0, _id=None):
    """
    Factory to make "calc_observable" function to calculate the intensity in
    presence of exchange after a CPMG block.

    Parameters
    ----------
    time_t2 : float
        Time of the CPMG block
    ppm_to_rads : float
        Conversion factor from ppm to rad/s for the observed nucleus
    _id : tuple
        Some type of identification for caching optimization

    Returns
    -------
    out : function
        Calculate intensity after the CPMG block

    """

    @lru_cache(100)
    def _calc_observable(pb=0.0, kex=0.0, dw=0.0, r_ixy=5.0, dr_ixy=0.0,
                         ncyc=0):
        """
        Calculate the intensity in presence of exchange during a cpmg-type pulse
        train.

        Parameters
        ----------
        pb : float
            Fractional population of state B,
            0.0 for 0%, 1.0 for 100%
        kex : float
            Exchange rate between state A and B in /s.
        dw : float
            Chemical shift difference between states A and B in ppm
            (converted internally to rad/s via ppm_to_rads).
        r_ixy : float
            Transverse relaxation rate of state a in /s.
        dr_ixy : float
            Transverse relaxation rate difference between states a and b in /s.
        ncyc : int
            Number of cycles, [t-180-2t-180-t]*n.

        Returns
        -------
        out : float
            Intensity after the CPMG block

        """

        # convert the chemical-shift difference from ppm to rad/s
        dw *= ppm_to_rads

        mag_eq = compute_iy_eq(pb)

        if ncyc == 0:
            # no refocusing pulses: magnetization stays at equilibrium
            mag = mag_eq

        else:
            l_free = compute_liouvillians(
                pb=pb,
                kex=kex,
                dw=dw,
                r_ixy=r_ixy,
                dr_ixy=dr_ixy
            )

            # tau delay: four free-precession periods of length t_cp per
            # [tau-180-2tau-180-tau] cycle
            t_cp = time_t2 / (4.0 * ncyc)
            p_free = expm(l_free * t_cp)

            # (p_free . 180y . p_free) is half a cycle; raising it to
            # 2*ncyc applies the whole CPMG train to mag_eq
            mag = matrix_power(
                p_free
                .dot(P_180Y)
                .dot(p_free),
                2 * ncyc
            ).dot(mag_eq)

        magy_a, _ = get_iy(mag)

        return magy_a

    def calc_observable(i0=0.0, **kwargs):
        """
        Calculate the intensity in presence of exchange after a CPMG block.

        Parameters
        ----------
        i0 : float
            Initial intensity.

        Returns
        -------
        out : float
            Intensity after the CPMG block

        """

        return i0 * _calc_observable(**kwargs)

    return calc_observable
| StarcoderdataPython |
84786 | # -*- coding: utf-8 -*-
from autograd.blocks.trigo import sin
from autograd.blocks.trigo import cos
from autograd.blocks.trigo import tan
from autograd.blocks.trigo import arcsin
from autograd.blocks.trigo import arccos
from autograd.blocks.trigo import arctan
from autograd.variable import Variable
import numpy as np
import autograd as ad
def test_sin_forward():
    """Check data and Jacobian of the sin block in forward mode."""
    ad.set_mode('forward')
    values = np.random.random(5)
    var = Variable(values)
    block = sin()
    out = block(var)
    out.compute_gradients()
    expected_data = np.sin(values)
    expected_grad = np.diag(np.cos(values))
    assert np.equal(expected_data, out.data).all(), 'wrong sin data pass. expected {}, given{}'.format(expected_data, out.data)
    assert np.equal(expected_grad, out.gradient).all(), 'wrong sin gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
def test_sin_reverse():
    """Check data and Jacobian of the sin block in reverse mode."""
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    sin_block = sin()
    y_block = sin_block(x)
    # reverse mode only accumulates gradients once compute_gradients runs
    y_block.compute_gradients()
    data_true = np.sin(data)
    gradient_true = np.diag(np.cos(data))
    assert np.equal(data_true, y_block.data).all(), 'wrong sin data pass. expected {}, given{}'.format(data_true, y_block.data)
    # fixed: the failure message previously said "forward pass" in this
    # reverse-mode test, which misled anyone diagnosing a failure
    assert np.equal(gradient_true, y_block.gradient).all(), 'wrong sin gradient reverse pass. expected {}, given{}'.format(gradient_true, y_block.gradient)
def test_cos_forward():
    """Check data and Jacobian of the cos block in forward mode."""
    ad.set_mode('forward')
    values = np.random.random(5)
    var = Variable(values)
    block = cos()
    out = block(var)
    out.compute_gradients()
    expected_data = np.cos(values)
    expected_grad = np.diag(-np.sin(values))
    assert np.equal(expected_data, out.data).all(), 'wrong cos data pass. expected {}, given{}'.format(expected_data, out.data)
    assert np.equal(expected_grad, out.gradient).all(), 'wrong cos gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
def test_cos_reverse():
    """Check data and Jacobian of the cos block in reverse mode."""
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    cos_block = cos()
    y_block = cos_block(x)
    y_block.compute_gradients()
    data_true = np.cos(data)
    gradient_true = np.diag(-np.sin(data))
    assert np.equal(data_true, y_block.data).all(), 'wrong cos data pass. expected {}, given{}'.format(data_true, y_block.data)
    # fixed: the failure message previously said "forward pass" in this
    # reverse-mode test
    assert np.equal(gradient_true, y_block.gradient).all(), 'wrong cos gradient reverse pass. expected {}, given{}'.format(gradient_true, y_block.gradient)
def test_tan_forward():
    """Check data and Jacobian of the tan block in forward mode."""
    ad.set_mode('forward')
    values = np.random.random(5)
    var = Variable(values)
    block = tan()
    out = block(var)
    out.compute_gradients()
    expected_data = np.tan(values)
    expected_grad = np.diag(1/np.cos(values)**2)
    assert np.equal(expected_data, out.data).all(), 'wrong tan data pass. expected {}, given{}'.format(expected_data, out.data)
    assert np.equal(expected_grad, out.gradient).all(), 'wrong tan gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
def test_tan_reverse():
    """Check data and Jacobian of the tan block in reverse mode."""
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    tan_block = tan()
    y_block = tan_block(x)
    y_block.compute_gradients()
    data_true = np.tan(data)
    gradient_true = np.diag(1/np.cos(data)**2)
    assert np.equal(data_true, y_block.data).all(), 'wrong tan data pass. expected {}, given{}'.format(data_true, y_block.data)
    # fixed: the failure message previously said "forward pass" in this
    # reverse-mode test
    assert np.equal(gradient_true, y_block.gradient).all(), 'wrong tan gradient reverse pass. expected {}, given{}'.format(gradient_true, y_block.gradient)
def test_arcsin_forward():
    """Check data and Jacobian of the arcsin block in forward mode."""
    ad.set_mode('forward')
    values = np.random.random(5)
    var = Variable(values)
    block = arcsin()
    out = block(var)
    out.compute_gradients()
    expected_data = np.arcsin(values)
    expected_grad = np.diag(1/(np.sqrt(1 - values**2)))
    assert np.equal(expected_data, out.data).all(), 'wrong arcsin data pass. expected {}, given{}'.format(expected_data, out.data)
    assert np.equal(expected_grad, out.gradient).all(), 'wrong arcsin gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
def test_arcsin_reverse():
    """Check data and Jacobian of the arcsin block in reverse mode."""
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    arcsin_block = arcsin()
    y_block = arcsin_block(x)
    y_block.compute_gradients()
    data_true = np.arcsin(data)
    gradient_true = np.diag(1/(np.sqrt(1 - data**2)))
    assert np.equal(data_true, y_block.data).all(), 'wrong arcsin data pass. expected {}, given{}'.format(data_true, y_block.data)
    # fixed: the failure message previously said "forward pass" in this
    # reverse-mode test
    assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arcsin gradient reverse pass. expected {}, given{}'.format(gradient_true, y_block.gradient)
def test_arccos_forward():
    """Check data and Jacobian of the arccos block in forward mode."""
    ad.set_mode('forward')
    values = np.random.random(5)
    var = Variable(values)
    block = arccos()
    out = block(var)
    out.compute_gradients()
    expected_data = np.arccos(values)
    expected_grad = np.diag(-1/(np.sqrt(1 - values**2)))
    assert np.equal(expected_data, out.data).all(), 'wrong arccos data pass. expected {}, given{}'.format(expected_data, out.data)
    assert np.equal(expected_grad, out.gradient).all(), 'wrong arccos gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
def test_arccos_reverse():
    """Check data and Jacobian of the arccos block in reverse mode."""
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    arccos_block = arccos()
    y_block = arccos_block(x)
    y_block.compute_gradients()
    data_true = np.arccos(data)
    gradient_true = np.diag(-1/(np.sqrt(1 - data**2)))
    assert np.equal(data_true, y_block.data).all(), 'wrong arccos data pass. expected {}, given{}'.format(data_true, y_block.data)
    # fixed: the failure message previously said "forward pass" in this
    # reverse-mode test
    assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arccos gradient reverse pass. expected {}, given{}'.format(gradient_true, y_block.gradient)
def test_arctan_forward():
    """Check data and Jacobian of the arctan block in forward mode."""
    ad.set_mode('forward')
    values = np.random.random(5)
    var = Variable(values)
    block = arctan()
    out = block(var)
    out.compute_gradients()
    expected_data = np.arctan(values)
    expected_grad = np.diag(1/(1 + values**2))
    assert np.equal(expected_data, out.data).all(), 'wrong arctan data pass. expected {}, given{}'.format(expected_data, out.data)
    assert np.equal(expected_grad, out.gradient).all(), 'wrong arctan gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
def test_arctan_reverse():
    """Check data and Jacobian of the arctan block in reverse mode."""
    ad.set_mode('reverse')
    data = np.random.random(5)
    x = Variable(data)
    arctan_block = arctan()
    y_block = arctan_block(x)
    y_block.compute_gradients()
    data_true = np.arctan(data)
    gradient_true = np.diag(1/(1 + data**2))
    assert np.equal(data_true, y_block.data).all(), 'wrong arctan data pass. expected {}, given{}'.format(data_true, y_block.data)
    # fixed: the failure message previously said "forward pass" in this
    # reverse-mode test
    assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arctan gradient reverse pass. expected {}, given{}'.format(gradient_true, y_block.gradient)
# reset the global autodiff mode back to 'forward' after the reverse-mode
# tests above left it set to 'reverse'
ad.set_mode('forward')
| StarcoderdataPython |
1693490 | #!/usr/bin/env python3
"""An image analyser that finds the three most common colors in an image.
Title:
Dominant Colors
Description:
Develop a program that accepts an image
either via the devices's camera (if it has one)
or a file dialog.
Your program should intelligently determine
three of the most dominant colors in the image
and present it to the user.
The dominant colors in this case are the colors
that appear most in a given image.
For added complexity,
generate the RGB, HSB, CYMK and HEX color.
Also you can add the ability
to save the generated color palette with all the above information.
"""
import colorsys
import tkinter as tk
from tkinter import filedialog, ttk
from PIL import Image, ImageTk
class MainWindow(tk.Tk):
    """Main application window for the Dominant Colors analyser.

    Lets the user pick an image file, then shows the image's bands and its
    three most frequent colors in RGB/HSL/HSV/HEX form.
    """

    def __init__(self):
        """Build and lay out all widgets; no image is loaded yet."""
        super().__init__()
        self.title("Dominant Colors")
        self.geometry()
        self.resizable(width=False, height=False)
        self.bands = tk.StringVar()
        # keep PhotoImage references alive — tkinter does not hold them itself
        self.color_images = []
        self.current_image_frame = ttk.Frame(self)
        self.bands_frame = ttk.Frame(self)
        self.colors_frame = ttk.Frame(self)
        self.current_image_label = ttk.Label(self.current_image_frame,
                                             text="Currently opened image")
        self.current_image_label2 = ttk.Label(self.current_image_frame,
                                              text="None")
        self.bands_label = ttk.Label(self.bands_frame,
                                     text="The bands of the image are")
        self.bands_entry = ttk.Entry(self.bands_frame,
                                     textvariable=self.bands,
                                     state="readonly",
                                     justify=tk.CENTER)
        self.dominant_colors_label = ttk.Label(self.colors_frame,
                                               text="Most common colors")
        self.dominant_color_treeview = ttk.Treeview(self.colors_frame,
                                                    columns=("RGB",
                                                             "HSL",
                                                             "HSV",
                                                             "HEX"))
        self.choose_image_button = ttk.Button(self,
                                              command=self.new_image,
                                              text="Choose a new image")
        # "#0" is the tree column used for the color swatch images
        self.dominant_color_treeview.column("#0", minwidth=40,
                                            width=40, stretch=0)
        self.dominant_color_treeview.column("RGB", minwidth=100,
                                            width=100, anchor=tk.CENTER)
        self.dominant_color_treeview.heading("RGB", text="RGB/RGBA")
        self.dominant_color_treeview.column("HSL", minwidth=100,
                                            width=100, anchor=tk.CENTER)
        self.dominant_color_treeview.heading("HSL", text="HSL")
        self.dominant_color_treeview.column("HSV", minwidth=100,
                                            width=100, anchor=tk.CENTER)
        self.dominant_color_treeview.heading("HSV", text="HSV")
        self.dominant_color_treeview.column("HEX", minwidth=100,
                                            width=100, anchor=tk.CENTER)
        self.dominant_color_treeview.heading("HEX", text="HEX")
        # pre-create the three result rows; new_image() fills them in later
        for item_id in range(0, 3):
            self.dominant_color_treeview.insert("", "end", item_id,
                                                values=("", "", ""))
        self.current_image_frame.grid(row=0, column=0, padx=5, pady=10)
        self.bands_frame.grid(row=1, column=0, padx=5, pady=10)
        self.colors_frame.grid(row=2, column=0, padx=5, pady=10)
        self.current_image_label.grid(row=0, column=0, padx=5, pady=2.5)
        self.current_image_label2.grid(row=1, column=0, padx=5, pady=5)
        self.bands_label.grid(row=0, column=0, padx=5, pady=2.5)
        self.bands_entry.grid(row=1, column=0, padx=5, pady=5)
        self.dominant_colors_label.grid(row=0, column=0, padx=5, pady=2.5,
                                        columnspan=3)
        self.dominant_color_treeview.grid(row=1, column=0)
        self.choose_image_button.grid(row=3, column=0, padx=10, pady=10)

    def new_image(self):
        """Get image path from user and display dominant colors."""
        # fixed: the jpg pattern was "*jpg" (missing dot), so .jpg files
        # never matched the "Image" filter in the dialog
        image_path = filedialog.askopenfilename(parent=self,
                                                title="Please choose an image",
                                                defaultextension=".png",
                                                filetypes=[("Image", ("*.png", "*.jpg")),
                                                           ("All files", "*.*")])
        # empty string means the user cancelled the dialog
        if not image_path:
            return
        self.current_image_label2.config(text=image_path)
        image = load_image(image_path)
        # raise maxcolors to the pixel count so getcolors never returns None
        colors = image.getcolors(maxcolors=image.size[0]*image.size[1])
        most_frequent_colors = dominant_colors(colors)
        color_dict_list = convert_to_color_dict_list(most_frequent_colors)
        self.color_images = []
        for item_id in range(0, 3):
            # 16x16 solid swatch shown in the tree's image column
            self.color_images.append(ImageTk.PhotoImage(Image.new("RGBA",
                                                                  (16, 16),
                                                                  color=color_dict_list[item_id]
                                                                  ["rgb"])))
            self.dominant_color_treeview.item(item_id,
                                              image=self.color_images[item_id],
                                              values=(color_dict_list[item_id]["rgb"],
                                                      color_dict_list[item_id]["hsl"],
                                                      color_dict_list[item_id]["hsv"],
                                                      "".join(color_dict_list[item_id]["hex"])))
        self.bands.set("".join(image.getbands()))
def load_image(image_path: str) -> Image:
    """Open *image_path* and force its pixel data to be read immediately."""
    opened = Image.open(image_path)
    opened.load()
    return opened
def dominant_colors(colors: list) -> list:
    """Return the (up to) three most frequent colors.

    Parameters
    ----------
    colors : list
        ``(count, color)`` pairs as produced by ``Image.getcolors``.

    Returns
    -------
    list
        The color values with the highest counts, most frequent first.
    """
    # Sort by count only. The previous implementation built a dict keyed by
    # count, which silently dropped colors sharing a count, and its repeated
    # max() crashed on images with fewer than three distinct colors.
    ranked = sorted(colors, key=lambda pair: pair[0], reverse=True)
    return [color for _, color in ranked[:3]]
def convert_to_color_dict_list(colors: list) -> list:
    """Return a list of dictionaries of colors in different formats.

    Each input color (an RGB or RGBA tuple) maps to a dict with keys
    "rgb", "hsl", "hsv" and "hex". Only the first three channels are used
    for the hsl/hsv/hex conversions.

    NOTE(review): colorsys.rgb_to_hls returns (h, l, s), so the "hsl" list
    is stored in h, l, s order despite the key name — confirm callers expect
    that ordering.
    """
    color_dict_list = []
    for color in colors:
        # two-digit lowercase hex per channel, e.g. 255 -> "ff", 5 -> "05"
        hex_color = [format(color[channel], "02x") for channel in range(0, 3)]
        color_dict_list.append({
            "rgb": color,
            "hsl": [round(component, 2) for component
                    in colorsys.rgb_to_hls(color[0], color[1], color[2])],
            "hsv": [round(component, 2) for component
                    in colorsys.rgb_to_hsv(color[0], color[1], color[2])],
            "hex": hex_color,
        })
    return color_dict_list
def _start_gui():
    """Start the Graphical User Interface."""
    # construct the window and hand control to the tk event loop
    MainWindow().mainloop()
if __name__ == "__main__":
_start_gui()
| StarcoderdataPython |
1739999 | <reponame>ChristopherMayes/lume-orchestration-demo
from setuptools import setup, find_packages
from os import path, environ
import versioneer
cur_dir = path.abspath(path.dirname(__file__))

# Read the runtime dependency pins from requirements.txt (one per token).
with open(path.join(cur_dir, "requirements.txt"), "r") as req_file:
    requirements = req_file.read().split()

setup(
    name="slac_services",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author="SLAC National Accelerator Laboratory",
    author_email="<EMAIL>",
    license="SLAC Open",
    url="https://github.com/slaclab/slac_services",
    packages=find_packages(),
    install_requires=requirements,
    # development-only extras: install with pip install ".[test]"
    extras_require={
        "test": ["pytest"],
    },
    include_package_data=True,
    python_requires=">=3.7",
    entry_points={
        'console_scripts': [
            'save-model = slac_services.scripts.models:save_model',
            'save-model-deployment = slac_services.scripts.models:save_model_deployment',
            'create-project = slac_services.scripts.models:create_project',
            'save-deployment-flow = slac_services.scripts.models:save_deployment_flow'
        ]
    },
)
| StarcoderdataPython |
1783011 | <reponame>mikema2019/Machine-learning-algorithms<filename>Supervised Machine Learning Algorithms/Ensemble Methods/Gradient_Boosting.py
from Decision_Tree_CART import DecisionTree_CART
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor,AdaBoostRegressor
def L(y_train, f):
    """Squared-error loss between targets ``y_train`` and predictions ``f``."""
    residual = y_train - f
    return np.sum(residual ** 2)
def der_L(y_train, f):
    """Pseudo-residual for the squared loss: returns ``2 * (y_train - f)``."""
    return (y_train - f) * 2
def opt_L(y_train, f=0):
    """Mean residual — the constant that minimises the squared loss."""
    return np.mean(y_train - f)
class GradientBoosting():
    """Gradient-boosted regression ensemble of DecisionTree_CART learners
    trained on squared-loss pseudo-residuals."""
    # NOTE(review): the mutable list default for ``epsilon`` is shared across
    # calls; harmless here since it is never mutated, but worth confirming.
    def __init__(self,epsilon=[10**-2,10**-2,10**-2],M=3):
        """Store the number of boosting stages M and one tree tolerance
        (``epsilon``) per stage."""
        # number of boosting stages (weak learners to fit)
        self.func_no=M
        # fitted trees, appended in training order
        self.model=[]
        # per-stage stopping tolerance handed to DecisionTree_CART
        self.epsilon=epsilon
    def train(self,X_train,y_train,X_dev,y_dev):
        """Fit ``self.func_no`` trees, each on the pseudo-residuals of the
        ensemble built so far; leaf values are then re-estimated against the
        original targets."""
        train_no,feature_no=np.shape(X_train)
        # r[:, m] holds the pseudo-residuals 2*(y - f) entering stage m
        r=np.zeros((train_no,self.func_no+1))
        f_func=[]
        # f0: constant prediction minimising the squared loss (mean target)
        self.f0=opt_L(y_train)
        f=opt_L(y_train)
        r[:,0]=der_L(y_train,f)
        for m in range(self.func_no):
            model=DecisionTree_CART(epsilon=self.epsilon[m])
            node=model.train(X_train,r[:,m],X_dev,y_dev)
            # walk the tree's node table; value[7] flags leaf nodes
            for key,value in node.items():
                if value[7]==True:
                    X_train_temp,y_train_temp,father_no,C,mini_feature_no,length,s_point,leaf_node=value
                    # prediction of all previous stages on this leaf's samples
                    f_temp=0
                    for i in range(m):
                        f_temp+=f_func[i].predict(X_train_temp)
                    f_temp+=self.f0
                    # map leaf samples back to their row indices in X_train
                    # NOTE(review): list(X_train).index(X) matches rows by
                    # array equality — confirm it selects the intended rows
                    # when duplicate rows exist.
                    index=[]
                    for X in X_train_temp:
                        index.append(list(X_train).index(X))
                    # optimal leaf constant for the *original* targets given
                    # the ensemble's current predictions on those samples
                    C=opt_L(y_train[index],f_temp)
                    node[key]=X_train_temp,y_train_temp,father_no,C,mini_feature_no,length,s_point,leaf_node
            f_func.append(model)
            # update the ensemble prediction and the next residual column
            f=f+model.predict(X_train)
            r[:,m+1]=der_L(y_train,f)
            self.model.append(model)
    def predict(self,X_test):
        """Return the constant f0 plus the sum of every stage's prediction."""
        y_test=0
        for model in self.model:
            y_test+=model.predict(X_test)
        return y_test+self.f0
if __name__ == '__main__':
    # Demo: learn sin(x) on [0, 10) and compare this GradientBoosting
    # implementation against DecisionTree_CART and sklearn's ensembles.
    # np.random.seed(4)
    X_train=10*np.random.rand(300,1)
    y_train=np.sin(X_train)
    y_train=y_train.ravel()
    X_dev=10*np.random.rand(100,1)
    y_dev=np.sin(X_dev)
    X_test_unsorted=10*np.random.rand(100,1)
    # X_test_unsorted=X_train
    # sort test points so the line plot below is drawn left-to-right
    X_test=np.sort(X_test_unsorted,axis=0)
    y_test=np.sin(X_test)
    model=GradientBoosting(epsilon=[10**-8,10**-8,10**-8])
    model.train(X_train,y_train,X_dev,y_dev)
    y_pred=model.predict(X_test)
    # print('Decision Tree', y_pred)
    # NOTE(review): y_test is shape (100, 1) while y_pred may be 1-D; if so
    # the subtraction broadcasts to (100, 100) before averaging — confirm
    # the reported MSE is the intended quantity.
    print('Gradient_Boosting:',np.average((y_test-y_pred)**2))
    plt.figure(1)
    plt.plot(X_test,y_pred,label='y_pred')
    plt.scatter(X_test,y_test,label='y_test')
    # plt.scatter(X_train,y_train,label='y_train')
    plt.legend()
    # baseline: a single CART regression tree
    model=DecisionTree_CART(epsilon=10**-8,method='Regression')
    model.train(X_train,y_train,X_dev,y_dev)
    y_pred=model.predict(X_test)
    # print('Decision Tree', y_pred)
    print('DecisionTree_CART:',np.average((y_test-y_pred)**2))
    # reference implementations from scikit-learn
    model = AdaBoostRegressor()
    model.fit(X_train, y_train)
    y_pred=model.predict(X_test)
    # print('sklearn', y_pred)
    print('sklearn_AB:',np.average((y_test-y_pred)**2))
    model = GradientBoostingRegressor()
    model.fit(X_train, y_train)
    y_pred=model.predict(X_test)
    # print('sklearn', y_pred)
    print('sklearn_GBR:',np.average((y_test-y_pred)**2))
    plt.figure(2)
    plt.plot(X_test,y_pred,label='y_pred')
    plt.scatter(X_test,y_test,label='y_test')
    plt.legend()
| StarcoderdataPython |
def divide_chunks(list_to_chunk, chunk_size):
    """Yield successive ``chunk_size``-sized slices of *list_to_chunk*;
    the final slice may be shorter."""
    yield from (list_to_chunk[offset:offset + chunk_size]
                for offset in range(0, len(list_to_chunk), chunk_size))
| StarcoderdataPython |
from .password import PasswordChange
from .utente import Utente

# Public re-exports of this package's models.
__all__ = [
    "PasswordChange",
    "Utente",
]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.