id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
264217 | import time
from random import choice
from tests_steps.banners_steps import *
from tests_steps.browser_config import *
def test_submit_random_form_in_full_hd():
    """Open the site in a Full HD window, pick a random banner, fill its
    e-mail form, submit it and save a screenshot of the result.

    Uses a remote OSX Catalina browser session; the browser is always quit,
    even if an assertion or lookup fails.
    """
    browser = setup_remote_browser(os='OSX_Catalina', size='Full HD', polling_time=5)
    try:
        browser.get(config.BASE_URL)
        banners_container = browser.find_element_by_css_selector('body > div.container > main > div')
        # Collect the banners visible on large screens: the per-column banners
        # that are not desktop-hidden, plus the wide desktop-only banner row.
        banners_list = banners_container.find_elements_by_css_selector('div:nth-child(2) > div.position-relative:not(.d-lg-none)')
        banners_list.append(banners_container.find_element_by_css_selector('div.row.position-relative.d-none.d-lg-flex'))
        banner = choice(banners_list)
        banner.click()
        time.sleep(0.3)  # give the banner form a moment to expand
        banner_form = banner.find_element_by_css_selector('form')
        fill_input_field(form=banner_form,
                         input_selector='div.form-group.mb-4',
                         email_value=f'filling_test{randint(1, 1000)}@local.ad')
        banner_form.find_element_by_css_selector('div.mt-1 > button.btn.btn-primary').click()
        # BUG FIX: the screenshot was previously saved under the mobile-view
        # test's name; label it with this (Full HD) test instead.
        save_screenshot(browser=browser,
                        test_name='filling_and_submit_form_in_full_hd_test')
    finally:
        browser.quit()
def test_submit_random_form_in_mobile_view():
    """Open the site in a tablet-sized window, pick a random banner, fill its
    e-mail form, submit it and save a screenshot of the result.

    Uses a remote Windows 10 browser session; the browser is always quit,
    even if an assertion or lookup fails.
    """
    browser = setup_remote_browser(os='Windows10', size='Tablet', polling_time=5)
    try:
        browser.get(config.BASE_URL)
        banners_row = browser.find_elements_by_css_selector(
            'body > div.container > main > div > div:nth-child(2) > div.position-relative')
        banner = choice(banners_row)
        banner.click()
        time.sleep(0.3)  # give the banner form a moment to expand
        banner_form = banner.find_element_by_css_selector('form')
        # BUG FIX: the e-mail value contained a bare "<EMAIL>" placeholder,
        # which is not a valid address; use the same test domain as the
        # Full HD test for consistency.
        fill_input_field(form=banner_form,
                         input_selector='div.form-group.mb-4',
                         email_value=f'filling_test{randint(1, 1000)}@local.ad')
        banner_form.find_element_by_css_selector('div.mt-1 > button.btn.btn-primary').click()
        save_screenshot(browser=browser,
                        test_name='filling_and_submit_form_in_mobile_view_test')
    finally:
        browser.quit()
| StarcoderdataPython |
3501295 | <reponame>tdcosim/SolarPV-DER-simulation-utility
"""Grid model and shared attributes."""
from __future__ import division
import numpy as np
import math
import cmath
import six
from pvder import utility_functions
class BaseValues():
    """Per-unit base quantities shared by the grid and DER models.

    All electrical bases are derived from the L-G peak voltage base and the
    apparent-power base at the nominal 60 Hz system frequency.
    """
    Vbase = 500.0            # voltage base, L-G peak [V]
    Sbase = 50e3             # apparent power base [VA]
    wbase = 120.0*math.pi    # angular frequency base, 2*pi*60 [rad/s]
    Vdcbase = Vbase          # DC-side base equals the AC-side voltage base
    Ibase = Sbase/Vbase      # current base [A]
    Zbase = Vbase*Vbase/Sbase  # impedance base [ohm]
    Lbase = Zbase/wbase        # inductance base [H]
    Cbase = 1.0/(wbase*Zbase)  # capacitance base [F]
class Grid(BaseValues):
    """ Class for grid"""
    grid_count = 0 #Count for grid objects
    n_ODE = 0 #Number of ODE's
    Vgridrated = 20415.0 # L-G peak to peak equivalent to 25000 V L-L RMS
    _t_voltage_previous = 0.0   # time at which the last voltage event was applied
    _t_frequency_previous = 0.0 # time at which the last frequency event was applied
    def __init__(self,events,unbalance_ratio_b=1.0,unbalance_ratio_c=1.0,Z2_actual = 1.61 + 1j*5.54):
        """Creates an instance of `GridSimulation`.
        Args:
          events: An instance of `SimulationEvents`.
          unbalance_ratio_b,unbalance_ratio_c: Scalar specifying difference in Phase B and Phase C voltage magnitude compared to phase A.
          Z2_actual: Complex scalar specifying the impedance of the feeder connecting the DER with the voltage source.
        """
        #Increment count
        Grid.grid_count = Grid.grid_count+1
        #Events object
        self.events = events
        #Object name
        self.name = 'grid_'+str(Grid.grid_count)
        #Voltage unbalance
        self.unbalance_ratio_b = unbalance_ratio_b
        self.unbalance_ratio_c = unbalance_ratio_c
        #Grid impedance
        self.Z2_actual = Z2_actual
        self.R2_actual = self.Z2_actual.real
        # Imaginary part is the reactance at 60 Hz; convert to inductance.
        self.L2_actual = self.Z2_actual.imag/(2*math.pi*60.0)
        #Converting to per unit
        self.R2 = self.R2_actual/self.Zbase #Transmission line resistance
        self.L2 = self.L2_actual/self.Lbase #Transmission line inductance (comment previously said "resistance")
        self.Z2 = self.Z2_actual/self.Zbase #Transmission line impedance
        self.transmission_name = 'transmission_'+str(Grid.grid_count)
        #Grid voltage/frequency events
        self.Vagrid,self.wgrid = events.grid_events(t=0.0) #Grid voltage and frequency set-point
        # Scale the event set-point up to the rated grid voltage base.
        self.Vagrid = self.Vagrid*(self.Vgridrated/self.Vbase)
        # Phase B and C set-points: phase-shifted copies of phase A, scaled by
        # the unbalance ratios (Ub_calc/Uc_calc apply the 120-degree shifts).
        self.Vbgrid = utility_functions.Ub_calc(self.Vagrid*self.unbalance_ratio_b)
        self.Vcgrid = utility_functions.Uc_calc(self.Vagrid*self.unbalance_ratio_c)
        #Actual Grid voltage
        self.vag = self.Vagrid
        self.vbg = utility_functions.Ub_calc(self.vag*self.unbalance_ratio_b)
        self.vcg = utility_functions.Uc_calc(self.vag*self.unbalance_ratio_c)
        self.Vgrms = self.Vgrms_calc()
    @property
    def y0(self):
        """Grid states"""
        # State vector: real/imaginary parts of the three phase voltages.
        return [self.vag.real,self.vag.imag,\
                self.vbg.real,self.vbg.imag,\
                self.vcg.real,self.vcg.imag]
    def Vgrms_calc(self):
        """Grid side terminal voltage - RMS"""
        return utility_functions.Urms_calc(self.vag,self.vbg,self.vcg)
    def steady_state_model(self,t):
        """Grid voltage change."""
        # Fetch the (possibly new) voltage/frequency set-points for time t.
        Vagrid_new,wgrid_new = self.events.grid_events(t)
        Vagrid_new = Vagrid_new*(self.Vgridrated/self.Vbase)
        # Apply a voltage event only when the set-point actually changed and
        # time has not gone backwards relative to the last applied event.
        if abs(self.Vagrid- Vagrid_new) > 0.0 and t >= self._t_voltage_previous:
            utility_functions.print_to_terminal("{}:Grid voltage changed from {:.3f} V to {:.3f} V at {:.3f} s".format(self.name,self.Vagrid,Vagrid_new,t))
            self.Vagrid = Vagrid_new
            self.Vbgrid = utility_functions.Ub_calc(self.Vagrid*self.unbalance_ratio_b)
            self.Vcgrid = utility_functions.Uc_calc(self.Vagrid*self.unbalance_ratio_c)
            self._t_voltage_previous = t
        # Same one-shot logic for frequency events (reported in Hz).
        if abs(self.wgrid- wgrid_new) > 0.0 and t >= self._t_frequency_previous:
            utility_functions.print_to_terminal("{}:Grid frequency changed from {:.3f} Hz to {:.3f} Hz at {:.3f} s".format(self.name,self.wgrid/(2.0*math.pi),wgrid_new/(2.0*math.pi),t))
            self.wgrid = wgrid_new
            self._t_frequency_previous = t
        # Propagate the (possibly updated) set-points to the actual voltages.
        self.vag = self.Vagrid
        self.vbg = self.Vbgrid
        self.vcg = self.Vcgrid
| StarcoderdataPython |
3437465 | <filename>Problems/Poster artist/task.py
# Read a single word from standard input and echo it in upper case.
print(input().upper())
1678090 | <reponame>niallscc/Optimus
from optimus.engines.base.dask.dataframe import DaskBaseDataFrame
from optimus.engines.cudf.dataframe import CUDFDataFrame
from optimus.engines.dask.io.save import Save
from optimus.engines.pandas.dataframe import PandasDataFrame
from optimus.helpers.converter import pandas_to_dask_dataframe
class DaskDataFrame(DaskBaseDataFrame):
    """Dask-backed Optimus DataFrame.

    Mostly a thin dispatch layer: the accessor properties return the
    Dask-specific implementations of rows/cols/save/functions/mask/ml,
    imported lazily to avoid circular imports at module load time.
    """
    def __init__(self, data):
        # NOTE(review): `self` is passed explicitly in addition to the implicit
        # receiver — presumably the base __init__ signature is (root, data) and
        # this wrapper registers itself as its own root; confirm against
        # DaskBaseDataFrame.__init__.
        super().__init__(self, data)
    def _base_to_dfd(self, pdf, n_partitions):
        # Convert a pandas DataFrame to a Dask dataframe with the requested
        # number of partitions.
        return pandas_to_dask_dataframe(pdf, n_partitions)
    # The following static methods are interface stubs that are not
    # implemented for the Dask engine (they silently return None).
    @staticmethod
    def pivot(index, column, values):
        pass
    @staticmethod
    def melt(id_vars, value_vars, var_name="variable", value_name="value", data_type="str"):
        pass
    @staticmethod
    def query(sql_expression):
        pass
    @staticmethod
    def debug():
        pass
    @staticmethod
    def create_id(column="id"):
        pass
    @property
    def rows(self):
        # Row-wise operations accessor.
        from optimus.engines.dask.rows import Rows
        return Rows(self)
    @property
    def cols(self):
        # Column-wise operations accessor.
        from optimus.engines.dask.columns import Cols
        return Cols(self)
    @property
    def save(self):
        # Output/save operations accessor.
        return Save(self)
    @property
    def functions(self):
        from optimus.engines.dask.functions import DaskFunctions
        return DaskFunctions()
    @property
    def mask(self):
        from optimus.engines.dask.mask import DaskMask
        return DaskMask(self)
    @property
    def ml(self):
        from optimus.engines.dask.ml.models import ML
        return ML(self)
    @property
    def encoding(self):
        # Encoding reuses the pandas implementation.
        from optimus.engines.pandas.ml.encoding import Encoding
        return Encoding()
    def to_optimus_pandas(self):
        # Materialize the Dask dataframe as a pandas-backed Optimus frame.
        return PandasDataFrame(self.root.to_pandas())
    def to_optimus_cudf(self):
        # NOTE(review): conversion goes through pandas; CUDFDataFrame is
        # presumably able to ingest a pandas DataFrame — confirm.
        return CUDFDataFrame(self.root.to_pandas())
| StarcoderdataPython |
397590 | <reponame>dendisuhubdy/grokmachine<filename>Vault7/Lost-in-Translation/windows/fuzzbunch/command.py<gh_stars>10-100
"""
Derived command line processing handler class from the Python standard
module 'cmd'. Many methods overridden to support more FB like behavior.
"""
import string
import subprocess
import time
from context import CmdCtx
import exception
import iohandler
import cmd
__all__ = ["FbCmd"]
# Prompt building blocks: prompts look like "fb (Type Name) > ".
PROMPT_PRE = 'fb'
PROMPT_POST = '> '
PROMPT_FMTSTR = " %s (%s) "
# Characters treated as part of a command word when parsing an input line.
IDENTCHARS = string.ascii_letters + string.digits + '_'
# Run-mode flags (see FbCmd.runmode_interactive / runmode_noninteractive).
INTERACTIVE = 1
NONINTERACTIVE = 2
class FbCmd(cmd.Cmd):
    """Reimplementation of the Python Cmd class to fit more inline with Fuzzbunch
    needs and operation. It effectively provides a base set of capabilities and
    commands. The commands are:
    * changeprompt
    * help
    * history
    * sleep
    * echo
    * shell
    * quit
    * python
    * script
    """
    # NOTE: this module uses Python 2 syntax ("except X, err", "raise X, msg")
    # and will not run under Python 3.
    use_rawinput = 1
    identchars = IDENTCHARS
    # Create a CmdCtx for this class
    defaultcontext = CmdCtx("Fuzzbunch", "Fuzzbunch")
    # Single-character shortcuts expanded by parseline() before dispatch.
    shortcutKeys = {"?" : "help",
                    "!" : "shell"}
    # Trailing-character help triggers, e.g. "foo?" -> "help foo".
    helpKeys = {"?" : "help"}
    def __init__(self, stdin=None, stdout=None, stderr=None, enablecolor=True):
        # Set our I/O handlers
        # NOTE(review): the `stderr` argument is ignored; stdout is passed as
        # stderr — possibly intentional, confirm against iohandler.IOhandler.
        self.init_io(stdin=stdin, stdout=stdout, stderr=stdout, enablecolor=enablecolor)
        self.runmode_interactive() # default to interactive mode
        self.promptpre = PROMPT_PRE
        self.completekey = 'tab'
        self.cmdqueue = [] # Holds a list of commands yet to be executed
        self.cmdhistory = [] # Holds a list of commands already executed
        self.setcontext(None) # Set us to the default context
        self.setprompt()
    """
    I/O Handling
    Changed so that we can handle raw I/O, which python cmd.py cannot.
    """
    def init_io(self, stdin=None, stdout=None, stderr=None, logfile=None, enablecolor=True):
        # Build the shared IOhandler.  NOTE(review): `stderr` is accepted but
        # never forwarded — confirm whether IOhandler should receive it.
        self.io = iohandler.IOhandler(stdin, stdout, logfile, enablecolor=enablecolor)
    def set_raw(self, mode=1):
        # Toggle raw input handling (1 = raw); silently ignores other values.
        if mode in (1,0):
            self.io.raw_input = mode
    def set_ionoprompt(self, mode=False):
        # Suppress prompt printing when True; silently ignores other values.
        if mode in (True,False):
            self.io.noprompt = mode
    """
    Run Mode Handling
    Added to enable scriptability
    """
    def runmode_interactive(self):
        # Interactive mode: raw input on, scripting off.
        self.runmode = INTERACTIVE
        self.set_raw()
        self.scripting(False)
    def runmode_noninteractive(self):
        # Non-interactive mode: raw input off, scripting on.
        self.runmode = NONINTERACTIVE
        self.set_raw(0)
        self.scripting(True)
    def scripting(self, mode=False):
        # Enable/disable script mode; also suppresses the prompt while scripted.
        if mode in (False,True):
            self.scriptmode = mode
            self.set_ionoprompt(mode)
    def is_interactive(self):
        """Return True when the shell is in interactive run mode."""
        if self.runmode == INTERACTIVE:
            return True
        else:
            return False
    def is_scripted(self):
        """Return the current script-mode flag."""
        return self.scriptmode
    """
    Context handling
    Added to enable us to change the prompt easily among different plug-in or
    base contexts.
    """
    def setprompt(self, prompt=None):
        """Set the prompt for the current context. Append the name of
        the current plugin to the prompt
        """
        if prompt is None:
            # Default context gets a bare prompt; plugin contexts show
            # "(type name)" between the prefix and the terminator.
            if self.getcontext().get_name() == self.defaultcontext.get_name():
                context = " "
            else:
                context = PROMPT_FMTSTR % (self.getcontext().get_type(),
                                           self.getcontext().get_name())
            prompt = self.promptpre + context + PROMPT_POST
        self.prompt = prompt
    def setcontext(self, new_context):
        """Change contexts"""
        if new_context is None:
            new_context = self.defaultcontext
        self.ctx = new_context
    def getcontext(self):
        """Retrieve the current plugin context"""
        return self.ctx
    """
    Change prompt look
    """
    def help_changeprompt(self):
        usage = ["changeprompt [new prompt]",
                 "Change the command prompt string. Run with no",
                 "args for default prompt."]
        self.io.print_usage(usage)
    def do_changeprompt(self, input):
        """Change the command prompt"""
        newprompt = input.strip()
        if newprompt:
            self.promptpre = newprompt
        else:
            # No argument: restore the default prompt prefix.
            self.promptpre = PROMPT_PRE
        self.setprompt()
    """
    Command parsing and handling
    """
    def cmdloop(self):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        self.io.pre_input(self.complete)
        try:
            stop = None
            while not stop:
                if self.cmdqueue:
                    # First, clear out anything we have in the command queue
                    line = self.cmdqueue.pop(0)
                else:
                    # Then, accept input
                    line = self.io.get_input(self.prompt)
                stop = self.runcmd(line)
            self.postloop()
        finally:
            # Always restore the I/O handler state (e.g. readline completer).
            self.io.post_input()
    def runcmdlist(self, cmdlist):
        # Run commands in order until one signals stop; CmdErr is reported.
        stop = None
        while cmdlist and not stop:
            stop = self.runcmd(cmdlist.pop(0))
    def runcmdlist_noex(self, cmdlist):
        # Like runcmdlist, but lets CmdErr propagate to the caller.
        stop = None
        while cmdlist and not stop:
            stop = self.runcmd_noex(cmdlist.pop(0))
    def runcmd_noex(self, line):
        # Full pre/exec/post cycle for a single line; exceptions propagate.
        line = self.precmd(line)
        stop = self.onecmd(line)
        return self.postcmd(stop, line)
    def runcmd(self, line):
        # Like runcmd_noex, but reports CmdErr and keeps the loop alive.
        try:
            stop = self.runcmd_noex(line)
        except exception.CmdErr, err:
            self.io.print_error(err.getErr())
            stop = None
        return stop
    def register_shortcut(self, shortcutChar, expansion):
        """Register a new shortcut key expansion. If a shortcut key is reused
        the old command will be deleted.
        """
        if shortcutChar in self.shortcutKeys:
            del self.shortcutKeys[shortcutChar]
        self.shortcutKeys[shortcutChar] = expansion
    def precmd(self, line):
        """Executed before each command. Append the line to history and then log
        the line to the output.
        """
        if len(line.strip()):
            self.cmdhistory.append(line)
            self.io.log(self.prompt + line)
        return line
    #def postcmd(self, stop, line):
    #    """Executed after each command."""
    #    return stop
    #def preloop(self):
    #    pass
    #def postloop(self):
    #    pass
    def parseline(self, line):
        """Parse the line into a command name and a string containing the
        arguments. Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if line couldn't be parsed. Check for
        registered special handlers.
        """
        line = line.strip()
        if not line:
            return None, None, line
        # Trailing help key, e.g. "foo?" becomes "help foo".
        if line[-1:] in self.helpKeys:
            line = self.helpKeys[line[-1:]] + " " + line[:-1]
        # Leading shortcut key, e.g. "!ls" becomes "shell ls".
        if line[0] in self.shortcutKeys:
            line = self.shortcutKeys[line[0]] + " " + line[1:]
        # Split the leading identifier from the argument remainder.
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars:
            i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line
    def onecmd(self, line):
        """Run a single command. Exceptions should be caught by the caller"""
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if cmd == '':
            return self.default(line)
        else:
            try:
                # retrieve the command execution function, which will be
                # self.do_<command>
                func = getattr(self, 'do_' + cmd.lower())
            except AttributeError:
                return self.default(line)
            return func(arg)
    def emptyline(self):
        """Called when an empty line is encountered"""
        pass
    def default(self, line):
        """Called when command prefix is not recognized."""
        cmd, arg, line = self.parseline(line)
        # Check if the current context handles the cmd instead
        try:
            func = self.ctx.lookup_function(cmd)
        except AttributeError:
            self.io.print_error("Unknown syntax: %s" % line)
        else:
            func(arg)
    #def completedefault(self, *ignored):
    #    return []
    def completenames(self, text, *ignored):
        """Return a list of command names for command completion."""
        # Merge context command names with the shell's own do_* names.
        dotext = 'do_' + text
        return [ a[3:] for a in self.ctx.get_names() if a.startswith(dotext) ] +\
               [ a[3:] for a in self.get_names() if a.startswith(dotext) ]
    def get_compstate(self, text, arglist):
        # Index of the argument currently being completed on the line.
        if text == "":
            return len(arglist)
        else:
            return max(len(arglist) - 1, 0)
    def complete(self, text, state):
        """Return the next possible completion for 'text'."""
        if state == 0:
            # pyreadline substitutes for readline on Windows.
            try:
                import readline
            except ImportError:
                import pyreadline as readline
            origline = readline.get_line_buffer()
            begidx = readline.get_begidx()
            endidx = readline.get_endidx()
            if begidx > 0:
                # Completing an argument: look up a complete_<cmd> handler on
                # the shell first, then on the active context.
                cmd, args, foo = self.parseline(origline)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd.lower())
                    except AttributeError:
                        try:
                            compfunc = self.ctx.lookup_compfunction(cmd)
                        except AttributeError:
                            compfunc = self.completedefault
            else:
                # Completing the command word itself.
                compfunc = self.completenames
            arglist = [item.strip() for item in origline.strip().split()]
            comp_state = self.get_compstate(text, arglist)
            self.completion_matches = compfunc(text, origline, arglist, comp_state, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None
    #def get_names(self):
    #    names = []
    #    classes = [self.__class__]
    #    while classes:
    #        aclass = classes.pop(0)
    #        if aclass.__bases__:
    #            classes = classes + list(aclass.__bases__)
    #        names = names + dir(aclass)
    #    return names
    def complete_help(self, *args):
        return self.completenames(*args)
    """
    Cmd: help
    """
    def get_help_lists(self, names, ctx):
        # Build (command, docstring) pairs for every unique do_* name.
        do_cmds = list(set([name for name in names if name.startswith('do_')]))
        do_cmds.sort()
        return [(name[3:], str(getattr(ctx, name).__doc__)) for name in do_cmds]
    def get_shortcut_help(self):
        """Shortcut help"""
        return [(key, "Shortcut for %s" % val) for key,val in self.shortcutKeys.items()]
    def do_help(self, input):
        """Print out help"""
        args = input.strip().split()
        if len(args) > 0:
            # Topic help: try the context's help function, then help_<topic>.
            arg = args[0]
            try:
                func = self.ctx.lookup_helpfunction(arg)
                func()
            except AttributeError:
                pass
            try:
                func = getattr(self, 'help_' + arg.lower())
                func()
            except AttributeError:
                pass
        else:
            # No topic: list core commands, plus context commands if a
            # non-default context is active.
            cmds = self.get_shortcut_help() + self.get_help_lists(self.get_names(), self)
            cmdlist = {'title' : "Core Commands",
                       'commands' : cmds}
            self.io.print_cmd_list(cmdlist)
            if self.ctx.get_name() != self.defaultcontext.get_name():
                cmds = self.get_help_lists(self.ctx.get_names(), self.ctx)
                cmdlist = {'title' : "%s Commands" %self.ctx.get_type(),
                           'commands' : cmds}
                self.io.print_cmd_list(cmdlist)
    """
    Cmd: history
    """
    def help_history(self):
        usage = ["history [index]",
                 "Rerun a previous command. Omit index to print history"]
        self.io.print_usage(usage)
    def do_history(self, arg):
        """Run a previous command."""
        # keep the history cmds out of the history to reduce noise
        self.cmdhistory.pop()
        if len(arg) == 0:
            history = {'items' : enumerate(self.cmdhistory)}
            self.io.print_history(history)
        else:
            try:
                index = int(arg)
            except ValueError:
                self.io.print_error("Bad history index")
                return
            try:
                # Queue the chosen command for re-execution by the main loop.
                self.cmdqueue.append(self.cmdhistory[index])
            except IndexError:
                maxIndex = len(self.cmdhistory) - 1
                self.io.print_error("History index out of range [0 : %d]" % maxIndex)
    """
    Cmd: sleep
    """
    def help_sleep(self):
        usage = ["sleep [N seconds]",
                 "Sleep for N seconds"]
        self.io.print_usage(usage)
    def do_sleep(self, count):
        """Sleep for n seconds"""
        try:
            count = int(count)
        except ValueError:
            self.io.print_error("Invalid delay")
            return
        self.io.print_msg("Sleeping for %d seconds" % count)
        try:
            time.sleep(count)
        except KeyboardInterrupt:
            # Allow Ctrl-C to cut the sleep short without killing the shell.
            self.io.print_error("User Interrupt")
    """
    Cmd: echo
    """
    def help_echo(self):
        usage = ["echo [msg]",
                 "echo the given message"]
        self.io.print_usage(usage)
    def do_echo(self, msg):
        """Echo a message"""
        self.io.print_msg(msg.strip())
    """
    Cmd: shell
    """
    def help_shell(self):
        usage = ["shell [command [args]]",
                 "Runs command with args in OS shell"]
        self.io.print_usage(usage)
    def do_shell(self, arg):
        """Execute a shell command"""
        try:
            retcode = subprocess.call(arg, shell=True)
            del retcode
        except OSError, e:
            self.io.print_error("Execution failed: " + e.message)
        except KeyboardInterrupt:
            self.io.print_warning("Execution aborted by user: Ctrl-c")
    """
    Cmd: EOF, quit
    """
    def help_eof(self):
        usage = ["eof",
                 "Quits program (CTRL-D)"]
        self.io.print_usage(usage)
    def do_eof(self, arg):
        """Quit program (CTRL-D)"""
        return self.do_quit(arg)
    def help_quit(self):
        usage = ["quit",
                 "Quits program (CTRL-D)"]
        self.io.print_usage(usage)
    def do_quit(self, arg):
        """Quit program"""
        # Returning True stops the command loop.
        return True
    """
    Cmd: Python
    """
    def help_python(self):
        usage = ["python",
                 "Enters the interactive python interpreter. Exit the",
                 "interpreter to return back to Fuzzbunch."]
        self.io.print_usage(usage)
    def do_python(self, arg):
        """Drop to an interactive Python interpreter"""
        # Signalled via exception; the outer driver catches it and starts
        # the interpreter.
        raise exception.Interpreter
    """
    Scripting Support
    """
    def help_script(self):
        usage = ["script [scriptfile]",
                 "Run the given scriptfile"]
        self.io.print_usage(usage)
    def do_script(self, input):
        """Run a script"""
        inputList = input.strip().split()
        if len(inputList) == 0:
            self.help_script()
        else:
            try:
                self.scripting(True)
                try:
                    # Load the script, skipping comment lines.
                    script = [ line.strip()
                               for line in open(inputList[0]).readlines()
                               if not line.startswith('#') ]
                except IOError:
                    raise exception.CmdErr, "Couldn't read script file"
                self.runcmdlist_noex(script)
            except exception.CmdErr, err:
                self.io.print_error(err.getErr())
                self.io.print_error("Aborting script")
            finally:
                # Always restore interactive (non-script) mode.
                self.scripting(False)
if __name__ == "__main__":
    # Standalone entry point: start an interactive Fuzzbunch command shell.
    fb = FbCmd()
    fb.cmdloop()
| StarcoderdataPython |
3484493 | <reponame>jwspaeth/simple_ml<filename>simple_ml/predict.py<gh_stars>0
def predict():
    """Prediction entry point; intentionally a no-op stub for now."""
1706122 | import random as aleas
import matplotlib.pyplot as plt
from scipy.signal import freqz
import numpy as np
import pandas as pd
import statsmodels.api as sm
"""
random : pour generer des nombres aleatoires
matplotlib.pyplot : pour generer des graphiques et gerer leur construction
scipy.signal : pour avoir le TF de l'autocorrelation
numpy : pour implementer les moyennes et les covariances
"""
###########################################################################
# EXERCISE 3 - AR model identification
###########################################################################
"""
QUESTION 1 - Creation de trois series temporelles y1, y2, y3 par simulation stohchastique
"""
# AR(2) coefficient pairs for the three simulated sub-series.
a = [- 0.0707, 0.2500]
b = [- 1.6674, 0.9025]
c = [1.7820, 0.8100]
# Data: n samples; t starts at -2 so the recursion can look back two steps.
n = 1536
t = range(- 2, n - 1)
y = [k*0 for k in t]
# Create the three series: each third of y is driven by one AR(2) model
# excited by unit-variance Gaussian white noise.
y1 = []
y2 = []
y3 = []
for k in range(1, int(n/3)):
    y[k] = -a[0]*y[k - 1] - a[1]*y[k - 2] + aleas.gauss(0, 1)
    y1.append(y[k])
for k in range(int(n/3) + 1, 2*int(n/3)):
    y[k] = -b[0]*y[k - 1] - b[1]*y[k - 2] + aleas.gauss(0, 1)
    y2.append(y[k])
for k in range(2*int(n/3) + 1, n):
    y[k] = -c[0]*y[k - 1] - c[1]*y[k - 2] + aleas.gauss(0, 1)
    y3.append(y[k])
# Plot series 1
plt.plot(t[0 : int(n/3)], y[0 : int(n/3)], color = '#EC3874')
plt.grid()
plt.title("Serie 1")
plt.show()
# Plot series 2
# NOTE(review): the third positional argument looks accidental (a data list
# where a format/color spec would normally go) — confirm intent.
plt.plot(t[int(n/3) + 1 : 2*int(n/3)], y[int(n/3) + 1 : 2*int(n/3)], y[0:int(n/3)])
plt.grid()
plt.title("Serie 2")
plt.show()
# Plot series 3
plt.plot(t[2*int(n/3) + 1 : n], y[2*int(n/3) + 1:n], color ='#4CAE58')
plt.grid()
plt.title("Serie 3")
plt.show()
"""
QUESTION 2 - Visualisation des spectres des sous-series
"""
def spectre(*args):
    """Compute 256-point magnitude spectra of one or more AR models.

    Each positional argument is a denominator coefficient vector ``a`` of an
    all-pole filter 1/A(z); its magnitude response |1/A(e^jw)| is evaluated
    with ``scipy.signal.freqz``.

    Args:
        *args: one or more AR denominator coefficient sequences.

    Returns:
        tuple: ``(f, mag)`` where ``f`` is the array of angular frequencies
        (rad/sample, shared by all spectra) and ``mag`` is a list with one
        magnitude array per input vector.  With no arguments, returns
        ``(None, [])`` instead of raising (the original indexed ``args[0]``).
    """
    Np = 256  # number of spectrum points
    f = None
    mag = []
    for coeffs in args:
        # freqz returns (frequencies, complex response).  The frequency grid
        # is identical for every call at fixed Np, so capture it once instead
        # of issuing a redundant extra freqz call for args[0] as before.
        w, h = freqz(1, coeffs, Np)
        if f is None:
            f = w
        mag.append(abs(h))
    return (f, mag)
# Theoretical spectra of the three AR(2) models ([1]+coeffs is the full
# denominator polynomial of 1/A(z)).
f,mag=spectre([1]+a,[1]+b,[1]+c)
spectre1 = mag[0]
spectre2 = mag[1]
spectre3 = mag[2]
plt.semilogy(
    f,mag[0],'-g',
    f,mag[1],':b',
    f,mag[2],'--r'
)
plt.grid()
plt.legend(['spectre1', 'spectre2','spectre3'])
plt.title("Spectres")
plt.show()
"""
QUESTION 2 - Visualisation de l'autocorrelation et de la densité spectrale de puissance
pour chaque serie temporelle
"""
# Plot series 1: autocorrelation then power spectral density
sm.graphics.tsa.plot_acf(y[0:int(n/3)+1], lags = 40, color = '#EC3874')
plt.title("Autocorrelation de la serie 1")
plt.grid()
plt.show()
# Power spectral density of y1
plt.psd(y1[:])
plt.title("Densité spectrale de puissance de y1")
plt.show()
# Plot series 2
sm.graphics.tsa.plot_acf(y[int(n/3)+1:2*int(n/3)], lags = 40)
plt.grid()
plt.title("Autocorrelation de la serie 2")
plt.show()
# Power spectral density of y2
plt.psd(y2[:])
plt.title("Densité spectrale de puissance de y2")
plt.show()
# Plot series 3
sm.graphics.tsa.plot_acf(y[2*int(n/3)+1:n], lags = 40, color = '#4CAE58')
plt.grid()
plt.title("Autocorrelation de la serie 3")
plt.show()
# Power spectral density of y3
plt.psd(y3[:])
plt.title("Densité spectrale de puissance de y3")
plt.show()
"""
QUESTION 3 - Creation d'une serie temporelle constituee par la somme des series
synthetisees precedemment.
"""
# Build y as the element-wise sum of the three sub-series and plot it
somme = []
for j in range(len(y1)):
    somme.append(y1[j] + y2[j] + y3[j])
plt.plot(range(len(y1)),somme[:])
plt.grid()
plt.title("y : somme de y1, y2 et y3")
plt.show()
# Autocorrelation of y
sm.graphics.tsa.plot_acf(somme, lags = 40)
plt.grid()
plt.title("Autocorrelation de y")
plt.show()
# Power spectral density of y
plt.psd(somme[:])
plt.title("Densité spectrale de puissance de y")
plt.show()
"""
QUESTION 4 - Modélisation de y par un processus AR d'ordre 2.
L'objectif de cette etape est d'estimer les coefficients de ce modele et de comparer
les autocorrélations/densites spectrales de y et du modele estime.
"""
# Regenerate the three sub-series (fresh noise realisation) before fitting.
t=range(-2,n-1)
y=[k*0 for k in t]
y1 = []
y2 = []
y3 = []
for k in range(1,int(n/3)):
    y[k]=-a[0]*y[k-1]-a[1]*y[k-2]+aleas.gauss(0,1)
    y1.append(y[k])
for k in range(int(n/3)+1,2*int(n/3)):
    y[k]=-b[0]*y[k-1]-b[1]*y[k-2]+aleas.gauss(0,1)
    y2.append(y[k])
for k in range(2*int(n/3)+1,n):
    y[k]=-c[0]*y[k-1]-c[1]*y[k-2]+aleas.gauss(0,1)
    y3.append(y[k])
def AR_model_somme(debut, fin, serie, vrai_spectre):
    """Fit AR(2) and AR(3) models to y[debut:fin] and compare their spectra.

    Uses the globals ``y``, ``t`` and the ``spectre`` helper.

    :param debut: start index of the interval
    :param fin: end index of the interval
    :param serie: name of the series being modelled (used as plot title)
    :param vrai_spectre: true spectrum to compare the estimates against
    :type debut: int
    :type fin: int
    :type serie: str
    :type vrai_spectre: magnitude-spectrum array
    :return: shows the time series and the spectrum comparison plots
    :rtype: return value of plt.show
    """
    # Covariance matrix of the series and four zero-padded shifted copies:
    # D[i][j] approximates the autocovariance at lag |i - j|.
    D = np.cov([
        y[debut : fin] + [0, 0, 0, 0],
        [0] + y[debut : fin] + [0, 0, 0],
        [0, 0] + y[debut : fin] + [0, 0],
        [0, 0, 0] + y[debut : fin] + [0],
        [0, 0, 0, 0] + y[debut : fin]])
    # Yule-Walker style estimates of the AR coefficients.
    E = - np.linalg.inv(D[0:2, 0:2]) @ D[0, 1:3].reshape(2, 1) # order-2 fit
    H = - np.linalg.inv(D[0:3, 0:3]) @ D[0, 1:4].reshape(3, 1) # order-3 fit
    E1 = np.append([1], E) # full coefficient vector including a0 (order 2)
    H1 = np.append([1], H) # full coefficient vector including a0 (order 3)
    # Plot the series over the chosen interval
    plt.plot(t[debut : fin], y[debut : fin])
    plt.title(serie)
    plt.show()
    # Estimated spectra of the fitted models
    f, mag = spectre(E1, H1)
    # Compare the estimated spectra against the true one
    plt.semilogy(
        f, mag[0],
        f, mag[1],
        ':r',
        f, vrai_spectre,':b',
        linewidth = 2,
    )
    plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
    plt.legend(['ordre2', 'ordre3',"vrai spectre"])
    return plt.show()
# Fit AR(2)/AR(3) models over each sub-interval and over the whole series.
AR_model_somme(0,int(n/3),"série 1",spectre1)
AR_model_somme(int(n/3),2*int(n/3),"série 2",spectre2)
AR_model_somme(0,n,"serie 3",spectre3)
# Spectrum of the AR model whose coefficients are the sums a+b+c
s=[]
for i in range(2):
    s.append(a[i]+b[i]+c[i])
f,mag=spectre([1]+s)
spectreS = mag[0]
plt.semilogy(
    f,mag[0],
)
plt.grid()
plt.legend('spectre1')
plt.title("Spectre de la somme")
plt.show()
"""
QUESTION 5 - Modèles AR de plusieurs ordres [NOT WORKING YET]
"""
# The two triple-quoted strings below are commented-out draft code for
# fitting AR models of arbitrary order; kept verbatim.
"""
def AR_n(debut, fin, serie, vrai_spectre, ordre1, ordre2):
    D = np.cov([
        y[debut : fin] + [0, 0, 0, 0],
        [0] + y[debut : fin] + [0, 0, 0],
        [0, 0] + y[debut : fin] + [0, 0],
        [0, 0, 0] + y[debut : fin] + [0],
        [0, 0, 0, 0] + y[debut : fin]])
    E = - np.linalg.inv(D[0:ordre1, 0:ordre1]) @ D[0, 1:ordre1+1].reshape(ordre1, 1) # ordre
    H = - np.linalg.inv(D[0:ordre2, 0:ordre2]) @ D[0, 1:ordre2+1].reshape(ordre2, 1) # ordre
    E1 = np.append([1], E) # vecteur de coefficients incluant a0(ordre 4)
    H1 = np.append([1], H)
    #trace de la serie entre 0 et le debut de l'intervalle
    plt.plot(t[debut : fin], y[debut : fin])
    plt.title(serie)
    plt.show()
    #Tracé des spectres (estimation)
    f, mag = spectre(E1, H1)
    #Calcul des spectres des trois sous-series
    plt.semilogy(
        f, mag[0],
        f, mag[1],
        ':r',
        f, vrai_spectre,':b',
        linewidth = 2,
    )
    plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
    plt.legend(['ordre' + str(ordre1), 'ordre' + str(ordre2), "Vrai spectre"])
    return plt.show()"""
"""debut = 0
fin = n
ordre1 = 5
ordre2 = 6
D = np.cov([
    y[debut : fin] + [0, 0, 0, 0],
    [0] + y[debut : fin] + [0, 0, 0],
    [0, 0] + y[debut : fin] + [0, 0],
    [0, 0, 0] + y[debut : fin] + [0],
    [0, 0, 0, 0] + y[debut : fin]])
E = - np.linalg.inv(D[0:ordre1, 0:ordre1]) @ D[0, 1:ordre1+1].reshape(ordre1, 1) # ordre
H = - np.linalg.inv(D[0:ordre2, 0:ordre2]) @ D[0, 1:ordre2+1].reshape(ordre2, 1) # ordre
E1 = np.append([1], E) # vecteur de coefficients incluant a0(ordre 4)
H1 = np.append([1], H)
#trace de la serie entre 0 et le debut de l'intervalle
plt.plot(t[debut : fin], y[debut : fin])
plt.title("serie ordre 3 et 4")
plt.show()
#Tracé des spectres (estimation)
f, mag = spectre(E1, H1)
#Calcul des spectres des trois sous-series
plt.semilogy(
    f, mag[0],
    f, mag[1],
    ':r',
    f, spectre3,':b',
    linewidth = 2,
)
plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
plt.legend(['ordre' + str(ordre1), 'ordre' + str(ordre2), "Vrai spectre"])
plt.show()
"""
"""
QUESTION 6 - Modélisation de y par un processus AR d'ordre 3 et 4.
L'objectif de cette etape est d'estimer les coefficients de ce modele.
"""
# Regenerate the three sub-series (fresh noise realisation) before fitting.
t=range(-2,n-1)
y=[k*0 for k in t]
y1 = []
y2 = []
y3 = []
for k in range(1,int(n/3)):
    y[k]=-a[0]*y[k-1]-a[1]*y[k-2]+aleas.gauss(0,1)
    y1.append(y[k])
for k in range(int(n/3)+1,2*int(n/3)):
    y[k]=-b[0]*y[k-1]-b[1]*y[k-2]+aleas.gauss(0,1)
    y2.append(y[k])
for k in range(2*int(n/3)+1,n):
    y[k]=-c[0]*y[k-1]-c[1]*y[k-2]+aleas.gauss(0,1)
    y3.append(y[k])
def AR_model_somme2(debut, fin, serie, vrai_spectre):
    """Fit AR(3) and AR(4) models to y[debut:fin] and compare their spectra.

    Near-duplicate of :func:`AR_model_somme` with higher model orders.
    Uses the globals ``y``, ``t`` and the ``spectre`` helper.

    :param debut: start index of the interval
    :param fin: end index of the interval
    :param serie: name of the series being modelled (used as plot title)
    :param vrai_spectre: true spectrum to compare the estimates against
    :type debut: int
    :type fin: int
    :type serie: str
    :type vrai_spectre: magnitude-spectrum array
    :return: shows the time series and the spectrum comparison plots
    :rtype: return value of plt.show
    """
    # Covariance matrix of the series and four zero-padded shifted copies:
    # D[i][j] approximates the autocovariance at lag |i - j|.
    D = np.cov([
        y[debut : fin] + [0, 0, 0, 0],
        [0] + y[debut : fin] + [0, 0, 0],
        [0, 0] + y[debut : fin] + [0, 0],
        [0, 0, 0] + y[debut : fin] + [0],
        [0, 0, 0, 0] + y[debut : fin]])
    # Yule-Walker style estimates of the AR coefficients.
    E = - np.linalg.inv(D[0:3, 0:3]) @ D[0, 1:4].reshape(3, 1) # order-3 fit
    H = - np.linalg.inv(D[0:4, 0:4]) @ D[0, 1:5].reshape(4, 1) # order-4 fit
    E1 = np.append([1], E) # full coefficient vector including a0 (order 3)
    H1 = np.append([1], H) # full coefficient vector including a0 (order 4)
    # Plot the series over the chosen interval
    plt.plot(t[debut : fin], y[debut : fin])
    plt.title(serie)
    plt.show()
    # Estimated spectra of the fitted models
    f, mag = spectre(E1, H1)
    # Compare the estimated spectra against the true one
    plt.semilogy(
        f, mag[0],
        f, mag[1],
        ':r',
        f, vrai_spectre,':b',
        linewidth = 2,
    )
    plt.title('Spectre / Calcul sur l intervalle [{} {}]'.format(debut, fin))
    # NOTE(review): legend labels still say 'ordre2'/'ordre3' although this
    # function fits orders 3 and 4 — confirm before relying on the figure.
    plt.legend(['ordre2', 'ordre3',"vrai spectre"])
    return plt.show()
# Run the order-3/order-4 fits on each sub-interval, on the whole series,
# and once more passing the raw series itself as the reference spectrum.
AR_model_somme2(0, int(n/3), "série 1", spectre1)
AR_model_somme2(int(n/3), 2*int(n/3), "série 2", spectre2)
AR_model_somme2(0, n, "serie 3", spectre3)
AR_model_somme2(0, n, "y", y[:])
1768994 | <reponame>SebastianPartarrieu/ThroughArtistsEyes
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from os import listdir
from numpy import asarray
from numpy import vstack
from numpy import savez_compressed
from numpy import load
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from keras.layers import Conv2D, Dense, LeakyReLU, Conv2DTranspose, Activation, Concatenate
from keras.models import Model, Input
from keras.initializers import RandomNormal
from keras.optimizers import Adam
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
#from tensorflow.keras import backend as K
# config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=8,
# inter_op_parallelism_threads=8,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 1}
# )
# session = tf.compat.v1.Session(config=config)
# K.set_session(session)
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# When LOADED is True the slow image-loading / compression step below is
# skipped and the pre-built .npz archive is read directly.
LOADED = True
N_EPOCHS = 5  # NOTE(review): redefined inside the `if not LOADED` branch below
if not LOADED:
    # load all images in a directory into memory
    def load_images(path, size=(256,256)):
        """Load every file in `path` as an RGB image resized to `size`,
        returning them stacked as a single numpy array."""
        data_list = list()
        # enumerate filenames in directory, assume all are images
        for filename in listdir(path):
            # load and resize the image
            pixels = load_img(path + filename, target_size=size)
            # convert to numpy array
            pixels = img_to_array(pixels)
            # store
            data_list.append(pixels)
        return asarray(data_list)
    # dataset path
    LOCAL_PATH = 'C:/Users/VR/Documents/MOVIE_local/datasets/monet_2_360/'
    N_EPOCHS = 5
    # load dataset A - 360 degree photos
    ################################################### BEWARE OF SIZE REMEMBER TO CHANGE SIZE IN LOAD IMAGES FUNCTION
    data_train_A = load_images(LOCAL_PATH + 'trainA/')
    data_test_A = load_images(LOCAL_PATH + 'testA/')
    dataA = vstack((data_train_A, data_test_A))
    print(f'Loaded data set A : {dataA.shape}')
    # load dataset B - monet photos
    data_train_B = load_images(LOCAL_PATH + 'trainB/')
    data_test_B = load_images(LOCAL_PATH + 'testB/')
    dataB = vstack((data_train_B, data_test_B))
    print(f'Loaded data set B : {dataB.shape}')
    # save as compressed numpy array
    filename = 'monet_2_360.npz'
    savez_compressed(filename, dataA, dataB)
    print('Saved dataset: ', filename)
# Load the compressed archive and show a few samples from each domain.
data = load('monet_2_360.npz')
dataA, dataB = data['arr_0'], data['arr_1']
print(f'Loaded from compressed NumPy format: dataset A {dataA.shape} dataset B {dataB.shape}')
# optional plotting - WE COULD ADD ARGUMENT PARSER FOR THIS
n_samples = 3
# SHOW pictures
for i in range(n_samples):
    plt.subplot(2, n_samples, i + 1)
    plt.axis('off')
    plt.imshow(dataA[i].astype('uint8'))
plt.show()
# SHOW monet paintings (or other chosen artists)
for i in range(n_samples):
    plt.subplot(2, n_samples, i+1)
    plt.axis('off')
    plt.imshow(dataB[i].astype('uint8'))
plt.show()
#Starting to actually build model from scratch
#################### 70x70 PatchGAN discriminator model, useful as can take different dimension sizes as input
# define the discriminator model
def define_discriminator(image_shape):
# weight initialization
init = RandomNormal(stddev=0.02)
# source image input
in_image = Input(shape=image_shape)
# C64
d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(in_image)
d = LeakyReLU(alpha=0.2)(d)
# C128
d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# C256
d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# C512
d = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# second last output layer
d = Conv2D(512, (4,4), padding='same', kernel_initializer=init)(d)
d = InstanceNormalization(axis=-1)(d)
d = LeakyReLU(alpha=0.2)(d)
# patch output
patch_out = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
# define model
model = Model(in_image, patch_out)
# compile model
model.compile(loss='mse', optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5])
return model
################### Generator models
#Encoder-decoder architecture. Downsampling, encoding down to bottleneck then upsampling and creating output image
def resnet_block(n_filters, input_layer):
# weight initialization
init = RandomNormal(stddev=0.02)
# first layer convolutional layer
g = Conv2D(n_filters, (3,3), padding='same', kernel_initializer=init)(input_layer)
g = InstanceNormalization(axis=-1)(g)
g = Activation('relu')(g)
# second convolutional layer
g = Conv2D(n_filters, (3,3), padding='same', kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
# concatenate merge channel-wise with input layer
g = Concatenate()([g, input_layer])
return g
# define the standalone generator model
def define_generator(image_shape, n_resnet=9):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# c7s1-64
g = Conv2D(64, (7,7), padding='same', kernel_initializer=init)(in_image)
g = InstanceNormalization(axis=-1)(g)
g = Activation('relu')(g)
# d128
g = Conv2D(128, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation('relu')(g)
# d256
g = Conv2D(256, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation('relu')(g)
# R256
for _ in range(n_resnet):
g = resnet_block(256, g)
# u128
g = Conv2DTranspose(128, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation('relu')(g)
# u64
g = Conv2DTranspose(64, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
g = Activation('relu')(g)
# c7s1-3
g = Conv2D(3, (7,7), padding='same', kernel_initializer=init)(g)
g = InstanceNormalization(axis=-1)(g)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
def define_composite_model(g_model_1, d_model, g_model_2, image_shape):
# ensure the model we're updating is trainable
g_model_1.trainable = True
# mark discriminator as not trainable
d_model.trainable = False
# mark other generator model as not trainable
g_model_2.trainable = False
# discriminator element
input_gen = Input(shape=image_shape)
gen1_out = g_model_1(input_gen)
output_d = d_model(gen1_out)
# identity element
input_id = Input(shape=image_shape)
output_id = g_model_1(input_id)
# forward cycle
output_f = g_model_2(gen1_out)
# backward cycle
gen2_out = g_model_2(input_id)
output_b = g_model_1(gen2_out)
# define model graph
model = Model([input_gen, input_id], [output_d, output_id, output_f, output_b])
# define optimization algorithm configuration
opt = Adam(lr=0.0002, beta_1=0.5)
# compile model with weighting of least squares loss and L1 loss
model.compile(loss=['mse', 'mae', 'mae', 'mae'], loss_weights=[1, 5, 10, 10], optimizer=opt)
return model
# load and prepare training images
def load_real_samples(filename):
# load the dataset
data = load(filename)
# unpack arrays
X1, X2 = data['arr_0'], data['arr_1']
# scale from [0,255] to [-1,1]
X1 = (X1 - 127.5) / 127.5
X2 = (X2 - 127.5) / 127.5
return [X1, X2]
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# choose random instances
ix = np.random.randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return X, y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, dataset, patch_shape):
# generate fake instance
X = g_model.predict(dataset)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# save the generator models to file - need h5py installed to run this
def save_models(step, g_model_AtoB, g_model_BtoA):
# save the first generator model
filename1 = 'g_model_AtoB_%06d.h5' % (step+1)
g_model_AtoB.save(filename1)
# save the second generator model
filename2 = 'g_model_BtoA_%06d.h5' % (step+1)
g_model_BtoA.save(filename2)
print('>Saved: %s and %s' % (filename1, filename2))
# generate samples and save as a plot and save the model
def summarize_performance(step, g_model, trainX, name, n_samples=5):
# select a sample of input images
X_in, _ = generate_real_samples(trainX, n_samples, 0)
# generate translated images
X_out, _ = generate_fake_samples(g_model, X_in, 0)
# scale all pixels from [-1,1] to [0,1]
X_in = (X_in + 1) / 2.0
X_out = (X_out + 1) / 2.0
# plot real images
for i in range(n_samples):
plt.subplot(2, n_samples, 1 + i)
plt.axis('off')
plt.imshow(X_in[i])
# plot translated image
for i in range(n_samples):
plt.subplot(2, n_samples, 1 + n_samples + i)
plt.axis('off')
plt.imshow(X_out[i])
# save plot to file
filename1 = '%s_generated_plot_%06d.png' % (name, (step+1))
plt.savefig(filename1)
plt.close()
# update image pool for fake images
def update_image_pool(pool, images, max_size=50):
selected = list()
for image in images:
if len(pool) < max_size:
# stock the pool
pool.append(image)
selected.append(image)
elif np.random() < 0.5:
# use image, but don't add it to the pool
selected.append(image)
else:
# replace an existing image and use replaced image
ix = randint(0, len(pool))
selected.append(pool[ix])
pool[ix] = image
return asarray(selected)
#train cyclegan models
def train(d_model_A, d_model_B, g_model_AtoB, g_model_BtoA, c_model_AtoB, c_model_BtoA, dataset, n_epochs):
# define properties of the training run
n_batch = 1
# determine the output square shape of the discriminator
n_patch = d_model_A.output_shape[1]
# unpack dataset
trainA, trainB = dataset
# prepare image pool for fakes
poolA, poolB = list(), list()
# calculate the number of batches per training epoch
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
for i in range(n_steps):
# select a batch of real samples
X_realA, y_realA = generate_real_samples(trainA, n_batch, n_patch)
X_realB, y_realB = generate_real_samples(trainB, n_batch, n_patch)
# generate a batch of fake samples
X_fakeA, y_fakeA = generate_fake_samples(g_model_BtoA, X_realB, n_patch)
X_fakeB, y_fakeB = generate_fake_samples(g_model_AtoB, X_realA, n_patch)
# update fakes from pool
X_fakeA = update_image_pool(poolA, X_fakeA)
X_fakeB = update_image_pool(poolB, X_fakeB)
# update generator B->A via adversarial and cycle loss
g_loss2, _, _, _, _ = c_model_BtoA.train_on_batch([X_realB, X_realA], [y_realA, X_realA, X_realB, X_realA])
# update discriminator for A -> [real/fake]
dA_loss1 = d_model_A.train_on_batch(X_realA, y_realA)
dA_loss2 = d_model_A.train_on_batch(X_fakeA, y_fakeA)
# update generator A->B via adversarial and cycle loss
g_loss1, _, _, _, _ = c_model_AtoB.train_on_batch([X_realA, X_realB], [y_realB, X_realB, X_realA, X_realB])
# update discriminator for B -> [real/fake]
dB_loss1 = d_model_B.train_on_batch(X_realB, y_realB)
dB_loss2 = d_model_B.train_on_batch(X_fakeB, y_fakeB)
# summarize performance
print('>%d, dA[%.3f,%.3f] dB[%.3f,%.3f] g[%.3f,%.3f]' % (i+1, dA_loss1,dA_loss2, dB_loss1,dB_loss2, g_loss1,g_loss2))
# evaluate the model performance every so often
if (i+1) % (bat_per_epo * 1) == 0:
# plot A->B translation
summarize_performance(i, g_model_AtoB, trainA, 'AtoB')
# plot B->A translation
summarize_performance(i, g_model_BtoA, trainB, 'BtoA')
if (i+1) % (bat_per_epo * 5) == 0:
# save the models
save_models(i, g_model_AtoB, g_model_BtoA)
########################## Making the whole thing run
# with tf.device('/GPU:0'):
dataset = load_real_samples('monet_2_360.npz')
print('Loaded', dataset[0].shape, dataset[1].shape)
# define input shape based on the loaded dataset
image_shape = dataset[0].shape[1:]
# generator: A -> B
g_model_AtoB = define_generator(image_shape)
# generator: B -> A
g_model_BtoA = define_generator(image_shape)
# discriminator: A -> [real/fake]
d_model_A = define_discriminator(image_shape)
# discriminator: B -> [real/fake]
d_model_B = define_discriminator(image_shape)
# composite: A -> B -> [real/fake, A]
c_model_AtoB = define_composite_model(g_model_AtoB, d_model_B, g_model_BtoA, image_shape)
# composite: B -> A -> [real/fake, B]
c_model_BtoA = define_composite_model(g_model_BtoA, d_model_A, g_model_AtoB, image_shape)
# train models
train(d_model_A, d_model_B, g_model_AtoB, g_model_BtoA, c_model_AtoB, c_model_BtoA, dataset, N_EPOCHS) | StarcoderdataPython |
3574471 | <filename>circles/tests/test_commands.py
import datetime
import pytz
from django.test import TestCase
from django.core import mail
from django.core.management import call_command
from django.contrib.auth import get_user_model
from circles.models import Event, MailTemplate
User = get_user_model()
class CheckSeminarsTestCase(TestCase):
def setUp(self):
# users
self.host = User(email="<EMAIL>", username="<EMAIL>")
self.host.save()
self.participant = User(
email="<EMAIL>", username="<EMAIL>"
)
self.participant.save()
# events
past_event = Event(
host=self.host, start=datetime.datetime(1999, 5, 1, 20, 0, tzinfo=pytz.UTC)
)
past_event.save()
past_event.participants.add(self.participant)
Event(
host=self.host, start=datetime.datetime(2222, 5, 1, 20, 0, tzinfo=pytz.UTC)
).save()
# mail template
MailTemplate(
type="join",
language_code="en",
subject_template="test",
body_template="test",
).save()
def test_check_mails_sent(self):
call_command("mail_participants")
self.assertEqual(len(mail.outbox), 2)
def test_not_sent_twice(self):
call_command("mail_participants")
call_command("mail_participants")
call_command("mail_participants")
call_command("mail_participants")
self.assertEqual(len(mail.outbox), 2)
| StarcoderdataPython |
8180995 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class WebServicesOperations(object):
"""WebServicesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def create_or_update(
self, create_or_update_payload, resource_group_name, web_service_name, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a new Azure ML web service or update an existing
one.
:param create_or_update_payload: The payload to create or update the
Azure ML web service.
:type create_or_update_payload: :class:`WebService
<azure.mgmt.machinelearning.models.WebService>`
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param web_service_name: The Azure ML web service name which you want
to reach.
:type web_service_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`WebService
<azure.mgmt.machinelearning.models.WebService>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'webServiceName': self._serialize.url("web_service_name", web_service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(create_or_update_payload, 'WebService')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WebService', response)
if response.status_code == 201:
deserialized = self._deserialize('WebService', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, web_service_name, custom_headers=None, raw=False, **operation_config):
"""Retrieve an Azure ML web service definition by its subscription,
resource group and name.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param web_service_name: The Azure ML web service name which you want
to reach.
:type web_service_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WebService
<azure.mgmt.machinelearning.models.WebService>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'webServiceName': self._serialize.url("web_service_name", web_service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WebService', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def patch(
self, patch_payload, resource_group_name, web_service_name, custom_headers=None, raw=False, **operation_config):
"""Patch an existing Azure ML web service resource.
:param patch_payload: The payload to patch the Azure ML web service
with.
:type patch_payload: :class:`WebService
<azure.mgmt.machinelearning.models.WebService>`
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param web_service_name: The Azure ML web service name which you want
to reach.
:type web_service_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`WebService
<azure.mgmt.machinelearning.models.WebService>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'webServiceName': self._serialize.url("web_service_name", web_service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(patch_payload, 'WebService')
# Construct and send request
def long_running_send():
request = self._client.patch(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WebService', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def remove(
self, resource_group_name, web_service_name, custom_headers=None, raw=False, **operation_config):
"""Remove an existing Azure ML web service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param web_service_name: The Azure ML web service name which you want
to reach.
:type web_service_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'webServiceName': self._serialize.url("web_service_name", web_service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_keys(
self, resource_group_name, web_service_name, custom_headers=None, raw=False, **operation_config):
"""Get the access keys of a particular Azure ML web service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param web_service_name: The Azure ML web service name which you want
to reach.
:type web_service_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WebServiceKeys
<azure.mgmt.machinelearning.models.WebServiceKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}/listKeys'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'webServiceName': self._serialize.url("web_service_name", web_service_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WebServiceKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_in_resource_group(
self, resource_group_name, skiptoken=None, custom_headers=None, raw=False, **operation_config):
"""Retrieve all Azure ML web services in a given resource group.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param skiptoken: Continuation token for pagination.
:type skiptoken: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PaginatedWebServicesList
<azure.mgmt.machinelearning.models.PaginatedWebServicesList>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PaginatedWebServicesList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, skiptoken=None, custom_headers=None, raw=False, **operation_config):
"""Retrieve all Azure ML web services in the current Azure subscription.
:param skiptoken: Continuation token for pagination.
:type skiptoken: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PaginatedWebServicesList
<azure.mgmt.machinelearning.models.PaginatedWebServicesList>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearning/webServices'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PaginatedWebServicesList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| StarcoderdataPython |
1658812 | # -*- coding: utf-8 -*-
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.util.geral import mount_url
class PeerGroupPostSuccessTestCase(NetworkApiTestCase):
    """Sanity tests for the success path of POST /api/v4/peer-group/."""

    peer_group_uri = '/api/v4/peer-group/'
    # Path template for fixtures that belong to the api_peer_group app.
    fixtures_path = 'networkapi/api_peer_group/v4/fixtures/{}'
    # Base config/user/permission fixtures first, then the VRF, environment
    # and route-map rows these tests reference.
    fixtures = [
        'networkapi/config/fixtures/initial_config.json',
        'networkapi/system/fixtures/initial_variables.json',
        'networkapi/usuario/fixtures/initial_usuario.json',
        'networkapi/grupo/fixtures/initial_ugrupo.json',
        'networkapi/usuario/fixtures/initial_usuariogrupo.json',
        'networkapi/api_ogp/fixtures/initial_objecttype.json',
        'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
        'networkapi/grupo/fixtures/initial_permissions.json',
        'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
        fixtures_path.format('initial_vrf.json'),
        fixtures_path.format('initial_environment.json'),
        fixtures_path.format('initial_route_map.json'),
    ]
    # Path template for the JSON payloads posted and compared by these tests.
    json_path = 'api_peer_group/v4/tests/sanity/json/post/{}'

    def setUp(self):
        """Build an authenticated test client and the field list fetched back."""
        self.client = Client()
        self.authorization = self.get_http_authorization('test')
        self.content_type = 'application/json'
        self.fields = ['name', 'route_map_in', 'route_map_out',
                       'environments']

    def tearDown(self):
        pass

    def test_post_peer_groups(self):
        """POST two PeerGroups, then GET them back and compare the round trip."""
        peer_groups_path = self.json_path.\
            format('two_peer_groups.json')
        response = self.client.post(
            self.peer_group_uri,
            data=self.load_json(peer_groups_path),
            content_type=self.content_type,
            HTTP_AUTHORIZATION=self.authorization)
        self.compare_status(201, response.status_code)
        # Fetch the created objects by id, restricted to the posted fields,
        # and verify the response matches the request payload.
        get_ids = [data['id'] for data in response.data]
        uri = mount_url(self.peer_group_uri,
                        get_ids,
                        fields=self.fields)
        response = self.client.get(
            uri,
            HTTP_AUTHORIZATION=self.authorization
        )
        self.compare_status(200, response.status_code)
        self.compare_json(peer_groups_path,
                          response.data)
class PeerGroupPostErrorTestCase(NetworkApiTestCase):
    """Error-path tests for POST /api/v4/peer-group/ (expected 400 responses)."""

    peer_group_uri = '/api/v4/peer-group/'
    # Path template for fixtures that belong to the api_peer_group app.
    fixtures_path = 'networkapi/api_peer_group/v4/fixtures/{}'
    # Same base fixtures as the success case, plus a pre-existing PeerGroup
    # so duplicate route-map usage can be provoked.
    fixtures = [
        'networkapi/config/fixtures/initial_config.json',
        'networkapi/system/fixtures/initial_variables.json',
        'networkapi/usuario/fixtures/initial_usuario.json',
        'networkapi/grupo/fixtures/initial_ugrupo.json',
        'networkapi/usuario/fixtures/initial_usuariogrupo.json',
        'networkapi/api_ogp/fixtures/initial_objecttype.json',
        'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
        'networkapi/grupo/fixtures/initial_permissions.json',
        'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
        fixtures_path.format('initial_vrf.json'),
        fixtures_path.format('initial_environment.json'),
        fixtures_path.format('initial_route_map.json'),
        fixtures_path.format('initial_peer_group.json'),
    ]
    json_path = 'api_peer_group/v4/tests/sanity/json/post/{}'

    def setUp(self):
        """Build an authenticated test client."""
        self.client = Client()
        self.authorization = self.get_http_authorization('test')
        self.content_type = 'application/json'

    def tearDown(self):
        pass

    def test_post_peer_group_with_route_maps_used_by_others_peer_groups(self):
        """Test POST PeerGroup with RouteMaps used by others peer groups"""
        peer_group_path = self.json_path.\
            format('duplicated_peer_group.json')
        response = self.client.post(
            self.peer_group_uri,
            data=self.load_json(peer_group_path),
            content_type=self.content_type,
            HTTP_AUTHORIZATION=self.authorization)
        self.compare_status(400, response.status_code)
        # The API must reject route maps already bound to another PeerGroup.
        self.compare_values(
            u'Already exists PeerGroup with RouteMap id = 1 '
            u'or id = 2',
            response.data['detail']
        )

    def test_post_peer_group_with_equal_route_maps(self):
        """Test POST PeerGroup with equal RouteMaps"""
        peer_group_path = self.json_path.\
            format('peer_group_with_equal_route_maps.json')
        response = self.client.post(
            self.peer_group_uri,
            data=self.load_json(peer_group_path),
            content_type=self.content_type,
            HTTP_AUTHORIZATION=self.authorization)
        self.compare_status(400, response.status_code)
        # In/out route maps must be distinct objects.
        self.compare_values(
            u'RouteMapIn cant be equal RouteMapOut',
            response.data['detail']
        )
| StarcoderdataPython |
5026184 | import pandas as pd
import pandas._testing as tm
def test_getitem_callable():
    # GH 12533: a callable key is called with the Series and its result
    # is used as the actual indexer.
    ser = pd.Series(4, index=list("ABCD"))
    assert ser[lambda x: "A"] == ser.loc["A"]
    tm.assert_series_equal(ser[lambda x: ["A", "B"]], ser.loc[["A", "B"]])
    tm.assert_series_equal(
        ser[lambda x: [True, False, True, True]], ser.iloc[[0, 2, 3]]
    )
def test_setitem_callable():
    # GH 12533: a callable key works for assignment as well.
    ser = pd.Series([1, 2, 3, 4], index=list("ABCD"))
    ser[lambda x: "A"] = -1
    expected = pd.Series([-1, 2, 3, 4], index=list("ABCD"))
    tm.assert_series_equal(ser, expected)
def test_setitem_other_callable():
    # GH 13299: a callable *value* (as opposed to a callable key) is
    # stored as-is, not invoked.
    inc = lambda x: x + 1
    ser = pd.Series([1, 2, -1, 4])
    ser[ser < 0] = inc
    tm.assert_series_equal(ser, pd.Series([1, 2, inc, 4]))
| StarcoderdataPython |
113399 | #!/usr/bin/python3
import hid
import traceback
# Maximum number of bytes requested per HID read; the TODO below notes this
# should eventually come from the report descriptor.
hid_max_pkt_size = 64

if __name__ == '__main__':
    import argparse
    import sys
    import binascii
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--descriptor', help='Print Descriptor', action='store_true')
    args = parser.parse_args()
    # NOTE(review): d_path is hard-coded to '' and no CLI option sets it, so
    # the "requested device" branches below can never trigger; presumably a
    # --device argument was planned -- confirm intended wiring.
    d_path = ''
    device = None
    devices = hid.enumerate()
    print(devices)
    if not d_path: # no hid device specified
        if not devices:
            print('No devices to read.')
            sys.exit()
        elif d_path and d_path not in [d['path'] for d in devices]:
            print('Requested device not found.')
            sys.exit()
        else:
            # List every enumerated device, then fall back to the first one.
            print('Available devices:')
            for d in devices:
                print('\t%s' % d['path'].decode('utf-8'))
                for k in sorted(d.keys()):
                    h = k.replace('_', ' ').capitalize()
                    v = d[k].decode('utf-8') if isinstance(d[k], bytes) else d[k]
                    print('\t\t%s: %s' % (h, v))
            device = devices[0]
            d_path = device['path'].decode('utf-8')
    print('Reading: %s' % d_path)
    d = hid.device()
    d.open(device['vendor_id'], device['product_id'])
    if args.descriptor:
        pass # TODO
    # Read packets forever, rendering each as spaced upper-case hex pairs
    # overwritten on a single terminal line (end='\r').
    while True:
        # TODO: set max packet size based on descriptor
        try:
            data = bytes(d.read(hid_max_pkt_size))
            dout = binascii.hexlify(data).upper()
            dout = b' '.join(dout[i:i+2] for i in range(0, len(dout), 2)).strip()
            #dout = ' '.join("{:02x}".format(c) for c in dout)
            print(dout.decode('utf-8'), end='\r')
        except OSError as e:
            print('%s: %s' % (type(e).__name__, e))
            sys.exit()
        except IOError as e:
            print('%s: %s' % (type(e).__name__, e))
            sys.exit()
        except Exception as e:
            # TODO: do something useful
            print(traceback.format_exc())
            sys.exit()
| StarcoderdataPython |
100829 | import collections.abc
from itertools import repeat
container_abcs = collections.abc
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4) | StarcoderdataPython |
6631686 | <gh_stars>10-100
import sys
import os
from setuptools import setup
import setuptools.command.build_py
from distutils.extension import Extension
# Metadata stamped into the built avb/__init__.py (see AddMetadata below).
PROJECT_METADATA = {
    "version": "0.1.0.dev2",
    "author": '<NAME>',
    "author_email": '<EMAIL>',
    "license": 'MIT',
}
# Appended (after .format(**PROJECT_METADATA)) to the built __init__.py.
METADATA_TEMPLATE = """
__version__ = "{version}"
__author__ = "{author}"
__author_email__ = "{author_email}"
__license__ = "{license}"
"""
sourcefiles = [
    "avb/_ext.pyx",
]
extensions =[]
# The C++ extension is optional: it is only cythonized when Cython is
# importable and PYAVB_BUILD_EXT is unset or a truthy integer.
try:
    from Cython.Build import cythonize
    if int(os.environ.get("PYAVB_BUILD_EXT", '1')):
        extensions = cythonize([Extension("avb._ext",
                                          sourcefiles,
                                          language="c++")])
except ImportError as e:
    print('unable to build optional cython extension')
class AddMetadata(setuptools.command.build_py.build_py):
    """Stamps PROJECT_METADATA into __init__ files.

    Runs the standard build_py step, then rewrites the built
    avb/__init__.py as the original source plus the formatted
    METADATA_TEMPLATE suffix (__version__/__author__/... dunders).
    """
    def run(self):
        setuptools.command.build_py.build_py.run(self)
        if self.dry_run:
            # Dry runs must not modify the build tree.
            return
        target_file = os.path.join(self.build_lib, 'avb', "__init__.py")
        source_file = os.path.join(os.path.dirname(__file__), 'avb', "__init__.py")
        # get the base data from the original file
        with open(source_file, 'r') as fi:
            src_data = fi.read()
        # write that + the suffix to the target file
        with open(target_file, 'w') as fo:
            fo.write(src_data)
            fo.write(METADATA_TEMPLATE.format(**PROJECT_METADATA))
# Package definition; PROJECT_METADATA supplies version/author/license,
# AddMetadata stamps them into the built package at build_py time.
setup(
    name='pyavb',
    description='A python module for the reading and writing Avid Bin Files files.',
    url='https://github.com/markreidvfx/pyavb',
    project_urls={
        'Source':
        'https://github.com/markreidvfx/pyavb',
        'Documentation':
        'http://pyavb.readthedocs.io',
        'Issues':
        'https://github.com/markreidvfx/pyavb/issues',
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Multimedia :: Video',
        'Topic :: Multimedia :: Video :: Display',
        'Topic :: Multimedia :: Video :: Non-Linear Editor',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: OS Independent',
        'Natural Language :: English',
    ],
    keywords='film tv editing editorial edit non-linear edl time',
    platforms='any',
    packages=[
        'avb',
    ],
    # Custom build step that appends the metadata dunders (see AddMetadata);
    # the extension list is empty when Cython is unavailable.
    cmdclass={'build_py': AddMetadata},
    ext_modules = extensions,
    extras_require= {'cython' : ['cython']},
    **PROJECT_METADATA
)
| StarcoderdataPython |
11305058 | import json
from functools import cmp_to_key
from mock import patch
from django.urls import reverse
from django.test import override_settings
from seahub.test_utils import BaseTestCase
from seahub.trusted_ip.models import TrustedIP
from seahub.api2.endpoints.admin.device_trusted_ip import cmp_ip
@override_settings(ENABLE_LIMIT_IPADDRESS=True)
class DeviceAccessibleIpSetting(BaseTestCase):
    """API tests for the admin device trusted-IP endpoint plus cmp_ip ordering."""

    def setUp(self):
        """Seed two trusted IPs and log in as an admin."""
        self.url = reverse('api-v2.1-admin-device-trusted-ip')
        self.test_ip = '192.168.127.12'
        TrustedIP.objects.get_or_create('127.0.0.1')
        TrustedIP.objects.get_or_create('1.1.1.1')
        self.login_as(self.admin)

    @patch('seahub.api2.permissions.IsProVersion.has_permission')
    def test_can_get(self, mock_IsProVersion):
        """GET lists the seeded trusted IPs (pro-version check mocked on)."""
        mock_IsProVersion.return_value= True
        resp = self.client.get(self.url)
        assert resp.status_code == 200
        json_resp = json.loads(resp.content)
        assert '127.0.0.1' in [x['ip'] for x in json_resp]

    @patch('seahub.api2.permissions.IsProVersion.has_permission')
    def test_no_permission(self, mock_IsProVersion):
        """An admin without the extra permission is rejected with 403."""
        self.logout()
        self.login_as(self.admin_no_other_permission)
        mock_IsProVersion.return_value= True
        resp = self.client.get(self.url)
        assert resp.status_code == 403

    @patch('seahub.api2.permissions.IsProVersion.has_permission')
    def test_can_post(self, mock_IsProVersion):
        """POST adds a new trusted IP which then appears in GET."""
        mock_IsProVersion.return_value= True
        resp = self.client.post(self.url, {'ipaddress': self.test_ip})
        assert resp.status_code == 201
        resp = self.client.get(self.url)
        json_resp = json.loads(resp.content)
        assert self.test_ip in [x['ip'] for x in json_resp]

    @patch('seahub.api2.permissions.IsProVersion.has_permission')
    def test_can_delete(self, mock_IsProVersion):
        """DELETE with a urlencoded body removes an IP from the trusted list."""
        mock_IsProVersion.return_value= True
        data = 'ipaddress=1.1.1.1'
        resp = self.client.delete(self.url, data, 'application/x-www-form-urlencoded')
        assert resp.status_code == 200
        resp = self.client.get(self.url)
        json_resp = json.loads(resp.content)
        assert '1.1.1.1' not in [x['ip'] for x in json_resp]

    def test_cmp_ip(self):
        """cmp_ip sorts IPs ascending; '*' segments order after concrete octets."""
        ip_list = [{'ip': '192.168.3.11'}, {'ip': '192.168.3.11'}, {'ip': '172.16.31.10'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == ip_list[::-1]
        ip_list = [{'ip': '192.168.3.11'}, {'ip': '192.*.1.1'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == ip_list
        ip_list = [{'ip': '192.*.1.1'}, {'ip': '192.168.3.11'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == ip_list[::-1]
        ip_list = [{'ip': '172.16.31.10'}, {'ip': '172.16.31.10'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == ip_list
        ip_list = [{'ip': '111.1.*.2'}, {'ip': '111.1.*.1'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == ip_list[::-1]
        ip_list = [{'ip': '111.1.*.2'}, {'ip': '111.2.*.1'}, {'ip': '111.1.*.2'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == [ip_list[0], ip_list[2], ip_list[1]]
        ip_list = [{'ip': '111.1.*.2'}, {'ip': '112.2.*.1'}, {'ip': '110.1.*.2'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == [ip_list[2], ip_list[0], ip_list[1]]
        ip_list = [{'ip': '111.1.*.2'}, {'ip': '111.1.*.*'}, {'ip': '111.*.*.2'}]
        new_ip_list = sorted(ip_list, key=cmp_to_key(cmp_ip))
        assert new_ip_list == [ip_list[0], ip_list[1], ip_list[2]]
| StarcoderdataPython |
5066329 | <filename>Develop/subgroup3_dev/movies/admin.py<gh_stars>10-100
from django.contrib import admin
# Register your models here.
# The admin site for these models is served at /admin/
# (local development: http://127.0.0.1:8000/admin/).  Log in with a
# superuser created via `manage.py createsuperuser`; development
# credentials were removed from this comment -- never commit credentials
# to source control.
from . import models
@admin.register(models.Actor)
class ActorAdmin(admin.ModelAdmin):
    """Admin configuration for Actor: one 'Basic Info' fieldset (name, weight)."""
    fieldsets = (
        (
            "Basic Info",
            {"fields": ("name", "weight")}
        ),
    )
@admin.register(models.Director)
class DirectorAdmin(admin.ModelAdmin):
    """Admin configuration for Director: one 'Basic Info' fieldset (name, weight)."""
    fieldsets = (
        (
            "Basic Info",
            {"fields": ("name", "weight")}
        ),
    )
@admin.register(models.Genre)
class GenreAdmin(admin.ModelAdmin):
    """Admin configuration for Genre: one 'Basic Info' fieldset (name, weight)."""
    fieldsets = (
        (
            "Basic Info",
            {"fields": ("name", "weight")}
        ),
    )
| StarcoderdataPython |
135933 | from django.db import models
from general_business.models import Organization
# Create your models here.
class Campaign(models.Model):
    """A marketing campaign, optionally owned by an Organization."""

    title = models.CharField(
        max_length = 300,
        verbose_name = "Campaign Title"
    )
    subtitle = models.CharField(
        max_length=300,
        verbose_name="Short Description"
    )
    # NOTE(review): auto_now refreshes the timestamp on *every* save, not
    # just on creation -- confirm 'uploaded_at' is meant as last-modified
    # (auto_now_add would capture the creation time instead).
    uploaded_at = models.DateTimeField(
        auto_now = True
    )
    # Optional owner; deleting the organization cascades to its campaigns.
    parent_organization = models.ForeignKey(
        to = Organization,
        on_delete = models.CASCADE,
        null = True,
        blank = True
    )

    def __str__(self):
        """Human-readable summary: title, subtitle and owning organization."""
        return f"{self.title}, {self.subtitle}, {self.parent_organization}"
# Create your models here.
class Brand(models.Model):
    """Brand identity assets and keywords for an Organization."""

    # Vector logo upload (optional).
    logo_svg = models.ImageField(
        verbose_name = "SVG file of logo",
        null = True,
        blank = True
    )
    mantra = models.CharField(
        max_length = 250,
        verbose_name = "Brand Mantra",
        null = True,
        blank = True
    )
    # Free-form JSON payload of brand colors (schema not enforced here).
    colors = models.JSONField(
        verbose_name = "Store brand colors in JSON format.",
        null = True,
        blank = True
    )
    logo_mockup = models.ImageField(
        verbose_name = "Enter a logo mockup image"
    )
    # Comma-separated keyword list stored as one long string.
    topic_keywords = models.CharField(
        verbose_name = "Enter keywords describing your brand, separated by commas.",
        max_length = 10000
    )
    # Required owner; deleting the organization deletes its brand.
    parent_organization = models.ForeignKey(
        to = Organization,
        on_delete = models.CASCADE
    )
class SocialMediaAccount(models.Model):
    """Credentials and metadata for a brand's social media presence."""

    social_medium = models.CharField(
        max_length = 300,
        verbose_name = "What is the social medium of this social media account?"
    )
    username = models.CharField(
        max_length = 300,
        verbose_name = "What is the username of this social media account?"
    )
    # NOTE(review): the password is stored as plain text in a CharField --
    # it should be encrypted or kept in a secrets store; confirm requirements
    # before shipping.
    password = models.CharField(
        max_length = 300,
        verbose_name = "What is the password of this social media account?"
    )
    purpose = models.CharField(
        max_length = 500,
        verbose_name = "What is the purpose of this social media account?",
    )
    # Optional owning brand; deleting the brand deletes its accounts.
    parent_brand = models.ForeignKey(
        to = Brand,
        on_delete = models.CASCADE,
        null = True,
        blank = True
    )
    page_url = models.URLField(
        verbose_name = "Enter social media page URL."
    )

    def __str__(self):
        """Human-readable summary: username and medium."""
        return f"{self.username}: {self.social_medium}"
8198666 | <reponame>Zhenye-Na/leetcode<gh_stars>1-10
#
# @lc app=leetcode id=927 lang=python3
#
# [927] Three Equal Parts
#
# https://leetcode.com/problems/three-equal-parts/description/
#
# algorithms
# Hard (34.82%)
# Likes: 547
# Dislikes: 92
# Total Accepted: 20.4K
# Total Submissions: 53K
# Testcase Example: '[1,0,1,0,1]'
#
# You are given an array arr which consists of only zeros and ones, divide the
# array into three non-empty parts such that all of these parts represent the
# same binary value.
#
# If it is possible, return any [i, j] with i + 1 < j, such that:
#
#
# arr[0], arr[1], ..., arr[i] is the first part,
# arr[i + 1], arr[i + 2], ..., arr[j - 1] is the second part, and
# arr[j], arr[j + 1], ..., arr[arr.length - 1] is the third part.
# All three parts have equal binary values.
#
#
# If it is not possible, return [-1, -1].
#
# Note that the entire part is used when considering what binary value it
# represents. For example, [1,1,0] represents 6 in decimal, not 3. Also,
# leading zeros are allowed, so [0,1,1] and [1,1] represent the same value.
#
#
# Example 1:
# Input: arr = [1,0,1,0,1]
# Output: [0,3]
# Example 2:
# Input: arr = [1,1,0,1,1]
# Output: [-1,-1]
# Example 3:
# Input: arr = [1,1,0,0,1]
# Output: [0,2]
#
#
# Constraints:
#
#
# 3 <= arr.length <= 3 * 10^4
# arr[i] is 0 or 1
#
#
#
# @lc code=start
class Solution:
    """LeetCode 927: split a 0/1 array into three parts with equal binary value."""

    def threeEqualParts(self, arr: List[int]) -> List[int]:
        """Return [i, j] splitting ``arr`` into three equal-valued binary parts,
        or [-1, -1] when no such split exists.

        Each part must hold exactly one third of the 1-bits.  The significant
        bit pattern is fixed by the array's suffix; the first two parts must
        reproduce it after skipping their leading zeros.
        """
        total_ones = sum(arr)
        if total_ones % 3 != 0:
            return [-1, -1]
        if total_ones == 0:
            # All zeros: every split is equal; return the first valid one.
            return [0, len(arr) - 1]

        # Walk backwards until one third of the 1-bits lie to the right of
        # ``idx``; that suffix is the canonical pattern each part must match.
        remaining = total_ones // 3
        idx = len(arr) - 1
        while idx > 0 and remaining > 0:
            if arr[idx] == 1:
                remaining -= 1
            idx -= 1
        pattern = arr[idx + 1:]

        answer = []
        cursor = 0
        # First part: skip leading zeros, then require the pattern.
        while cursor < len(arr) and arr[cursor] == 0:
            cursor += 1
        if not self.check(arr, cursor, pattern):
            return [-1, -1]
        answer.append(cursor + len(pattern) - 1)
        cursor += len(pattern)
        # Second part: same skip-zeros-then-match procedure.
        while cursor < len(arr) and arr[cursor] == 0:
            cursor += 1
        if not self.check(arr, cursor, pattern):
            return [-1, -1]
        answer.append(cursor + len(pattern))
        return answer

    def check(self, arr, pointer, digit):
        """Return True when ``digit`` occurs in ``arr`` starting at ``pointer``."""
        return arr[pointer: pointer + len(digit)] == digit
# @lc code=end
# 118/118 cases passed (352 ms)
# Your runtime beats 95.7 % of python3 submissions
# Your memory usage beats 47.31 % of python3 submissions (15.2 MB)
| StarcoderdataPython |
11375579 | <gh_stars>10-100
from submin.models import storage as models_storage
# Storage backend selected once at import time; the functions below are thin
# delegating wrappers around it.
storage = models_storage.get("hookjobs")


def jobs(repositorytype, repository, hooktype):
    """Return the queued jobs for a repository/hook as [(jobid, content), ...]."""
    return storage.jobs(repositorytype, repository, hooktype)


def queue(repositorytype, repository, hooktype, content):
    """Queue a new hook job carrying *content*."""
    return storage.queue(repositorytype, repository, hooktype, content)


def done(jobid):
    """Remove the job *jobid* from the queue."""
    return storage.done(jobid)


# Module docstring assigned late so it documents the contract that any
# backend returned by models_storage.get("hookjobs") must implement.
__doc__ = """
Storage contract
================
* jobs(repositorytype, repository, hooktype)
  Return a list of tuples [(jobid, content), ...] of jobs.
* queue(repositorytype, repository, hooktype, content)
  Queues a new job.
* done(jobid)
  Remove a job with id *jobid* from the queue.
"""
| StarcoderdataPython |
# Read a ", "-separated list of integers from stdin and print the values
# grouped by sign and by parity (zero counts as positive and even).
values = [int(token) for token in input().split(", ")]
buckets = {"Positive": [], "Negative": [], "Even": [], "Odd": []}
for value in values:
    buckets["Positive" if value >= 0 else "Negative"].append(str(value))
    buckets["Even" if value % 2 == 0 else "Odd"].append(str(value))
for label in ("Positive", "Negative", "Even", "Odd"):
    print(f"{label}:", ', '.join(buckets[label]))
3552035 | # Copyright 2019 Alorium Technology, LLC.
#
# This utility is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This utility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this file. If not, see
# <http://www.gnu.org/licenses/>.
"""
`hinj_pmod_converter`
================================================================================
Utility for converting libraries that use standard SPI interface to one of the
PMOD SPI interfaces on Alorium's Hinj board.
* Author(s): <NAME>
"""
# This script requires Python 3, so check for it and error out if not
import sys
# Fail fast on Python 2 interpreters; the rest of the script assumes Python 3.
if sys.version_info[0] < 3:
    raise Exception("\n\nMust be using Python 3\n\n")
# Other imports - only do after Python 3 check to avoid errors
import tkinter
from tkinter.filedialog import askdirectory
import os
from shutil import copyfile
def replaceHeaderReferences(filename):
    """Rewrite a copied library header so calls through the stock SPI/Wire/
    Serial singletons go through the Hinj PMOD objects instead
    (XLR8PmodSPI / XLR8PmodWire / XLR8PmodSerial).

    Include directives such as ``SPI.h`` are deliberately left untouched in
    headers; only the first object reference on each line is renamed.
    """
    print(filename)
    # Read as UTF-8, falling back to latin1 for vendor files with stray bytes.
    try:
        with open(filename, 'r') as handle:
            lines = handle.readlines()
    except UnicodeDecodeError:
        with open(filename, 'r', encoding='latin1') as handle:
            lines = handle.readlines()
    # SPI pass: skip include lines, rename the first 'SPI.' reference.
    for i in range(len(lines)):
        if 'SPI.h' in lines[i]:
            continue
        if 'SPI.' in lines[i]:
            lines[i] = lines[i].replace('SPI.', 'XLR8PmodSPI.', 1)
    # I2C (Wire) pass, same shape.
    for i in range(len(lines)):
        if 'Wire.h' in lines[i]:
            continue
        if 'Wire.' in lines[i]:
            lines[i] = lines[i].replace('Wire.', 'XLR8PmodWire.', 1)
    # Serial pass -- Serial needs no include in Arduino, so every reference
    # is renamed.
    for i in range(len(lines)):
        if 'Serial.' in lines[i]:
            lines[i] = lines[i].replace('Serial.', 'XLR8PmodSerial.', 1)
    with open(filename, 'w') as handle:
        handle.writelines(lines)
def replaceSourceReferences(filename):
    """Rewrite a copied Arduino source file to target the Hinj PMOD peripherals.

    Three passes rename the first SPI/Wire/Serial reference on each line (the
    SPI pass also converts raw SPDR/SPSR/SPCR register reads and writes into
    accessor calls on XLR8PmodSPI).  For every library include found, the
    include is swapped for its XLR8 equivalent and an address-pack include
    plus the matching XLR8 object declaration are inserted directly below it.
    Serial has no include, so its anchor is the first Serial reference.
    """
    print(filename)
    file = open(filename, 'r')
    try:
        lines = file.readlines()
    except UnicodeDecodeError:
        # Some vendor sources are not valid UTF-8; retry as latin1.
        file.close()
        file = open(filename, 'r', encoding='latin1')
        lines = file.readlines()
    file.close()
    serialFound = -1
    i2cFound = -1
    spiFound = -1
    # SPI pass: include, object references, then raw register access.
    for idx in range(len(lines)):
        if (lines[idx].find('SPI.h') != -1):
            print('SPI library include found on line ' + str(idx))
            lines[idx] = lines[idx].replace('SPI.h', 'XLR8SPI.h', 1)
            spiFound = idx
        elif (lines[idx].find('SPI.') != -1):
            lines[idx] = lines[idx].replace('SPI.', 'XLR8PmodSPI.', 1)
        elif (lines[idx].find('SPDR = ') != -1):
            lines[idx] = lines[idx].replace('SPDR = ', 'XLR8PmodSPI.writeSPDR(', 1)
            lines[idx] = lines[idx].replace(';', ');', 1)
        elif (lines[idx].find('SPDR') != -1):
            lines[idx] = lines[idx].replace('SPDR', 'XLR8PmodSPI.readSPDR()', 1)
        elif (lines[idx].find('SPSR = ') != -1):
            lines[idx] = lines[idx].replace('SPSR = ', 'XLR8PmodSPI.writeSPSR(', 1)
            lines[idx] = lines[idx].replace(';', ');', 1)
        elif (lines[idx].find('SPSR') != -1):
            lines[idx] = lines[idx].replace('SPSR', 'XLR8PmodSPI.readSPSR()', 1)
        elif (lines[idx].find('SPCR = ') != -1):
            lines[idx] = lines[idx].replace('SPCR = ', 'XLR8PmodSPI.writeSPCR(', 1)
            lines[idx] = lines[idx].replace(';', ');', 1)
        elif (lines[idx].find('SPCR') != -1):
            lines[idx] = lines[idx].replace('SPCR', 'XLR8PmodSPI.readSPCR()', 1)
    # I2C (Wire) pass.
    for idx in range(len(lines)):
        if (lines[idx].find('Wire.h') != -1):
            print('I2C library include found on line ' + str(idx))
            lines[idx] = lines[idx].replace('Wire.h', 'XLR8Wire.h', 1)
            i2cFound = idx
        elif (lines[idx].find('Wire.') != -1):
            lines[idx] = lines[idx].replace('Wire.', 'XLR8PmodWire.', 1)
    # Serial pass -- the anchor is the first Serial reference.
    for idx in range(len(lines)):
        if (lines[idx].find('Serial.') != -1):
            if (serialFound == -1):
                print('Serial library include found on line ' + str(idx))
                serialFound = idx
            lines[idx] = lines[idx].replace('Serial.', 'XLR8PmodSerial.', 1)
    # BUGFIX: apply the insertions from the highest anchor downward.
    # Previously the SPI pair was inserted first, which shifted every later
    # anchor by two lines, so the Wire/Serial declarations landed two lines
    # above their intended position (potentially in the middle of unrelated
    # code).  Inserting top-down from the end keeps all anchors valid.
    insertions = []
    if spiFound >= 0:
        insertions.append((spiFound,
                           'XLR8SPIClass XLR8PmodSPI(0xAC, 0xAD, 0xAE);\n'))
    if i2cFound >= 0:
        insertions.append((i2cFound,
                           'XLR8TwoWire XLR8PmodWire(0xE5, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4);\n'))
    if serialFound >= 0:
        insertions.append((serialFound,
                           'XLR8Serial XLR8PmodSerial(0xEB, 0xEA, 0xE7, 0xE8, 0xE9, 0xE6);\n'))
    for anchor, declaration in sorted(insertions, reverse=True):
        lines.insert(anchor + 1, '#include <XLR8HinjAddrPack.h>\n')
        lines.insert(anchor + 2, declaration)
    with open(filename, 'w') as f:
        for line in lines:
            f.write("%s" % line)
# --- Interactive entry point -------------------------------------------------
# Hide the Tk root window, then ask the user for the Arduino library
# directory to convert.
tkinter.Tk().withdraw()
directory = askdirectory(initialdir=os.path.expanduser('~/Documents/Arduino/libraries/'))
print(directory)
originalName = os.path.basename(directory)
outputDirectory = "HinjPMOD_"+originalName
if not os.path.exists(outputDirectory):
    print("Creating: " + outputDirectory)
    os.makedirs(outputDirectory)
    os.makedirs(outputDirectory+'/src')
else:
    print(outputDirectory+' already exists')
# Write a minimal Arduino library.properties manifest for the converted copy.
# NOTE(review): lpFile is never explicitly closed; it relies on interpreter
# exit to flush -- consider a with-block.
lpFile = open(outputDirectory+'/library.properties', 'w')
lpFile.write('name='+outputDirectory+"\n")
lpFile.write('version=0.0.1'+"\n")
lpFile.write('author=Alorium Technology PMOD Conversion Script'+"\n")
lpFile.write('maintainer=User'+"\n")
lpFile.write('sentence=Hinj PMOD library'+"\n")
lpFile.write('paragraph=Library generated by converting '+originalName+' for use with Hinj PMOD'+"\n")
lpFile.write('architectures=avr'+"\n")
lpFile.write('category=Uncategorized'+"\n")
# Copy every .h/.cpp file into <out>/src with a HinjPMOD_ prefix and rewrite
# its SPI/Wire/Serial references in place.
fileList = []
for r, d, f in os.walk(directory):
    for file in f:
        if (file.endswith(".h")):
            fileList.append(file)
            print("Header File: " + os.path.join(r, file))
            copyfile(os.path.join(r, file), outputDirectory+'/src/HinjPMOD_'+file)
            replaceHeaderReferences(outputDirectory+'/src/HinjPMOD_'+file)
        elif (file.endswith(".cpp")):
            fileList.append(file)
            print("Source File: " + os.path.join(r, file))
            copyfile(os.path.join(r, file), outputDirectory+'/src/HinjPMOD_'+file)
            replaceSourceReferences(outputDirectory+'/src/HinjPMOD_'+file)
    #for dir in d:
    #print("Dir: " + os.path.join(r, dir))
print(fileList)
| StarcoderdataPython |
6546840 | # -*- coding: utf-8 -*-
"""
hunk.server
~~~~~~~~~~~
Provides mock server that has JSON API.
"""
import argparse
import os
from flask import Flask, request, make_response, abort
import requests
from .resource import load_resource
from .production import ProductionEnvironment
# HTTP methods accepted by every route; dispatch is method-agnostic.
METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']
app = Flask(__name__)
# Mutable runtime configuration, overridden by set_config() from the CLI.
config = {
    'RESOURCE_ROOT': os.getcwd(),
    'SERVER_HOSTNAME': 'localhost',
    'SERVER_PORT': 8080
}
# Proxy configuration for request paths that should hit the real backend.
prod_env = ProductionEnvironment()


def get_response_from_proxy(method, url, data, headers):
    """Forward the request to *url* via requests and mirror the upstream reply."""
    rv = getattr(requests, method)(url, data=data, headers=headers)
    response = make_response(rv.text, rv.status_code)
    # Replace Flask's default headers with the upstream headers verbatim.
    response.headers.clear()
    for k, v in rv.headers.items():
        response.headers.add_header(k, v)
    return response


def get_response_from_resource(resource):
    """Build a JSON response from a loaded mock resource (json/status/headers)."""
    response = make_response(resource.json, resource.status_code)
    response.headers['Content-Type'] = 'application/json'
    for key, value in resource.headers.items():
        response.headers[key] = value
    return response


@app.after_request
def after_request(response):
    """Allow cross-origin access to every mocked endpoint."""
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response


@app.route('/', methods=METHODS)
def index():
    """The root path is served like any other resource path."""
    return resources('/')


@app.route('/<path:path>', methods=METHODS)
def resources(path):
    """Serve a mock resource from disk, or proxy paths registered for production."""
    method = request.method.lower()
    if request.path in prod_env.routes:
        url = prod_env.build_url(path)
        return get_response_from_proxy(
            method, url, request.data, request.headers)
    # Mock resources live under <RESOURCE_ROOT>/<http-method>/<path segments>.
    rpath = os.path.join(
        config['RESOURCE_ROOT'], method, *path.rstrip('/').split('/'))
    r = load_resource(rpath)
    if r:
        return get_response_from_resource(r)
    else:
        abort(404)


def set_config(root, hostname, port):
    """Override the module-level server configuration in place."""
    config['RESOURCE_ROOT'] = root
    config['SERVER_HOSTNAME'] = hostname
    config['SERVER_PORT'] = port


def set_production_environment(dirpath, py_filename):
    """Load the production proxy rules from *py_filename* under *dirpath*."""
    prod_env.load(dirpath, py_filename)


def main():
    """CLI entry point: parse arguments, configure, then run the Flask server."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--root', default=os.getcwd())
    parser.add_argument('-n', '--hostname', default='localhost')
    parser.add_argument('-p', '--port', type=int, default=8080)
    parser.add_argument('-c', '--production', default='production_conf.py')
    parser.add_argument('-d', '--debug', action='store_true')
    args = parser.parse_args()
    set_config(args.root, args.hostname, args.port)
    set_production_environment(args.root, args.production)
    app.debug = args.debug or False
    app.run(config['SERVER_HOSTNAME'], config['SERVER_PORT'])


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11343438 | # Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from asic_la.sharded_probability_function.utils import (
permute,
relative_permutation,
invert_permutation,
remove_and_reduce,
send_to_left_side,
send_to_right_side,
)
from asic_la.sharded_probability_function.sharded_probability_function import (
ShardedProbabilityFunction,
)
from asic_la.sharded_probability_function.sharded_discrete_probability_function import (
ShardedDiscretedProbabilityFunction,
)
| StarcoderdataPython |
1982419 | import mock
import transaction
import zeit.cms.celery
import zeit.workflow.testing
# Dummy task whose decorator pins it to the 'publish_homepage' queue.
@zeit.cms.celery.task(queuename='publish_homepage')
def hp_task():
    """Task with queue homepage."""


# Dummy task registered without an explicit queue, so routing falls back
# to the default.
@zeit.cms.celery.task
def no_default_queue():
    """Task without a default queue."""
class RouteTaskTests(zeit.cms.testing.FunctionalTestCase):
    """Functional tests for queue routing of celery tasks."""

    layer = zeit.workflow.testing.CELERY_LAYER

    def get_queue_name(self, task, **kw):
        """Dispatch *task* and return the queue name from its 'task-sent' event."""
        result = task.apply_async(**kw)
        publish = 'celery.events.dispatcher.EventDispatcher.publish'
        # Patch the event dispatcher so the 'task-sent' event (which carries
        # the resolved queue name) can be inspected after the transaction
        # commit flushes the task to the broker.
        with mock.patch(publish) as publish:
            transaction.commit()
            result.get()
        assert 'task-sent' == publish.call_args[0][0]
        return publish.call_args[0][1]['queue']

    def test_route_task__returns_default_if_none_given(self):
        assert 'default' == self.get_queue_name(no_default_queue)

    def test_route_task__returns_queue_depending_on_name_set_on_task(self):
        assert 'publish_homepage' == self.get_queue_name(hp_task)

    def test_route_task__returns_queue_depending_on_name_set_on_call(self):
        assert 'publish_highprio' == self.get_queue_name(
            no_default_queue, queuename='publish_highprio')

    def test_route_task__priorizes_call_over_task_setting(self):
        assert 'publish_lowprio' == self.get_queue_name(
            hp_task, queuename='publish_lowprio')
| StarcoderdataPython |
6598571 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2021 by <NAME>
import re
import ast
import random
from tqdm import tqdm
from unicodedata import normalize as nl
def cleanning_text(text):
    """Normalise and scrub one raw (Vietnamese) text line for NLP training.

    Pipeline, in order: NFKC unicode normalisation, emoji/pictograph/CJK
    removal, URL removal, markup/snake_case-token removal, trimming of
    leading punctuation and of trailing punctuation (except sentence-final
    .!?), spacing-out of punctuation marks, whitespace collapsing, and
    lower-casing.  (An upstream number-normalisation pass existed here but
    is currently disabled.)
    """
    # Unicode-normalise so composed/compatibility characters compare equal.
    text = nl('NFKC', text)
    # Strip emoji, symbols, pictographs and CJK code points.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002500-\U00002BEF"  # chinese char
                               u"\U00002702-\U000027B0"
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               u"\U0001f926-\U0001f937"
                               u"\U00010000-\U0010ffff"
                               u"\u2640-\u2642"
                               u"\u2600-\u2B55"
                               u"\u200d"
                               u"\u23cf"
                               u"\u23e9"
                               u"\u231a"
                               u"\ufe0f"  # dingbats
                               u"\u3030"
                               u"\u4e00-\u9fff"  # chinese,japan,korean word
                               "]+", flags=re.UNICODE)
    text = emoji_pattern.sub(r" ", text)
    # Drop URLs (http/https/www prefixes plus bare domain-like tokens).
    text = re.sub(r'(http|https|www)?\s*\S+\.\b(vn|com)?\S+', ' ', text)
    # Drop [bracketed] / <tagged> spans and snake_case tokens.
    text = re.sub(r'\[.*?\]|<.*?>|[\w]+_[\w]+', ' ', text)
    # Trim leading punctuation entirely, and trailing punctuation except .!?
    # (the class whitelists ASCII letters, digits and Vietnamese letters).
    text = re.sub(r'^[^a-zA-Z__ÀÁÂÃÈÉÊÌÍÒÓÔÕÙÚĂĐĨŨƠàáâãèéêìíòóôõùúăđĩũơƯĂẠẢẤẦẨẪẬẮẰẲẴẶẸẺẼỀỀỂưăạảấầẩẫậắằẳẵặẹẻẽềềểỄỆỈỊỌỎỐỒỔỖỘỚỜỞỠỢỤỦỨỪễệỉịọỏốồổỗộớờởỡợụủứừỬỮỰỲỴÝỶỸửữựỳỵỷỹ0-9]+|[^a-zA-Z__ÀÁÂÃÈÉÊÌÍÒÓÔÕÙÚĂĐĨŨƠàáâãèéêìíòóôõùúăđĩũơƯĂẠẢẤẦẨẪẬẮẰẲẴẶẸẺẼỀỀỂưăạảấầẩẫậắằẳẵặẹẻẽềềểỄỆỈỊỌỎỐỒỔỖỘỚỜỞỠỢỤỦỨỪễệỉịọỏốồổỗộớờởỡợụủứừỬỮỰỲỴÝỶỸửữựỳỵỷỹ0-9.!?]+$', '', text)
    # Space out runs of punctuation so each mark becomes its own token.
    for run, spaced in (
            (r"\!{1,}", " ! "),
            (r"\?{1,}", " ? "),
            (r"\.{1,}", " . "),
            (r"\,{1,}", " , "),
            (r"\({1,}", " ( "),
            (r"\){1,}", " ) "),
            (r"\-{1,}", " - "),
            (r"\~{1,}", " ~ "),
    ):
        text = re.sub(run, spaced, text)
    # Collapse whitespace runs, trim and lower-case.
    text = re.sub(r'\s{2,}', ' ', text)
    return text.strip().lower()
def processing_data(data_path, train_path, eval_path, pct=0.01, mode='normal'):
    """Clean a raw text corpus and split it into train/eval files.

    Args:
        data_path: input file, one document per line. With ``mode='fb'`` each
            line is a Python-literal dict holding the text under ``'content'``.
        train_path: output path for the training split (one text per line).
        eval_path: output path for the evaluation split (one text per line).
        pct: fraction of the cleaned texts held out for evaluation.
        mode: ``'fb'`` for the dict-per-line format, anything else = plain text.

    Notes:
        * Texts with fewer than 3 space-separated tokens are dropped.
        * The train split is ``set(all) - set(eval)``, so duplicate lines are
          collapsed and train ordering is arbitrary (behavior kept from the
          original implementation).
    """
    # 'r' instead of 'r+': the input is only read, and read-only inputs
    # should not fail to open.
    with open(data_path, 'r', encoding='utf-8') as rf:
        lines = rf.readlines()
    print(f"The numbers of lines text: {len(lines)}")
    outtexts = []
    for line in tqdm(lines):
        if mode == 'fb':
            # Each line is a repr'd dict, e.g. {'content': '...'}.
            record = ast.literal_eval(line)
            text = record.get('content', '')
        else:
            text = line
        if len(text.split(' ')) >= 3:
            outtexts.append(cleanning_text(text))
    number_samples = len(outtexts)
    k = int(pct * number_samples)
    evaltexts = random.sample(outtexts, k=k)
    traintexts = list(set(outtexts) - set(evaltexts))
    print(f"Length train: {len(traintexts)} | eval: {len(evaltexts)}")
    with open(eval_path, 'w', encoding='utf-8') as wf:
        for text in tqdm(evaltexts):
            # write(), not writelines(): a single string, not a sequence.
            wf.write(text + '\n')
    with open(train_path, 'w', encoding='utf-8') as wf:
        for text in tqdm(traintexts):
            wf.write(text + '\n')
# Input corpus and train/eval output locations for the 2021 wiki dump
# (the cleaning regexes above target Vietnamese text).
data_path = 'wiki2021.txt'
train_path = 'co-dataset/wiki2021-train.txt'
eval_path = 'co-dataset/wiki2021-eval.txt'
# Example invocation kept for reference: hold out 0.1% of lines for eval.
# processing_data(data_path, train_path, eval_path, pct=0.001, mode='normal')
3460863 | <filename>main.py<gh_stars>0
import yfinance as yf
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
from hashlib import sha256
# Portfolio tickers; their concatenation determines the cache-file name,
# so changing or reordering the list forces a fresh download.
stonks = ['ACN', 'MSFT', 'DIS', 'V', 'NVDA', 'PEP', 'AAPL', 'GOOGL', 'VOT', 'DGRO','SPHD','VWO', 'VEA', 'VCLT', 'SPLB', 'IGLB']
unique_filename = sha256(''.join(stonks).encode('utf-8')).hexdigest() # create a unique filename based on above unique stonks!
print(unique_filename)
try:
    coll = pd.read_pickle(unique_filename) # Read from file if it exists
    print("Previous stonks file found. Using it.")
except FileNotFoundError:
    print('Stonks pickle not found. Creating a new one based on your stonks')
    # 5 years of daily data, one column group per ticker.
    coll = yf.download(stonks, start="2015-10-01", end="2020-10-01", interval='1d', group_by='tickers') # get data
    coll.to_pickle(unique_filename) # Save this to file
# Clean up columns
# Keep only the Close column of each ticker by dropping everything else at
# level 1 of the (ticker, field) column MultiIndex.
# NOTE(review): 'Dividends'/'Stock Splits' only exist when downloaded with
# actions=True — confirm they are present, otherwise drop() raises KeyError.
to_drop = ['Open', 'High', 'Low', 'Volume', 'Dividends', 'Stock Splits', 'Adj Close']
price_close = coll.drop(to_drop, level=1, axis=1)
# Describe it
print(price_close.describe())
# Calculate returns
returns = price_close.pct_change(axis=0,fill_method='bfill')
print(returns.describe())
# Calculate corr for both prices and returns
# NOTE(review): prices_corr is computed but never displayed.
prices_corr = price_close.corr()
returns_corr = returns.corr()
# Since it makes sense to show corr for returns, here you go
# https://quantdare.com/correlation-prices-returns/
# https://quant.stackexchange.com/questions/489/correlation-between-prices-or-returns
sb.heatmap(returns_corr, annot=True)
plt.show()
class Student:
    """A student record with name, major, and GPA."""

    # Honor-roll GPA cutoff. 8.5 suggests a 10-point scale — unconfirmed.
    HONOR_ROLL_GPA = 8.5

    def __init__(self, name, major, gpa):
        self.name = name
        self.major = major
        self.gpa = gpa

    def on_honor_roll(self):
        """Return True if the student's GPA meets the honor-roll cutoff."""
        # The comparison already yields a bool; no need for an if/else that
        # returns True/False explicitly.
        return self.gpa >= self.HONOR_ROLL_GPA
9790122 | <reponame>bartezza/hydrafw<gh_stars>0
import time
from pynfcreader.devices.hydra_nfc import HydraNFC
from pynfcreader.sessions.iso14443.iso14443a import Iso14443ASession
from pynfcreader.tools import utils
import pyHydrabus
import logging
# n=pyHydrabus.NFC('/dev/hydrabus')
# >>> # Set mode to ISO 14443A
# >>> n.mode = pyHydrabus.NFC.MODE_ISO_14443A
# >>> # Set radio ON
# >>> n.rf = 1
# Root-logger setup with default handler/format at INFO level;
# send_command() below reports failures via logging.error.
logging.basicConfig()
logging.root.setLevel(logging.INFO)
def comment_data(msg, data):
    """Print *msg*, then *data* rendered by utils.get_pretty_print_block,
    one tab-indented line per rendered row.

    NOTE(review): the removed commented-out lines referenced ``self._logger``,
    so this looks lifted from a method; it now prints instead of logging.
    """
    print(msg)
    for line in utils.get_pretty_print_block(data):
        print("\t" + line)
# nfc = HydraNFC(port="/dev/ttyACM0", debug=False)
# hn = Iso14443ASession(drv=hydra_nfc, block_size=120)
# nfc.connect()
# # nfc.set_mode_iso14443A()
# nfc.set_mode_iso14443B()
# nfc.field_on()
def send_command(cmd, name, crc=1, verbose: bool = True, raise_exc: bool = True):
    """Send *cmd* over the NFC interface and return the tag's response.

    On an empty response: raise Exception when *raise_exc* is set,
    otherwise log an error and return None. When *verbose* is set, a
    successful response is pretty-printed under *name*.
    """
    response = nfc.write(cmd, crc=crc)
    if response:
        if verbose:
            comment_data(f"{name}:", response)
        return response
    message = f"Command {name} failed"
    if raise_exc:
        raise Exception(message)
    logging.error(message)
    return None
# Open the Hydrabus NFC interface on its USB CDC serial port.
nfc = pyHydrabus.NFC("/dev/ttyACM0")
try:
    if 0:  # disabled branch: ISO 14443-A anticollision probe
        # nfc.mode = pyHydrabus.NFC.MODE_ISO_14443B
        nfc.mode = pyHydrabus.NFC.MODE_ISO_14443A
        nfc.rf = 1
        # REQA is a short frame: only 7 bits of 0x26 are transmitted.
        resp = nfc.write_bits(b'\x26', 7)
        if not resp:
            raise Exception("REQ A failure")
        comment_data("ATQA:", resp)
        # Anticollision frame, sent without CRC (second argument 0).
        resp = nfc.write(b'\x93\x20', 0)
        if not resp:
            raise Exception("Anticol failure")
        comment_data("Anticol:", resp)
    else:  # active branch: ISO 14443-B session (SRIX-style tag commands)
        nfc.mode = pyHydrabus.NFC.MODE_ISO_14443B
        # nfc.mode = pyHydrabus.NFC.MODE_ISO_14443A
        nfc.rf = 1
        # srix initiate
        # return chip id
        resp = send_command(b'\x06\x00', "initiate", raise_exc=False)
        if resp is not None:
            # First response byte identifies the chip; echoed back in SELECT.
            chip_id = resp[0:1]
            # print(chip_id)
            # srix select
            resp = send_command(b"\x0E" + chip_id, "select")
            # srix get_uid
            resp = send_command(b"\x0B", "GET_UID")
            # first should be 0xD0
            # srix read block
            if 0:  # disabled: dump the whole 128-block EEPROM
                for i in range(128):
                    block = i
                    resp = send_command(b"\x08" + bytes([block]), "READ_BLOCK", verbose=False)
                    comment_data(f"Block {block}:", resp)
        if resp is None:
            # ASK CTS
            # ASK_REQT 0x10
            # Fallback probe for ASK CTS tags when SRIX initiate failed.
            resp = send_command(b'\x10', "initiate", raise_exc=False)
            if resp is not None:
                print("ASK CTS INITIATED")
    #hn.field_off()
    #time.sleep(0.1)
    #hn.field_on()
    #hn.polling()
    # hn.send_apdu("00 a4 04 00 0E 32 50 41 59 2E 53 59 53 2E 44 44 46 30 31 00")
    # hn.send_apdu("00 a4 04 00 07 A0 00 00 00 42 10 10 00")
    # hn.send_apdu("00 a4 04 00 07 A0 00 00 00 04 10 10 00")
    # hn.send_apdu("00 a4 04 00 07 A0 00 00 00 03 10 10 00")
    # hn.send_apdu("00 a4 04 00 05 A0 00 00 00 03 00")
finally:
    # Always switch the RF field off, even on errors.
    # nfc.field_off()
    nfc.rf = 0
4986698 | <reponame>saberworks/massassi-django<gh_stars>0
from django import forms
class NewsSearchForm(forms.Form):
    """Keyword search form for the news archive."""
    # CSS class Django adds to required rows when rendering the form.
    required_css_class = 'required'
    # Free-text keywords; leading/trailing whitespace is stripped.
    terms = forms.CharField(
        label="Search For",
        strip=True,
        help_text="enter keywords to search for",
        required=True,
    )
8024173 | <reponame>n0npax/lime-comb
import builtins
import tempfile
from lime_comb.auth.google import get_anon_cred
from lime_comb.main import *
from .conftest import *
class TestHelperFunctions:
    """Unit tests for the CLI helper functions (arg parsing, prompting)."""
    def test_parse_common_version(self, capsys, mocker):
        # --version should print a version banner and call sys.exit.
        mocker.patch.object(sys, "exit")
        base_parser(["--version"])
        assert sys.exit.called
        captured = capsys.readouterr()
        assert captured.out.startswith("version")
    def test_get_recipients(self, mocker):
        # With no --to flag, recipients are read interactively from stdin.
        _email = "<EMAIL>"
        mocker.patch.object(builtins, "input", return_value=_email)
        args, _, _, _, _ = base_parser(["e"])
        recipients = get_recipients(args)
        assert recipients == [_email]
class TestCommandObjects:
    """End-to-end tests for the config / encrypt / decrypt / key subcommands.

    Relies on fixtures from .conftest (mocked GPG keys, stubbed GCP auth,
    clipboard stub); each test drives the real base_parser + *_exec path.
    """
    @pytest.mark.parametrize(
        "action,name,value,expected",
        [
            ("list", None, None, "email"),
            ("set", "email", "<EMAIL>", "<EMAIL>"),
            ("set", "email", "invalid_email", ""),
            ("set", "invalid_property", "<EMAIL>", ""),
            ("get", "invalid_property", "dupa.8", ""),
            ("get", "email", None, ""),
        ],
    )
    def test_conf_cmd(
        self,
        action,
        name,
        value,
        expected,
        mocker,
        capsys,
        mocked_api,
        web_login,
        pyperclip_copy,
        oauth_gcp_conf,
    ):
        # "expected" must appear as a substring of the command's output;
        # invalid names/values are expected to produce no match ("" matches).
        args, _, _, _, c_cmd = base_parser(["config", action, name, value])
        output = conf_exec(args, c_cmd)
        assert expected in output
    def test_enc_cmd_plain_text_msg(
        self,
        mocker,
        capsys,
        mocked_api,
        mocked_gpg_key,
        web_login,
        pyperclip_copy,
        oauth_gcp_conf,
        email,
    ):
        # Two -m messages without --mm: expect two separate PGP blocks.
        args, _, e_cmd, _, _ = base_parser(
            ["e", "-t", email, "-m", "test1", "-m", "test2"]
        )
        output = enc_exec(args, e_cmd)
        assert output.startswith("-----BEGIN PGP MESSAGE---")
        assert output.count("-----BEGIN PGP MESSAGE---") == 2
    def test_enc_cmd_plain_test_msg_merged(
        self,
        mocker,
        capsys,
        mocked_api,
        mocked_gpg_key,
        web_login,
        pyperclip_copy,
        oauth_gcp_conf,
        email,
    ):
        # --mm merges both messages into a single PGP block.
        args, _, e_cmd, _, _ = base_parser(
            ["e", "-t", email, "-m", "test1", "-m", "test2", "--mm"]
        )
        output = enc_exec(args, e_cmd)
        assert output.startswith("-----BEGIN PGP MESSAGE---")
        assert output.count("-----BEGIN PGP MESSAGE---") == 1
    def test_enc_cmd_file_msg(
        self,
        mocker,
        mocked_api,
        mocked_gpg_key,
        web_login,
        pyperclip_copy,
        temp_file,
        oauth_gcp_conf,
        email,
    ):
        # Encrypting from a file (-f) should also yield a PGP block.
        args, _, e_cmd, _, _ = base_parser(["e", "-t", email, "-f", temp_file.name])
        output = enc_exec(args, e_cmd)
        assert output.startswith("-----BEGIN PGP MESSAGE---")
    def test_dec_cmd(
        self,
        mocker,
        existing_config,
        config_file,
        mocked_api,
        mocked_gpg_key,
        web_login,
        pyperclip_copy,
        oauth_gcp_conf,
        email,
        uuid,
        keypair,
    ):
        # Round-trip: encrypt a random message, then decrypt it back.
        base_test_message = uuid
        args, _, e_cmd, _, _ = base_parser(["e", "-t", email, "-m", base_test_message])
        enc_msg = enc_exec(args, e_cmd)
        args, _, _, d_cmd, _ = base_parser(["d", "-m", enc_msg])
        dec_msg = dec_exec(args, d_cmd)
        assert dec_msg == base_test_message
    @pytest.mark.parametrize(
        "action,action_arg",
        [
            ("generate", None),
            ("delete", "key_id"),
            ("list-pub", None),
            ("list-priv", None),
            # ("push", None),
            # ("pull", "email"),
            # ("pull", None), # FIXME
        ],
    )
    def test_key_cmd(
        self,
        web_login,
        mocker,
        action,
        action_arg,
        mocked_gpg_key,
        email,
        mocked_api,
        keypair,
        pyperclip_copy,
        oauth_gcp_conf,
    ):
        mocker.patch.object(
            lime_comb.auth.google, "get_cred", return_value=get_anon_cred()
        )
        # The parametrized placeholders are resolved to fixture values here.
        if action_arg == "key_id":
            action_arg = keypair
        if action_arg == "email":
            action_arg = email
        # NOTE(review): output is unused — smoke test only, no assertion.
        args, k_cmd, _, _, _ = base_parser(["k", action, action_arg])
        output = keys_exec(args, k_cmd)
def test_main(mocker, capsys):
    """--help should print usage text and exit via sys.exit."""
    mocker.patch.object(sys, "exit")
    main(["--help"])
    assert sys.exit.called
    captured = capsys.readouterr()
    assert "--help" in captured.out
| StarcoderdataPython |
8138877 | <gh_stars>0
from datetime import datetime
from flask import Flask, render_template
import re
import sys
from simple_vbb.vbb import Vbb, DummyVbb, AccessKeyMissingException
app = Flask(__name__)
# Maps a station search string to its VBB external id (lives for the
# lifetime of the process; never invalidated).
str_to_ext_id_cache = {}
def resolve_station(search_str):
    """Resolve a station search string to its VBB external id, with caching."""
    try:
        return str_to_ext_id_cache[search_str]
    except KeyError:
        pass
    ext_id = vbb.get_station_ext_id(search_str)
    str_to_ext_id_cache[search_str] = ext_id
    return ext_id
class TripsViewModel:
    """Massages raw VBB trip dictionaries in place for template rendering.

    Mutates the given trip/leg dicts: cleans station names, truncates
    "HH:MM:SS" times to "HH:MM", attaches per-station delays (minutes, or
    None without real-time data) and reformats the ISO-8601 duration.
    """

    # Underground-line suffix in station names, e.g. " U2" in "Stadtmitte U2".
    _U_LINE_RE = re.compile(r" U\d")
    # ISO-8601 duration as emitted by the VBB API, e.g. "PT1H2M" / "PT45M".
    _DURATION_RE = re.compile(r"PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?")

    def __init__(self, trips):
        self.trips = trips
        self.augment_trips()

    def prepare_station_names(self, leg):
        """Removes clutter from the station names, such as "(Bln)", "[U2]" etc."""
        def clean(name):
            for junk in (" (Bln)", " (Berlin)", " Bhf", "[", "]"):
                name = name.replace(junk, "")
            return self._U_LINE_RE.sub("", name).strip()
        leg["Origin"]["name"] = clean(leg["Origin"]["name"])
        leg["Destination"]["name"] = clean(leg["Destination"]["name"])

    def prepare_times(self, leg):
        """Truncate "HH:MM:SS" departure/arrival times to "HH:MM"."""
        leg["Origin"]["time"] = leg["Origin"]["time"][:-3]
        leg["Destination"]["time"] = leg["Destination"]["time"][:-3]

    def prepare_delay_info(self, leg):
        """Store the real-time delay in minutes (or None) under "delay"."""
        def delay_minutes(station):
            info = leg[station]
            if "rtTime" not in info:
                return None  # no real-time prediction for this stop
            planned = datetime.strptime(
                "%s %s" % (info["date"], info["time"]), "%Y-%m-%d %H:%M:%S")
            actual = datetime.strptime(
                "%s %s" % (info["rtDate"], info["rtTime"]), "%Y-%m-%d %H:%M:%S")
            return (actual - planned).total_seconds() / 60
        leg["Origin"]["delay"] = delay_minutes("Origin")
        leg["Destination"]["delay"] = delay_minutes("Destination")

    def prepare_duration(self, trip):
        """Reformat "PT1H2M" to "1:02" (hour-only "PT2H" becomes "2:00").

        Fixes a crash in the original, which called index("M") and raised
        ValueError for durations with an hours but no minutes component.
        """
        match = self._DURATION_RE.fullmatch(trip["duration"])
        if match is None:
            return  # unexpected format (e.g. multi-day "P1DT..."): leave as-is
        hours = int(match.group(1) or 0)
        minutes = int(match.group(2) or 0)  # seconds (group 3) are ignored
        trip["duration"] = "%d:%02d" % (hours, minutes)

    def augment_trips(self):
        """Run every preparation step over all legs of all trips (in place)."""
        for trip in self.trips:
            self.prepare_duration(trip)
            for leg in trip["LegList"]["Leg"]:
                # Delay must be computed before prepare_times truncates "time".
                self.prepare_delay_info(leg)
                self.prepare_station_names(leg)
                self.prepare_times(leg)

    def __iter__(self):
        return iter(self.trips)
@app.route("/<from_station>-to-<to_station>")
def fromto(from_station=None, to_station=None):
    """Render trips between two stations, e.g. /alexanderplatz-to-zoo."""
    from_id = resolve_station(from_station)
    to_id = resolve_station(to_station)
    trips = TripsViewModel(vbb.get_trip(from_id, to_id))
    return render_template("trips.html", trips=trips, from_station=from_station.title(), to_station=to_station.title())
@app.route("/<from_station>-to-<to_station>-via-<via_station>")
def fromtovia(from_station=None, to_station=None, via_station=None):
    """Render trips between two stations routed through a via station.

    NOTE(review): via_station is used for routing but not passed to the
    template, so the rendered header shows only origin and destination.
    """
    from_id = resolve_station(from_station)
    to_id = resolve_station(to_station)
    via_id = resolve_station(via_station)
    trips = TripsViewModel(vbb.get_trip_via(from_id, to_id, via_id))
    return render_template("trips.html", trips=trips, from_station=from_station.title(), to_station=to_station.title())
if __name__ == "__main__":
    try:
        vbb = Vbb()
        # vbb = DummyVbb()
    except AccessKeyMissingException:
        print("Missing access_key.txt. Exiting.")
        sys.exit()
    # NOTE(review): binds to all interfaces — fine for a LAN display,
    # review before exposing publicly.
    app.run(host='0.0.0.0')
| StarcoderdataPython |
8035414 | <reponame>dmontoya1/cajas
# Generated by Django 2.0.9 on 2019-07-03 19:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the self-referential Partner.buyer_unit_partner FK.

    verbose_name is Spanish, roughly: "Partner the last unit was sold to".
    """
    dependencies = [
        ('users', '0012_auto_20190611_2204'),
    ]
    operations = [
        migrations.AddField(
            model_name='partner',
            name='buyer_unit_partner',
            # SET_NULL keeps partners when the referenced partner is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='related_unit_buyer', to='users.Partner', verbose_name='Socio al que se le vendio la ultima unidad'),
        ),
    ]
| StarcoderdataPython |
81341 |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'leftplusminusnonassocadvdisadvadv dice disadv div minus newline number plus space star tabcommand : roll_list\n | mod_list\n |\n roll_list : roll\n | roll roll_list\n roll : number dice mod_list\n | number dice\n | dice mod_list\n | dice\n | number\n | number mod_list\n mod_list : mod\n | mod mod_list\n mod : plus number\n | minus number\n | star number\n | div number\n | adv\n | disadv\n '
_lr_action_items = {'$end':([0,1,2,3,4,5,6,7,12,13,14,15,16,17,18,19,20,21,22,23,],[-3,0,-1,-2,-4,-12,-10,-9,-18,-19,-5,-13,-7,-11,-8,-14,-15,-16,-17,-6,]),'number':([0,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19,20,21,22,23,],[6,6,-12,-10,-9,19,20,21,22,-18,-19,-13,-7,-11,-8,-14,-15,-16,-17,-6,]),'dice':([0,4,5,6,7,12,13,15,16,17,18,19,20,21,22,23,],[7,7,-12,16,-9,-18,-19,-13,-7,-11,-8,-14,-15,-16,-17,-6,]),'plus':([0,5,6,7,12,13,16,19,20,21,22,],[8,8,8,8,-18,-19,8,-14,-15,-16,-17,]),'minus':([0,5,6,7,12,13,16,19,20,21,22,],[9,9,9,9,-18,-19,9,-14,-15,-16,-17,]),'star':([0,5,6,7,12,13,16,19,20,21,22,],[10,10,10,10,-18,-19,10,-14,-15,-16,-17,]),'div':([0,5,6,7,12,13,16,19,20,21,22,],[11,11,11,11,-18,-19,11,-14,-15,-16,-17,]),'adv':([0,5,6,7,12,13,16,19,20,21,22,],[12,12,12,12,-18,-19,12,-14,-15,-16,-17,]),'disadv':([0,5,6,7,12,13,16,19,20,21,22,],[13,13,13,13,-18,-19,13,-14,-15,-16,-17,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'command':([0,],[1,]),'roll_list':([0,4,],[2,14,]),'mod_list':([0,5,6,7,16,],[3,15,17,18,23,]),'roll':([0,4,],[4,4,]),'mod':([0,5,6,7,16,],[5,5,5,5,5,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> command","S'",1,None,None,None),
('command -> roll_list','command',1,'p_command','parser.py',28),
('command -> mod_list','command',1,'p_command','parser.py',29),
('command -> <empty>','command',0,'p_command','parser.py',30),
('roll_list -> roll','roll_list',1,'p_roll_list','parser.py',41),
('roll_list -> roll roll_list','roll_list',2,'p_roll_list','parser.py',42),
('roll -> number dice mod_list','roll',3,'p_roll','parser.py',50),
('roll -> number dice','roll',2,'p_roll','parser.py',51),
('roll -> dice mod_list','roll',2,'p_roll','parser.py',52),
('roll -> dice','roll',1,'p_roll','parser.py',53),
('roll -> number','roll',1,'p_roll','parser.py',54),
('roll -> number mod_list','roll',2,'p_roll','parser.py',55),
('mod_list -> mod','mod_list',1,'p_mod_list','parser.py',71),
('mod_list -> mod mod_list','mod_list',2,'p_mod_list','parser.py',72),
('mod -> plus number','mod',2,'p_mod','parser.py',80),
('mod -> minus number','mod',2,'p_mod','parser.py',81),
('mod -> star number','mod',2,'p_mod','parser.py',82),
('mod -> div number','mod',2,'p_mod','parser.py',83),
('mod -> adv','mod',1,'p_mod','parser.py',84),
('mod -> disadv','mod',1,'p_mod','parser.py',85),
]
| StarcoderdataPython |
348444 | import logging
import types
import os
import sys
from typing import Iterable, Optional, Set, List, Dict, Any, Union, Tuple
from getpass import getuser
from powar.settings import AppSettings
from powar.util import saved_sys_properties, read_header, run_command, RunCommandResult, realpath
logger: logging.Logger = logging.getLogger(__name__)
class GlobalConfig:
    """Result of evaluating the global powar config file: the ordered module
    list plus arbitrary user options exposed to module configs."""

    # Assigned by GlobalConfigManager after the config file has executed.
    modules: List[str]

    def __init__(self) -> None:
        # Per-instance dict. The previous class-level ``opts = {}`` was a
        # mutable class attribute shared by every GlobalConfig instance.
        self.opts: Dict[Any, Any] = {}
class GlobalConfigApi:
    """The object exposed as ``p`` inside the user's global config file."""
    # Options dict shared with the GlobalConfig being built.
    opts: Dict[Any, Any]
    _man: 'GlobalConfigManager'
    def __init__(self, man: 'GlobalConfigManager', opts: Dict[Any, Any]) -> None:
        self.opts = opts
        self._man = man
    def modules(self, *modules: str) -> None:
        """Declare the modules powar should apply, in order."""
        self._man.set_modules(modules)
    def execute(
        self,
        command: str,
        stdin: Optional[str] = None,
        decode_stdout: bool = True,
        wait: bool = True,
    ) -> RunCommandResult:
        '''
        Run command and return stdout if any
        '''
        return self._man.execute_command(command, stdin, decode_stdout, wait)
    def read(self, filename: str, as_bytes: bool = False) -> Union[str, bytes]:
        """Return the contents of *filename* (path resolved via util.realpath)."""
        return self._man.read_file(filename, as_bytes)
class GlobalConfigManager:
    """Loads and evaluates the top-level powar config file.

    The config file is ordinary Python executed with a GlobalConfigApi
    instance injected as ``p``; the file declares the modules to apply
    and may run commands / read files through ``p``. The config is the
    user's own code, so executing it is the intended design (not an
    injection surface).
    """

    _directory: str
    _settings: AppSettings
    _api: GlobalConfigApi
    _modules: List
    _global_config: GlobalConfig
    _config_path: str
    # Parsed config header, if any (not populated here).
    _header: Optional[Dict[Any, Any]] = None

    def __init__(
        self,
        directory: str,
        app_settings: AppSettings,
    ):
        self._directory = directory
        self._settings = app_settings
        # Per-instance state: these were previously mutable class attributes,
        # shared by every GlobalConfigManager instance.
        self._modules = []
        self._global_config = GlobalConfig()
        self._config_path = os.path.join(self._directory,
                                         app_settings.global_config_filename)

    def get_global_config(self) -> GlobalConfig:
        """Execute the config file and return the populated GlobalConfig."""
        api = GlobalConfigApi(self, self._global_config.opts)
        # Synthetic module named "powar" provides the config file's global
        # namespace, with the API object exposed as ``p``.
        module = types.ModuleType('powar')
        module.p = api  # type: ignore
        module.__file__ = self._config_path
        with open(self._config_path, 'rb') as f:
            source = f.read()
        code = compile(source, self._config_path, 'exec')
        # Save and restore sys variables and cwd so the config can import
        # and use relative paths from its own directory without leaking
        # state; cwd is now restored even if the config raises.
        old_cwd = os.getcwd()
        try:
            with saved_sys_properties():
                if self._directory not in sys.path:
                    sys.path.insert(0, self._directory)
                os.chdir(self._directory)
                exec(code, module.__dict__)
        finally:
            os.chdir(old_cwd)
        self._global_config.modules = self._modules
        return self._global_config

    def execute_command(self, command: str, stdin: Optional[str],
                        decode_stdout: bool, wait: bool) -> RunCommandResult:
        """Run *command* from the config directory (skipped under dry-run).

        Note: the "Ran" log line is emitted even in dry-run mode, matching
        the original behavior.
        """
        result = RunCommandResult(stdout=None, code=0)
        if not self._settings.dry_run:
            result = run_command(command, self._directory,
                                 stdin.encode('utf8') if stdin else None,
                                 decode_stdout, wait)
        logger.info(
            f"Ran{'' if wait else ' (in bg)'}: {command} for {self._config_path}"
        )
        return result

    def read_file(self, filename: str, as_bytes: bool) -> Union[str, bytes]:
        """Return the file's contents; path resolved via util.realpath."""
        # with-block closes the handle (the original leaked it).
        with open(realpath(filename), 'rb' if as_bytes else 'r') as f:
            return f.read()

    def set_modules(self, modules: Iterable[str]) -> None:
        """Record the module list declared by the config file."""
        self._modules = modules
| StarcoderdataPython |
3260769 | <reponame>metaprov/modeldapi<gh_stars>1-10
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from github.com.metaprov.modelaapi.services.webrequestrun.v1 import webrequestrun_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2
class WebRequestRunServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListWebRequestRuns = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/ListWebRequestRuns',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.ListWebRequestRunsRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.ListWebRequestRunsResponse.FromString,
)
self.CreateWebRequestRun = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/CreateWebRequestRun',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.CreateWebRequestRunRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.CreateWebRequestRunResponse.FromString,
)
self.GetWebRequestRun = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/GetWebRequestRun',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.GetWebRequestRunRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.GetWebRequestRunResponse.FromString,
)
self.UpdateWebRequestRun = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/UpdateWebRequestRun',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.UpdateWebRequestRunRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.UpdateWebRequestRunResponse.FromString,
)
self.DeleteWebRequestRun = channel.unary_unary(
'/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/DeleteWebRequestRun',
request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.DeleteWebRequestRunRequest.SerializeToString,
response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.DeleteWebRequestRunResponse.FromString,
)
class WebRequestRunServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def ListWebRequestRuns(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateWebRequestRun(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetWebRequestRun(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateWebRequestRun(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteWebRequestRun(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WebRequestRunServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListWebRequestRuns': grpc.unary_unary_rpc_method_handler(
servicer.ListWebRequestRuns,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.ListWebRequestRunsRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.ListWebRequestRunsResponse.SerializeToString,
),
'CreateWebRequestRun': grpc.unary_unary_rpc_method_handler(
servicer.CreateWebRequestRun,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.CreateWebRequestRunRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.CreateWebRequestRunResponse.SerializeToString,
),
'GetWebRequestRun': grpc.unary_unary_rpc_method_handler(
servicer.GetWebRequestRun,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.GetWebRequestRunRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.GetWebRequestRunResponse.SerializeToString,
),
'UpdateWebRequestRun': grpc.unary_unary_rpc_method_handler(
servicer.UpdateWebRequestRun,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.UpdateWebRequestRunRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.UpdateWebRequestRunResponse.SerializeToString,
),
'DeleteWebRequestRun': grpc.unary_unary_rpc_method_handler(
servicer.DeleteWebRequestRun,
request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.DeleteWebRequestRunRequest.FromString,
response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.DeleteWebRequestRunResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class WebRequestRunService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def ListWebRequestRuns(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/ListWebRequestRuns',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.ListWebRequestRunsRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.ListWebRequestRunsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateWebRequestRun(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/CreateWebRequestRun',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.CreateWebRequestRunRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.CreateWebRequestRunResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetWebRequestRun(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/GetWebRequestRun',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.GetWebRequestRunRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.GetWebRequestRunResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateWebRequestRun(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/UpdateWebRequestRun',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.UpdateWebRequestRunRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.UpdateWebRequestRunResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteWebRequestRun(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.webrequestrun.v1.WebRequestRunService/DeleteWebRequestRun',
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.DeleteWebRequestRunRequest.SerializeToString,
github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_webrequestrun_dot_v1_dot_webrequestrun__pb2.DeleteWebRequestRunResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| StarcoderdataPython |
8117034 | # Copyright <NAME> 2019-2021.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import jax
import jax.numpy as jnp
import scipy
jax.config.update("jax_enable_x64", True)
def gaussian_decay(x, *args):
    """Residuals of a Gaussian decay fit: w - exp(-(t / x)**2).

    x is the decay time-constant being fitted; args = (w, t) are the
    observed weights and the elapsed times.
    """
    weights, elapsed = args
    model = jnp.exp(-(elapsed / x) ** 2)
    return weights - model
def exponential_decay(x, *args):
    """Residuals of an exponential decay fit: w - exp(-t / x).

    x is the decay time-constant being fitted; args = (w, t) are the
    observed weights and the elapsed times.
    """
    weights, elapsed = args
    model = jnp.exp(-(elapsed / x))
    return weights - model
def nls(fun, x0, args=()):
    """Nonlinear least squares on residual function *fun* using
    Levenberg-Marquardt with a JAX autodiff Jacobian."""
    return scipy.optimize.least_squares(fun, x0, jac=jax.jacobian(fun), method='lm', args=args)
def mle(fun, x0, args=()):
    """Minimize the Gaussian negative log-likelihood 0.5*sum(fun(x)**2)
    with autodiff gradient and Hessian (Newton-CG)."""
    logl = lambda x, *args: 0.5 * jnp.sum(fun(x, *args)**2)
    return scipy.optimize.minimize(logl, x0, args=args, method='Newton-CG', jac=jax.grad(logl), hess=jax.hessian(logl))
class logistic:
    """Logistic distribution with a scipy.stats-like interface.

    Mirrors the subset of the ``jax.scipy.stats.norm`` API used by the
    rating code: cdf/logcdf/pdf/logpdf/ppf with loc/scale parameters.

    Fixes vs. the original: pdf/logpdf now include the 1/scale Jacobian
    factor (the original returned the standard-logistic density for any
    scale, inconsistent with scipy/jax norm conventions), and logcdf is
    computed via logaddexp for numerical stability at large negative y.
    """
    def cdf(self, x, loc=0, scale=1):
        y = (x - loc) / scale
        return jax.scipy.special.expit(y)
    def logcdf(self, x, loc=0, scale=1):
        # log(sigmoid(y)) = -log(1 + e^{-y}), stable for very negative y.
        y = (x - loc) / scale
        return -jnp.logaddexp(0.0, -y)
    def pdf(self, x, loc=0, scale=1):
        y = (x - loc) / scale
        p = jax.scipy.special.expit(y)
        # sigmoid(y) * (1 - sigmoid(y)) / scale: proper density for any scale.
        return p * (1.0 - p) / scale
    def logpdf(self, x, loc=0, scale=1):
        # log pdf = log sigmoid(y) + log sigmoid(-y) - log scale.
        y = (x - loc) / scale
        return -(jnp.logaddexp(0.0, y) + jnp.logaddexp(0.0, -y)) - jnp.log(scale)
    def ppf(self, q, loc=0, scale=1):
        return jnp.array(jax.scipy.special.logit(q) * scale + loc, 'float64')
# (distribution, per-game rating scale) pairs used by the TPR estimators.
# 400/ln(10) is the classic Elo logistic scale; 200*sqrt(2) appears to be
# its Gaussian counterpart (per-player sigma 200) — unconfirmed.
dist_norm = (jax.scipy.stats.norm, 200 * jnp.sqrt(2))
dist_logistic = (logistic() , 400 / jnp.log(10))
def score_balance(x, *args):
    """Residuals between actual scores and expected scores at performance x.

    args = (W, R, (dist, scale)): game scores, opponent ratings, and the
    (distribution, rating scale) pair defining the expected-score curve.
    """
    scores, opponents, (dist, scale) = args
    expected = dist.cdf(x, opponents, scale)
    return scores - expected
return W - We(x, R, scale)
def squared_error(x, *args):
    """Element-wise squared score-balance residuals."""
    residuals = score_balance(x, *args)
    return residuals * residuals
def log_likelihood(x, *args):
    """Bernoulli log-likelihood of the scores W at performance x.

    Each game contributes W*log P(win) + (1-W)*log P(loss); the loss
    probability uses the distribution's symmetry via logcdf(-x, -R, scale).
    """
    scores, opponents, (dist, scale) = args
    log_p_win = dist.logcdf(x, opponents, scale)
    log_p_loss = dist.logcdf(-x, -opponents, scale)
    return jnp.sum(scores * log_p_win + (1.0 - scores) * log_p_loss)
def L2_reg(x, *args):
    """Gaussian log-prior of performance x centered at the mean opponent
    rating, with the module-level dist_norm scale (a quadratic/"L2" penalty).

    NOTE(review): ignores the distribution in args and always uses
    dist_norm — presumably intentional, verify.
    """
    Ra = jnp.mean(args[1])
    dist, scale = dist_norm
    logXe = dist.logpdf
    return jnp.sum(logXe(x, Ra, scale))
def tpr_ppf(args):
    """Closed-form TPR estimate: invert the expected-score curve at the
    mean score against the mean opponent rating (also used as the seed
    for the iterative estimators)."""
    scores, opponents, (dist, scale) = args
    average_score = scores.mean()
    return dist.ppf(average_score, opponents.mean(), scale)
def tpr_root(args, x0=None):
    """TPR via root finding: solve sum(score_balance(x)) = 0 with Newton
    (autodiff first and second derivatives), seeded by tpr_ppf."""
    f = lambda x, *args: jnp.sum(score_balance(x, *args))
    if x0 is None:
        x0 = tpr_ppf(args)
    return scipy.optimize.root_scalar(f, args=args, method='newton', fprime=jax.grad(f), fprime2=jax.hessian(f), x0=x0)
def tpr_nls(args, fun=None, x0=None):
    """TPR by nonlinear least squares on the per-game score-balance residuals.

    fun: optional residual function f(x, *args).  Defaults to score_balance.
         Bug fix: the argument used to be unconditionally overwritten, so a
         caller-supplied residual function was silently ignored.
    x0:  optional starting point; defaults to the closed-form tpr_ppf estimate.
    """
    if fun is None:
        fun = lambda x, *args: score_balance(x, *args)
    if x0 is None:
        x0 = tpr_ppf(args)
    return scipy.optimize.least_squares(fun, x0, jac=jax.jacobian(fun), method='lm', args=args)
def tpr_mle(args, x0=None, ci=False):
    """Tournament performance rating by maximum likelihood.

    args: (W, R, (dist, scale)) -- scores, opponent ratings, score model.
    x0:   optional starting rating; defaults to the closed-form tpr_ppf value.
    ci:   when True, additionally return a 95% profile-likelihood interval.

    Returns the scipy OptimizeResult, or (result, lb, ub) when ci is True.
    """
    # Negated log-likelihood to minimise (the L2 prior is currently disabled).
    fun = lambda x, *args: -log_likelihood(x, *args) #- L2_reg(x, *args)
    if x0 is None:
        x0 = tpr_ppf(args)
    mle = scipy.optimize.minimize(fun, x0, args=args, method='trust-ncg', jac=jax.grad(fun), hess=jax.hessian(fun), options={'gtol': 1e-8})
    if not ci:
        return mle
    # Profile-likelihood CI: solve for where the log-likelihood drops by
    # 0.5 * chi2_{0.95, df=1} from its maximum.
    LR = 0.5 * scipy.stats.chi2.ppf(.95, 1)
    f = lambda x, *args: mle.fun + LR - fun(x, *args)
    # Wald standard error from the inverse Hessian, used only to seed the two
    # root searches.  NOTE(review): OptimizeResult exposes `hess` only for
    # some methods -- confirm trust-ncg populates it.  Also relies on
    # scipy.stats being importable via the bare `import scipy` at file top.
    se = scipy.stats.norm.ppf(.975) * jnp.sqrt(jnp.diag(jnp.linalg.inv(mle.hess)))
    lb = scipy.optimize.root_scalar(f, args=args, method='newton', fprime=jax.grad(f), fprime2=jax.hessian(f), x0=mle.x - se).root
    ub = scipy.optimize.root_scalar(f, args=args, method='newton', fprime=jax.grad(f), fprime2=jax.hessian(f), x0=mle.x + se).root
    return mle, lb, ub
def main():
    """Smoke-run every fitter on the worked examples.

    Data from https://www.kleier.net/txt/rating_23.html#SEC23.  Results are
    computed and discarded -- run interactively to inspect them.
    """
    # decay-curve fitting examples
    w = jnp.array([ 1.0, 0.8, 0.55, 0.3, 0.05 ])
    t = jnp.array([ 0, 1, 2, 3, 4 ])
    x0 = 1
    nls(gaussian_decay, x0, args=(w, t))
    nls(exponential_decay, x0, args=(w, t))
    mle(gaussian_decay, x0, args=(w, t))
    mle(exponential_decay, x0, args=(w, t))
    # alternative tournament example (kept for reference):
    #W = jnp.array([  1.0,    1.0,    0.0,    0.0,    1.0,    1.0,    0.0])
    #R = jnp.array([1436.0, 1162.0, 1782.0, 1708.0, 1297.0, 1342.0, 1813.0])
    # tournament performance rating examples
    W = jnp.array([  1.0,    1.0,    1.0,    1.0,    1.0,    1.0,    0.5])
    R = jnp.array([1446.0, 1687.0, 1798.0, 1860.0, 1917.0, 1756.0, 1805.0])
    tpr_ppf((W, R, dist_norm    ))
    tpr_ppf((W, R, dist_logistic))
    tpr_root((W, R, dist_norm    ))
    tpr_root((W, R, dist_logistic))
    tpr_nls((W, R, dist_norm    ))
    tpr_nls((W, R, dist_logistic))
    tpr_mle((W, R, dist_norm    ))
    tpr_mle((W, R, dist_logistic))
| StarcoderdataPython |
1996206 | <filename>libcst/_parser/_types/production.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from dataclasses import dataclass
@dataclass(frozen=True)
class Production:
    """An immutable grammar production: a rule name and its right-hand side."""

    name: str
    children: str

    def __str__(self) -> str:
        # render as "name: children", the conventional grammar notation
        return "{}: {}".format(self.name, self.children)
| StarcoderdataPython |
12863006 | <gh_stars>1-10
import sqlite3
from flask import Flask, render_template
app = Flask(__name__)
# database details - to remove some duplication
db_name = 'shopping_data.db'
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/customers')
def customers():
    """List all customers from the shopping database."""
    conn = sqlite3.connect(db_name)
    try:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        # fetch every customer row
        cur.execute("select * from customers")
        rows = cur.fetchall()
    finally:
        # robustness: close the connection even if the query raises
        conn.close()
    return render_template('customers.html', rows=rows)
@app.route('/customer_details/<id>')
def customer_details(id):
    """Show a single customer by id.

    Bug fix: the SQL parameter must be a 1-tuple.  ``(id)`` is just a
    parenthesised string, so sqlite3 treated each character as a separate
    binding and raised for any id longer than one character.
    """
    conn = sqlite3.connect(db_name)
    try:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute("select * from customers WHERE id=?", (id,))
        customer = cur.fetchall()
    finally:
        # robustness: close the connection even if the query raises
        conn.close()
    return render_template('customer_details.html', customer=customer)
@app.route('/orders')
def orders():
    """List all orders from the shopping database."""
    conn = sqlite3.connect(db_name)
    try:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        # fetch every order row
        cur.execute("select * from orders")
        rows = cur.fetchall()
    finally:
        # robustness: close the connection even if the query raises
        conn.close()
    return render_template('orders.html', rows=rows)
@app.route('/order_details/<id>')
def order_details(id):
    """Show a single order by id.

    Bug fix: the SQL parameter must be a 1-tuple -- ``(id)`` is just a
    parenthesised string (see customer_details for the same defect).
    """
    conn = sqlite3.connect(db_name)
    try:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute("select * from orders WHERE id=?", (id,))
        order = cur.fetchall()
    finally:
        # robustness: close the connection even if the query raises
        conn.close()
    return render_template('order_details.html', order=order)
6554717 | <reponame>nmjhsuzuki/DZICONV
# coding: utf-8
# ==========================================================
# dziconv
# 入力画像群から Deep Zoom 形式画像ファイル(DZI) を生成する
# ==========================================================
# 2020-05-25 Ver.0.1 Initial Version.
# ==========================================================
# * 20200525 メモ suzuki
# 画像処理モジュールとして Pillow のインストールが必要
# >pip install Pillow (管理者モードで実行)
# pathlib を使うので python 3.4 以上が必要
# 入力: csvファイル(画像の配置情報)
# 1行目:
# 識別子,DZI画像幅,DZI画像高さ,背景色R,背景色G,背景色B
# 2行目以降:
# 画像位置X,画像位置Y,画像幅,画像高,ファイルパス
# 出力: DZI 画像ファイル(単一画像形式)
# 識別子 -+- dzc_output.xml (画像情報ファイル)
# +- dzc_output_files (ピラミッド画像ディレクトリ) -+- 0 --- 0_0.jpg
# +- ...
# +- n -+- 0_0.jpg
# ...
# +- i_j.jpg
# ...
# モジュールの輸入
import os
import sys
import math
from PIL import Image
import pathlib
# 定数
tile_size = 512 # 出力タイル画像の1辺
overlap_size = 1 # オーバーラップサイズ
jpg_quality = 100 # 出力JPGの品質
cache_size_MB = 512 # 入力画像のキャッシュサイズ(MB単位)
# 入出力データファイルの情報
input_dir = ''
input_file = 'index.csv'
output_dir = ''
# 入力画像
output_identifier = ''
dzi_size = (0, 0) # W, H
background_color = (0, 0, 0) # R, G, B
class input_data_record:
    """One input-image placement: position (x, y), size (w, h), file path f."""

    def __init__(self, x=0, y=0, w=0, h=0, f=''):
        # placement rectangle in DZI pixel coordinates
        self.x, self.y = x, y
        self.w, self.h = w, h
        # source image path, relative to the input directory
        self.f = f
input_data = []
# ファイルの読み込み
def read_index_csv(fn):
    """Read the placement csv.

    Line 1: identifier, DZI width, DZI height, background R, G, B --
    stored into the module globals.  Each following line:
    x, y, width, height, file path -- appended to ``input_data``.
    Exits the process on malformed input.
    """
    global output_identifier, dzi_size, background_color
    with open(fn, 'r', encoding='utf-8') as f:
        # header line with the global image parameters
        l = f.readline()
        if (len(l) > 0):
            t = l.strip().split(',')
            if (len(t) >= 6):
                output_identifier = t[0]
                dzi_size = (int(t[1]), int(t[2]))
                background_color = (int(t[3]), int(t[4]), int(t[5]))
            else:
                print('illegal input data: line 1.')
                exit()
            #fi
        #fi
        # one placement record per remaining line
        i = 1
        for l in f:
            t = l.strip().split(',')
            if (len(t) >= 5):
                input_data.append(input_data_record(int(t[0]),int(t[1]),int(t[2]),int(t[3]),t[4]))
            else:
                print('illegal input data: line %d.' % i)
            #fi
            i = i + 1
        #rof
    #htiw
#fed
# レベル情報の定義
class level_info_record:
    """Per-pyramid-level info: level size (w, h) and tile grid (m, n)."""

    def __init__(self, w=0, h=0, m=0, n=0):
        self.w, self.h = w, h  # level image width / height in pixels
        self.m, self.n = m, n  # number of tile columns / rows
level_info = []
level_max = 0 # 最大レベル
# レベル情報の計算
def make_level_info():
    """Build the DZI level pyramid info, from level 0 (1x1) up to full size."""
    global level_max
    w, h = dzi_size
    while True:
        cols = math.ceil(w / tile_size)
        rows = math.ceil(h / tile_size)
        # prepend so that index 0 ends up being the smallest level
        level_info.insert(0, level_info_record(w, h, cols, rows))
        if w == 1 and h == 1:
            break
        # halve, rounding up, for the next-smaller level
        w = (w + 1) // 2
        h = (h + 1) // 2
    level_max = len(level_info) - 1
# 出力ディレクトリの作成
def make_output_dirs():
    """Create the output tree: <out>/<id>/dzc_output_files/<level>/ for every level.

    Idiom: os.makedirs(..., exist_ok=True) creates the whole chain in one call,
    replacing the per-directory exists/mkdir pairs.
    """
    base = os.path.join(output_dir, output_identifier, 'dzc_output_files')
    os.makedirs(base, exist_ok=True)
    for level in range(len(level_info)):
        os.makedirs(os.path.join(base, str(level)), exist_ok=True)
# 画像情報ファイルの出力
def write_output_xml():
    """Write the Deep Zoom descriptor dzc_output.xml (single-image format)."""
    xml_path = os.path.join(output_dir, output_identifier, 'dzc_output.xml')
    parts = [
        '<?xml version="1.0" encoding="utf-8"?>\n',
        '<Image TileSize="%d" Overlap="%d" Format="jpg" ServerFormat="Default" xmlns="http://schemas.microsoft.com/deepzoom/2009">\n' % (tile_size, overlap_size),
        '<Size Width="%d" Height="%d"/>\n' % dzi_size,
        '</Image>\n',
    ]
    with open(xml_path, 'w', encoding='utf_8') as f:
        f.write(''.join(parts))
# 入力画像がどのタイルに属するか調べる
tile_images = None
level_max_m = 0
level_max_n = 0
def check_tile_images():
    """For the deepest level, bucket every input image into the tiles it overlaps.

    Fills the module-level ``tile_images[i][j]`` lists with the
    input_data_record entries whose rectangles intersect tile (i, j).
    """
    global tile_images, level_max_m, level_max_n
    level_max_m = level_info[level_max].m
    level_max_n = level_info[level_max].n
    tile_images = [[[] for j in range(level_max_n)] for i in range(level_max_m)] # 2-D grid of record lists
    for k in range(len(input_data)):
        r = input_data[k]
        # inclusive tile-index range covered by the image rectangle
        imin = math.floor(r.x / tile_size)
        jmin = math.floor(r.y / tile_size)
        imax = math.floor((r.x + r.w - 1) / tile_size)
        jmax = math.floor((r.y + r.h - 1) / tile_size)
        for i in range(imin, imax+1):
            for j in range(jmin, jmax+1):
                tile_images[i][j].append(r)
            #rof
        #rof
    #rof
#fed
# point 型
class point:
    """A 2-D point."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def xy(self):
        """Return (x, y) as a tuple, suitable for PIL paste positions."""
        return (self.x, self.y)
# rect 型
class rect:
    """An axis-aligned rectangle: origin (x, y) and size (w, h)."""

    def __init__(self, x=0, y=0, w=0, h=0):
        self.x, self.y = x, y
        self.w, self.h = w, h

    def area(self):
        """Return (x1, y1, x2 + 1, y2 + 1) -- the PIL crop box."""
        return (self.x, self.y, self.x + self.w, self.y + self.h)

    def xy(self):
        """Return (x, y), suitable for PIL paste positions."""
        return (self.x, self.y)
# レベルlの画像におけるタイル(i,j)の画像エリアを求める
# f=Falseのときは画像エリア番号の指定に従って画像エリアを返す
# f=Trueのときはオーバーラップを含めた画像全体を返す
# 画像エリア番号(p, q) (p,q∈{-1,0,+1})は以下の領域を指す.
# (-1, -1)|( 0,-1)| (+1,-1)
# ---------+-------+---------
# (-1, 0)|( 0, 0)| (+1, 0)
# ---------+-------+---------
# (-1, +1)|( 0,+1)| (+1,+1)
# (0,0)は画像の正味の部分 それ以外はオーバーラップ部分
# 有効なエリア (rect_w, rect_h > 0) を返せたら (rect_x, rect_y, rect_w, rect_h) を,さもなくば None を返す.
def get_level_l_tileimage_rect(l, i, j, p, q, f = False):
    """Return a sub-rectangle of the bitmap for tile (i, j) at pyramid level l.

    A tile bitmap holds the net tile content plus overlap borders.  (p, q)
    with p, q in {-1, 0, +1} addresses a 3x3 grid of areas: (0, 0) is the
    net content, the other eight are the overlap strips/corners.  When
    ``f`` is True the whole bitmap (content plus overlaps) is returned and
    (p, q) is ignored.  Returns a rect, or None when the requested area is
    empty or the tile index is out of range.
    """
    level_l_m = level_info[l].m
    level_l_n = level_info[l].n
    level_l_w = level_info[l].w
    level_l_h = level_info[l].h
    rect_x = -1
    rect_y = -1
    rect_w = 0
    rect_h = 0
    if ((0 <= i) and (i < level_l_m) and (0 <= j) and (j < level_l_n)):
        # border tiles have no overlap on the outer edges
        overlap_l = 0 if (i == 0) else overlap_size
        overlap_r = 0 if (i == level_l_m - 1) else overlap_size
        overlap_u = 0 if (j == 0) else overlap_size
        overlap_d = 0 if (j == level_l_n - 1) else overlap_size
        # net tile size; the last column/row may be smaller than tile_size
        w = (level_l_w - tile_size * i) if (i == level_l_m - 1) else tile_size
        h = (level_l_h - tile_size * j) if (j == level_l_n - 1) else tile_size
        if (f):
            rect_x = 0
            rect_y = 0
            rect_w = overlap_l + w + overlap_r
            rect_h = overlap_u + h + overlap_d
        elif ((-1 <= p) and (p <= 1) and (-1 <= q) and (q <= 1)):
            if (p == -1):
                rect_x = 0
                rect_w = overlap_l
            elif (p == 0):
                rect_x = overlap_l
                rect_w = w
            elif (p == 1):
                rect_x = overlap_l + w
                rect_w = overlap_r
            #fi
            if (q == -1):
                rect_y = 0
                rect_h = overlap_u
            elif (q == 0):
                rect_y = overlap_u
                rect_h = h
            elif (q == 1):
                rect_y = overlap_u + h
                rect_h = overlap_d
            #fi
        else:
            print('get_level_l_tileimage_rect(%d, %d, %d, <%d, %d>, %s) -> ' % (l, i, j, p, q, str(f)))
            exit()
        #fi
    #fi
    return rect(rect_x, rect_y, rect_w, rect_h) if (rect_w > 0 and rect_h > 0) else None
#fed
# レベルlの画像におけるタイル(i,j)のオーバラップコピー用の画像エリアを返す
# 画像エリア番号(p, q) (p,q∈{-1,0,+1})は以下の領域を指す.
# ただし(0, 0) は無意味
# (-1, -1)| | (+1,-1)
# ---------+-------+---------
# | |
# ---------+-------+---------
# (-1, +1)| | (+1,+1)
#
# ( 0,-1)
# ---------+-------+---------
#
# ---------+-------+---------
# ( 0,+1)
#
# | |
# + +
# (-1, 0)| | (+1, 0)
# + +
# | |
#
# 有効なエリア (rect_w, rect_h > 0) を返せたら (rect_x, rect_y, rect_w, rect_h) を,さもなくば None を返す.
def get_level_l_overlapimage_rect(l, i, j, p, q):
    """Return the source rect inside tile (i, j) of level l whose pixels must
    be copied into the overlap border of the neighbouring tile (i+p, j+q).

    (p, q) in {-1, 0, +1}^2 with (0, 0) excluded selects the edge strip or
    corner to copy.  Returns a rect, or None when the neighbour does not
    exist or the strip is empty.
    """
    level_l_m = level_info[l].m
    level_l_n = level_info[l].n
    level_l_w = level_info[l].w
    level_l_h = level_info[l].h
    rect_w = 0
    rect_h = 0
    if ((-1 <= p) and (p <= 1) and (-1 <= q) and (q <= 1) and not ((p == 0) and (q == 0))):
        if ((0 <= i + p) and (i + p < level_l_m) and (0 <= j + q) and (j + q < level_l_n)):
            overlap_l = 0 if (i == 0) else overlap_size
            overlap_u = 0 if (j == 0) else overlap_size
            # Bug fix: the last column/row tiles are smaller than tile_size.
            # The old conditions compared `i == level_l_m` (never true, since
            # i < m) and tested `i` against level_l_n for the height, so edge
            # tiles always used the full tile_size.  Mirror the conditions
            # used in get_level_l_tileimage_rect.
            w = (level_l_w - tile_size * i) if (i == level_l_m - 1) else tile_size
            h = (level_l_h - tile_size * j) if (j == level_l_n - 1) else tile_size
            if (p == -1):
                rect_x = overlap_l
                rect_w = overlap_size
            elif (p == 0):
                rect_x = overlap_l
                rect_w = w
            elif (p == 1):
                rect_x = overlap_l + w - overlap_size
                rect_w = overlap_size
            #fi
            if (q == -1):
                rect_y = overlap_u
                rect_h = overlap_size
            elif (q == 0):
                rect_y = overlap_u
                rect_h = h
            elif (q == 1):
                rect_y = overlap_u + h - overlap_size
                rect_h = overlap_size
            #fi
        #fi
    #fi
    return rect(rect_x, rect_y, rect_w, rect_h) if ((rect_w > 0) and (rect_h > 0)) else None
#fed
# 入力画像のキャッシュ(キュー構造)
class image_cache_record:
    """A cached image: source path f, PIL image p, estimated decoded size."""

    def __init__(self, f, p=None):
        self.f = f
        self.p = p
        # approximate decoded footprint: width * height * 3 bytes (RGB)
        self.memsize = p.width * p.height * 3 if (p is not None) else 0
class image_cache:
    """A memory-bounded cache of input images.

    New images are inserted at the front; when the estimated memory use
    would exceed ``memmax``, records are evicted from the tail.
    NOTE(review): a cache *hit* does not move the record to the front, so
    eviction order is insertion order rather than true LRU.
    """
    def __init__(self, m = cache_size_MB * 1024 * 1024):
        self.memmax = m    # byte budget for cached images
        self.memsize = 0   # current estimated usage
        self.c = []        # list of image_cache_record, newest first
    #fed
    def read_image(self, f):
        """Return the PIL image for relative path ``f``, loading it on a miss."""
        # linear scan for an already-cached record
        j = -1
        for i in range(0, len(self.c)):
            if (self.c[i].f == f):
                j = i
                break
            #fi
        #rof
        if ((0 <= j) and (j < len(self.c))): # cache hit
            return self.c[j].p # return the cached image
        else: # cache miss
            ff = os.path.join(input_dir, f)
            if (os.path.isfile(ff)):
                r = image_cache_record(f, Image.open(ff)) # load the image
            else:
                print('%s: file not found' % f)
                exit()
            #fi
            # evict from the tail until the new record fits the budget
            while ((len(self.c) > 0) and (self.memsize + r.memsize > self.memmax)):
                self.memsize -= self.c[-1].memsize
                del self.c[-1]
            #elihw
            self.c.insert(0, r) # newest record goes to the front
            self.memsize += r.memsize
            return r.p
        #fi
    #fed
#ssalc
icache = None
# 入力画像からの最大レベル(level_max)画像の読み込み
# 画像pに書き込む
# 画像pの画像サイズは一辺Blocksize + OverlapSize * 2の正方形
# 読み込んだ画像はpの画像エリア番号(0,0)の範囲に書き込まれる
def read_level_max_image(i, j, p):
    """Compose tile (i, j) of the deepest level into bitmap ``p``.

    ``p`` is a (tile_size + 2*overlap) square; pixels are written into its
    net (0, 0) area.  Source pixels come from the input images registered
    in tile_images[i][j] by check_tile_images().
    """
    # destination area inside p
    prect = get_level_l_tileimage_rect(level_max, i, j, 0, 0)
    if (prect is not None):
        # tile extent in DZI coordinates: [tx1, tx2) x [ty1, ty2)
        tx1 = i * tile_size
        ty1 = j * tile_size
        tx2 = tx1 + prect.w
        ty2 = ty1 + prect.h
        l = tile_images[i][j]
        n = len(l)
        for k in range(0, n):
            # input image extent: [ix1, ix2) x [iy1, iy2)
            ix1 = l[k].x
            iy1 = l[k].y
            ix2 = ix1 + l[k].w
            iy2 = iy1 + l[k].h
            # intersection of tile and image: [ox1, ox2) x [oy1, oy2)
            # (non-empty by construction -- only intersecting images are registered)
            ox1 = max(tx1, ix1)
            ox2 = min(tx2, ix2)
            oy1 = max(ty1, iy1)
            oy2 = min(ty2, iy2)
            # intersection size
            ow = ox2 - ox1
            oh = oy2 - oy1
            # tile-local origin of the intersection
            oltx1 = ox1 - tx1;
            olty1 = oy1 - ty1;
            # image-local origin of the intersection
            olix1 = ox1 - ix1;
            oliy1 = oy1 - iy1;
            # crop the source region and paste it into p's net area
            p.paste(icache.read_image(l[k].f).crop((olix1, oliy1, olix1 + ow, oliy1 + oh)), (prect.x + oltx1, prect.y + olty1))
        #rof
    else:
        print('read_image: prect is None')
        exit()
    #fi
#fed
# レベルlの画像の読み込み(0<=l<=LavelMax-1)
# レベルl+1の画像を4つ読み込んで合成して縮小する
# 画像pに書き込む
# 画像pの画像サイズは一辺Blocksize + OverlapSize * 2の正方形
# 読み込んだ画像はpの(OverlapSize, Overlapsize) - (OverlapSize + BlockSize - 1, OverlapSize + BlockSize - 1)
# の範囲に書き込まれる
def read_level_l_image(l, i, j, p):
    """Build tile (i, j) of level l (0 <= l < level_max) into bitmap ``p``.

    Reads up to four already-written tiles of level l+1, stitches them
    together, downscales by 1/2 and pastes the result into p's net (0, 0)
    area.
    """
    if ((0 <= l) and (l <= level_max - 1)):
        # tile grid size of level l+1
        m = level_info[l + 1].m
        n = level_info[l + 1].n
        # top-left source tile at level l+1
        ii = i * 2
        jj = j * 2
        # load the 2x2 block of source tiles; edge tiles may be missing
        in_dir = os.path.join(output_dir, output_identifier, 'dzc_output_files', str(l + 1))
        q00 = Image.open(os.path.join(in_dir, ('%d_%d.jpg' % (ii,     jj    ))))
        q01 = Image.open(os.path.join(in_dir, ('%d_%d.jpg' % (ii,     jj + 1)))) if (jj + 1 < n) else None
        q10 = Image.open(os.path.join(in_dir, ('%d_%d.jpg' % (ii + 1, jj    )))) if (ii + 1 < m) else None
        q11 = Image.open(os.path.join(in_dir, ('%d_%d.jpg' % (ii + 1, jj + 1)))) if ((ii + 1 < m) and (jj + 1 < n)) else None
        # net (overlap-free) sizes and paste positions for each source tile
        q00r = get_level_l_tileimage_rect(l + 1, ii, jj, 0, 0)
        ppw = q00r.w
        pph = q00r.h
        pp00p = point(0, 0)
        if (q01 is not None):
            q01r = get_level_l_tileimage_rect(l + 1, ii, jj + 1, 0, 0)
            pp01p = point(0, q00r.h)
            pph += q01r.h
        #fi
        if (q10 is not None):
            q10r = get_level_l_tileimage_rect(l + 1, ii + 1, jj, 0, 0)
            pp10p = point(q00r.w, 0)
            ppw += q10r.w
        #fi
        if (q11 is not None):
            q11r = get_level_l_tileimage_rect(l + 1, ii + 1, jj + 1, 0, 0)
            pp11p = point(q00r.w, q00r.h)
        #fi
        # round the stitching canvas up to even so the 1/2 resize is exact
        if ((ppw % 2) == 1): ppw += 1 #fi
        if ((pph % 2) == 1): pph += 1 #fi
        # stitch the (up to four) net tile images onto one canvas
        pp = Image.new('RGB', (ppw, pph), background_color)
        pp.paste(q00.crop(q00r.area()), pp00p.xy())
        if (q01 is not None): pp.paste(q01.crop(q01r.area()), pp01p.xy()) #fi
        if (q10 is not None): pp.paste(q10.crop(q10r.area()), pp10p.xy()) #fi
        if (q11 is not None): pp.paste(q11.crop(q11r.area()), pp11p.xy()) #fi
        # downscale by 1/2 and paste into p's net area
        pw = int(ppw / 2)
        ph = int(pph / 2)
        pr = get_level_l_tileimage_rect(l, i, j, 0, 0)
        p.paste(pp.resize((pw, ph), Image.BICUBIC), pr.xy())
    else:
        print('read_level_l_image: illegal level %d' % l)
        exit()
    #fi
#fed
class image_buf_record:
    """A reusable tile bitmap buffer plus the tile index (i, j) it holds.

    (i, j) == (-1, -1) marks the buffer as unused.
    """
    def __init__(self):
        self.reset()
    #fed
    def reset(self):
        """Mark unused and reallocate a blank (tile + 2*overlap) square bitmap."""
        self.set_unused()
        self.img = Image.new('RGB', ((tile_size + overlap_size * 2), (tile_size + overlap_size * 2)), background_color)
    #fed
    def set_index(self, i, j):
        """Record which tile this buffer currently holds."""
        self.i = i
        self.j = j
    #fed
    def set_unused(self):
        """Clear the tile index."""
        self.i = -1
        self.j = -1
    #fed
    def is_unused(self):
        """True when no tile is assigned."""
        return ((self.i < 0) or (self.j < 0))
    #fed
    def write_to_file(self, l, out_dir):
        """Crop the finished bitmap (content + overlaps) of level l and save
        it as <out_dir>/<i>_<j>.jpg, then reset the buffer."""
        if (not self.is_unused()):
            dr = get_level_l_tileimage_rect(l, self.i, self.j, 0, 0, True)
            self.img.crop(dr.area()).save(os.path.join(out_dir, ('%d_%d.jpg' % (self.i, self.j))))
            self.reset()
        #fi
    #fed
#ssalc
def make_level_images():
    """Generate all tile JPEGs, level by level from deepest to 1x1.

    Keeps a ring of 2*n+3 tile buffers (n = tile rows of the level) so that
    overlap pixels can be copied into the eight neighbouring tiles before a
    tile is flushed to disk.  ``si`` is the ring index of the tile currently
    being composed; a buffer is written once all neighbours that feed its
    overlaps have been processed.
    """
    check_tile_images()
    for l in range(level_max, -1, -1):
        out_dir = os.path.join(output_dir, output_identifier, 'dzc_output_files', str(l))
        level_l_m = level_info[l].m
        level_l_n = level_info[l].n
        level_l_w = level_info[l].w
        level_l_h = level_info[l].h
        print('Level%d: %dx%d: %dx%d' % (l, level_l_w, level_l_h, level_l_m, level_l_n))
        image_buf = [ image_buf_record() for i in range(2 * level_l_n + 3) ]
        ibindex = lambda i: ((i + 2 * level_l_n + 3) % (2 * level_l_n + 3))
        si = 0 # ring index of the buffer holding the current (p, q) = (0, 0) tile
        for i in range(0, level_l_m):
            print((' %d' % i), end='')
            sys.stdout.flush()
            for j in range(0, level_l_n):
                # compose the net content of tile (i, j) into the current buffer
                image_buf[si].set_index(i, j)
                if (l == level_max):
                    read_level_max_image(i, j, image_buf[si].img)
                else:
                    read_level_l_image(l, i, j, image_buf[si].img)
                #fi
                # copy this tile's border pixels into each neighbour's overlap area
                # (p,q) = (-1,-1)
                sr = get_level_l_overlapimage_rect(l, i, j, -1, -1)
                dr = get_level_l_tileimage_rect(l, i - 1, j - 1, 1, 1)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si - (level_l_n + 1))
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = (-1, 0)
                sr = get_level_l_overlapimage_rect(l, i, j, -1, 0)
                dr = get_level_l_tileimage_rect(l, i - 1, j, 1, 0)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si - level_l_n)
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = (-1, +1)
                sr = get_level_l_overlapimage_rect(l, i, j, -1, 1)
                dr = get_level_l_tileimage_rect(l, i - 1, j + 1, 1, -1)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si - (level_l_n - 1))
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = ( 0,-1)
                sr = get_level_l_overlapimage_rect(l, i, j, 0, -1)
                dr = get_level_l_tileimage_rect(l, i, j - 1, 0, 1)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si - 1)
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = ( 0,+1)
                sr = get_level_l_overlapimage_rect(l, i, j, 0, 1)
                dr = get_level_l_tileimage_rect(l, i, j + 1, 0, -1)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si + 1)
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = (+1,-1)
                sr = get_level_l_overlapimage_rect(l, i, j, 1, -1)
                dr = get_level_l_tileimage_rect(l, i + 1, j - 1, -1, 1)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si + (level_l_n - 1))
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = (+1, 0)
                sr = get_level_l_overlapimage_rect(l, i, j, 1, 0)
                dr = get_level_l_tileimage_rect(l, i + 1, j, -1, 0)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si + level_l_n)
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # (p,q) = (+1,+1)
                sr = get_level_l_overlapimage_rect(l, i, j, 1, 1)
                dr = get_level_l_tileimage_rect(l, i + 1, j + 1, -1, -1)
                if ((sr is not None) and (dr is not None)):
                    di = ibindex(si + (level_l_n + 1))
                    image_buf[di].img.paste(image_buf[si].img.crop(sr.area()), dr.xy())
                #fi
                # flush the buffer whose overlaps are now complete
                di = ibindex(si - (level_l_n + 1))
                ii = image_buf[di].i;
                jj = image_buf[di].j;
                if (((ii == i - 1) and (jj == j - 1)) or ((ii == i - 2) and (jj == level_l_n - 1))):
                    image_buf[di].write_to_file(l, out_dir)
                #fi
                si = ibindex(si + 1)
            #rof
        #rof
        # flush everything still pending in the ring
        for i in range(si - (level_l_n + 1), si): image_buf[ibindex(i)].write_to_file(l, out_dir) #rof
        print('')
    #rof
#fed
# --- main processing ---
print('***** Deep Zoom Image File Converter *****')
if (len(sys.argv) < 3):
    print('Usage: dziconv InputFile(index.csv) OutputPath [tile_size=%d [OverlapSize=%d [JPEGQuality=%d [CacheSize(MB)=%d]]]].' % (tile_size, overlap_size, jpg_quality, cache_size_MB))
    exit()
#fi
# argument 1: input csv path (made absolute, then split into dir/name)
p = pathlib.Path(sys.argv[1])
if (not p.is_absolute()): p = p.resolve() #fi
input_dir, input_file = os.path.split(str(p))
# argument 2: output directory (made absolute)
p = pathlib.Path(sys.argv[2])
if (not p.is_absolute()): p = p.resolve() #fi
output_dir = str(p)
# argument 3: tile size
if (len(sys.argv) >= 4): tile_size = int(sys.argv[3]) #fi
# argument 4: overlap size
if (len(sys.argv) >= 5): overlap_size = int(sys.argv[4]) #fi
# argument 5: JPEG quality
# NOTE(review): jpg_quality is parsed and printed but never passed to
# Image.save -- confirm whether it should be wired into write_to_file.
if (len(sys.argv) >= 6): jpg_quality = int(sys.argv[5]) #fi
# argument 6: cache size in MB
if (len(sys.argv) >= 7): cache_size_MB = int(sys.argv[6]) #fi
# show the effective parameters
print('===== Parameters =====')
print('Input File Path:%s' % input_dir)
print('Input File Name:%s' % input_file)
print('Output File Path:%s' % output_dir)
print('tile_size:%d' % tile_size)
print('OverlapSize:%d' % overlap_size)
print('JPEGQuality:%d' % jpg_quality)
print('CacheSize(MB):%d' % cache_size_MB)
print('======================');
# read the placement csv
read_index_csv(os.path.join(input_dir, input_file))
# show what was read
print('===== Input File Infomation =====')
print('Output File ID:%s' % output_identifier)
print('Image Size:%dx%d' % dzi_size)
print('Background Color: (%d,%d,%d)' % background_color)
print('#data:%d' % len(input_data))
print('================================')
# build the pyramid level table
make_level_info()
print('===== DeepZoom Levels =====')
for i in range(0, len(level_info)):
    l =level_info[i]
    print('%3d:%6dx%6d, (%4dx%4d)' % (i, l.w, l.h, l.m, l.n))
#rof
print('===========================')
# run the conversion
print('===== Conversion in progress =====');
icache = image_cache(cache_size_MB * 1024 * 1024)
make_output_dirs()
write_output_xml()
make_level_images()
print('============ Finished ============')
# done
| StarcoderdataPython |
12826778 | <reponame>jordimarinvalle/tictactoexxl
# -*- coding: utf-8 -*-
import pytest
from tictactoexxl.game import Game
from tictactoexxl.board import Board
from tictactoexxl.board import BoardPosition
from tictactoexxl.player import Player
class TestTicTacToeXXLGame(object):
    """Tests for Game on a default board with two players."""

    board = None
    player1 = None
    player2 = None

    PLAYER1_NAME = "ttt"
    PLAYER1_MOVE_REPRESENTATION = "M"

    PLAYER2_NAME = "tttxxl"
    PLAYER2_MOVE_REPRESENTATION = "W"

    def setup_method(self, _):
        """Build a fresh board, two players and a game before every test."""
        self.board = Board()
        self.player1 = Player(self.PLAYER1_NAME,
                              self.PLAYER1_MOVE_REPRESENTATION)
        self.player2 = Player(self.PLAYER2_NAME,
                              self.PLAYER2_MOVE_REPRESENTATION)
        self.game = Game(board=self.board,
                         players=[self.player1, self.player2])

    def test_game_winning_n_in_a_row_ok_1(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=2,
                                             board_dim_x=3,
                                             board_dim_y=3,
                                             n_in_a_row=3) is True

    def test_game_winning_n_in_a_row_ok_2(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=4,
                                             board_dim_x=3,
                                             board_dim_y=3,
                                             n_in_a_row=3) is True

    def test_game_winning_n_in_a_row_ok_3(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=3,
                                             board_dim_x=2,
                                             board_dim_y=4,
                                             n_in_a_row=3) is True

    def test_game_winning_n_in_a_row_ko_1(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=2,
                                             board_dim_x=5,
                                             board_dim_y=5,
                                             n_in_a_row=6) is False

    def test_game_winning_n_in_a_row_ko_2(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=5,
                                             board_dim_x=3,
                                             board_dim_y=3,
                                             n_in_a_row=3) is False

    def test_game_winning_n_in_a_row_ko_3(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=5,
                                             board_dim_x=3,
                                             board_dim_y=3,
                                             n_in_a_row=4) is False

    def test_game_winning_n_in_a_row_ko_4(self):
        assert Game.is_winning_n_in_a_row_ok(num_players=3,
                                             board_dim_x=2,
                                             board_dim_y=5,
                                             n_in_a_row=5) is False

    def test_game_players(self):
        # Bug fix: compare with ==, not `is` -- identity of small ints is an
        # implementation detail (`... is 2` is a SyntaxWarning on CPython 3.8+).
        assert len(self.game.players) == 2

    def test_game_get_players_move_representations(self):
        set_1 = set(self.game.get_players_move_representations())
        set_2 = set([self.PLAYER1_MOVE_REPRESENTATION,
                     self.PLAYER2_MOVE_REPRESENTATION])
        assert set_2.difference(set_1) == set()

    def test_game_player_make_a_move(self):
        board_position = BoardPosition("a", "1")
        self.game.player_make_a_move(self.player1, board_position)
        slot_value = self.game.board.get_slot_value(board_position)
        # Bug fix: compare values with ==, not identity with `is`.
        assert slot_value == self.player1.move_repr

    def test_game_has_player_won(self):
        board_position_1 = BoardPosition("a", "1")
        self.game.player_make_a_move(self.player1, board_position_1)
        board_position_2 = BoardPosition("a", "2")
        self.game.player_make_a_move(self.player1, board_position_2)
        board_position_3 = BoardPosition("a", "3")
        self.game.player_make_a_move(self.player1, board_position_3)
        assert self.game.has_player_won(self.player1, board_position_3) is True
# Allow running this test module directly (delegates to pytest's own runner).
if __name__ == '__main__':
    pytest.main()
| StarcoderdataPython |
1822618 | <reponame>jiangxuewen16/hq-crawler
import importlib
import json
from collections import namedtuple
from types import FunctionType
# from django.urls import re_path
from django.urls import re_path
from hq_crawler import settings
from django.utils.autoreload import logger
"""
注解路由核心类
"""
class Route:
    """Decorator-based URL routing: collects class- and method-level route
    declarations and registers them as Django url patterns."""

    routeViewPath = namedtuple('classPath', 'path module class_name func_name') # method route: (url path, module, class name, method name)
    classRouteTuple = namedtuple('classRoute', 'module class_name path') # class route: (module, class name, url path)
    ROUTER: list = [] # collected method routes
    classRoute: list = [] # collected class routes
    routeList: dict = {} # full path -> handler method name

    @classmethod
    def route(cls, path):
        """Decorator usable on a class (base path) or a method (sub path)."""
        def my_decorator(func):
            # a class was decorated: record a class-level route
            if not isinstance(func, FunctionType):
                cls.classRoute.append(cls.classRouteTuple(func.__module__, func.__qualname__, path))
                return func
            # a method was decorated: record (path, module, owning class, method)
            cls.ROUTER.append(cls.routeViewPath(path, func.__module__, func.__qualname__[:func.__qualname__.index('.')],
                                                func.__name__))
            def wrapper(self, *args, **kwargs):
                return func(self, *args, **kwargs)
            return wrapper
        return my_decorator

    @classmethod
    def register(cls, urlpatterns: list):
        """Combine class and method routes and append re_path entries to
        ``urlpatterns``.  Raises on duplicate full paths."""
        routeKeyList = []
        for classItem in Route.classRoute: # class routes
            module = importlib.import_module(classItem.module)
            routeClass = getattr(module, classItem.class_name)
            for routeItem in Route.ROUTER: # method routes
                if routeItem.module + routeItem.class_name == classItem.module + classItem.class_name: # same owning class?
                    path = classItem.path + routeItem.path # full route path
                    if path in Route.routeList:
                        exceptionStr = f'路由重复:{routeItem.module + routeItem.class_name} -> {routeItem.func_name}, 路径:{path}'
                        raise Exception(exceptionStr)
                    Route.routeList[path] = routeItem.func_name
            if classItem.path in routeKeyList:
                continue
            path_str = '^' + settings.BASE_URL
            # NOTE(review): the trailing comma makes this line a 1-tuple
            # expression statement -- harmless but presumably unintended.
            urlpatterns.append(re_path(path_str + classItem.path, routeClass.as_view())),
            routeKeyList.append(classItem.path)
| StarcoderdataPython |
4932379 | #!/usr/bin/env python3
import socket
import math
from gpiozero import PWMOutputDevice
from gpiozero import DigitalOutputDevice
HOST = '192.168.10.104' # Standard loopback interface address (localhost)
PORT = 51717 # Port to listen on (non-privileged ports are > 1023)
MAXDATAITEMS = 4 # Max number of items in the data array split by a pipe character
MIN_WIDTH = 1000
MAX_WIDTH = 2000
#///////////////// Define Motor Driver GPIO Pins /////////////////
# Motor A, Left Side GPIO CONSTANTS
PWM_DRIVE_LEFT = 14 # ENA - H-Bridge enable pin
FORWARD_LEFT_PIN = 23 # IN1 - Forward Drive
REVERSE_LEFT_PIN = 18 # IN2 - Reverse Drive
# Motor B, Right Side GPIO CONSTANTS
PWM_DRIVE_RIGHT = 25 # ENB - H-Bridge enable pin
FORWARD_RIGHT_PIN = 8 # IN1 - Forward Drive
REVERSE_RIGHT_PIN = 7 # IN2 - Reverse Drive
# Initialise objects for H-Bridge GPIO PWM pins
# Set initial duty cycle to 0 and frequency to 1000
driveLeft = PWMOutputDevice(PWM_DRIVE_LEFT, True, 0, 1000)
driveRight = PWMOutputDevice(PWM_DRIVE_RIGHT, True, 0, 1000)
# Initialise objects for H-Bridge digital GPIO pins
forwardLeft = PWMOutputDevice(FORWARD_LEFT_PIN)
reverseLeft = PWMOutputDevice(REVERSE_LEFT_PIN)
forwardRight = PWMOutputDevice(FORWARD_RIGHT_PIN)
reverseRight = PWMOutputDevice(REVERSE_RIGHT_PIN)
def calcCartesianCoordinates(stickX, stickY):
    """Mix a single joystick (x, y) into left/right tank-drive motor widths.

    Rotating the stick vector by 45 degrees maps forward/turn input onto
    the two wheel axes; the result is rescaled, clamped to [-100, 100],
    converted to pulse widths and sent to the motor driver.
    """
    # convert to polar coordinates
    r = math.sqrt((pow(stickX,2) + pow(stickY,2))) # hypot(x, y);
    t = math.atan2(stickY, stickX)
    # rotate by 45 degrees
    t += math.pi / 4
    # convert back to cartesian
    x = r * math.cos(t)
    y = r * math.sin(t)
    # rescale the new coordinates (undo the sqrt(2) shrink of the rotation mix)
    x = x * math.sqrt(2)
    y = y * math.sqrt(2)
    # clamp to the [-100, 100] coordinate range expected by the width mapping
    x = max(-100, min(x, 100))
    y = max(-100, min(y, 100))
    left = translateCoordinateToMotorWidth(y)
    right = translateCoordinateToMotorWidth(-1 * x)
    setMotorSpeedAndDirection(left, right)
def translateCoordinateToMotorWidth(coordinate):
    """Map a joystick coordinate in [-100, 100] to a servo-style pulse width.

    0 maps to the 1500 neutral point, +100/-100 to MAX_WIDTH/MIN_WIDTH.
    Out-of-range inputs are clamped to [MIN_WIDTH, MAX_WIDTH].
    """
    # Single linear mapping (1500 + 5 * coordinate, truncated) replaces the
    # three branches of the original, which computed the same line with
    # asymmetric floating-point expressions for the two signs.
    motorWidth = int(1500 + 500 * coordinate / 100)
    # clamp into the legal pulse-width range
    if (motorWidth < MIN_WIDTH):
        motorWidth = MIN_WIDTH
    if (motorWidth > MAX_WIDTH):
        motorWidth = MAX_WIDTH
    return motorWidth
def calculateDriveValue(vector):
    """Convert a pulse width (1000-2000) to a PWM duty magnitude in [0, 1].

    1500 is stopped, 2000 full forward, 1000 full reverse.  Direction is
    handled separately by setMotorSpeedAndDirection, so only the magnitude
    is returned.
    """
    if (vector == 1500):
        driveValue = 0.0
    elif (vector > 1500):
        driveValue = (vector - 1500) / 500
    else:
        driveValue = 1.0 - ((vector - 1000) / 500)
    # Robustness: PWMOutputDevice.value must stay within [0, 1] even if a
    # caller ever passes a width outside the 1000-2000 range.
    return max(0.0, min(1.0, driveValue))
def setMotorSpeedAndDirection(left, right):
    """Drive the H-bridge from two pulse widths (1000-2000, 1500 = stop).

    For each side: >1500 selects forward, <1500 reverse, ==1500 coast;
    the PWM duty magnitude is set from the same width via calculateDriveValue.
    """
    # left motor direction pins
    if (left == 1500):
        forwardLeft.value = False
        reverseLeft.value = False
    if (left > 1500):
        forwardLeft.value = True
        reverseLeft.value = False
    elif (left < 1500):
        forwardLeft.value = False
        reverseLeft.value = True
    driveLeft.value = calculateDriveValue(left)
    # right motor direction pins
    if (right == 1500):
        forwardRight.value = False
        reverseRight.value = False
    if (right > 1500):
        forwardRight.value = True
        reverseRight.value = False
    elif (right < 1500):
        forwardRight.value = False
        reverseRight.value = True
    driveRight.value = calculateDriveValue(right)
# Accept a single TCP client and stream pipe-separated stick values into the
# drive logic.  NOTE(review): recv() on a stream socket may deliver partial or
# coalesced messages -- this assumes one "x|y|x|y" datagram per recv; confirm
# the sender's framing.  Also, recv() returning b'' (client disconnect) leaves
# this loop spinning forever.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    conn, addr = s.accept()
    with conn:
        print('Connected by', addr)
        while True:
            data = conn.recv(1024)
            if data:
                dataString = data.decode("utf-8")
                dataLength = len(dataString.split("|"))
                if (dataLength == MAXDATAITEMS):
                    # left stick drives the motors; right stick is currently unused
                    dataArray = dataString.split("|", MAXDATAITEMS)
                    leftStickX = float(dataArray[0])
                    leftStickY = float(dataArray[1])
                    rightStickX = float(dataArray[2])
                    rightStickY = float(dataArray[3])
                    calcCartesianCoordinates(leftStickX, leftStickY)
| StarcoderdataPython |
344326 | import numpy as np
# Local Modules
from constants import RAINBOW_WAVELENGHTS, SUNLIGHT
from object import HollowSphere, Sphere
from ray import Ray
import utils
EARTH_RADIUS = 6378 * 1000
DEFAULT_CENTER = np.array([0, -EARTH_RADIUS, 0])
DEFAULT_ATMOSPHERE_HEIGHT = EARTH_RADIUS * 0.025
DEFAULT_SUN_DIRECTION = utils.normalize(np.array([0, 1, 0]))
DEFAULT_AVG_DENSITY_HEIGHT = 0.25 * DEFAULT_ATMOSPHERE_HEIGHT
IN_SCATTER_SAMPLES = 10
OPTICAL_DEPTH_SAMPLES = 10
COLOR_CHANNELS = 7
SCATTERING_COEFFICIENTS = np.power(RAINBOW_WAVELENGHTS, -4)
DENSITY_FALLOFF = 4
class SkyDome:
    """
    A sky dome that will simulate an atmosphere and a planet.
    Attributes:
        center(ndarray): The position for the center of the planet
        radius(float): The radius of the planet
        sun_direction(ndarray): The direction vector pointing towards the
            sun
        atmosphere_height(float): The altitude on which the atmosphere ends
        avg_density_height(float): The altitude on which the average
            density of the atmosphere is
    """
    def __init__(
        self,
        center=DEFAULT_CENTER,
        radius=EARTH_RADIUS,
        sun_direction=DEFAULT_SUN_DIRECTION,
        atmosphere_height=DEFAULT_ATMOSPHERE_HEIGHT,
        avg_density_height=DEFAULT_AVG_DENSITY_HEIGHT
    ):
        self.center = center
        self.radius = radius
        self.sun_direction = sun_direction
        self.atmosphere_height = atmosphere_height
        self.avg_density_height = avg_density_height
        # intersectable geometry for the planet surface and atmosphere shell
        self.planet_obj = Sphere(center, None, None, radius)
        self.atmosphere_obj = HollowSphere(
            center, None, None, radius + atmosphere_height
        )

    @staticmethod
    def phase_function(v1, v2):
        """Rayleigh phase function (3/4)(1 + cos^2 theta) for unit vectors."""
        return (3 / 4) * (1 + np.dot(v1, v2) ** 2)

    def density_at_point(self, p):
        """Relative atmospheric density at point p: exponential falloff with
        altitude, scale height = avg_density_height."""
        height = utils.distance(p, self.center) - self.radius
        density = np.exp(-height / self.avg_density_height)
        return density

    def optical_depth(
        self, p, direction, distance, num_samples=OPTICAL_DEPTH_SAMPLES
    ):
        """Integrate density along a ray from p over `distance` by left-point
        Riemann sum with num_samples steps."""
        optical_depth = 0
        step_size = distance / num_samples
        for i in range(num_samples):
            current_distance = i * step_size
            sample_point = p + direction * current_distance
            sample_density = self.density_at_point(sample_point)
            optical_depth += sample_density * step_size
        return optical_depth

    def out_scattering(
        self, p, direction, distance, num_samples=OPTICAL_DEPTH_SAMPLES
    ):
        """Per-wavelength out-scattering term: 4*pi * beta * optical depth."""
        optical_depth = self.optical_depth(p, direction, distance, num_samples)
        out_scattering = 4 * np.pi * SCATTERING_COEFFICIENTS * optical_depth
        return out_scattering

    def in_scattering(self, r, view_samples=IN_SCATTER_SAMPLES):
        """Light in-scattered towards the viewer along ray r through the
        atmosphere, sampled at view_samples points; samples whose sun ray is
        blocked by the planet contribute nothing.

        NOTE(review): assumes r.intersect(...) returns the distance along the
        ray and that Ray exposes origin/direction as .pr/.nr -- confirm in the
        ray module.
        """
        dist_to_atmosphere = r.intersect(self.atmosphere_obj)
        transmittance = 0
        step_size = dist_to_atmosphere / view_samples
        for i in range(view_samples):
            distance = step_size * i
            sample_point = r.at(distance)
            sun_ray = Ray(sample_point, self.sun_direction)
            t_to_atmosphere = sun_ray.intersect(self.atmosphere_obj)
            t_to_planet = sun_ray.intersect(self.planet_obj)
            if 0 < t_to_planet < t_to_atmosphere:
                # the planet shadows this sample from direct sunlight
                continue
            out_sun = self.out_scattering(
                sun_ray.pr, sun_ray.nr, t_to_atmosphere
            )
            out_view = self.out_scattering(
                sample_point, -r.nr, distance
            )
            current_density = self.density_at_point(sample_point)
            transmittance += (
                current_density * np.exp(-(out_sun + out_view)) * step_size
                * self.phase_function(sun_ray.nr, -r.nr)
            )
        light = (
            SUNLIGHT * SCATTERING_COEFFICIENTS * transmittance
        )
        return light
| StarcoderdataPython |
8132914 | from app.models import Category
def test_id():
    """A Category that has not been persisted yet has no id."""
    c = Category(name="Poom")
    # The original left a debug print behind; the assertion alone suffices.
    assert c.get_id() is None
def test_name():
    """get_name() should return the exact name passed to the constructor."""
    c = Category(name="Benz")
    # The original only asserted truthiness, which passes for any non-empty
    # return value; compare against the constructed name instead.
    assert c.get_name() == "Benz"
| StarcoderdataPython |
1736167 | #!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
RCCSD for real integrals
8-fold permutation symmetry has been used
(ij|kl) = (ji|kl) = (kl|ij) = ...
'''
import ctypes
from functools import reduce
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.cc import _ccsd
from pyscf.mp.mp2 import get_nocc, get_nmo, get_frozen_mask, _mo_without_core
from pyscf import __config__
# Minimum block size for the blocked loops over the virtual index.
BLKMIN = getattr(__config__, 'cc_ccsd_blkmin', 4)
# Minimum memory budget (MB) assumed available for the vvvv contraction.
MEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)

# Amplitude index conventions used throughout this module:
# t1: ia
# t2: ijab
def kernel(mycc, eris=None, t1=None, t2=None, max_cycle=50, tol=1e-8,
           tolnormt=1e-6, verbose=None):
    '''Iteratively solve the CCSD amplitude equations.

    Args:
        mycc : CCSD object supplying update_amps, energy and DIIS settings.
        eris : integral container; built from mycc.mo_coeff when None.
        t1, t2 : starting amplitudes; mycc.get_init_guess is used when None.
        max_cycle : maximum number of CCSD iterations.
        tol : convergence threshold on the correlation-energy change.
        tolnormt : convergence threshold on the amplitude-change norm.

    Returns:
        (conv, eccsd, t1, t2): convergence flag, correlation energy and the
        final amplitudes.
    '''
    log = logger.new_logger(mycc, verbose)
    if eris is None:
        eris = mycc.ao2mo(mycc.mo_coeff)
    if t1 is None and t2 is None:
        t1, t2 = mycc.get_init_guess(eris)
    elif t2 is None:
        t2 = mycc.get_init_guess(eris)[1]

    cput1 = cput0 = (logger.process_clock(), logger.perf_counter())
    eold = 0
    eccsd = mycc.energy(t1, t2, eris)
    log.info('Init E_corr(CCSD) = %.15g', eccsd)

    # Reuse a caller-provided DIIS object, or build one if DIIS is enabled.
    if isinstance(mycc.diis, lib.diis.DIIS):
        adiis = mycc.diis
    elif mycc.diis:
        adiis = lib.diis.DIIS(mycc, mycc.diis_file, incore=mycc.incore_complete)
        adiis.space = mycc.diis_space
    else:
        adiis = None

    conv = False
    for istep in range(max_cycle):
        t1new, t2new = mycc.update_amps(t1, t2, eris)
        # Norm of the amplitude change; second convergence criterion.
        tmpvec = mycc.amplitudes_to_vector(t1new, t2new)
        tmpvec -= mycc.amplitudes_to_vector(t1, t2)
        normt = numpy.linalg.norm(tmpvec)
        tmpvec = None
        if mycc.iterative_damping < 1.0:
            # Damped update: mix old and new amplitudes to aid convergence.
            alpha = mycc.iterative_damping
            t1new = (1-alpha) * t1 + alpha * t1new
            t2new *= alpha
            t2new += (1-alpha) * t2
        t1, t2 = t1new, t2new
        t1new = t2new = None
        t1, t2 = mycc.run_diis(t1, t2, istep, normt, eccsd-eold, adiis)
        eold, eccsd = eccsd, mycc.energy(t1, t2, eris)
        log.info('cycle = %d E_corr(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g',
                 istep+1, eccsd, eccsd - eold, normt)
        cput1 = log.timer('CCSD iter', *cput1)
        if abs(eccsd-eold) < tol and normt < tolnormt:
            conv = True
            break
    log.timer('CCSD', *cput0)
    return conv, eccsd, t1, t2
def update_amps(mycc, t1, t2, eris):
    '''Compute the updated t1 and t2 amplitudes for one CCSD iteration.

    The virtual index is processed in memory-bounded blocks [p0:p1]; the
    intermediates wVOov/wVooV/woooo follow Hirata-style factorization.
    Returns (t1new, t2new).
    '''
    if mycc.cc2:
        raise NotImplementedError
    assert(isinstance(eris, _ChemistsERIs))

    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    fock = eris.fock
    mo_e_o = eris.mo_energy[:nocc]
    mo_e_v = eris.mo_energy[nocc:] + mycc.level_shift

    t1new = numpy.zeros_like(t1)
    # (vv|vv) contraction; only the i>=j triangle is computed and unpacked.
    t2new = mycc._add_vvvv(t1, t2, eris, t2sym='jiba')
    t2new *= .5  # *.5 because t2+t2.transpose(1,0,3,2) in the end
    time1 = log.timer_debug1('vvvv', *time0)

    #** make_inter_F
    fov = fock[:nocc,nocc:].copy()
    t1new += fov

    # Occupied-occupied and virtual-virtual Fock intermediates with the
    # diagonal orbital energies removed (they enter via the denominators).
    foo = fock[:nocc,:nocc] - numpy.diag(mo_e_o)
    foo += .5 * numpy.einsum('ia,ja->ij', fock[:nocc,nocc:], t1)

    fvv = fock[nocc:,nocc:] - numpy.diag(mo_e_v)
    fvv -= .5 * numpy.einsum('ia,ib->ab', t1, fock[:nocc,nocc:])

    # Scratch space for the large wVOov/wVooV intermediates: in-core when
    # incore_complete, otherwise a temporary HDF5 file.
    if mycc.incore_complete:
        fswap = None
    else:
        fswap = lib.H5TmpFile()
    fwVOov, fwVooV = _add_ovvv_(mycc, t1, t2, eris, fvv, t1new, t2new, fswap)
    time1 = log.timer_debug1('ovvv', *time1)

    woooo = numpy.asarray(eris.oooo).transpose(0,2,1,3).copy()

    unit = nocc**2*nvir*7 + nocc**3 + nocc*nvir**2
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)
    blksize = min(nvir, max(BLKMIN, int((max_memory*.9e6/8-nocc**4)/unit)))
    log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
               max_memory, nocc, nvir, blksize)

    for p0, p1 in lib.prange(0, nvir, blksize):
        wVOov = fwVOov[p0:p1]
        wVooV = fwVooV[p0:p1]
        eris_ovoo = eris.ovoo[:,p0:p1]
        eris_oovv = numpy.empty((nocc,nocc,p1-p0,nvir))
        # I/O is overlapped with computation via background prefetch.
        def load_oovv(p0, p1):
            eris_oovv[:] = eris.oovv[:,:,p0:p1]
        with lib.call_in_background(load_oovv, sync=not mycc.async_io) as prefetch_oovv:
            #:eris_oovv = eris.oovv[:,:,p0:p1]
            prefetch_oovv(p0, p1)
            foo += numpy.einsum('kc,kcji->ij', 2*t1[:,p0:p1], eris_ovoo)
            foo += numpy.einsum('kc,icjk->ij', -t1[:,p0:p1], eris_ovoo)
            tmp = lib.einsum('la,jaik->lkji', t1[:,p0:p1], eris_ovoo)
            woooo += tmp + tmp.transpose(1,0,3,2)
            tmp = None
            wVOov -= lib.einsum('jbik,ka->bjia', eris_ovoo, t1)
            t2new[:,:,p0:p1] += wVOov.transpose(1,2,0,3)
            wVooV += lib.einsum('kbij,ka->bija', eris_ovoo, t1)
            eris_ovoo = None
        load_oovv = prefetch_oovv = None

        eris_ovvo = numpy.empty((nocc,p1-p0,nvir,nocc))
        def load_ovvo(p0, p1):
            eris_ovvo[:] = eris.ovvo[:,p0:p1]
        with lib.call_in_background(load_ovvo, sync=not mycc.async_io) as prefetch_ovvo:
            #:eris_ovvo = eris.ovvo[:,p0:p1]
            prefetch_ovvo(p0, p1)
            t1new[:,p0:p1] -= numpy.einsum('jb,jiab->ia', t1, eris_oovv)
            wVooV -= eris_oovv.transpose(2,0,1,3)
            wVOov += wVooV*.5  #: bjia + bija*.5
        load_ovvo = prefetch_ovvo = None

        t2new[:,:,p0:p1] += (eris_ovvo*0.5).transpose(0,3,1,2)
        eris_voov = eris_ovvo.conj().transpose(1,0,3,2)
        t1new[:,p0:p1] += 2*numpy.einsum('jb,aijb->ia', t1, eris_voov)
        eris_ovvo = None

        tmp = lib.einsum('ic,kjbc->ibkj', t1, eris_oovv)
        tmp += lib.einsum('bjkc,ic->jbki', eris_voov, t1)
        t2new[:,:,p0:p1] -= lib.einsum('ka,jbki->jiba', t1, tmp)
        eris_oovv = tmp = None

        fov[:,p0:p1] += numpy.einsum('kc,aikc->ia', t1, eris_voov) * 2
        fov[:,p0:p1] -= numpy.einsum('kc,akic->ia', t1, eris_voov)

        # tau = t2 + t1*t1; theta = 2*tau - tau(ji) (spin-adapted combination).
        tau = numpy.einsum('ia,jb->ijab', t1[:,p0:p1]*.5, t1)
        tau += t2[:,:,p0:p1]
        theta = tau.transpose(1,0,2,3) * 2
        theta -= tau
        fvv -= lib.einsum('cjia,cjib->ab', theta.transpose(2,1,0,3), eris_voov)
        foo += lib.einsum('aikb,kjab->ij', eris_voov, theta)
        tau = theta = None

        tau = t2[:,:,p0:p1] + numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
        woooo += lib.einsum('ijab,aklb->ijkl', tau, eris_voov)
        tau = None

        def update_wVooV(q0, q1, tau):
            wVooV[:] += lib.einsum('bkic,jkca->bija', eris_voov[:,:,:,q0:q1], tau)
        with lib.call_in_background(update_wVooV, sync=not mycc.async_io) as update_wVooV:
            for q0, q1 in lib.prange(0, nvir, blksize):
                tau = t2[:,:,q0:q1] * .5
                tau += numpy.einsum('ia,jb->ijab', t1[:,q0:q1], t1)
                #:wVooV += lib.einsum('bkic,jkca->bija', eris_voov[:,:,:,q0:q1], tau)
                update_wVooV(q0, q1, tau)
        tau = update_wVooV = None
        def update_t2(q0, q1, tmp):
            t2new[:,:,q0:q1] += tmp.transpose(2,0,1,3)
            tmp *= .5
            t2new[:,:,q0:q1] += tmp.transpose(0,2,1,3)
        with lib.call_in_background(update_t2, sync=not mycc.async_io) as update_t2:
            for q0, q1 in lib.prange(0, nvir, blksize):
                tmp = lib.einsum('jkca,ckib->jaib', t2[:,:,p0:p1,q0:q1], wVooV)
                #:t2new[:,:,q0:q1] += tmp.transpose(2,0,1,3)
                #:tmp *= .5
                #:t2new[:,:,q0:q1] += tmp.transpose(0,2,1,3)
                update_t2(q0, q1, tmp)
                tmp = None

        wVOov += eris_voov
        eris_VOov = -.5 * eris_voov.transpose(0,2,1,3)
        eris_VOov += eris_voov
        eris_voov = None
        def update_wVOov(q0, q1, tau):
            wVOov[:,:,:,q0:q1] += .5 * lib.einsum('aikc,kcjb->aijb', eris_VOov, tau)
        with lib.call_in_background(update_wVOov, sync=not mycc.async_io) as update_wVOov:
            for q0, q1 in lib.prange(0, nvir, blksize):
                tau = t2[:,:,q0:q1].transpose(1,3,0,2) * 2
                tau -= t2[:,:,q0:q1].transpose(0,3,1,2)
                tau -= numpy.einsum('ia,jb->ibja', t1[:,q0:q1]*2, t1)
                #:wVOov[:,:,:,q0:q1] += .5 * lib.einsum('aikc,kcjb->aijb', eris_VOov, tau)
                update_wVOov(q0, q1, tau)
                tau = None
        def update_t2(q0, q1, theta):
            t2new[:,:,q0:q1] += lib.einsum('kica,ckjb->ijab', theta, wVOov)
        with lib.call_in_background(update_t2, sync=not mycc.async_io) as update_t2:
            for q0, q1 in lib.prange(0, nvir, blksize):
                theta = t2[:,:,p0:p1,q0:q1] * 2
                theta -= t2[:,:,p0:p1,q0:q1].transpose(1,0,2,3)
                #:t2new[:,:,q0:q1] += lib.einsum('kica,ckjb->ijab', theta, wVOov)
                update_t2(q0, q1, theta)
                theta = None
            eris_VOov = wVOov = wVooV = update_wVOov = None
        time1 = log.timer_debug1('voov [%d:%d]'%(p0, p1), *time1)
    fwVOov = fwVooV = fswap = None

    for p0, p1 in lib.prange(0, nvir, blksize):
        theta = t2[:,:,p0:p1].transpose(1,0,2,3) * 2 - t2[:,:,p0:p1]
        t1new += numpy.einsum('jb,ijba->ia', fov[:,p0:p1], theta)
        t1new -= lib.einsum('jbki,kjba->ia', eris.ovoo[:,p0:p1], theta)

        tau = numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
        tau += t2[:,:,p0:p1]
        t2new[:,:,p0:p1] += .5 * lib.einsum('ijkl,klab->ijab', woooo, tau)
        theta = tau = None

    ft_ij = foo + numpy.einsum('ja,ia->ij', .5*t1, fov)
    ft_ab = fvv - numpy.einsum('ia,ib->ab', .5*t1, fov)
    t2new += lib.einsum('ijac,bc->ijab', t2, ft_ab)
    t2new -= lib.einsum('ki,kjab->ijab', ft_ij, t2)

    # Divide by the orbital-energy denominators e_i - e_a (and pairwise sums
    # for t2), symmetrizing t2new as t2new + t2new.transpose(1,0,3,2).
    eia = mo_e_o[:,None] - mo_e_v
    t1new += numpy.einsum('ib,ab->ia', t1, fvv)
    t1new -= numpy.einsum('ja,ji->ia', t1, foo)
    t1new /= eia

    #: t2new = t2new + t2new.transpose(1,0,3,2)
    for i in range(nocc):
        if i > 0:
            t2new[i,:i] += t2new[:i,i].transpose(0,2,1)
            t2new[i,:i] /= lib.direct_sum('a,jb->jab', eia[i], eia[:i])
            t2new[:i,i] = t2new[i,:i].transpose(0,2,1)
        t2new[i,i] = t2new[i,i] + t2new[i,i].T
        t2new[i,i] /= lib.direct_sum('a,b->ab', eia[i], eia[i])

    time0 = log.timer_debug1('update t1 t2', *time0)
    return t1new, t2new
def _add_ovvv_(mycc, t1, t2, eris, fvv, t1new, t2new, fswap):
    '''Contract the (ov|vv) integrals, accumulating into fvv, t1new and
    t2new IN PLACE, and build the wVOov / wVooV intermediates.

    The ovvv integrals are streamed in blocks of the first virtual index
    with double-buffered background prefetch.  When fswap is None the
    intermediates are returned as in-core arrays, otherwise as datasets in
    the temporary HDF5 file fswap.

    Returns:
        (wVOov, wVooV) with shape (nvir, nocc, nocc, nvir).
    '''
    time1 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    nvir_pair = nvir * (nvir+1) // 2

    if fswap is None:
        wVOov = numpy.zeros((nvir,nocc,nocc,nvir))
    else:
        wVOov = fswap.create_dataset('wVOov', (nvir,nocc,nocc,nvir), 'f8')
    # wooVV keeps the vv pair in packed-triangular form until the end.
    wooVV = numpy.zeros((nocc,nocc*nvir_pair))

    max_memory = mycc.max_memory - lib.current_memory()[0]
    unit = nocc*nvir**2*3 + nocc**2*nvir + 2
    blksize = min(nvir, max(BLKMIN, int((max_memory*.95e6/8-wooVV.size)/unit)))
    if not mycc.direct:
        unit = nocc*nvir**2*3 + nocc**2*nvir + 2 + nocc*nvir**2 + nocc*nvir
        blksize = min(nvir, max(BLKMIN, int((max_memory*.95e6/8-wooVV.size-nocc**2*nvir)/unit)))
    log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
               max_memory, nocc, nvir, blksize)

    def load_ovvv(buf, p0):
        if p0 < nvir:
            p1 = min(nvir, p0+blksize)
            buf[:p1-p0] = eris.ovvv[:,p0:p1].transpose(1,0,2)

    with lib.call_in_background(load_ovvv, sync=not mycc.async_io) as prefetch:
        # Double buffering: compute on `buf` while the next block streams
        # into `buf_prefetch` in the background.
        buf = numpy.empty((blksize,nocc,nvir_pair))
        buf_prefetch = numpy.empty((blksize,nocc,nvir_pair))
        load_ovvv(buf_prefetch, 0)
        for p0, p1 in lib.prange(0, nvir, blksize):
            buf, buf_prefetch = buf_prefetch, buf
            prefetch(buf_prefetch, p1)
            eris_vovv = buf[:p1-p0]

            #:wooVV -= numpy.einsum('jc,ciba->jiba', t1[:,p0:p1], eris_vovv)
            lib.ddot(numpy.asarray(t1[:,p0:p1], order='C'),
                     eris_vovv.reshape(p1-p0,-1), -1, wooVV, 1)

            eris_vovv = lib.unpack_tril(eris_vovv.reshape((p1-p0)*nocc,nvir_pair))
            eris_vovv = eris_vovv.reshape(p1-p0,nocc,nvir,nvir)

            fvv += 2*numpy.einsum('kc,ckab->ab', t1[:,p0:p1], eris_vovv)
            fvv[:,p0:p1] -= numpy.einsum('kc,bkca->ab', t1, eris_vovv)

            if not mycc.direct:
                # In non-AO-direct mode the ovvv term of the t2 update is
                # evaluated here, one occupied index at a time.
                vvvo = eris_vovv.transpose(0,2,3,1).copy()
                for i in range(nocc):
                    tau = t2[i,:,p0:p1] + numpy.einsum('a,jb->jab', t1[i,p0:p1], t1)
                    tmp = lib.einsum('jcd,cdbk->jbk', tau, vvvo)
                    t2new[i] -= lib.einsum('ka,jbk->jab', t1, tmp)
                    tau = tmp = None

            wVOov[p0:p1] = lib.einsum('biac,jc->bija', eris_vovv, t1)
            theta = t2[:,:,p0:p1].transpose(1,2,0,3) * 2
            theta -= t2[:,:,p0:p1].transpose(0,2,1,3)
            t1new += lib.einsum('icjb,cjba->ia', theta, eris_vovv)
            theta = None
            time1 = log.timer_debug1('vovv [%d:%d]'%(p0, p1), *time1)

    if fswap is None:
        wooVV = lib.unpack_tril(wooVV.reshape(nocc**2,nvir_pair))
        return wVOov, wooVV.reshape(nocc,nocc,nvir,nvir).transpose(2,1,0,3)
    else:
        fswap.create_dataset('wVooV', (nvir,nocc,nocc,nvir), 'f8')
        wooVV = wooVV.reshape(nocc,nocc,nvir_pair)
        tril2sq = lib.square_mat_in_trilu_indices(nvir)
        for p0, p1 in lib.prange(0, nvir, blksize):
            fswap['wVooV'][p0:p1] = wooVV[:,:,tril2sq[p0:p1]].transpose(2,1,0,3)
        return fswap['wVOov'], fswap['wVooV']
def _add_vvvv(mycc, t1, t2, eris, out=None, with_ovvv=None, t2sym=None):
    '''Contract the amplitudes with the (vv|vv) integrals.

    t2sym: whether t2 has the symmetry t2[ijab]==t2[jiba] or
    t2[ijab]==-t2[jiab] or t2[ijab]==-t2[jiba].  When one of these
    symmetries is declared, only the lower-triangular (i>=j) part is
    contracted and the result is unpacked afterwards; otherwise the
    full contraction is performed.
    '''
    #TODO: Guess the symmetry of t2 amplitudes
    if t2sym not in ('jiba', '-jiba', '-jiab'):
        # No usable permutation symmetry: contract the full t2 array.
        return _add_vvvv_full(mycc, t1, t2, eris, out, with_ovvv)
    # Symmetric case: work on the packed i>=j triangle, then expand.
    nocc, nvir = t2.shape[1:3]
    packed = _add_vvvv_tril(mycc, t1, t2, eris, with_ovvv=with_ovvv)
    return _unpack_t2_tril(packed, nocc, nvir, out, t2sym)
def _add_vvvv_tril(mycc, t1, t2, eris, out=None, with_ovvv=None):
    '''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
    Using symmetry t2[ijab] = t2[jiba] and Ht2[ijab] = Ht2[jiba], compute the
    lower triangular part of Ht2 (packed over the i>=j occupied pairs).

    Returns:
        Ht2tril with shape (nocc*(nocc+1)//2, nvir, nvir).
    '''
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    if with_ovvv is None:
        with_ovvv = mycc.direct
    nocc, nvir = t2.shape[1:3]
    nocc2 = nocc*(nocc+1)//2
    # tau = t2 + t1*t1, packed over the lower-triangular occupied pairs.
    if t1 is None:
        tau = t2[numpy.tril_indices(nocc)]
    else:
        tau = numpy.empty((nocc2,nvir,nvir), dtype=t2.dtype)
        p1 = 0
        for i in range(nocc):
            p0, p1 = p1, p1 + i+1
            tau[p0:p1] = numpy.einsum('a,jb->jab', t1[i], t1[:i+1])
            tau[p0:p1] += t2[i,:i+1]

    if mycc.direct:   # AO-direct CCSD
        mo = getattr(eris, 'mo_coeff', None)
        if mo is None:  # If eris does not have the attribute mo_coeff
            mo = _mo_without_core(mycc, mycc.mo_coeff)

        # Back-transform tau to the AO basis, contract with AO integrals,
        # then transform the result back to the MO basis.
        nao, nmo = mo.shape
        aos = numpy.asarray(mo[:,nocc:].T, order='F')
        tau = _ao2mo.nr_e2(tau.reshape(nocc2,nvir**2), aos, (0,nao,0,nao), 's1', 's1')
        tau = tau.reshape(nocc2,nao,nao)
        time0 = log.timer_debug1('vvvv-tau', *time0)

        buf = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)

        buf = buf.reshape(nocc2,nao,nao)
        Ht2tril = _ao2mo.nr_e2(buf, mo.conj(), (nocc,nmo,nocc,nmo), 's1', 's1')
        Ht2tril = Ht2tril.reshape(nocc2,nvir,nvir)

        if with_ovvv:
            # In AO-direct mode the ovvv contribution is recovered from the
            # same AO-basis buffer instead of the (missing) MO ovvv block.
            #: tmp = numpy.einsum('ijcd,ka,kdcb->ijba', tau, t1, eris.ovvv)
            #: t2new -= tmp + tmp.transpose(1,0,3,2)
            tmp = _ao2mo.nr_e2(buf, mo.conj(), (nocc,nmo,0,nocc), 's1', 's1')
            Ht2tril -= lib.ddot(tmp.reshape(nocc2*nvir,nocc), t1).reshape(nocc2,nvir,nvir)
            tmp = _ao2mo.nr_e2(buf, mo.conj(), (0,nocc,nocc,nmo), 's1', 's1')
            #: Ht2tril -= numpy.einsum('xkb,ka->xab', tmp.reshape(-1,nocc,nvir), t1)
            tmp = lib.transpose(tmp.reshape(nocc2,nocc,nvir), axes=(0,2,1), out=buf)
            tmp = lib.ddot(tmp.reshape(nocc2*nvir,nocc), t1, 1,
                           numpy.ndarray((nocc2*nvir,nvir), buffer=tau), 0)
            tmp = lib.transpose(tmp.reshape(nocc2,nvir,nvir), axes=(0,2,1), out=buf)
            Ht2tril -= tmp.reshape(nocc2,nvir,nvir)
    else:
        assert(not with_ovvv)
        Ht2tril = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)
    return Ht2tril
def _add_vvvv_full(mycc, t1, t2, eris, out=None, with_ovvv=False):
    '''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
    without using symmetry t2[ijab] = t2[jiba] in t2 or Ht2

    Returns:
        Ht2 with the same shape as t2.
    '''
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(mycc.stdout, mycc.verbose)
    # tau = t2 + t1*t1 (plain t2 when t1 is not supplied).
    if t1 is None:
        tau = t2
    else:
        tau = numpy.einsum('ia,jb->ijab', t1, t1)
        tau += t2

    if mycc.direct:   # AO-direct CCSD
        if with_ovvv:
            raise NotImplementedError
        mo = getattr(eris, 'mo_coeff', None)
        if mo is None:  # If eris does not have the attribute mo_coeff
            mo = _mo_without_core(mycc, mycc.mo_coeff)
        # Back-transform tau to AO, contract there, transform back to MO.
        nocc, nvir = t2.shape[1:3]
        nao, nmo = mo.shape
        aos = numpy.asarray(mo[:,nocc:].T, order='F')
        tau = _ao2mo.nr_e2(tau.reshape(nocc**2,nvir,nvir), aos, (0,nao,0,nao), 's1', 's1')
        tau = tau.reshape(nocc,nocc,nao,nao)
        time0 = log.timer_debug1('vvvv-tau mo2ao', *time0)

        buf = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)

        buf = buf.reshape(nocc**2,nao,nao)
        Ht2 = _ao2mo.nr_e2(buf, mo.conj(), (nocc,nmo,nocc,nmo), 's1', 's1')
    else:
        assert(not with_ovvv)
        Ht2 = eris._contract_vvvv_t2(mycc, tau, mycc.direct, out, log)
    return Ht2.reshape(t2.shape)
def _contract_vvvv_t2(mycc, mol, vvvv, t2, out=None, verbose=None):
    '''Ht2 = numpy.einsum('ijcd,acbd->ijab', t2, vvvv)

    Dispatch on how vvvv is stored:
      * None (AO-direct) or a 2D packed array -> 4-fold-symmetry kernel
      * full multi-index array -> no-symmetry kernel

    Args:
        vvvv : None or integral object
            if vvvv is None, contract t2 to AO-integrals using AO-direct algorithm
    '''
    if vvvv is not None and len(vvvv.shape) != 2:
        return _contract_s1vvvv_t2(mycc, mol, vvvv, t2, out, verbose)
    # AO-direct, or vvvv stored with 4-fold permutation symmetry.
    return _contract_s4vvvv_t2(mycc, mol, vvvv, t2, out, verbose)
def _contract_s4vvvv_t2(mycc, mol, vvvv, t2, out=None, verbose=None):
    '''Ht2 = numpy.einsum('ijcd,acbd->ijab', t2, vvvv)
    where vvvv has to be real and has the 4-fold permutation symmetry

    Args:
        vvvv : None or integral object
            if vvvv is None, contract t2 to AO-integrals using AO-direct algorithm
    '''
    assert(t2.dtype == numpy.double)
    if t2.size == 0:
        return numpy.zeros_like(t2)
    _dgemm = lib.numpy_helper._dgemm
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.new_logger(mycc, verbose)

    nvira, nvirb = t2.shape[-2:]
    x2 = t2.reshape(-1,nvira,nvirb)
    nocc2 = x2.shape[0]
    nvir2 = nvira * nvirb
    Ht2 = numpy.ndarray(x2.shape, dtype=x2.dtype, buffer=out)
    Ht2[:] = 0

    def contract_blk_(eri, i0, i1, j0, j1):
        # Accumulate one (i-block, j-block) tile of the contraction via BLAS.
        ic = i1 - i0
        jc = j1 - j0
        #:Ht2[:,j0:j1] += numpy.einsum('xef,efab->xab', x2[:,i0:i1], eri)
        _dgemm('N', 'N', nocc2, jc*nvirb, ic*nvirb,
               x2.reshape(-1,nvir2), eri.reshape(-1,jc*nvirb),
               Ht2.reshape(-1,nvir2), 1, 1, i0*nvirb, 0, j0*nvirb)

        if i0 > j0:
            # Off-diagonal tiles are used twice thanks to the permutation
            # symmetry of the integrals.
            #:Ht2[:,i0:i1] += numpy.einsum('xef,abef->xab', x2[:,j0:j1], eri)
            _dgemm('N', 'T', nocc2, ic*nvirb, jc*nvirb,
                   x2.reshape(-1,nvir2), eri.reshape(-1,jc*nvirb),
                   Ht2.reshape(-1,nvir2), 1, 1, j0*nvirb, 0, i0*nvirb)

    max_memory = max(MEMORYMIN, mycc.max_memory - lib.current_memory()[0])
    if vvvv is None:   # AO-direct CCSD
        # Integrals are generated on the fly shell-block by shell-block.
        ao_loc = mol.ao_loc_nr()
        assert(nvira == nvirb == ao_loc[-1])
        intor = mol._add_suffix('int2e')
        ao2mopt = _ao2mo.AO2MOpt(mol, intor, 'CVHFnr_schwarz_cond',
                                 'CVHFsetnr_direct_scf')
        blksize = max(BLKMIN, numpy.sqrt(max_memory*.9e6/8/nvirb**2/2.5))
        blksize = int(min((nvira+3)/4, blksize))
        sh_ranges = ao2mo.outcore.balance_partition(ao_loc, blksize)
        blksize = max(x[2] for x in sh_ranges)
        eribuf = numpy.empty((blksize,blksize,nvirb,nvirb))
        loadbuf = numpy.empty((blksize,blksize,nvirb,nvirb))
        fint = gto.moleintor.getints4c

        for ip, (ish0, ish1, ni) in enumerate(sh_ranges):
            # Off-diagonal shell pairs (jsh < ish).
            for jsh0, jsh1, nj in sh_ranges[:ip]:
                eri = fint(intor, mol._atm, mol._bas, mol._env,
                           shls_slice=(ish0,ish1,jsh0,jsh1), aosym='s2kl',
                           ao_loc=ao_loc, cintopt=ao2mopt._cintopt, out=eribuf)
                i0, i1 = ao_loc[ish0], ao_loc[ish1]
                j0, j1 = ao_loc[jsh0], ao_loc[jsh1]
                tmp = numpy.ndarray((i1-i0,nvirb,j1-j0,nvirb), buffer=loadbuf)
                _ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
                                       eri.ctypes.data_as(ctypes.c_void_p),
                                       (ctypes.c_int*4)(i0, i1, j0, j1),
                                       ctypes.c_int(nvirb))
                contract_blk_(tmp, i0, i1, j0, j1)
                time0 = log.timer_debug1('AO-vvvv [%d:%d,%d:%d]' %
                                         (ish0,ish1,jsh0,jsh1), *time0)

            # Diagonal shell pair (jsh == ish), packed with s4 symmetry.
            eri = fint(intor, mol._atm, mol._bas, mol._env,
                       shls_slice=(ish0,ish1,ish0,ish1), aosym='s4',
                       ao_loc=ao_loc, cintopt=ao2mopt._cintopt, out=eribuf)
            i0, i1 = ao_loc[ish0], ao_loc[ish1]
            eri = lib.unpack_tril(eri, axis=0)
            tmp = numpy.ndarray((i1-i0,nvirb,i1-i0,nvirb), buffer=loadbuf)
            _ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
                                   eri.ctypes.data_as(ctypes.c_void_p),
                                   (ctypes.c_int*4)(i0, i1, i0, i1),
                                   ctypes.c_int(nvirb))
            eri = None
            contract_blk_(tmp, i0, i1, i0, i1)
            time0 = log.timer_debug1('AO-vvvv [%d:%d,%d:%d]' %
                                     (ish0,ish1,ish0,ish1), *time0)
    else:
        # Stored integrals, packed over the triangular (a>=c) pair index.
        nvir_pair = nvirb * (nvirb+1) // 2
        unit = nvira*nvir_pair*2 + nvirb**2*nvira/4 + 1
        if mycc.async_io:
            fmap = lib.map_with_prefetch
            unit += nvira*nvir_pair
        else:
            fmap = map
        blksize = numpy.sqrt(max(BLKMIN**2, max_memory*.95e6/8/unit))
        blksize = int(min((nvira+3)/4, blksize))

        def load(v_slice):
            i0, i1 = v_slice
            off0 = i0*(i0+1)//2
            off1 = i1*(i1+1)//2
            return numpy.asarray(vvvv[off0:off1], order='C')

        tril2sq = lib.square_mat_in_trilu_indices(nvira)
        loadbuf = numpy.empty((blksize,blksize,nvirb,nvirb))
        slices = [(i0, i1) for i0, i1 in lib.prange(0, nvira, blksize)]
        for istep, wwbuf in enumerate(fmap(load, lib.prange(0, nvira, blksize))):
            i0, i1 = slices[istep]
            off0 = i0*(i0+1)//2
            for j0, j1 in lib.prange(0, i1, blksize):
                eri = wwbuf[tril2sq[i0:i1,j0:j1]-off0]
                tmp = numpy.ndarray((i1-i0,nvirb,j1-j0,nvirb), buffer=loadbuf)
                _ccsd.libcc.CCload_eri(tmp.ctypes.data_as(ctypes.c_void_p),
                                       eri.ctypes.data_as(ctypes.c_void_p),
                                       (ctypes.c_int*4)(i0, i1, j0, j1),
                                       ctypes.c_int(nvirb))
                contract_blk_(tmp, i0, i1, j0, j1)
            wwbuf = None
            time0 = log.timer_debug1('vvvv [%d:%d]'%(i0,i1), *time0)
    return Ht2.reshape(t2.shape)
def _contract_s1vvvv_t2(mycc, mol, vvvv, t2, out=None, verbose=None):
    '''Ht2 = numpy.einsum('ijcd,acdb->ijab', t2, vvvv)
    where vvvv can be real or complex and no permutation symmetry is available in vvvv.

    Args:
        vvvv : None or integral object
            if vvvv is None, contract t2 to AO-integrals using AO-direct algorithm
    '''
    # vvvv == None means AO-direct CCSD. It should redirect to
    # _contract_s4vvvv_t2(mycc, mol, vvvv, t2, out, verbose)
    assert(vvvv is not None)

    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.new_logger(mycc, verbose)

    nvira, nvirb = t2.shape[-2:]
    x2 = t2.reshape(-1,nvira,nvirb)
    nocc2 = x2.shape[0]
    dtype = numpy.result_type(t2, vvvv)
    Ht2 = numpy.ndarray(x2.shape, dtype=dtype, buffer=out)

    max_memory = mycc.max_memory - lib.current_memory()[0]
    unit = nvirb**2*nvira*2 + nocc2*nvirb + 1
    blksize = min(nvira, max(BLKMIN, int(max_memory*1e6/8/unit)))

    # Block over the first virtual index to bound the working-set size.
    for p0,p1 in lib.prange(0, nvira, blksize):
        Ht2[:,p0:p1] = lib.einsum('xcd,acbd->xab', x2, vvvv[p0:p1])
        time0 = log.timer_debug1('vvvv [%d:%d]' % (p0,p1), *time0)
    return Ht2.reshape(t2.shape)
def _unpack_t2_tril(t2tril, nocc, nvir, out=None, t2sym='jiba'):
t2 = numpy.ndarray((nocc,nocc,nvir,nvir), dtype=t2tril.dtype, buffer=out)
idx,idy = numpy.tril_indices(nocc)
if t2sym == 'jiba':
t2[idy,idx] = t2tril.transpose(0,2,1)
t2[idx,idy] = t2tril
elif t2sym == '-jiba':
t2[idy,idx] = -t2tril.transpose(0,2,1)
t2[idx,idy] = t2tril
elif t2sym == '-jiab':
t2[idy,idx] =-t2tril
t2[idx,idy] = t2tril
t2[numpy.diag_indices(nocc)] = 0
return t2
def _unpack_4fold(c2vec, nocc, nvir, anti_symm=True):
    '''Expand the packed unique elements (i>j, a>b) to the full t2 array,
    restoring the 4-fold permutation symmetry.  With anti_symm=True the
    exchanged-index images carry a minus sign; diagonal (i==j or a==b)
    elements remain zero.
    '''
    t2 = numpy.zeros((nocc**2,nvir**2), dtype=c2vec.dtype)
    if nocc > 1 and nvir > 1:
        t2tril = c2vec.reshape(nocc*(nocc-1)//2,nvir*(nvir-1)//2)
        otril = numpy.tril_indices(nocc, k=-1)
        vtril = numpy.tril_indices(nvir, k=-1)
        # Scatter the unique elements and their (ij)/(ab) exchange images.
        lib.takebak_2d(t2, t2tril, otril[0]*nocc+otril[1], vtril[0]*nvir+vtril[1])
        lib.takebak_2d(t2, t2tril, otril[1]*nocc+otril[0], vtril[1]*nvir+vtril[0])
        if anti_symm:  # anti-symmetry when exchanging two particle indices
            t2tril = -t2tril
        lib.takebak_2d(t2, t2tril, otril[0]*nocc+otril[1], vtril[1]*nvir+vtril[0])
        lib.takebak_2d(t2, t2tril, otril[1]*nocc+otril[0], vtril[0]*nvir+vtril[1])
    return t2.reshape(nocc,nocc,nvir,nvir)
def amplitudes_to_vector(t1, t2, out=None):
    '''Pack (t1, t2) into a single 1D vector.  t2 is stored as the lower
    triangle of the (ia, jb) matrix, exploiting t2[ia,jb] == t2[jb,ia].'''
    nocc, nvir = t1.shape
    nov = nocc * nvir
    size = nov + nov*(nov+1)//2
    vector = numpy.ndarray(size, t1.dtype, buffer=out)
    vector[:nov] = t1.ravel()
    lib.pack_tril(t2.transpose(0,2,1,3).reshape(nov,nov), out=vector[nov:])
    return vector
def vector_to_amplitudes(vector, nmo, nocc):
    '''Inverse of amplitudes_to_vector: rebuild (t1, t2) from the packed
    amplitude vector.'''
    nvir = nmo - nocc
    nov = nocc * nvir
    t1 = vector[:nov].copy().reshape((nocc,nvir))
    # filltriu=lib.SYMMETRIC because t2[iajb] == t2[jbia]
    t2 = lib.unpack_tril(vector[nov:], filltriu=lib.SYMMETRIC)
    t2 = t2.reshape(nocc,nvir,nocc,nvir).transpose(0,2,1,3)
    return t1, numpy.asarray(t2, order='C')
def amplitudes_to_vector_s4(t1, t2, out=None):
    '''Pack (t1, t2) keeping only the unique t2 elements (i>j, a>b) of a
    4-fold antisymmetric amplitude array.'''
    nocc, nvir = t1.shape
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    vector = numpy.ndarray(size, t1.dtype, buffer=out)
    vector[:nov] = t1.ravel()
    otril = numpy.tril_indices(nocc, k=-1)
    vtril = numpy.tril_indices(nvir, k=-1)
    lib.take_2d(t2.reshape(nocc**2,nvir**2), otril[0]*nocc+otril[1],
                vtril[0]*nvir+vtril[1], out=vector[nov:])
    return vector
def vector_to_amplitudes_s4(vector, nmo, nocc):
    '''Inverse of amplitudes_to_vector_s4: rebuild (t1, t2) from the packed
    4-fold antisymmetric amplitude vector.

    Args:
        vector : 1D array holding t1 followed by the unique t2 elements.
        nmo : number of molecular orbitals.
        nocc : number of occupied orbitals.

    Returns:
        t1 : (nocc, nvir) array.
        t2 : (nocc, nocc, nvir, nvir) array with full antisymmetry restored.
    '''
    nvir = nmo - nocc
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    t1 = vector[:nov].copy().reshape(nocc,nvir)
    # NOTE: the previous version allocated a zeros array here that was
    # immediately discarded; _unpack_4fold builds the full t2 itself.
    t2 = _unpack_4fold(vector[nov:size], nocc, nvir)
    return t1, t2
def energy(mycc, t1=None, t2=None, eris=None):
    '''CCSD correlation energy

    E_corr = 2 * sum_ia f_ia t1_ia
           + sum [2*(ia|bj) - (ja|bi)] * tau_ijab,  tau = t2 + t1*t1.
    The virtual index is processed in blocks to bound memory use.
    '''
    if t1 is None: t1 = mycc.t1
    if t2 is None: t2 = mycc.t2
    if eris is None: eris = mycc.ao2mo()
    nocc, nvir = t1.shape
    fock = eris.fock
    e = numpy.einsum('ia,ia', fock[:nocc,nocc:], t1) * 2
    max_memory = mycc.max_memory - lib.current_memory()[0]
    blksize = int(min(nvir, max(BLKMIN, max_memory*.3e6/8/(nocc**2*nvir+1))))
    for p0, p1 in lib.prange(0, nvir, blksize):
        eris_ovvo = eris.ovvo[:,p0:p1]
        tau = t2[:,:,p0:p1] + numpy.einsum('ia,jb->ijab', t1[:,p0:p1], t1)
        e += 2 * numpy.einsum('ijab,iabj', tau, eris_ovvo)
        e -= numpy.einsum('jiab,iabj', tau, eris_ovvo)
    # A sizeable imaginary component signals inconsistent complex inputs.
    if abs(e.imag) > 1e-4:
        logger.warn(mycc, 'Non-zero imaginary part found in CCSD energy %s', e)
    return e.real
def restore_from_diis_(mycc, diis_file, inplace=True):
    '''Reuse an existed DIIS object in the CCSD calculation.

    The CCSD amplitudes will be restored from the DIIS object to generate t1
    and t2 amplitudes. The t1/t2 amplitudes of the CCSD object will be
    overwritten by the generated t1 and t2 amplitudes. The amplitudes vector
    and error vector will be reused in the CCSD calculation.
    '''
    adiis = lib.diis.DIIS(mycc, mycc.diis_file, incore=mycc.incore_complete)
    adiis.restore(diis_file, inplace=inplace)

    # Extrapolate the stored vectors to recover the current amplitudes.
    ccvec = adiis.extrapolate()
    mycc.t1, mycc.t2 = mycc.vector_to_amplitudes(ccvec)
    if inplace:
        mycc.diis = adiis
    return mycc
def get_t1_diagnostic(t1):
    '''T1 diagnostic: the t1 amplitude norm normalized by the number of
    correlated electrons (two per occupied orbital, closed shell).'''
    n_correlated_electrons = 2 * t1.shape[0]
    t1_norm_sq = numpy.linalg.norm(t1) ** 2
    return numpy.sqrt(t1_norm_sq / n_correlated_electrons)
def get_d1_diagnostic(t1):
    '''D1 diagnostic given in
    Janssen, et. al Chem. Phys. Lett. 290 (1998) 423

    Square root of the largest eigenvalue magnitude of t1.t1^T (occ-occ)
    and of t1^T.t1 (vir-vir); the larger of the two is returned.
    '''
    def _largest_sqrt_eig(mat):
        eigvals = numpy.linalg.eigh(mat)[0]
        return numpy.sqrt(numpy.abs(eigvals).max())

    d1norm_occ = _largest_sqrt_eig(numpy.einsum('ia,ja->ij', t1, t1))
    d1norm_vir = _largest_sqrt_eig(numpy.einsum('ia,ib->ab', t1, t1))
    return max(d1norm_occ, d1norm_vir)
def get_d2_diagnostic(t2):
    '''D2 diagnostic given in
    Nielsen, et. al Chem. Phys. Lett. 310 (1999) 568

    Note: This is currently only defined in the literature for restricted
    closed-shell systems.
    '''
    def _largest_sqrt_eig(mat):
        eigvals = numpy.linalg.eigh(mat)[0]
        return numpy.sqrt(numpy.abs(eigvals).max())

    d2norm_occ = _largest_sqrt_eig(numpy.einsum('ikab,jkab->ij', t2, t2))
    d2norm_vir = _largest_sqrt_eig(numpy.einsum('ijac,ijbc->ab', t2, t2))
    return max(d2norm_occ, d2norm_vir)
def as_scanner(cc):
    '''Generating a scanner/solver for CCSD PES.

    The returned solver is a function. This function requires one argument
    "mol" as input and returns total CCSD energy.

    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation.  All parameters assigned in the
    CCSD and the underlying SCF objects (conv_tol, max_memory etc) are
    automatically applied in the solver.

    Note scanner has side effects.  It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples::

        >>> from pyscf import gto, scf, cc
        >>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
        >>> cc_scanner = cc.CCSD(scf.RHF(mol)).as_scanner()
        >>> e_tot = cc_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
        >>> e_tot = cc_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
    '''
    if isinstance(cc, lib.SinglePointScanner):
        return cc

    logger.info(cc, 'Set %s as a scanner', cc.__class__)

    class CCSD_Scanner(cc.__class__, lib.SinglePointScanner):
        def __init__(self, cc):
            self.__dict__.update(cc.__dict__)
            self._scf = cc._scf.as_scanner()
        def __call__(self, mol_or_geom, **kwargs):
            # Accept either a Mole object or a raw geometry specification.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)

            # Remember the amplitude-vector size so stale amplitudes can be
            # discarded if the orbital space changes with the new geometry.
            if self.t2 is not None:
                last_size = self.vector_size()
            else:
                last_size = 0

            self.reset(mol)

            # Converge the underlying SCF first; its orbitals seed the CCSD.
            mf_scanner = self._scf
            mf_scanner(mol)
            self.mo_coeff = mf_scanner.mo_coeff
            self.mo_occ = mf_scanner.mo_occ
            if last_size != self.vector_size():
                self.t1 = self.t2 = None
            self.kernel(self.t1, self.t2, **kwargs)
            return self.e_tot
    return CCSD_Scanner(cc)
class CCSD(lib.StreamObject):
'''restricted CCSD
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
conv_tol : float
converge threshold. Default is 1e-7.
conv_tol_normt : float
converge threshold for norm(t1,t2). Default is 1e-5.
max_cycle : int
max number of iterations. Default is 50.
diis_space : int
DIIS space size. Default is 6.
diis_start_cycle : int
The step to start DIIS. Default is 0.
iterative_damping : float
The self consistent damping parameter.
direct : bool
AO-direct CCSD. Default is False.
async_io : bool
Allow for asynchronous function execution. Default is True.
incore_complete : bool
Avoid all I/O (also for DIIS). Default is False.
level_shift : float
A shift on virtual orbital energies to stablize the CCSD iteration
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CC
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CC calculation.
>>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
>>> mf = scf.RHF(mol).run()
>>> # freeze 2 core orbitals
>>> mycc = cc.CCSD(mf).set(frozen = 2).run()
>>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
>>> mycc.set(frozen = [0,1,16,17,18]).run()
Saved results
converged : bool
CCSD converged or not
e_corr : float
CCSD correlation correction
e_tot : float
Total CCSD energy (HF + correlation)
t1, t2 :
T amplitudes t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)
l1, l2 :
Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)
'''
max_cycle = getattr(__config__, 'cc_ccsd_CCSD_max_cycle', 50)
conv_tol = getattr(__config__, 'cc_ccsd_CCSD_conv_tol', 1e-7)
iterative_damping = getattr(__config__, 'cc_ccsd_CCSD_iterative_damping', 1.0)
conv_tol_normt = getattr(__config__, 'cc_ccsd_CCSD_conv_tol_normt', 1e-5)
diis = getattr(__config__, 'cc_ccsd_CCSD_diis', True)
diis_space = getattr(__config__, 'cc_ccsd_CCSD_diis_space', 6)
diis_file = None
diis_start_cycle = getattr(__config__, 'cc_ccsd_CCSD_diis_start_cycle', 0)
# FIXME: Should we avoid DIIS starting early?
diis_start_energy_diff = getattr(__config__, 'cc_ccsd_CCSD_diis_start_energy_diff', 1e9)
direct = getattr(__config__, 'cc_ccsd_CCSD_direct', False)
async_io = getattr(__config__, 'cc_ccsd_CCSD_async_io', True)
incore_complete = getattr(__config__, 'cc_ccsd_CCSD_incore_complete', False)
cc2 = getattr(__config__, 'cc_ccsd_CCSD_cc2', False)
    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        """Initialize the CCSD solver from a converged mean-field object.

        Args:
            mf: An SCF (HF) object.  Bare Mole and Kohn-Sham DFT objects
                are rejected with an explanatory error.
            frozen: Number of inner-most orbitals (int) or explicit orbital
                indices (list) to freeze; None freezes nothing.
            mo_coeff: MO coefficients; defaults to ``mf.mo_coeff``.
            mo_occ: MO occupations; defaults to ``mf.mo_occ``.
        """
        if isinstance(mf, gto.Mole):
            raise RuntimeError('''
You see this error message because of the API updates in pyscf v0.10.
In the new API, the first argument of CC class is HF objects. Please see
http://sunqm.net/pyscf/code-rule.html#api-rules for the details of API conventions''')
        from pyscf.scf import hf
        if isinstance(mf, hf.KohnShamDFT):
            raise RuntimeError('CCSD Warning: The first argument mf is a DFT object. '
                               'CCSD calculation should be initialized with HF object.\n'
                               'DFT object can be converted to HF object with '
                               'the code:\n'
                               ' mf_hf = mol.HF()\n'
                               ' if getattr(mf_dft, "with_x2c", False):\n'
                               ' mf_hf = mf_hf.x2c()\n'
                               ' mf_hf.__dict__.update(mf_dft.__dict__)\n')
        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ
        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        self.frozen = frozen
        # incore_anyway on the molecule forces the in-core code path
        self.incore_complete = self.incore_complete or self.mol.incore_anyway
        self.level_shift = 0
        ##################################################
        # don't modify the following attributes, they are not input options
        self.mo_coeff = mo_coeff
        self.mo_occ = mo_occ
        self.converged = False
        self.converged_lambda = False
        self.emp2 = None
        self.e_hf = None
        self.e_corr = None
        self.t1 = None
        self.t2 = None
        self.l1 = None
        self.l2 = None
        self._nocc = None
        self._nmo = None
        self.chkfile = mf.chkfile
        # _keys records the sanctioned attribute names for check_sanity()
        keys = set(('max_cycle', 'conv_tol', 'iterative_damping',
                    'conv_tol_normt', 'diis', 'diis_space', 'diis_file',
                    'diis_start_cycle', 'diis_start_energy_diff', 'direct',
                    'async_io', 'incore_complete', 'cc2'))
        self._keys = set(self.__dict__.keys()).union(keys)
    @property
    def ecc(self):
        # Backward-compatible alias for the correlation energy ``e_corr``.
        return self.e_corr

    @property
    def e_tot(self):
        # Total energy = reference (HF) energy + CCSD correlation energy.
        # NOTE(review): ``or`` falls back to the SCF energy when e_hf is
        # None, but would also trigger on an exact 0.0 e_hf — confirm that
        # a zero reference energy is impossible here.
        return (self.e_hf or self._scf.e_tot) + self.e_corr
    @property
    def nocc(self):
        # Number of (active) occupied orbitals; computed lazily by get_nocc
        # unless explicitly overridden through the setter below.
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n

    @property
    def nmo(self):
        # Number of (active) molecular orbitals; computed lazily by get_nmo
        # unless explicitly overridden through the setter below.
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n
    def reset(self, mol=None):
        '''Reset the underlying SCF object (and optionally attach a new
        molecule), returning self so calls can be chained.'''
        if mol is not None:
            self.mol = mol
        # Propagate the reset to the mean-field object as well
        self._scf.reset(mol)
        return self
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
    def dump_flags(self, verbose=None):
        '''Log the current CCSD control parameters at the given verbosity.'''
        log = logger.new_logger(self, verbose)
        log.info('')
        log.info('******** %s ********', self.__class__)
        log.info('CC2 = %g', self.cc2)
        log.info('CCSD nocc = %s, nmo = %s', self.nocc, self.nmo)
        if self.frozen is not None:
            log.info('frozen orbitals %s', self.frozen)
        log.info('max_cycle = %d', self.max_cycle)
        log.info('direct = %d', self.direct)
        log.info('conv_tol = %g', self.conv_tol)
        log.info('conv_tol_normt = %s', self.conv_tol_normt)
        log.info('diis_space = %d', self.diis_space)
        #log.info('diis_file = %s', self.diis_file)
        log.info('diis_start_cycle = %d', self.diis_start_cycle)
        log.info('diis_start_energy_diff = %g', self.diis_start_energy_diff)
        log.info('max_memory %d MB (current use %d MB)',
                 self.max_memory, lib.current_memory()[0])
        # FLOP estimate is only meaningful for the plain CCSD implementation,
        # hence the exact class comparison instead of isinstance().
        if (log.verbose >= logger.DEBUG1 and
            self.__class__ == CCSD):
            nocc = self.nocc
            nvir = self.nmo - self.nocc
            flops = _flops(nocc, nvir)
            log.debug1('total FLOPs %s', flops)
        return self
def get_init_guess(self, eris=None):
return self.init_amps(eris)[1:]
    def init_amps(self, eris=None):
        '''Compute the MP2 initial guess for the CCSD amplitudes.

        Returns:
            (emp2, t1, t2): MP2 correlation energy and initial amplitudes.
        '''
        time0 = logger.process_clock(), logger.perf_counter()
        if eris is None:
            eris = self.ao2mo(self.mo_coeff)
        mo_e = eris.mo_energy
        nocc = self.nocc
        nvir = mo_e.size - nocc
        # Orbital-energy denominators e_i - e_a (negative for a gapped system)
        eia = mo_e[:nocc,None] - mo_e[None,nocc:]
        t1 = eris.fock[:nocc,nocc:] / eia
        t2 = numpy.empty((nocc,nocc,nvir,nvir), dtype=eris.ovov.dtype)
        # Process the virtual index in blocks to bound peak memory usage
        max_memory = self.max_memory - lib.current_memory()[0]
        blksize = int(min(nvir, max(BLKMIN, max_memory*.3e6/8/(nocc**2*nvir+1))))
        emp2 = 0
        for p0, p1 in lib.prange(0, nvir, blksize):
            eris_ovov = eris.ovov[:,p0:p1]
            t2[:,:,p0:p1] = (eris_ovov.transpose(0,2,1,3).conj()
                             / lib.direct_sum('ia,jb->ijab', eia[:,p0:p1], eia))
            # MP2 energy: 2*<ij|ab>t2 (Coulomb) - <ji|ab>t2 (exchange)
            emp2 += 2 * numpy.einsum('ijab,iajb', t2[:,:,p0:p1], eris_ovov)
            emp2 -= numpy.einsum('jiab,iajb', t2[:,:,p0:p1], eris_ovov)
        self.emp2 = emp2.real
        e_hf = self.e_hf or eris.e_hf
        logger.info(self, 'Init t2, MP2 energy = %.15g E_corr(MP2) %.15g',
                    e_hf + self.emp2, self.emp2)
        logger.timer(self, 'init mp2', *time0)
        return self.emp2, t1, t2
energy = energy
_add_vvvv = _add_vvvv
update_amps = update_amps
def kernel(self, t1=None, t2=None, eris=None):
return self.ccsd(t1, t2, eris)
    def ccsd(self, t1=None, t2=None, eris=None):
        '''Main CCSD driver: iterate the amplitude equations to convergence.

        Returns:
            (e_corr, t1, t2): correlation energy and converged amplitudes.
        '''
        # NOTE(review): ``assert`` disappears under ``python -O``; these are
        # sanity checks, not user-input validation.
        assert(self.mo_coeff is not None)
        assert(self.mo_occ is not None)
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()
        if eris is None:
            eris = self.ao2mo(self.mo_coeff)
        # Prefer the reference energy recomputed during the integral
        # transformation (the SCF may not be fully converged).
        self.e_hf = getattr(eris, 'e_hf', None)
        if self.e_hf is None:
            self.e_hf = self._scf.e_tot
        self.converged, self.e_corr, self.t1, self.t2 = \
                kernel(self, eris, t1, t2, max_cycle=self.max_cycle,
                       tol=self.conv_tol, tolnormt=self.conv_tol_normt,
                       verbose=self.verbose)
        self._finalize()
        return self.e_corr, self.t1, self.t2
    def _finalize(self):
        '''Hook for dumping results and clearing up the object.'''
        if self.converged:
            logger.info(self, '%s converged', self.__class__.__name__)
        else:
            logger.note(self, '%s not converged', self.__class__.__name__)
        logger.note(self, 'E(%s) = %.16g E_corr = %.16g',
                    self.__class__.__name__, self.e_tot, self.e_corr)
        return self
as_scanner = as_scanner
restore_from_diis_ = restore_from_diis_
    def solve_lambda(self, t1=None, t2=None, l1=None, l2=None,
                     eris=None):
        '''Solve the CCSD Lambda equations for the (l1, l2) amplitudes
        (needed e.g. for density matrices).  Results are cached on self.'''
        from pyscf.cc import ccsd_lambda
        if t1 is None: t1 = self.t1
        if t2 is None: t2 = self.t2
        if eris is None: eris = self.ao2mo(self.mo_coeff)
        self.converged_lambda, self.l1, self.l2 = \
                ccsd_lambda.kernel(self, eris, t1, t2, l1, l2,
                                   max_cycle=self.max_cycle,
                                   tol=self.conv_tol_normt,
                                   verbose=self.verbose)
        return self.l1, self.l2
def ccsd_t(self, t1=None, t2=None, eris=None):
from pyscf.cc import ccsd_t
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if eris is None: eris = self.ao2mo(self.mo_coeff)
return ccsd_t.kernel(self, eris, t1, t2, self.verbose)
def ipccsd(self, nroots=1, left=False, koopmans=False, guess=None,
partition=None, eris=None):
from pyscf.cc import eom_rccsd
return eom_rccsd.EOMIP(self).kernel(nroots, left, koopmans, guess,
partition, eris)
def eaccsd(self, nroots=1, left=False, koopmans=False, guess=None,
partition=None, eris=None):
from pyscf.cc import eom_rccsd
return eom_rccsd.EOMEA(self).kernel(nroots, left, koopmans, guess,
partition, eris)
def eeccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_rccsd
return eom_rccsd.EOMEE(self).kernel(nroots, koopmans, guess, eris)
def eomee_ccsd_singlet(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_rccsd
return eom_rccsd.EOMEESinglet(self).kernel(nroots, koopmans, guess, eris)
def eomee_ccsd_triplet(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_rccsd
return eom_rccsd.EOMEETriplet(self).kernel(nroots, koopmans, guess, eris)
def eomsf_ccsd(self, nroots=1, koopmans=False, guess=None, eris=None):
from pyscf.cc import eom_rccsd
return eom_rccsd.EOMEESpinFlip(self).kernel(nroots, koopmans, guess, eris)
    def eomip_method(self):
        # Factory returning the EOM-IP solver object (without running it).
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMIP(self)
    def eomea_method(self):
        # Factory returning the EOM-EA solver object (without running it).
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEA(self)
    def eomee_method(self):
        # Factory returning the EOM-EE solver object (without running it).
        from pyscf.cc import eom_rccsd
        return eom_rccsd.EOMEE(self)
def make_rdm1(self, t1=None, t2=None, l1=None, l2=None, ao_repr=False):
'''Un-relaxed 1-particle density matrix in MO space'''
from pyscf.cc import ccsd_rdm
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if l1 is None: l1 = self.l1
if l2 is None: l2 = self.l2
if l1 is None: l1, l2 = self.solve_lambda(t1, t2)
return ccsd_rdm.make_rdm1(self, t1, t2, l1, l2, ao_repr=ao_repr)
def make_rdm2(self, t1=None, t2=None, l1=None, l2=None, ao_repr=False):
'''2-particle density matrix in MO space. The density matrix is
stored as
dm2[p,r,q,s] = <p^+ q^+ s r>
'''
from pyscf.cc import ccsd_rdm
if t1 is None: t1 = self.t1
if t2 is None: t2 = self.t2
if l1 is None: l1 = self.l1
if l2 is None: l2 = self.l2
if l1 is None: l1, l2 = self.solve_lambda(t1, t2)
return ccsd_rdm.make_rdm2(self, t1, t2, l1, l2, ao_repr=ao_repr)
    def ao2mo(self, mo_coeff=None):
        '''Transform AO integrals to the MO basis and pack them into a
        _ChemistsERIs container, choosing the in-core, DF, or out-of-core
        implementation based on available memory and the SCF object.'''
        # Pseudo code how eris are implemented:
        # nocc = self.nocc
        # nmo = self.nmo
        # nvir = nmo - nocc
        # eris = _ChemistsERIs()
        # eri = ao2mo.incore.full(self._scf._eri, mo_coeff)
        # eri = ao2mo.restore(1, eri, nmo)
        # eris.oooo = eri[:nocc,:nocc,:nocc,:nocc].copy()
        # eris.ovoo = eri[:nocc,nocc:,:nocc,:nocc].copy()
        # eris.ovvo = eri[nocc:,:nocc,nocc:,:nocc].copy()
        # eris.ovov = eri[nocc:,:nocc,:nocc,nocc:].copy()
        # eris.oovv = eri[:nocc,:nocc,nocc:,nocc:].copy()
        # ovvv = eri[:nocc,nocc:,nocc:,nocc:].copy()
        # eris.ovvv = lib.pack_tril(ovvv.reshape(-1,nvir,nvir))
        # eris.vvvv = ao2mo.restore(4, eri[nocc:,nocc:,nocc:,nocc:], nvir)
        # eris.fock = numpy.diag(self._scf.mo_energy)
        # return eris
        nmo = self.nmo
        nao = self.mo_coeff.shape[0]
        nmo_pair = nmo * (nmo+1) // 2
        nao_pair = nao * (nao+1) // 2
        # Rough in-core memory requirement (MB) for the transformation
        mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6
        mem_now = lib.current_memory()[0]
        if (self._scf._eri is not None and
            (mem_incore+mem_now < self.max_memory or self.incore_complete)):
            return _make_eris_incore(self, mo_coeff)
        elif getattr(self._scf, 'with_df', None):
            logger.warn(self, 'CCSD detected DF being used in the HF object. '
                        'MO integrals are computed based on the DF 3-index tensors.\n'
                        'It\'s recommended to use dfccsd.CCSD for the '
                        'DF-CCSD calculations')
            return _make_df_eris_outcore(self, mo_coeff)
        else:
            return _make_eris_outcore(self, mo_coeff)
    def run_diis(self, t1, t2, istep, normt, de, adiis):
        '''Extrapolate the (t1, t2) amplitudes with DIIS once the start
        conditions (cycle number and energy change) are met; otherwise
        return the amplitudes unchanged.'''
        if (adiis and
            istep >= self.diis_start_cycle and
            abs(de) < self.diis_start_energy_diff):
            vec = self.amplitudes_to_vector(t1, t2)
            t1, t2 = self.vector_to_amplitudes(adiis.update(vec))
            logger.debug1(self, 'DIIS for step %d', istep)
        return t1, t2
    def amplitudes_to_vector(self, t1, t2, out=None):
        # Pack (t1, t2) into one 1D array (delegates to the module helper).
        return amplitudes_to_vector(t1, t2, out)
def vector_to_amplitudes(self, vec, nmo=None, nocc=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
return vector_to_amplitudes(vec, nmo, nocc)
def vector_size(self, nmo=None, nocc=None):
if nocc is None: nocc = self.nocc
if nmo is None: nmo = self.nmo
nvir = nmo - nocc
nov = nocc * nvir
return nov + nov*(nov+1)//2
def dump_chk(self, t1_t2=None, frozen=None, mo_coeff=None, mo_occ=None):
if not self.chkfile:
return self
if t1_t2 is None: t1_t2 = self.t1, self.t2
t1, t2 = t1_t2
if frozen is None: frozen = self.frozen
# "None" cannot be serialized by the chkfile module
if frozen is None:
frozen = 0
cc_chk = {'e_corr': self.e_corr,
't1': t1,
't2': t2,
'frozen': frozen}
if mo_coeff is not None: cc_chk['mo_coeff'] = mo_coeff
if mo_occ is not None: cc_chk['mo_occ'] = mo_occ
if self._nmo is not None: cc_chk['_nmo'] = self._nmo
if self._nocc is not None: cc_chk['_nocc'] = self._nocc
lib.chkfile.save(self.chkfile, 'ccsd', cc_chk)
    def density_fit(self, auxbasis=None, with_df=None):
        '''Return a density-fitted CCSD object (dfccsd.RCCSD) built from the
        same SCF reference, frozen orbitals and MOs.'''
        from pyscf.cc import dfccsd
        mycc = dfccsd.RCCSD(self._scf, self.frozen, self.mo_coeff, self.mo_occ)
        if with_df is not None:
            mycc.with_df = with_df
        # NOTE(review): when auxbasis is None and the existing with_df has a
        # non-None auxbasis, this branch resets it to None — confirm that is
        # the intended "use the default auxbasis" behavior.
        if mycc.with_df.auxbasis != auxbasis:
            import copy
            # Copy before mutating so the caller's (or the SCF's) with_df
            # object is not modified in place.
            mycc.with_df = copy.copy(mycc.with_df)
            mycc.with_df.auxbasis = auxbasis
        return mycc
    def nuc_grad_method(self):
        '''Return the analytical nuclear-gradient solver for this CCSD.'''
        from pyscf.grad import ccsd
        return ccsd.Gradients(self)

    def get_t1_diagnostic(self, t1=None):
        # T1 diagnostic of the current (or supplied) t1 amplitudes.
        if t1 is None: t1 = self.t1
        return get_t1_diagnostic(t1)
    def get_d1_diagnostic(self, t1=None):
        # D1 diagnostic of the current (or supplied) t1 amplitudes.
        if t1 is None: t1 = self.t1
        return get_d1_diagnostic(t1)
    def get_d2_diagnostic(self, t2=None):
        # D2 diagnostic of the current (or supplied) t2 amplitudes.
        if t2 is None: t2 = self.t2
        return get_d2_diagnostic(t2)
# Backward-compatible aliases: CC, RCCSD and CCSD name the same class.
CC = RCCSD = CCSD
class _ChemistsERIs:
    '''(pq|rs)

    Container for the MO-basis two-electron integrals in chemists' notation,
    split into occupied/virtual blocks (oooo, ovoo, ...), plus the MO Fock
    matrix and the recomputed reference energy.
    '''
    def __init__(self, mol=None):
        self.mol = mol
        self.mo_coeff = None
        self.nocc = None
        self.fock = None
        self.e_hf = None
        # Integral blocks; o = occupied, v = virtual index.
        # ovvv is stored with the last two (virtual) indices tril-packed;
        # vvvv is stored with 4-fold (s4) symmetry packing.
        self.oooo = None
        self.ovoo = None
        self.oovv = None
        self.ovvo = None
        self.ovov = None
        self.ovvv = None
        self.vvvv = None
    def _common_init_(self, mycc, mo_coeff=None):
        '''Populate mo_coeff, fock, e_hf, nocc and mo_energy from mycc.'''
        if mo_coeff is None:
            mo_coeff = mycc.mo_coeff
        # Drop frozen (core) orbitals from the MO coefficients
        self.mo_coeff = mo_coeff = _mo_without_core(mycc, mo_coeff)
        # Note: Recomputed fock matrix and HF energy since SCF may not be fully converged.
        dm = mycc._scf.make_rdm1(mycc.mo_coeff, mycc.mo_occ)
        vhf = mycc._scf.get_veff(mycc.mol, dm)
        fockao = mycc._scf.get_fock(vhf=vhf, dm=dm)
        self.fock = reduce(numpy.dot, (mo_coeff.conj().T, fockao, mo_coeff))
        self.e_hf = mycc._scf.energy_tot(dm=dm, vhf=vhf)
        nocc = self.nocc = mycc.nocc
        self.mol = mycc.mol
        # Note self.mo_energy can be different to fock.diagonal().
        # self.mo_energy is used in the initial guess function (to generate
        # MP2 amplitudes) and CCSD update_amps preconditioner.
        # fock.diagonal() should only be used to compute the expectation value
        # of Slater determinants.
        mo_e = self.mo_energy = self.fock.diagonal().real
        try:
            gap = abs(mo_e[:nocc,None] - mo_e[None,nocc:]).min()
            if gap < 1e-5:
                logger.warn(mycc, 'HOMO-LUMO gap %s too small for CCSD.\n'
                            'CCSD may be difficult to converge. Increasing '
                            'CCSD Attribute level_shift may improve '
                            'convergence.', gap)
        except ValueError:  # gap.size == 0
            pass
        return self
    def get_ovvv(self, *slices):
        '''To access a subblock of ovvv tensor'''
        # Unpack the tril-compressed last two indices into a full (v,v) pair
        ovw = numpy.asarray(self.ovvv[slices])
        nocc, nvir, nvir_pair = ovw.shape
        ovvv = lib.unpack_tril(ovw.reshape(nocc*nvir,nvir_pair))
        nvir1 = ovvv.shape[2]
        return ovvv.reshape(nocc,nvir,nvir1,nvir1)
    def _contract_vvvv_t2(self, mycc, t2, vvvv_or_direct=False, out=None, verbose=None):
        '''Contract the vvvv integrals with t2; vvvv_or_direct may be an
        explicit array, True for AO-direct contraction, or False to use the
        stored self.vvvv block.'''
        if isinstance(vvvv_or_direct, numpy.ndarray):
            vvvv = vvvv_or_direct
        elif vvvv_or_direct:  # AO-direct contraction
            vvvv = None
        else:
            vvvv = self.vvvv
        return _contract_vvvv_t2(mycc, self.mol, vvvv, t2, out, verbose)
    def _contract_vvvv_oov(self, mycc, r2, out=None):
        # Implemented by subclasses that support EOM contractions.
        raise NotImplementedError
    def _contract_vvvv_ovv(self, mycc, r2, out=None):
        # Implemented by subclasses that support EOM contractions.
        raise NotImplementedError
def _make_eris_incore(mycc, mo_coeff=None):
    '''Build a _ChemistsERIs container fully in memory from the cached AO
    integrals (mycc._scf._eri), unpacking the s8/s4-packed AO ERIs row by
    row to avoid materializing the full 4-index tensor twice.'''
    cput0 = (logger.process_clock(), logger.perf_counter())
    eris = _ChemistsERIs()
    eris._common_init_(mycc, mo_coeff)
    nocc = eris.nocc
    nmo = eris.fock.shape[0]
    nvir = nmo - nocc
    eri1 = ao2mo.incore.full(mycc._scf._eri, eris.mo_coeff)
    #:eri1 = ao2mo.restore(1, eri1, nmo)
    #:eris.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()
    #:eris.ovoo = eri1[:nocc,nocc:,:nocc,:nocc].copy()
    #:eris.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()
    #:eris.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()
    #:eris.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()
    #:ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()
    #:eris.ovvv = lib.pack_tril(ovvv.reshape(-1,nvir,nvir)).reshape(nocc,nvir,-1)
    #:eris.vvvv = ao2mo.restore(4, eri1[nocc:,nocc:,nocc:,nocc:], nvir)
    if eri1.ndim == 4:
        # Normalize to 4-fold-symmetry packed storage for the row loop below
        eri1 = ao2mo.restore(4, eri1, nmo)
    nvir_pair = nvir * (nvir+1) // 2
    eris.oooo = numpy.empty((nocc,nocc,nocc,nocc))
    eris.ovoo = numpy.empty((nocc,nvir,nocc,nocc))
    eris.ovvo = numpy.empty((nocc,nvir,nvir,nocc))
    eris.ovov = numpy.empty((nocc,nvir,nocc,nvir))
    eris.ovvv = numpy.empty((nocc,nvir,nvir_pair))
    eris.vvvv = numpy.empty((nvir_pair,nvir_pair))
    # First pass over occupied rows: fill oooo and oovv
    ij = 0
    outbuf = numpy.empty((nmo,nmo,nmo))
    oovv = numpy.empty((nocc,nocc,nvir,nvir))
    for i in range(nocc):
        buf = lib.unpack_tril(eri1[ij:ij+i+1], out=outbuf[:i+1])
        for j in range(i+1):
            # Exploit (ij|kl) = (ji|kl) permutation symmetry
            eris.oooo[i,j] = eris.oooo[j,i] = buf[j,:nocc,:nocc]
            oovv[i,j] = oovv[j,i] = buf[j,nocc:,nocc:]
        ij += i + 1
    eris.oovv = oovv
    oovv = None
    # Second pass over virtual rows: fill ovoo/ovvo/ovov/ovvv/vvvv
    ij1 = 0
    for i in range(nocc,nmo):
        buf = lib.unpack_tril(eri1[ij:ij+i+1], out=outbuf[:i+1])
        eris.ovoo[:,i-nocc] = buf[:nocc,:nocc,:nocc]
        eris.ovvo[:,i-nocc] = buf[:nocc,nocc:,:nocc]
        eris.ovov[:,i-nocc] = buf[:nocc,:nocc,nocc:]
        eris.ovvv[:,i-nocc] = lib.pack_tril(buf[:nocc,nocc:,nocc:])
        dij = i - nocc + 1
        lib.pack_tril(buf[nocc:i+1,nocc:,nocc:],
                      out=eris.vvvv[ij1:ij1+dij])
        ij += i + 1
        ij1 += dij
    logger.timer(mycc, 'CCSD integral transformation', *cput0)
    return eris
def _make_eris_outcore(mycc, mo_coeff=None):
    '''Build a _ChemistsERIs container with the integral blocks stored in
    temporary HDF5 files (out-of-core), transforming the AO integrals in
    blocks with asynchronous prefetching of the half-transformed data.'''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(mycc.stdout, mycc.verbose)
    eris = _ChemistsERIs()
    eris._common_init_(mycc, mo_coeff)
    mol = mycc.mol
    mo_coeff = numpy.asarray(eris.mo_coeff, order='F')
    nocc = eris.nocc
    nao, nmo = mo_coeff.shape
    nvir = nmo - nocc
    orbo = mo_coeff[:,:nocc]
    orbv = mo_coeff[:,nocc:]
    nvpair = nvir * (nvir+1) // 2
    # HDF5-backed storage; chunk shapes follow the later access patterns
    eris.feri1 = lib.H5TmpFile()
    eris.oooo = eris.feri1.create_dataset('oooo', (nocc,nocc,nocc,nocc), 'f8')
    eris.oovv = eris.feri1.create_dataset('oovv', (nocc,nocc,nvir,nvir), 'f8', chunks=(nocc,nocc,1,nvir))
    eris.ovoo = eris.feri1.create_dataset('ovoo', (nocc,nvir,nocc,nocc), 'f8', chunks=(nocc,1,nocc,nocc))
    eris.ovvo = eris.feri1.create_dataset('ovvo', (nocc,nvir,nvir,nocc), 'f8', chunks=(nocc,1,nvir,nocc))
    eris.ovov = eris.feri1.create_dataset('ovov', (nocc,nvir,nocc,nvir), 'f8', chunks=(nocc,1,nocc,nvir))
    eris.ovvv = eris.feri1.create_dataset('ovvv', (nocc,nvir,nvpair), 'f8')
    def save_occ_frac(p0, p1, eri):
        # Store the occupied-row fraction [p0:p1) into oooo and oovv
        eri = eri.reshape(p1-p0,nocc,nmo,nmo)
        eris.oooo[p0:p1] = eri[:,:,:nocc,:nocc]
        eris.oovv[p0:p1] = eri[:,:,nocc:,nocc:]
    def save_vir_frac(p0, p1, eri):
        # Store the virtual-row fraction [p0:p1) into ovoo/ovvo/ovov/ovvv
        eri = eri.reshape(p1-p0,nocc,nmo,nmo)
        eris.ovoo[:,p0:p1] = eri[:,:,:nocc,:nocc].transpose(1,0,2,3)
        eris.ovvo[:,p0:p1] = eri[:,:,nocc:,:nocc].transpose(1,0,2,3)
        eris.ovov[:,p0:p1] = eri[:,:,:nocc,nocc:].transpose(1,0,2,3)
        vvv = lib.pack_tril(eri[:,:,nocc:,nocc:].reshape((p1-p0)*nocc,nvir,nvir))
        eris.ovvv[:,p0:p1] = vvv.reshape(p1-p0,nocc,nvpair).transpose(1,0,2)
    cput1 = logger.process_clock(), logger.perf_counter()
    if not mycc.direct:
        # In AO-direct mode vvvv is contracted on the fly and never stored
        max_memory = max(MEMORYMIN, mycc.max_memory-lib.current_memory()[0])
        eris.feri2 = lib.H5TmpFile()
        ao2mo.full(mol, orbv, eris.feri2, max_memory=max_memory, verbose=log)
        eris.vvvv = eris.feri2['eri_mo']
        cput1 = log.timer_debug1('transforming vvvv', *cput1)
    # Half transformation (AO,AO|AO,occ) written to a swap file
    fswap = lib.H5TmpFile()
    max_memory = max(MEMORYMIN, mycc.max_memory-lib.current_memory()[0])
    int2e = mol._add_suffix('int2e')
    ao2mo.outcore.half_e1(mol, (mo_coeff,orbo), fswap, int2e,
                          's4', 1, max_memory, verbose=log)
    ao_loc = mol.ao_loc_nr()
    nao_pair = nao * (nao+1) // 2
    blksize = int(min(8e9,max_memory*.5e6)/8/(nao_pair+nmo**2)/nocc)
    blksize = min(nmo, max(BLKMIN, blksize))
    log.debug1('blksize %d', blksize)
    cput2 = cput1
    fload = ao2mo.outcore._load_from_h5g
    # Double-buffering: one buffer is consumed while the other is prefetched
    buf = numpy.empty((blksize*nocc,nao_pair))
    buf_prefetch = numpy.empty_like(buf)
    def load(buf_prefetch, p0, rowmax):
        if p0 < rowmax:
            p1 = min(rowmax, p0+blksize)
            fload(fswap['0'], p0*nocc, p1*nocc, buf_prefetch)
    outbuf = numpy.empty((blksize*nocc,nmo**2))
    with lib.call_in_background(load, sync=not mycc.async_io) as prefetch:
        prefetch(buf_prefetch, 0, nocc)
        for p0, p1 in lib.prange(0, nocc, blksize):
            buf, buf_prefetch = buf_prefetch, buf
            prefetch(buf_prefetch, p1, nocc)
            nrow = (p1 - p0) * nocc
            dat = ao2mo._ao2mo.nr_e2(buf[:nrow], mo_coeff, (0,nmo,0,nmo),
                                     's4', 's1', out=outbuf, ao_loc=ao_loc)
            save_occ_frac(p0, p1, dat)
        cput2 = log.timer_debug1('transforming oopp', *cput2)
        prefetch(buf_prefetch, nocc, nmo)
        for p0, p1 in lib.prange(0, nvir, blksize):
            buf, buf_prefetch = buf_prefetch, buf
            prefetch(buf_prefetch, nocc+p1, nmo)
            nrow = (p1 - p0) * nocc
            dat = ao2mo._ao2mo.nr_e2(buf[:nrow], mo_coeff, (0,nmo,0,nmo),
                                     's4', 's1', out=outbuf, ao_loc=ao_loc)
            save_vir_frac(p0, p1, dat)
            cput2 = log.timer_debug1('transforming ovpp [%d:%d]'%(p0,p1), *cput2)
    cput1 = log.timer_debug1('transforming oppp', *cput1)
    log.timer('CCSD integral transformation', *cput0)
    return eris
def _make_df_eris_outcore(mycc, mo_coeff=None):
    '''Build a _ChemistsERIs container from the density-fitting 3-index
    tensors of the SCF object: transform L_pq to the MO basis block by
    block, then assemble each 4-index block as L^T L in HDF5 storage.'''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(mycc.stdout, mycc.verbose)
    eris = _ChemistsERIs()
    eris._common_init_(mycc, mo_coeff)
    mo_coeff = numpy.asarray(eris.mo_coeff, order='F')
    nocc = eris.nocc
    nao, nmo = mo_coeff.shape
    nvir = nmo - nocc
    nvir_pair = nvir*(nvir+1)//2
    naux = mycc._scf.with_df.get_naoaux()
    # MO-basis 3-index tensors, split into o/v blocks (vv is tril-packed)
    Loo = numpy.empty((naux,nocc,nocc))
    Lov = numpy.empty((naux,nocc,nvir))
    Lvo = numpy.empty((naux,nvir,nocc))
    Lvv = numpy.empty((naux,nvir_pair))
    ijslice = (0, nmo, 0, nmo)
    Lpq = None
    p1 = 0
    for eri1 in mycc._scf.with_df.loop():
        Lpq = _ao2mo.nr_e2(eri1, mo_coeff, ijslice, aosym='s2', out=Lpq).reshape(-1,nmo,nmo)
        p0, p1 = p1, p1 + Lpq.shape[0]
        Loo[p0:p1] = Lpq[:,:nocc,:nocc]
        Lov[p0:p1] = Lpq[:,:nocc,nocc:]
        Lvo[p0:p1] = Lpq[:,nocc:,:nocc]
        Lvv[p0:p1] = lib.pack_tril(Lpq[:,nocc:,nocc:].reshape(-1,nvir,nvir))
    Loo = Loo.reshape(naux,nocc*nocc)
    Lov = Lov.reshape(naux,nocc*nvir)
    Lvo = Lvo.reshape(naux,nocc*nvir)
    eris.feri1 = lib.H5TmpFile()
    eris.oooo = eris.feri1.create_dataset('oooo', (nocc,nocc,nocc,nocc), 'f8')
    eris.oovv = eris.feri1.create_dataset('oovv', (nocc,nocc,nvir,nvir), 'f8', chunks=(nocc,nocc,1,nvir))
    eris.ovoo = eris.feri1.create_dataset('ovoo', (nocc,nvir,nocc,nocc), 'f8', chunks=(nocc,1,nocc,nocc))
    eris.ovvo = eris.feri1.create_dataset('ovvo', (nocc,nvir,nvir,nocc), 'f8', chunks=(nocc,1,nvir,nocc))
    eris.ovov = eris.feri1.create_dataset('ovov', (nocc,nvir,nocc,nvir), 'f8', chunks=(nocc,1,nocc,nvir))
    eris.ovvv = eris.feri1.create_dataset('ovvv', (nocc,nvir,nvir_pair), 'f8')
    eris.vvvv = eris.feri1.create_dataset('vvvv', (nvir_pair,nvir_pair), 'f8')
    # (pq|rs) ≈ sum_L L_pq L_rs for each block combination
    eris.oooo[:] = lib.ddot(Loo.T, Loo).reshape(nocc,nocc,nocc,nocc)
    eris.ovoo[:] = lib.ddot(Lov.T, Loo).reshape(nocc,nvir,nocc,nocc)
    eris.oovv[:] = lib.unpack_tril(lib.ddot(Loo.T, Lvv)).reshape(nocc,nocc,nvir,nvir)
    eris.ovvo[:] = lib.ddot(Lov.T, Lvo).reshape(nocc,nvir,nvir,nocc)
    eris.ovov[:] = lib.ddot(Lov.T, Lov).reshape(nocc,nvir,nocc,nvir)
    eris.ovvv[:] = lib.ddot(Lov.T, Lvv).reshape(nocc,nvir,nvir_pair)
    eris.vvvv[:] = lib.ddot(Lvv.T, Lvv)
    log.timer('CCSD integral transformation', *cput0)
    return eris
def _flops(nocc, nvir):
'''Total float points'''
return (nocc**3*nvir**2*2 + nocc**2*nvir**3*2 + # Ftilde
nocc**4*nvir*2 * 2 + nocc**4*nvir**2*2 + # Wijkl
nocc*nvir**4*2 * 2 + # Wabcd
nocc**2*nvir**3*2 + nocc**3*nvir**2*2 +
nocc**3*nvir**3*2 + nocc**3*nvir**3*2 +
nocc**2*nvir**3*2 + nocc**3*nvir**2*2 + # Wiabj
nocc**2*nvir**3*2 + nocc**3*nvir**2*2 + # t1
nocc**3*nvir**2*2 * 2 + nocc**4*nvir**2*2 +
nocc*(nocc+1)/2*nvir**4*2 + # vvvv
nocc**2*nvir**3*2 * 2 + nocc**3*nvir**2*2 * 2 + # t2
nocc**3*nvir**3*2 +
nocc**3*nvir**3*2 * 2 + nocc**3*nvir**2*2 * 4) # Wiabj
if __name__ == '__main__':
    # Self-test: run CCSD (with and without density fitting) on water and
    # print deviations from reference values; each printed number should be
    # close to zero.
    from pyscf import scf
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0.     , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757  , 0.587)]]
    mol.basis = {'H': 'cc-pvdz',
                 'O': 'cc-pvdz',}
    mol.build()
    rhf = scf.RHF(mol)
    rhf.scf() # -76.0267656731

    # Density-fitted reference (AO ERIs discarded to force the DF path)
    mf = rhf.density_fit(auxbasis='weigend')
    mf._eri = None
    mcc = CCSD(mf)
    eris = mcc.ao2mo()
    emp2, t1, t2 = mcc.init_amps(eris)
    print(abs(t2).sum() - 4.9318753386922278)
    print(emp2 - -0.20401737899811551)
    t1, t2 = update_amps(mcc, t1, t2, eris)
    print(abs(t1).sum() - 0.046961325647584914)
    print(abs(t2).sum() - 5.378260578551683 )

    # Conventional (exact-ERI) CCSD
    mcc = CCSD(rhf)
    eris = mcc.ao2mo()
    emp2, t1, t2 = mcc.init_amps(eris)
    print(abs(t2).sum() - 4.9556571218177)
    print(emp2 - -0.2040199672883385)
    t1, t2 = update_amps(mcc, t1, t2, eris)
    print(abs(t1).sum()-0.0475038989126)
    print(abs(t2).sum()-5.401823846018721)
    print(energy(mcc, t1, t2, eris) - -0.208967840546667)
    t1, t2 = update_amps(mcc, t1, t2, eris)
    print(energy(mcc, t1, t2, eris) - -0.212173678670510)
    print(abs(t1).sum() - 0.05470123093500083)
    print(abs(t2).sum() - 5.5605208391876539)

    mcc.ccsd()
    print(mcc.ecc - -0.213343234198275)
    print(abs(mcc.t2).sum() - 5.63970304662375)
    # Exercise the AO-direct, memory-constrained code path
    mcc.max_memory = 1
    mcc.direct = True
    mcc.ccsd()
    print(mcc.ecc - -0.213343234198275)
    print(abs(mcc.t2).sum() - 5.63970304662375)

    # EOM-CCSD ionization potentials and excitation energies
    e, v = mcc.ipccsd(nroots=3)
    print(e[0] - 0.43356041409195489)
    print(e[1] - 0.51876598058509493)
    print(e[2] - 0.6782879569941862 )

    e, v = mcc.eeccsd(nroots=4)
    print(e[0] - 0.2757159395886167)
    print(e[1] - 0.2757159395886167)
    print(e[2] - 0.2757159395886167)
    print(e[3] - 0.3005716731825082)
| StarcoderdataPython |
379860 | from .client import *
from . import charts
from . import exceptions
from . import resources
| StarcoderdataPython |
5100484 | #from distutils.core import setup
from pathlib import Path

from setuptools import setup

setup(
    name='responder',
    version='2.3.3.8',
    description='LLMNR/NBT-NS/mDNS Poisoner and NTLMv1/2 Relay',
    author='<NAME>',
    author_email='<EMAIL>',
    license='GPLv3',
    url='https://github.com/lgandx/Responder/',
    # Read the README with an explicit encoding and without leaking an open
    # file handle (the previous bare ``open(...).read()`` did both).
    long_description=Path('README.md').read_text(encoding='utf-8'),
    # Required for PyPI to render the Markdown README correctly.
    long_description_content_type='text/markdown',
    packages=['certs','files','logs','poisoners','servers','tools','tools.MultiRelay','tools.SMBFinger',],
)
9663603 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for EditorSplitter class in editor.py
"""
# Standard library imports
try:
from unittest.mock import Mock
import pathlib
except ImportError:
from mock import Mock # Python 2
import pathlib2 as pathlib
import os
import os.path as osp
from functools import partial
# Third party imports
import pytest
from qtpy.QtCore import Qt
# Local imports
from spyder.plugins.editor.widgets.editor import EditorStack, EditorSplitter
# ---- Qt Test Fixtures
def editor_stack():
    """Create a minimal EditorStack with mocked find widget and IO actions.

    Plain helper (not a pytest fixture) so tests can build several stacks.
    """
    editor_stack = EditorStack(None, [])
    editor_stack.set_find_widget(Mock())
    editor_stack.set_io_actions(Mock(), Mock(), Mock(), Mock())
    return editor_stack
@pytest.fixture
def editor_splitter_bot(qtbot):
    """Create editor splitter."""
    # plugin is mocked; first=True marks this as the root splitter
    es = EditorSplitter(None, Mock(), [], first=True)
    qtbot.addWidget(es)
    es.resize(640, 480)
    es.show()
    return es
@pytest.fixture
def editor_splitter_lsp(qtbot_module, completion_plugin_all_started, request):
    """Create an editor splitter wired to a running LSP completion provider.

    Returns (editorsplitter, lsp_provider).
    """
    text = """
import sys
"""
    completions, capabilities = completion_plugin_all_started

    def report_file_open(options):
        # Register a newly opened file with the completion plugin and wait
        # for the language server to acknowledge the didOpen notification.
        filename = options['filename']
        language = options['language']
        callback = options['codeeditor']
        completions.register_file(
            language.lower(), filename, callback)
        callback.start_completion_services()
        callback.register_completion_capabilities(capabilities)

        with qtbot_module.waitSignal(
                callback.completions_response_signal, timeout=30000):
            callback.document_did_open()

    def register_editorstack(editorstack):
        editorstack.sig_perform_completion_request.connect(
            completions.send_request)
        editorstack.sig_open_file.connect(report_file_open)
        editorstack.register_completion_capabilities(capabilities, 'python')

    def clone(editorstack, template=None):
        # editorstack.clone_from(template)
        # NOTE(review): the stack built here is never attached anywhere and
        # ``template`` is unused; instead a new file is opened on the outer
        # splitter's stack ("Emulate cloning") — confirm this is intended.
        editor_stack = EditorStack(None, [])
        editor_stack.set_find_widget(Mock())
        editor_stack.set_io_actions(Mock(), Mock(), Mock(), Mock())
        # Emulate "cloning"
        editorsplitter.editorstack.new('test.py', 'utf-8', text)

    mock_plugin = Mock()
    editorsplitter = EditorSplitter(
        None, mock_plugin, [], register_editorstack_cb=register_editorstack)

    editorsplitter.editorstack.set_find_widget(Mock())
    editorsplitter.editorstack.set_io_actions(Mock(), Mock(), Mock(), Mock())
    editorsplitter.editorstack.new('test.py', 'utf-8', text)
    mock_plugin.clone_editorstack.side_effect = partial(
        clone, template=editorsplitter.editorstack)
    qtbot_module.addWidget(editorsplitter)
    editorsplitter.resize(640, 480)
    editorsplitter.show()

    def teardown():
        editorsplitter.hide()
        editorsplitter.close()

    request.addfinalizer(teardown)
    lsp = completions.get_provider('lsp')
    return editorsplitter, lsp
@pytest.fixture
def editor_splitter_layout_bot(editor_splitter_bot):
    """Create editor splitter for testing layouts."""
    es = editor_splitter_bot

    # Allow the split() to duplicate editor stacks.
    def clone(editorstack):
        # Populate a stack with three known files, including this test file
        editorstack.close_action.setEnabled(False)
        editorstack.set_find_widget(Mock())
        editorstack.set_io_actions(Mock(), Mock(), Mock(), Mock())
        editorstack.new('foo.py', 'utf-8', 'a = 1\nprint(a)\n\nx = 2')
        editorstack.new('layout_test.py', 'utf-8', 'print(spam)')
        with open(__file__) as f:
            text = f.read()
        editorstack.new(__file__, 'utf-8', text)

    es.plugin.clone_editorstack.side_effect = clone

    # Setup editor info for this EditorStack.
    clone(es.editorstack)
    return es
# ---- Tests
def test_init(editor_splitter_bot):
    """Test __init__."""
    es = editor_splitter_bot
    assert es.orientation() == Qt.Horizontal
    assert es.testAttribute(Qt.WA_DeleteOnClose)
    assert not es.childrenCollapsible()
    assert not es.toolbar_list
    assert not es.menu_list
    assert es.register_editorstack_cb == es.plugin.register_editorstack
    assert es.unregister_editorstack_cb == es.plugin.unregister_editorstack

    # No menu actions in parameter call.
    assert not es.menu_actions
    # EditorStack adds its own menu actions to the existing actions.
    assert es.editorstack.menu_actions != []

    assert isinstance(es.editorstack, EditorStack)
    es.plugin.register_editorstack.assert_called_with(es.editorstack)
    es.plugin.unregister_editorstack.assert_not_called()
    es.plugin.clone_editorstack.assert_not_called()

    assert es.count() == 1
    assert es.widget(0) == es.editorstack
def test_close(editor_splitter_bot, qtbot):
    """Test the interface for closing the editor splitters."""
    # Split the main editorsplitter once, then split the second
    # editorsplitter twice.
    es = editor_splitter_bot
    es.split()
    esw1 = es.widget(1)
    esw1.editorstack.set_closable(True)
    assert es.count() == 2
    assert esw1.count() == 1
    esw1.split()
    esw1w1 = esw1.widget(1)
    esw1w1.editorstack.set_closable(True)
    assert es.count() == 2
    assert esw1.count() == 2
    assert esw1w1.count() == 1
    esw1.split()
    esw1w2 = esw1.widget(2)
    esw1w2.editorstack.set_closable(True)
    assert es.count() == 2
    assert esw1.count() == 3
    assert esw1w1.count() == esw1w2.count() == 1

    # Assert that all the editorsplitters are visible.
    assert es.isVisible()
    assert esw1.isVisible()
    assert esw1w1.isVisible()
    assert esw1w2.isVisible()

    # Close the editorstack of the editorsplitter esw1 and assert that it is
    # not destroyed because it still contains the editorsplitters esw1w1 and
    # esw1w2.
    with qtbot.waitSignal(esw1.editorstack.destroyed, timeout=1000):
        esw1.editorstack.close_split()
    assert es.count() == 2
    assert esw1.count() == 2
    assert esw1.editorstack is None
    assert es.isVisible()
    assert esw1.isVisible()
    assert esw1w1.isVisible()
    assert esw1w2.isVisible()

    # Close the editorstack of the editorsplitter esw1w1, assert it is
    # correctly destroyed afterwards on the Qt side and that it is correctly
    # removed from the editorsplitter esw1.
    with qtbot.waitSignal(esw1w1.destroyed, timeout=1000):
        esw1w1.editorstack.close_split()
    with pytest.raises(RuntimeError):
        esw1w1.count()
    assert es.count() == 2
    assert esw1.count() == 1
    assert es.isVisible()
    assert esw1.isVisible()
    assert esw1w2.isVisible()

    # Close the editorstack of the editorsplitter esw1w2 and assert that
    # editorsplitters esw1w2 AND esw1 are correctly destroyed afterward on
    # the Qt side.
    with qtbot.waitSignal(esw1.destroyed, timeout=1000):
        esw1w2.editorstack.close_split()
    with pytest.raises(RuntimeError):
        esw1.count()
    with pytest.raises(RuntimeError):
        esw1w2.count()
    assert es.isVisible()
    assert es.count() == 1

    # Test that the editorstack of the main editorsplitter es cannot be closed.
    es.editorstack.close_split()
    assert es.isVisible()
    assert es.count() == 1
def test_split(editor_splitter_layout_bot):
    """Test split() that adds new splitters to this instance."""
    es = editor_splitter_layout_bot

    # Split main panel with default split.
    es.split()  # Call directly.
    assert es.orientation() == Qt.Vertical
    assert not es.editorstack.horsplit_action.isEnabled()
    assert es.editorstack.versplit_action.isEnabled()
    assert es.count() == 2
    assert isinstance(es.widget(1), EditorSplitter)
    # Each splitter gets its own editor stack as the first widget.
    assert es.widget(1).count() == 1
    assert es.widget(1).editorstack == es.widget(1).widget(0)
    es.widget(1).plugin.clone_editorstack.assert_called_with(
        editorstack=es.widget(1).editorstack)

    # Create a horizontal split on original widget.
    es.editorstack.sig_split_horizontally.emit()  # Call from signal.
    assert es.orientation() == Qt.Horizontal
    assert es.editorstack.horsplit_action.isEnabled()
    assert not es.editorstack.versplit_action.isEnabled()
    assert es.count() == 3
    assert isinstance(es.widget(2), EditorSplitter)
    # Two splits have been created and each contains one EditorStack.
    assert es.widget(1).count() == 1
    assert es.widget(2).count() == 1

    # Test splitting one of the children.
    es1 = es.widget(1)
    es1.editorstack.sig_split_vertically.emit()
    assert es.orientation() == Qt.Horizontal  # Main split didn't change.
    assert es1.orientation() == Qt.Vertical  # Child splitter.
    assert not es1.editorstack.horsplit_action.isEnabled()
    assert es1.editorstack.versplit_action.isEnabled()
    assert es1.count() == 2
    assert isinstance(es1.widget(0), EditorStack)
    assert isinstance(es1.widget(1), EditorSplitter)
    assert not es1.widget(1).isHidden()
def test_iter_editorstacks(editor_splitter_bot):
    """Check iter_editorstacks() reports (editorstack, orientation) pairs.

    Only the first child splitter at each level is followed; a second
    splitter added to the same parent is not included in the result.
    """
    splitter = editor_splitter_bot

    def entry(sp):
        # Expected (stack, orientation) pair for a splitter, evaluated
        # at assertion time so orientation changes are picked up.
        return (sp.editorstack, sp.orientation())

    # Base splitter alone.
    assert splitter.iter_editorstacks() == [entry(splitter)]

    # After one vertical split the child's stack is reported as well.
    splitter.split(Qt.Vertical)
    child = splitter.widget(1)
    assert splitter.iter_editorstacks() == [entry(splitter), entry(child)]

    # A second splitter added to the base is *not* iterated.
    splitter.split(Qt.Horizontal)
    assert splitter.iter_editorstacks() == [entry(splitter), entry(child)]

    # Splitting the first child adds a grandchild entry.
    child.split(Qt.Vertical)
    grandchild = splitter.widget(1).widget(1)
    assert splitter.iter_editorstacks() == [entry(splitter), entry(child),
                                            entry(grandchild)]
def test_get_layout_settings(editor_splitter_bot, qtbot, mocker):
    """Test get_layout_settings().

    The returned dict has 'hexstate', 'sizes' and 'splitsettings' keys;
    'splitsettings' holds one (flag, current_filename, line_numbers)
    tuple per editor stack.  NOTE(review): the first tuple element is
    always False in these fixtures -- presumably an "is closing" flag;
    confirm against EditorStack.get_current_project_path/get settings code.
    """
    es = editor_splitter_bot
    # Initial settings from setup: a single empty stack.
    setting = es.get_layout_settings()
    assert setting['splitsettings'] == [(False, None, [])]
    # Add some editors to patch output of iter_editorstacks.
    stack1 = editor_stack()
    stack1.new('foo.py', 'utf-8', 'a = 1\nprint(a)\n\nx = 2')
    stack1.new('layout_test.py', 'utf-8', 'spam egg\n')
    stack2 = editor_stack()
    stack2.new('test.py', 'utf-8', 'test text')
    # Patch on the class so es.get_layout_settings() sees the fake stacks.
    mocker.patch.object(EditorSplitter, "iter_editorstacks")
    EditorSplitter.iter_editorstacks.return_value = (
        [(stack1, Qt.Vertical), (stack2, Qt.Horizontal)])
    setting = es.get_layout_settings()
    # hexstate is opaque here; only check that something was produced.
    assert setting['hexstate']
    assert setting['sizes'] == es.sizes()
    # One tuple per stack; the line lists hold one entry per open tab.
    assert setting['splitsettings'] == [(False, 'foo.py', [5, 3]),
                                        (False, 'test.py', [2])]
def test_set_layout_settings_dont_goto(editor_splitter_layout_bot):
    """Test set_layout_settings() with dont_goto=True.

    The saved split layout (panel hierarchy and hexstate) is recreated,
    but the cursor positions recorded in 'splitsettings' are *not*
    restored: every tab stays at its current line.
    """
    es = editor_splitter_layout_bot
    # Cursor line of the last opened editor; used below to show cursor
    # positions were left untouched.
    linecount = es.editorstack.data[2].editor.get_cursor_line_number()
    # New layout to restore.
    state = '000000ff000000010000000200000231000001ff00ffffffff010000000200'
    sizes = [561, 511]
    splitsettings = [(False, 'layout_test.py', [2, 1, 52]),
                     (False, 'foo.py', [3, 2, 125]),
                     (False, __file__, [1, 1, 1])]
    new_settings = {'hexstate': state,
                    'sizes': sizes,
                    'splitsettings': splitsettings}
    # Current widget doesn't have saved settings applied yet.
    get_settings = es.get_layout_settings()
    assert es.count() == 1
    assert get_settings['hexstate'] != state
    assert get_settings['splitsettings'] != splitsettings
    # Invalid settings value is ignored (returns None, no layout change).
    assert es.set_layout_settings({'spam': 'test'}) is None
    # Restore layout with dont_goto set.
    es.set_layout_settings(new_settings, dont_goto=True)
    get_settings = es.get_layout_settings()
    # Check that the panels were restored.
    assert es.count() == 2  # One EditorStack and one EditorSplitter.
    assert es.widget(1).count() == 2  # One EditorStack and one EditorSplitter.
    assert es.widget(1).widget(1).count() == 1  # One EditorStack.
    assert get_settings['hexstate'] == state
    # All the lines for each tab and split are at the last line number,
    # i.e. the recorded positions [2,1,52] etc. were NOT applied.
    assert get_settings['splitsettings'] == [(False, 'foo.py', [5, 2, linecount]),
                                             (False, 'foo.py', [5, 2, linecount]),
                                             (False, 'foo.py', [5, 2, linecount])]
def test_set_layout_settings_goto(editor_splitter_layout_bot):
    """Test set_layout_settings() restoring cursor positions.

    Without dont_goto, each tab of each restored split is moved to the
    line recorded in 'splitsettings'.
    """
    es = editor_splitter_layout_bot
    # New layout to restore.
    state = '000000ff000000010000000200000231000001ff00ffffffff010000000200'
    sizes = [561, 511]
    splitsettings = [(False, 'layout_test.py', [2, 1, 52]),
                     (False, 'foo.py', [3, 2, 125]),
                     (False, __file__, [1, 1, 1])]
    new_settings = {'hexstate': state,
                    'sizes': sizes,
                    'splitsettings': splitsettings}
    # Restore layout without dont_goto, meaning it should position to the lines.
    es.set_layout_settings(new_settings, dont_goto=None)
    get_settings = es.get_layout_settings()
    # Even though the original splitsettings had different file names
    # selected, the current tab isn't restored in set_layout_settings().
    # However, this shows that the current line was positioned for each tab
    # and each split.
    assert get_settings['splitsettings'] == [(False, 'foo.py', [2, 1, 52]),
                                             (False, 'foo.py', [3, 2, 125]),
                                             (False, 'foo.py', [1, 1, 1])]
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skipif(os.name == 'nt',
                    reason="Makes other tests fail on Windows")
def test_lsp_splitter_close(editor_splitter_lsp):
    """Test for spyder-ide/spyder#9341.

    Splitting the editor registers the open file with the LSP client a
    second time; closing one split must deregister exactly one watcher,
    leaving the file watched once.
    """
    editorsplitter, lsp_manager = editor_splitter_lsp
    editorsplitter.split()
    lsp_files = lsp_manager.clients['python']['instance'].watched_files
    editor = editorsplitter.editorstack.get_current_editor()
    # Watched files are keyed by file URI.
    path = pathlib.Path(osp.abspath(editor.filename)).as_uri()
    assert len(lsp_files[path]) == 2
    editorstacks = editorsplitter.iter_editorstacks()
    assert len(editorstacks) == 2
    # Close the first stack and check only one watcher remains.
    last_editorstack = editorstacks[0][0]
    last_editorstack.close()
    lsp_files = lsp_manager.clients['python']['instance'].watched_files
    assert len(lsp_files[path]) == 1
if __name__ == "__main__":
    # Allow running this test module directly: stop at the first failure
    # (-x), be verbose (-v) and report warnings (-rw).
    import os.path as osp
    pytest.main(['-x', osp.basename(__file__), '-v', '-rw'])
# --- conformance/resources/swapi_create_subgraph.py ---
import json
import matplotlib.pyplot as plt
import networkx as nx
import os
# Load all vertices, keyed by "gid" (input is line-delimited JSON).
verts = {}
with open("./swapi_vertices.json") as fh:
    for line in fh:
        line = json.loads(line)
        verts[line["gid"]] = line
# Load all edges, keyed by "gid" as well.
edges = {}
with open("./swapi_edges.json") as fh:
    for line in fh:
        line = json.loads(line)
        edges[line["gid"]] = line
# Build the full directed graph so the neighborhood of Film:1 can be queried.
G1 = nx.DiGraph()
for gid, e in edges.items():
    G1.add_edge(e["from"], e["to"])
G = nx.DiGraph()
# Keep only "Film:1" plus its direct neighbors.
# NOTE(review): DiGraph.neighbors() yields *successors* only; vertices that
# merely point at Film:1 are excluded from the whitelist -- confirm intended.
whitelist = list(G1.neighbors("Film:1")) + ["Film:1"]
edges_sub = []
verts_sub = [verts[x] for x in whitelist]
# Retain only edges with both endpoints inside the whitelist.
for gid, e in edges.items():
    if e["from"] not in whitelist or e["to"] not in whitelist:
        continue
    G.add_edge(e["from"], e["to"])
    edges_sub.append(e)
# write subgraph to output files (same line-delimited JSON format as input)
with open("swapi_subgraph_vertices.json", "w") as fh:
    for v in verts_sub:
        fh.write(json.dumps(v))
        fh.write(os.linesep)
with open("swapi_subgraph_edges.json", "w") as fh:
    for e in edges_sub:
        fh.write(json.dumps(e))
        fh.write(os.linesep)
# Plot the subgraph: one node color per vertex type, where the type is the
# prefix of the gid ("Type:id").
films = [x for x in G.nodes() if x.startswith("Film")]
people = [x for x in G.nodes() if x.startswith("Character")]
species = [x for x in G.nodes() if x.startswith("Species")]
planets = [x for x in G.nodes() if x.startswith("Planet")]
starships = [x for x in G.nodes() if x.startswith("Starship")]
vehicles = [x for x in G.nodes() if x.startswith("Vehicle")]
# Node labels show only the numeric id, not the "Type:" prefix.
labels = {label: label.split(":")[1] for label in G.nodes()}
pos = nx.spring_layout(G)
plt.clf()
nx.draw_networkx_nodes(G, pos,
                       nodelist=films,
                       node_color='lightgrey',
                       node_size=500,
                       label="Film")
nx.draw_networkx_nodes(G, pos,
                       nodelist=people,
                       node_color='indianred',
                       node_size=500,
                       label="Character")
nx.draw_networkx_nodes(G, pos,
                       nodelist=species,
                       node_color='mediumturquoise',
                       node_size=500,
                       label="Species")
nx.draw_networkx_nodes(G, pos,
                       nodelist=planets,
                       node_color='mediumseagreen',
                       node_size=500,
                       label="Planet")
nx.draw_networkx_nodes(G, pos,
                       nodelist=starships,
                       node_color='violet',
                       node_size=500,
                       label="Starship")
nx.draw_networkx_nodes(G, pos,
                       nodelist=vehicles,
                       node_color='slateblue',
                       node_size=500,
                       label="Vehicle")
nx.draw_networkx_edges(G, pos, width=1.5, arrowsize=12)
nx.draw_networkx_labels(G, pos, labels, font_size=14)
plt.legend(numpoints=1)
plt.show()
# -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches, unirange
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
'DtdLexer', 'DartLexer', 'LassoLexer']
class JavascriptLexer(RegexLexer):
    """
    For JavaScript source code.

    The lexer resolves the division-vs-regex ambiguity of ``/`` with the
    'slashstartsregex' state: it is entered after tokens that can be
    followed by a regex literal (operators, open punctuation, keywords).
    """
    name = 'JavaScript'
    aliases = ['js', 'javascript']
    filenames = ['*.js', ]
    mimetypes = ['application/javascript', 'application/x-javascript',
                 'text/x-javascript', 'text/javascript', ]
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # A '/' in this state starts a regex literal, not a division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            # Not a valid regex after all: treat the rest of the line as code.
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            # Reserved-for-future-use words (ECMA-262 reserved words).
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class JsonLexer(RegexLexer):
    """
    For JSON data structures.

    States mirror the JSON grammar: a document is a 'value'; objects and
    arrays push their own states and pop on the closing bracket.

    *New in Pygments 1.5.*
    """
    name = 'JSON'
    aliases = ['json']
    filenames = ['*.json']
    mimetypes = [ 'application/json', ]
    # integer part of a number
    int_part = r'-?(0|[1-9]\d*)'
    # fractional part of a number
    frac_part = r'\.\d+'
    # exponential part of a number
    exp_part = r'[eE](\+|-)?\d+'
    flags = re.DOTALL
    tokens = {
        'whitespace': [
            (r'\s+', Text),
        ],
        # represents a simple terminal value
        'simplevalue': [
            (r'(true|false|null)\b', Keyword.Constant),
            # Float pattern assembled from the class-level parts via vars(),
            # which here exposes the class body's local names.
            (('%(int_part)s(%(frac_part)s%(exp_part)s|'
              '%(exp_part)s|%(frac_part)s)') % vars(),
             Number.Float),
            (int_part, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],
        # the right hand side of an object, after the attribute name
        'objectattribute': [
            include('value'),
            (r':', Punctuation),
            # comma terminates the attribute but expects more
            (r',', Punctuation, '#pop'),
            # a closing bracket terminates the entire object, so pop twice
            (r'}', Punctuation, ('#pop', '#pop')),
        ],
        # a json object - { attr, attr, ... }
        'objectvalue': [
            include('whitespace'),
            (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
            (r'}', Punctuation, '#pop'),
        ],
        # json array - [ value, value, ... ]
        'arrayvalue': [
            include('whitespace'),
            include('value'),
            (r',', Punctuation),
            (r']', Punctuation, '#pop'),
        ],
        # a json value - either a simple value or a complex value (object or array)
        'value': [
            include('whitespace'),
            include('simplevalue'),
            (r'{', Punctuation, 'objectvalue'),
            (r'\[', Punctuation, 'arrayvalue'),
        ],
        # the root of a json document should be a value
        'root': [
            include('value'),
        ],
    }
# Backwards-compatibility alias: the class was exposed as ``JSONLexer``
# in Pygments 1.5 before being renamed to ``JsonLexer``.
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript source code.

    Fix: the builtin-name alternation previously had a missing ``|``
    between the adjacent string literals ``'...IDynamicPropertyOutput'``
    and ``'IDynamicPropertyWriter|...'``; implicit concatenation fused
    them into the single bogus name
    ``IDynamicPropertyOutputIDynamicPropertyWriter`` so neither
    interface was ever highlighted.

    *New in Pygments 0.9.*
    """
    name = 'ActionScript'
    aliases = ['as', 'actionscript']
    filenames = ['*.as']
    # NOTE(review): these mimetypes look swapped with ActionScript3Lexer's
    # (this AS1/2 lexer advertises the "*3" types) -- confirm before changing,
    # since lexer lookup by mimetype depends on them.
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']
    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
            (r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
             r'switch)\b', Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # Flash/ActionScript builtin classes.
            # TODO(review): 'ConvultionFilter', 'DisplacmentMapFilter' and
            # 'DisplacmentMapFilterMode' appear to be misspellings of the real
            # API names (ConvolutionFilter, DisplacementMapFilter[Mode]);
            # kept as-is pending confirmation against the Flash API docs.
            (r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
             r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
             r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
             r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
             r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
             r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
             r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
             r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
             r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
             r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
             r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
             r'EventPhase|ExternalInterface|FileFilter|FileReference|'
             r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
             r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
             r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
             r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput|'
             r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
             r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
             r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
             r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
             r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
             r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
             r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
             r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
             r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
             r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
             r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
             r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
             r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
             r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
             r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
             r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
             r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
             r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
             r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
             r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
             r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
             r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
             Name.Builtin),
            # Global builtin functions.
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b',Name.Function),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.

    Compared with the AS1/2 lexer this one parses declarations
    (``function``/``var``/``const``) with dedicated states so parameter
    and return types get Keyword.Type tokens.

    *New in Pygments 0.11.*
    """
    name = 'ActionScript 3'
    aliases = ['as3', 'actionscript3']
    # NOTE(review): these mimetypes look swapped with ActionScriptLexer's
    # (this AS3 lexer advertises the non-"3" types) -- confirm before changing.
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']
    identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
    # Identifier optionally followed by a Vector type parameter: Foo.<Bar>
    typeidentifier = identifier + '(?:\.<\w+>)?'
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Text),
            # function declaration: jump to the parameter list state
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            # typed variable/constant declaration
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Text, Name.Namespace, Text)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        # Inside a function's parameter list.
        'funcparams': [
            (r'\s+', Text),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Text, Punctuation, Name, Text, Operator, Text,
                      Keyword.Type, Text), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # Optional return-type annotation after the closing paren.
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
            (r'\s*', Text, '#pop:2')
        ],
        # Optional default value for a parameter.
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
            (r',?', Operator, '#pop')
        ]
    }
    def analyse_text(text):
        # A typed declaration ("name : Type") is weak evidence of AS3.
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
class CssLexer(RegexLexer):
    """
    For CSS (Cascading Style Sheets).

    Selectors are handled by 'basics'; '{' switches into 'content', which
    recognizes known property names and values as keywords and known
    color names as builtins.
    """
    name = 'CSS'
    aliases = ['css']
    filenames = ['*.css']
    mimetypes = ['text/css']
    tokens = {
        'root': [
            include('basics'),
        ],
        # Everything outside a declaration block: selectors and at-rules.
        'basics': [
            (r'\s+', Text),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'{', Punctuation, 'content'),
            (r'\:[a-zA-Z0-9_-]+', Name.Decorator),
            (r'\.[a-zA-Z0-9_-]+', Name.Class),
            (r'\#[a-zA-Z0-9_-]+', Name.Function),
            (r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
            (r'[a-zA-Z0-9_-]+', Name.Tag),
            (r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single)
        ],
        # After an @rule: either a block ('atcontent') or a ';' terminator.
        'atrule': [
            (r'{', Punctuation, 'atcontent'),
            (r';', Punctuation, '#pop'),
            include('basics'),
        ],
        'atcontent': [
            include('basics'),
            (r'}', Punctuation, '#pop:2'),
        ],
        # Inside a '{ ... }' declaration block.
        'content': [
            (r'\s+', Text),
            (r'}', Punctuation, '#pop'),
            (r'url\(.*?\)', String.Other),
            (r'^@.*?$', Comment.Preproc),
            # Known CSS 2.1 property names and value keywords.
            (r'(azimuth|background-attachment|background-color|'
             r'background-image|background-position|background-repeat|'
             r'background|border-bottom-color|border-bottom-style|'
             r'border-bottom-width|border-left-color|border-left-style|'
             r'border-left-width|border-right|border-right-color|'
             r'border-right-style|border-right-width|border-top-color|'
             r'border-top-style|border-top-width|border-bottom|'
             r'border-collapse|border-left|border-width|border-color|'
             r'border-spacing|border-style|border-top|border|caption-side|'
             r'clear|clip|color|content|counter-increment|counter-reset|'
             r'cue-after|cue-before|cue|cursor|direction|display|'
             r'elevation|empty-cells|float|font-family|font-size|'
             r'font-size-adjust|font-stretch|font-style|font-variant|'
             r'font-weight|font|height|letter-spacing|line-height|'
             r'list-style-type|list-style-image|list-style-position|'
             r'list-style|margin-bottom|margin-left|margin-right|'
             r'margin-top|margin|marker-offset|marks|max-height|max-width|'
             r'min-height|min-width|opacity|orphans|outline|outline-color|'
             r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
             r'padding-left|padding-right|padding-top|padding|page|'
             r'page-break-after|page-break-before|page-break-inside|'
             r'pause-after|pause-before|pause|pitch|pitch-range|'
             r'play-during|position|quotes|richness|right|size|'
             r'speak-header|speak-numeral|speak-punctuation|speak|'
             r'speech-rate|stress|table-layout|text-align|text-decoration|'
             r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
             r'vertical-align|visibility|voice-family|volume|white-space|'
             r'widows|width|word-spacing|z-index|bottom|left|'
             r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
             r'behind|below|bidi-override|blink|block|bold|bolder|both|'
             r'capitalize|center-left|center-right|center|circle|'
             r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
             r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
             r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
             r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
             r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
             r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
             r'inherit|inline-table|inline|inset|inside|invert|italic|'
             r'justify|katakana-iroha|katakana|landscape|larger|large|'
             r'left-side|leftwards|level|lighter|line-through|list-item|'
             r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
             r'lower|low|medium|message-box|middle|mix|monospace|'
             r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
             r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
             r'open-quote|outset|outside|overline|pointer|portrait|px|'
             r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
             r'rightwards|s-resize|sans-serif|scroll|se-resize|'
             r'semi-condensed|semi-expanded|separate|serif|show|silent|'
             r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
             r'spell-out|square|static|status-bar|super|sw-resize|'
             r'table-caption|table-cell|table-column|table-column-group|'
             r'table-footer-group|table-header-group|table-row|'
             r'table-row-group|text|text-bottom|text-top|thick|thin|'
             r'transparent|ultra-condensed|ultra-expanded|underline|'
             r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
             r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
             r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
            # Named colors.
            (r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
             r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
             r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
             r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
             r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
             r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
             r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
             r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
             r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
             r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
             r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
             r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
             r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
             r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
             r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
             r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
             r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
             r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
             r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
             r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
             r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
             r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
             r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
             r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
             r'blueviolet|peachpuff)\b', Name.Builtin),
            (r'\!important', Comment.Preproc),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'\#[a-zA-Z0-9]{1,6}', Number),
            (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
            (r'-?[0-9]+', Number),
            (r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
            (r'[\[\]();]+', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name)
        ]
    }
class ObjectiveJLexer(RegexLexer):
    """
    For Objective-J source code with preprocessor directives.

    Combines JavaScript-style statements with Objective-C-like
    @interface/@implementation declarations and C preprocessor handling.

    *New in Pygments 1.3.*
    """
    name = 'Objective-J'
    aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
    filenames = ['*.j']
    mimetypes = ['text/x-objective-j']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            include('whitespace'),
            # function definition
            (r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
             bygroups(using(this), using(this, state='function_signature'),
                      using(this))),
            # class definition
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             'classname'),
            (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
             'forward_classname'),
            (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
            include('statements'),
            ('[{\(\)}]', Punctuation),
            (';', Punctuation),
        ],
        # Whitespace, comments and preprocessor directives.
        'whitespace': [
            (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'#if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'<!--', Comment),
        ],
        # A '/' in this state starts a regex literal (as in JavaScript).
        'slashstartsregex': [
            include('whitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop'),
        ],
        'badregex': [
            (r'\n', Text, '#pop'),
        ],
        'statements': [
            (r'(L|@)?"', String, 'string'),
            (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
             r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
            (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            # Objective-J directives.
            (r'(@selector|@private|@protected|@public|@encode|'
             r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
             r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
            (r'(int|long|float|short|double|char|unsigned|signed|void|'
             r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
             Keyword.Type),
            (r'(self|super)\b', Name.Builtin),
            (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
             r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
             r'SQRT2)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
             bygroups(Name.Function, using(this))),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname' : [
            # interface definition that inherits
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
             r')([a-zA-Z_][a-zA-Z0-9_]*)?',
             bygroups(Name.Class, using(this), Name.Class), '#pop'),
            # interface definition for a category
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
             bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
            # simple interface / implementation
            (r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
        ],
        # Comma-separated class list after @class/@protocol.
        'forward_classname' : [
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
             bygroups(Name.Class, Text), '#push'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
             bygroups(Name.Class, Text), '#pop'),
        ],
        'function_signature': [
            include('whitespace'),
            # start of a selector w/ parameters
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), 'function_parameters'),
            # no-param function
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), "#pop"),
            # no return type given, start of a selector w/ parameters
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups (Name.Function), 'function_parameters'),
            # no return type given, no-param function
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(Name.Function), "#pop"),
            ('', Text, '#pop'),
        ],
        'function_parameters': [
            include('whitespace'),
            # parameters
            (r'(\(' + _ws + ')'                 # open paren
             r'([^\)]+)'                        # type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # param name
             bygroups(using(this), Keyword.Type, using(this), Text)),
            # one piece of a selector name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             Name.Function),
            # smallest possible selector piece
            (r'(:)', Name.Function),
            # var args
            (r'(,' + _ws + r'\.\.\.)', using(this)),
            # param name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
        ],
        'expression' : [
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
                                                        Punctuation)),
            (r'(\))', Punctuation, "#pop"),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        # C preprocessor directive body (until an unescaped newline).
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        # '#if 0' blocks are dead code; lex them as comments (nested #if ok).
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
    def analyse_text(text):
        if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
            # special directive found in most Objective-J files
            return True
        return False
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """

    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']

    # Tags may span lines; matching is case-insensitive per the HTML spec.
    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            # <script> and <style> bodies are delegated to the JavaScript
            # and CSS lexers via the *-content states.
            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
            (r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'[a-zA-Z0-9_:-]+', Name.Attribute),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'script-content': [
            (r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
        ],
        'style-content': [
            (r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # A matching doctype is a strong, but not conclusive, hint.
        if html_doctype_matches(text):
            return 0.5
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.

    Additional options accepted:

    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.

        To get a list of allowed modules have a look into the
        `_phpbuiltins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._phpbuiltins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]

        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]']
    mimetypes = ['text/x-php']

    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        # Outside of '<?php' everything is passed through as Other.
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # heredoc / nowdoc strings: <<<LABEL ... LABEL;
            (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/?@-]+', Operator),
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # anonymous function: 'function' directly followed by '('
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
            (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
            # bare identifiers are Name.Other; get_tokens_unprocessed below
            # re-tags known builtins as Name.Builtin
            (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'functionname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        # double-quoted strings support ${...} / {$...} interpolation
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }

    def __init__(self, **options):
        """Collect the option values documented in the class docstring."""
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES
            # Python 2 dict API (iteritems); consistent with the rest of
            # this file.
            for key, value in MODULES.iteritems():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the regex stream, promoting known builtin function
        names (tagged Name.Other by the token table) to Name.Builtin."""
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # '<?' (but not '<?xml') and '?>' are weak hints of embedded PHP.
        rv = 0.0
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        if '?>' in text:
            rv += 0.1
        return rv
class DtdLexer(RegexLexer):
    """
    A lexer for DTDs (Document Type Definitions).

    *New in Pygments 1.5.*
    """

    flags = re.MULTILINE | re.DOTALL

    name = 'DTD'
    aliases = ['dtd']
    filenames = ['*.dtd']
    mimetypes = ['application/xml-dtd']

    tokens = {
        'root': [
            include('common'),

            # each declaration kind gets its own state so the identifiers
            # inside it can be tagged appropriately
            (r'(<!ELEMENT)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'element'),
            (r'(<!ATTLIST)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'attlist'),
            (r'(<!ENTITY)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Entity), 'entity'),
            (r'(<!NOTATION)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'notation'),
            (r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
             bygroups(Keyword, Name.Entity, Text, Keyword)),

            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
             bygroups(Keyword, Text, Name.Tag)),
            (r'PUBLIC|SYSTEM', Keyword.Constant),
            (r'[\[\]>]', Keyword),
        ],

        # rules shared by all declaration states
        'common': [
            (r'\s+', Text),
            (r'(%|&)[^;]*;', Name.Entity),
            ('<!--', Comment, 'comment'),
            (r'[(|)*,?+]', Operator),
            (r'"[^"]*"', String.Double),
            (r'\'[^\']*\'', String.Single),
        ],

        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],

        'element': [
            include('common'),
            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Tag),
            (r'>', Keyword, '#pop'),
        ],

        'attlist': [
            include('common'),
            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', Keyword.Constant),
            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
            (r'xml:space|xml:lang', Keyword.Reserved),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],

        'entity': [
            include('common'),
            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Entity),
            (r'>', Keyword, '#pop'),
        ],

        'notation': [
            include('common'),
            (r'SYSTEM|PUBLIC', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
    }

    def analyse_text(text):
        # a DTD declaration in a file that is not itself XML markup
        if not looks_like_xml(text) and \
            ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
            return 0.8
class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # prolog/doctype/root-element heuristics from pygments.util
        if looks_like_xml(text):
            return 0.5
class XsltLexer(XmlLexer):
    '''
    A lexer for XSLT.

    *New in Pygments 0.10.*
    '''

    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
    mimetypes = ['application/xsl+xml', 'application/xslt+xml']

    # element names in the xsl: namespace that should be keywords
    EXTRA_KEYWORDS = set([
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param'
    ])

    def get_tokens_unprocessed(self, text):
        """Run the plain XML lexer, then re-tag known xsl:* tags as
        Keyword instead of Name.Tag."""
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            m = re.match('</?xsl:([^>]*)/?>?', value)

            if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value

    def analyse_text(text):
        # XML that also mentions an <xsl tag is almost certainly XSLT
        if looks_like_xml(text) and '<xsl' in text:
            return 0.8
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.

    *New in Pygments 1.1.*
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # FIX: this attribute was misspelled ``mimetimes``, so the declared
    # MIME types were silently ignored by the lexer registry.
    mimetypes = ['text/xml', 'application/xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA sections contain ActionScript 3 code — delegate to
            # the AS3 lexer
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class HaxeLexer(RegexLexer):
    """
    For haXe source code (http://haxe.org/).

    *New in Pygments 1.3.*
    """

    name = 'haXe'
    aliases = ['hx', 'haXe']
    filenames = ['*.hx']
    mimetypes = ['text/haxe']

    # reusable regex fragments for identifiers, type names and keyword sets
    ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
    typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
    key_prop = r'(?:default|null|never)'
    key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            (key_decl_mod, Keyword.Declaration),
            include('enumdef'),
            include('typedef'),
            include('classdef'),
            include('imports'),
        ],

        # General constructs
        'comments': [
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#[^\n]*', Comment.Preproc),
        ],
        'whitespace': [
            include('comments'),
            (r'\s+', Text),
        ],
        'codekeywords': [
            (r'\b(if|else|while|do|for|in|break|continue|'
             r'return|switch|case|try|catch|throw|null|trace|'
             r'new|this|super|untyped|cast|callback|here)\b',
             Keyword.Reserved),
        ],
        'literals': [
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r'~/([^\n])*?/[gisx]*', String.Regex),
            (r'\b(true|false|null)\b', Keyword.Constant),
        ],
        'codeblock': [
          include('whitespace'),
          include('new'),
          include('case'),
          include('anonfundef'),
          include('literals'),
          include('vardef'),
          include('codekeywords'),
          (r'[();,\[\]]', Punctuation),
          (r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
           r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>>|>>|\+|\-|\*|/|%|'
           r'!|\+\+|\-\-|~|\.|\?|\:)',
           Operator),
          (ident, Name),

          (r'}', Punctuation,'#pop'),
          (r'{', Punctuation,'#push'),
        ],

        # Instance/Block level constructs
        'propertydef': [
            (r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
             bygroups(Punctuation, Keyword.Reserved, Punctuation,
                      Keyword.Reserved, Punctuation)),
        ],
        'new': [
            (r'\bnew\b', Keyword, 'typedecl'),
        ],
        'case': [
            (r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
             bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
             'funargdecl'),
        ],
        'vardef': [
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
        ],
        'vardecl': [
            include('whitespace'),
            include('typelabel'),
            (r'=', Operator,'#pop'),
            (r';', Punctuation,'#pop'),
        ],
        'instancevardef': [
            (key_decl_mod,Keyword.Declaration),
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
             'instancevardecl'),
        ],
        'instancevardecl': [
            include('vardecl'),
            include('propertydef'),
        ],

        'anonfundef': [
            (r'\bfunction\b', Keyword.Declaration, 'fundecl'),
        ],
        'instancefundef': [
            (key_decl_mod, Keyword.Declaration),
            (r'\b(function)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
        ],
        'fundecl': [
            include('whitespace'),
            include('typelabel'),
            include('generictypedecl'),
            (r'\(',Punctuation,'funargdecl'),
            (r'(?=[a-zA-Z0-9_])',Text,'#pop'),
            (r'{',Punctuation,('#pop','codeblock')),
            (r';',Punctuation,'#pop'),
        ],
        'funargdecl': [
            include('whitespace'),
            (ident, Name.Variable),
            include('typelabel'),
            include('literals'),
            (r'=', Operator),
            (r',', Punctuation),
            (r'\?', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],

        # ':' introduces a type annotation
        'typelabel': [
            (r':', Punctuation, 'type'),
        ],
        'typedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'(?=[{}()=,a-z])', Text,'#pop'),
        ],
        'type': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'->', Keyword.Type),
            (r'(?=[{}(),;=])', Text, '#pop'),
        ],
        # nested generics push this state recursively via '#push'
        'generictypedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r',', Punctuation),
        ],

        # Top level constructs
        'imports': [
            (r'(package|import|using)(\s+)([^;]+)(;)',
             bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
        ],
        'typedef': [
            (r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
        ],
        'typedefprebody': [
            include('whitespace'),
            (r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
             ('#pop', 'typedefbody')),
        ],
        'enumdef': [
            (r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
        ],
        'enumdefprebody': [
            include('whitespace'),
            (r'{', Punctuation, ('#pop','enumdefbody')),
        ],
        'classdef': [
            (r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
        ],
        'classdefprebody': [
            include('whitespace'),
            (r'(extends|implements)', Keyword.Declaration,'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],
        'interfacedef': [
            (r'interface', Keyword.Declaration,
             ('interfacedefprebody', 'typedecl')),
        ],
        'interfacedefprebody': [
            include('whitespace'),
            (r'(extends)', Keyword.Declaration, 'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],

        'typedefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'>', Punctuation, 'typedecl'),
            (r',', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'enumdefbody': [
            include('whitespace'),
            (ident, Name.Variable.Instance),
            (r'\(', Punctuation, 'funargdecl'),
            (r';', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'classdefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'}', Punctuation, '#pop'),
            include('codeblock'),
        ],
    }

    def analyse_text(text):
        # a leading 'name : Type' pattern is a weak haXe hint
        if re.match(r'\w+\s*:\s*\w', text): return 0.3
def _indentation(lexer, match, ctx):
    """Callback for a line's leading whitespace in indentation-based
    markup lexers (Haml/Sass).

    Emits the whitespace as a Text token, records it on the context, and
    then decides which state to enter: if a block (comment/filter) was
    armed by ``_starts_block`` and this line is indented more deeply than
    the line that started it, stay inside that block state; otherwise the
    block ends and normal 'content' parsing resumes.
    """
    ws = match.group(0)
    yield match.start(), Text, ws
    ctx.last_indentation = ws
    ctx.pos = match.end()

    pending = getattr(ctx, 'block_state', None)
    if pending and ws != ctx.block_indentation \
            and ws.startswith(ctx.block_indentation):
        # deeper indent than the block opener: continue the armed block
        ctx.stack.append(pending)
    else:
        # block (if any) is over; fall back to regular content parsing
        ctx.block_state = None
        ctx.block_indentation = None
        ctx.stack.append('content')
def _starts_block(token, state):
    """Return a lexer callback that highlights the matched text as *token*
    and arms *state* as the pending indentation-delimited block state.

    The armed state is entered by ``_indentation`` when the following
    lines are indented more deeply than the current line.
    """
    def callback(lexer, match, ctx):
        yield match.start(), token, match.group(0)
        # Remember the indentation of the line that opened the block;
        # an empty string means no indentation has been seen yet.
        ctx.block_indentation = getattr(ctx, 'last_indentation', '')
        ctx.block_state = state
        ctx.pos = match.end()
    return callback
class HamlLexer(ExtendedRegexLexer):
    """
    For Haml markup.

    *New in Pygments 1.3.*
    """

    name = 'Haml'
    aliases = ['haml', 'HAML']
    filenames = ['*.haml']
    mimetypes = ['text/x-haml']

    flags = re.IGNORECASE
    # Haml can include " |\n" anywhere,
    # which is ignored and used to wrap long lines.
    # To accomodate this, use this custom faux dot instead.
    _dot = r'(?: \|\n(?=.* \|)|.)'

    # In certain places, a comma at the end of the line
    # allows line wrapping as well.
    _comma_dot = r'(?:,\s*\n|' + _dot + ')'
    tokens = {
        # every line starts here; _indentation decides the next state
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        # '=' / '~' introduce embedded Ruby; anything else is plain text
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'haml-comment-block'), '#pop'),
            (r'(-)(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            # {...} and [...] attribute hashes are Ruby code
            (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
            (r'\[' + _dot + '*?\]', using(RubyLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        # indentation-delimited block states armed by _starts_block
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'haml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
# Token states shared between SassLexer and ScssLexer; each of those
# classes copies these states into its own tokens dict below.
common_sass_tokens = {
    'value': [
        (r'[ \t]+', Text),
        (r'[!$][\w-]+', Name.Variable),
        (r'url\(', String.Other, 'string-url'),
        (r'[a-z_-][\w-]*(?=\()', Name.Function),
        # CSS property names and keyword values
        (r'(azimuth|background-attachment|background-color|'
         r'background-image|background-position|background-repeat|'
         r'background|border-bottom-color|border-bottom-style|'
         r'border-bottom-width|border-left-color|border-left-style|'
         r'border-left-width|border-right|border-right-color|'
         r'border-right-style|border-right-width|border-top-color|'
         r'border-top-style|border-top-width|border-bottom|'
         r'border-collapse|border-left|border-width|border-color|'
         r'border-spacing|border-style|border-top|border|caption-side|'
         r'clear|clip|color|content|counter-increment|counter-reset|'
         r'cue-after|cue-before|cue|cursor|direction|display|'
         r'elevation|empty-cells|float|font-family|font-size|'
         r'font-size-adjust|font-stretch|font-style|font-variant|'
         r'font-weight|font|height|letter-spacing|line-height|'
         r'list-style-type|list-style-image|list-style-position|'
         r'list-style|margin-bottom|margin-left|margin-right|'
         r'margin-top|margin|marker-offset|marks|max-height|max-width|'
         r'min-height|min-width|opacity|orphans|outline|outline-color|'
         r'outline-style|outline-width|overflow|padding-bottom|'
         r'padding-left|padding-right|padding-top|padding|page|'
         r'page-break-after|page-break-before|page-break-inside|'
         r'pause-after|pause-before|pause|pitch|pitch-range|'
         r'play-during|position|quotes|richness|right|size|'
         r'speak-header|speak-numeral|speak-punctuation|speak|'
         r'speech-rate|stress|table-layout|text-align|text-decoration|'
         r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
         r'vertical-align|visibility|voice-family|volume|white-space|'
         r'widows|width|word-spacing|z-index|bottom|left|'
         r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
         r'behind|below|bidi-override|blink|block|bold|bolder|both|'
         r'capitalize|center-left|center-right|center|circle|'
         r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
         r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
         r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
         r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
         r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
         r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
         r'inherit|inline-table|inline|inset|inside|invert|italic|'
         r'justify|katakana-iroha|katakana|landscape|larger|large|'
         r'left-side|leftwards|level|lighter|line-through|list-item|'
         r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
         r'lower|low|medium|message-box|middle|mix|monospace|'
         r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
         r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
         r'open-quote|outset|outside|overline|pointer|portrait|px|'
         r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
         r'rightwards|s-resize|sans-serif|scroll|se-resize|'
         r'semi-condensed|semi-expanded|separate|serif|show|silent|'
         r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
         r'spell-out|square|static|status-bar|super|sw-resize|'
         r'table-caption|table-cell|table-column|table-column-group|'
         r'table-footer-group|table-header-group|table-row|'
         r'table-row-group|text|text-bottom|text-top|thick|thin|'
         r'transparent|ultra-condensed|ultra-expanded|underline|'
         r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
         r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
         r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
        # extended CSS color keywords
        (r'(indigo|gold|firebrick|indianred|darkolivegreen|'
         r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
         r'mediumslateblue|springgreen|crimson|lightsalmon|brown|'
         r'turquoise|olivedrab|cyan|skyblue|darkturquoise|'
         r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|'
         r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
         r'violet|orchid|ghostwhite|honeydew|cornflowerblue|'
         r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|'
         r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
         r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|'
         r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
         r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
         r'lightcoral|orangered|navajowhite|palegreen|burlywood|'
         r'seashell|mediumspringgreen|papayawhip|blanchedalmond|'
         r'peru|aquamarine|darkslategray|ivory|dodgerblue|'
         r'lemonchiffon|chocolate|orange|forestgreen|slateblue|'
         r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
         r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
         r'plum|darkgoldenrod|sandybrown|magenta|tan|'
         r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
         r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
         r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
         r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
         r'lightyellow|lavenderblush|linen|mediumaquamarine|'
         r'blueviolet|peachpuff)\b', Name.Entity),
        # the 16 basic CSS color keywords
        (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
         r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
        (r'\!(important|default)', Name.Exception),
        (r'(true|false)', Name.Pseudo),
        (r'(and|or|not)', Operator.Word),
        (r'/\*', Comment.Multiline, 'inline-comment'),
        (r'//[^\n]*', Comment.Single),
        (r'\#[a-z0-9]{1,6}', Number.Hex),
        (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
        (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
        (r'#{', String.Interpol, 'interpolation'),
        (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
        (r'[\[\]()]+', Punctuation),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
        (r'[a-z_-][\w-]*', Name),
    ],

    'interpolation': [
        (r'\}', String.Interpol, '#pop'),
        include('value'),
    ],

    'selector': [
        (r'[ \t]+', Text),
        (r'\:', Name.Decorator, 'pseudo-class'),
        (r'\.', Name.Class, 'class'),
        (r'\#', Name.Namespace, 'id'),
        (r'[a-zA-Z0-9_-]+', Name.Tag),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'&', Keyword),
        (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
    ],

    'string-double': [
        (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'"', String.Double, '#pop'),
    ],

    # NOTE(review): content and the closing quote below are tagged
    # String.Double even though this is the single-quoted state — looks
    # unintentional; confirm before changing.
    'string-single': [
        (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r"'", String.Double, '#pop'),
    ],

    'string-url': [
        (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'\)', String.Other, '#pop'),
    ],

    'pseudo-class': [
        (r'[\w-]+', Name.Decorator),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'class': [
        (r'[\w-]+', Name.Class),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'id': [
        (r'[\w-]+', Name.Namespace),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'for': [
        (r'(from|to|through)', Operator.Word),
        include('value'),
    ],
}
class SassLexer(ExtendedRegexLexer):
    """
    For Sass stylesheets.

    *New in Pygments 1.3.*
    """

    name = 'Sass'
    aliases = ['sass', 'SASS']
    filenames = ['*.sass']
    mimetypes = ['text/x-sass']

    flags = re.IGNORECASE
    tokens = {
        # every line starts here; _indentation decides the next state
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'content': [
            (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
             'root'),
            (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
             'root'),
            (r'@import', Keyword, 'import'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # old-style mixin definition (=) and inclusion (+)
            (r'=[\w-]+', Name.Function, 'value'),
            (r'\+[\w-]+', Name.Decorator, 'value'),
            (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
             bygroups(Name.Variable, Operator), 'value'),
            (r':', Name.Attribute, 'old-style-attr'),
            (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
            (r'', Text, 'selector'),
        ],

        'single-comment': [
            (r'.+', Comment.Single),
            (r'\n', Text, 'root'),
        ],

        'multi-comment': [
            (r'.+', Comment.Multiline),
            (r'\n', Text, 'root'),
        ],

        'import': [
            (r'[ \t]+', Text),
            (r'\S+', String),
            (r'\n', Text, 'root'),
        ],

        'old-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*=', Operator, 'value'),
            (r'', Text, 'value'),
        ],

        'new-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*[=:]', Operator, 'value'),
        ],

        'inline-comment': [
            (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }

    # pull in the states shared with ScssLexer (Python 2 dict API)
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    # in indentation-based Sass a newline ends the value/selector
    tokens['value'].append((r'\n', Text, 'root'))
    tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
    """
    For SCSS stylesheets.
    """

    name = 'SCSS'
    aliases = ['scss']
    filenames = ['*.scss']
    mimetypes = ['text/x-scss']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@import', Keyword, 'value'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            (r'(\$[\w-]\w*)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
            (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
            (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
            (r'', Text, 'selector'),
        ],

        'attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*:', Operator, 'value'),
        ],

        'inline-comment': [
            (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }

    # pull in the states shared with SassLexer (Python 2 dict API)
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    # in brace-based SCSS, ';' and braces end the value/selector
    tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
    tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
class CoffeeScriptLexer(RegexLexer):
    """
    For `CoffeeScript`_ source code.

    .. _CoffeeScript: http://coffeescript.org

    *New in Pygments 1.3.*
    """

    name = 'CoffeeScript'
    aliases = ['coffee-script', 'coffeescript']
    filenames = ['*.coffee']
    mimetypes = ['text/coffeescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'###[^#].*?###', Comment.Multiline),
            (r'#(?!##[^#]).*?\n', Comment.Single),
        ],
        'multilineregex': [
            (r'[^/#]+', String.Regex),
            (r'///([gim]+\b|\B)', String.Regex, '#pop'),
            (r'#{', String.Interpol, 'interpoling_string'),
            (r'[/#]', String.Regex),
        ],
        # entered after tokens that may be followed by a regex literal,
        # to disambiguate '/' as regex start vs. division
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'///', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(?<![\.\$])(for|own|in|of|while|until|'
             r'loop|break|return|continue|'
             r'switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|null|'
             r'NaN|Infinity|undefined)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable,
              'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
              'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all coffee script strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted string don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String), # single quoted strings don't need " escapses
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String), # no need to escape quotes in triple-string
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class LiveScriptLexer(RegexLexer):
    """
    For `LiveScript`_ source code.

    The token table closely mirrors the CoffeeScript lexer above, extended
    with LiveScript-specific syntax (``<-`` backcalls, ``~>`` bound arrows,
    dashed identifiers, ``<[ word lists ]>``, ``\\word`` strings, ``~``-based
    radix integers).

    .. _LiveScript: http://gkz.github.com/LiveScript/
    New in Pygments 1.6.
    """
    name = 'LiveScript'
    aliases = ['live-script', 'livescript']
    filenames = ['*.ls']
    mimetypes = ['text/livescript']
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#.*?\n', Comment.Single),
        ],
        # Inside a ``//...//`` heregex (multi-line regex literal).
        'multilineregex': [
            include('commentsandwhitespace'),
            # closing ``//`` plus optional flags ends the heregex
            (r'//([gim]+\b|\B)', String.Regex, '#pop'),
            (r'/', String.Regex),
            (r'[^/#]+', String.Regex)
        ],
        # Entered wherever a ``/`` would start a regex rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'//', String.Regex, ('#pop', 'multilineregex')),
            # ``/(?! )`` — a slash followed by a space is division, not a regex
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            # empty match: nothing regex-like here, fall back to the caller
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # arrows (``->``/``~>``/``-->``...) and backcalls (``<-``/``<~``)
            (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
             r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
            # operators; word operators use a lookbehind to skip properties
            (r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
             r'[+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            # reserved words (LiveScript adds const/var/to/til over Coffee)
            (r'(?<![\.\$])(for|own|in|of|while|until|loop|break|'
             r'return|continue|switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by|const|var|to|til)\b', Keyword,
             'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|'
             r'null|NaN|Infinity|undefined|void)\b',
             Keyword.Constant),
            # well-known ECMAScript / browser global names
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # assignment targets (dashed identifiers are allowed here)
            (r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'),
            # numbers may carry a unit-style identifier suffix (e.g. ``10px``)
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
            # integers with optional ``radix~digits`` form, e.g. ``16~ff``
            (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
            # ``\word`` literal strings and ``<[ word lists ]>``
            (r'\\[\w$-]+', String),
            (r'<\[.*\]>', String),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all coffee script strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        # Inside ``#{...}`` interpolation: lex as ordinary code until '}'.
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted string don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            # a lone '#' (not starting interpolation) is plain string text
            (r'#', String),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String), # single quoted strings don't need " escapses
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String), # no need to escape quotes in triple-string
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class DuelLexer(RegexLexer):
    """
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.

    Markup is delegated to `HtmlLexer`; the ``<% ... %>`` directive/code
    blocks and inline ``<script>`` bodies are delegated to `JavascriptLexer`.

    See http://duelengine.org/.
    See http://jsonml.org/jbst/.
    *New in Pygments 1.4.*
    """
    name = 'Duel'
    aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
    filenames = ['*.duel','*.jbst']
    mimetypes = ['text/x-duel','text/x-jbst']
    flags = re.DOTALL
    tokens = {
        'root': [
            # code/directive blocks: <% %>, <%= %>, <%# %>, <%! %>, <%: %>, <%@ %>
            (r'(<%[@=#!:]?)(.*?)(%>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            # resource expressions: <%$ name : key %>
            (r'(<%\$)(.*?)(:)(.*?)(%>)',
             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
            # server-side comments: <%-- ... --%>
            (r'(<%--)(.*?)(--%>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            # inline <script> bodies are JavaScript
            (r'(<script.*?>)(.*?)(</script>)',
             bygroups(using(HtmlLexer),
                      using(JavascriptLexer), using(HtmlLexer))),
            # everything else up to the next '<' (then the remainder) is HTML
            (r'(.+?)(?=<)', using(HtmlLexer)),
            (r'.+', using(HtmlLexer)),
        ],
    }
class ScamlLexer(ExtendedRegexLexer):
    """
    For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.

    Indentation-sensitive: the ``root`` state measures leading whitespace via
    the ``_indentation`` callback, and embedded Scala expressions are
    delegated to `ScalaLexer`.

    *New in Pygments 1.4.*
    """

    name = 'Scaml'
    aliases = ['scaml', 'SCAML']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']

    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines.  Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'

    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # hand indentation handling to the callback, which dispatches
            # into 'content'
            (r'[ \t]*', _indentation),
        ],

        # implicit div shortcuts: .class and #id
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        # after a tag: either an evaluated Scala expression (=, ~, &=, !=,
        # ...) or plain text for the rest of the line
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            # empty match: no evaluator marker, treat as plain text
            (r'', Text, 'plain'),
        ],

        # dispatched to by _indentation for each logical line
        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # conditional HTML comment: /[if IE] ...
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                 'scaml-comment-block'), '#pop'),
            # -@ attribute/import declarations
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            # - silent Scala code line
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            # :filter blocks (e.g. :javascript)
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        # inside a %tag line: attributes, object refs, whitespace modifiers
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            # whitespace removal markers < and >
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        # plain text, with #{...} Scala interpolation
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        # indented block following "/" — rendered HTML comment
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        # indented block following "-#" — comment stripped from output
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class JadeLexer(ExtendedRegexLexer):
    """
    For Jade markup.
    Jade is a variant of Scaml, see:
    http://scalate.fusesource.org/documentation/scaml-reference.html

    Structurally identical to `ScamlLexer` except that tags are written as
    bare names (no ``%`` prefix) and ``|`` introduces a plain-text line.
    Embedded expressions are delegated to `ScalaLexer` (this lexer targets
    the Scalate/Scala flavour of Jade).

    *New in Pygments 1.4.*
    """

    name = 'Jade'
    aliases = ['jade', 'JADE']
    filenames = ['*.jade']
    mimetypes = ['text/x-jade']

    flags = re.IGNORECASE
    _dot = r'.'

    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # indentation handling is delegated to the callback
            (r'[ \t]*', _indentation),
        ],

        # implicit div shortcuts: .class and #id
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        # evaluated Scala expression or plain text for the rest of the line
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),  'root'),
            # empty match: no evaluator marker, treat as plain text
            (r'', Text, 'plain'),
        ],

        # dispatched to by _indentation for each logical line
        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # conditional HTML comment: /[if IE] ...
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                 'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            # - silent code line
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            # :filter blocks (e.g. :javascript)
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            # bare tag name (Jade has no '%' tag prefix)
            (r'[a-z0-9_:-]+', Name.Tag, 'tag'),
            # '|' introduces a plain-text line
            (r'\|', Text, 'eval-or-plain'),
        ],

        # inside a tag line: attributes, object refs, whitespace modifiers
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        # plain text, with #{...} Scala interpolation
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class XQueryLexer(ExtendedRegexLexer):
    """
    An XQuery lexer, parsing a stream and outputting the tokens needed to
    highlight xquery code.

    Because XQuery freely nests direct XML constructors inside expressions
    (and vice versa via ``{...}``), a plain state *stack* is not enough:
    this lexer keeps its own side stack, ``xquery_parse_state``, that
    remembers which lexer state to resume after a nested construct closes.
    The ``pushstate_*`` / ``popstate_*`` callbacks below manage that stack
    through the `ExtendedRegexLexer` context object.

    *New in Pygments 1.4.*
    """
    name = 'XQuery'
    aliases = ['xquery', 'xqy']
    filenames = ['*.xqy', '*.xquery']
    mimetypes = ['text/xquery', 'application/xquery']

    # side stack of lexer states to resume after nested constructs;
    # NOTE: class-level, so it is shared across instances of this lexer
    xquery_parse_state = []

    # FIX UNICODE LATER
    #ncnamestartchar = (
    #    ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
    #    ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
    #    ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
    #    ur"[\u10000-\uEFFFF]"
    #)
    # ASCII-only approximations of the XML NCName / QName productions
    ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
    # FIX UNICODE LATER
    #ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
    #                                ur"[\u203F-\u2040]")
    ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
    ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
    # PI target may not spell "xml" in any case, hence the letter ranges
    pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
    pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
    pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
    prefixedname = "%s:%s" % (ncname, ncname)
    unprefixedname = ncname
    qname = "(?:%s|%s)" % (prefixedname, unprefixedname)

    entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
    charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'

    # string literals; embedded quotes are doubled ("" / '')
    stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
    stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"

    # FIX UNICODE LATER
    #elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    #                      ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
    #quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
    #                       ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
    #aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    #                       ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'

    # CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
    #                 aposattrcontentchar
    #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]

    flags = re.DOTALL | re.MULTILINE | re.UNICODE

    def punctuation_root_callback(lexer, match, ctx):
        """Emit punctuation and unconditionally reset the stack to 'root'."""
        yield match.start(), Punctuation, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()

    def operator_root_callback(lexer, match, ctx):
        """Emit an operator and unconditionally reset the stack to 'root'."""
        yield match.start(), Operator, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()

    def popstate_tag_callback(lexer, match, ctx):
        """Close a tag and resume the state saved on the side stack."""
        yield match.start(), Name.Tag, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()

    def popstate_xmlcomment_callback(lexer, match, ctx):
        """Close an XML comment and resume the saved state."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()

    def popstate_kindtest_callback(lexer, match, ctx):
        """Close a kind test ``)``; honour a pending occurrence indicator."""
        yield match.start(), Punctuation, match.group(1)
        next_state = lexer.xquery_parse_state.pop()
        if next_state == 'occurrenceindicator':
            if re.match("[?*+]+", match.group(2)):
                yield match.start(), Punctuation, match.group(2)
                ctx.stack.append('operator')
                ctx.pos = match.end()
            else:
                ctx.stack.append('operator')
                ctx.pos = match.end(1)
        else:
            ctx.stack.append(next_state)
            ctx.pos = match.end(1)

    def popstate_callback(lexer, match, ctx):
        """Generic ``}`` handler: resume saved state or fall back sanely."""
        yield match.start(), Punctuation, match.group(1)
        # if we have run out of our state stack, pop whatever is on the pygments
        # state stack
        if len(lexer.xquery_parse_state) == 0:
            ctx.stack.pop()
        elif len(ctx.stack) > 1:
            ctx.stack.append(lexer.xquery_parse_state.pop())
        else:
            # i don't know if i'll need this, but in case, default back to root
            ctx.stack = ['root']
        ctx.pos = match.end()

    def pushstate_element_content_starttag_callback(lexer, match, ctx):
        """``<`` inside element content: save 'element_content', lex the tag."""
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('element_content')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()

    def pushstate_cdata_section_callback(lexer, match, ctx):
        """Enter a CDATA section, remembering where to come back to."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        # NOTE(review): appends the *bound method* ``ctx.state.pop`` without
        # calling it, and LexerContext exposes ``stack`` rather than
        # ``state`` — this looks like it should save the current state
        # (cf. the string literals used by the sibling callbacks); confirm.
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.pos = match.end()

    def pushstate_starttag_callback(lexer, match, ctx):
        """``<`` starts a direct element constructor; save the current state."""
        yield match.start(), Name.Tag, match.group(1)
        # NOTE(review): same uncalled ``ctx.state.pop`` as in
        # pushstate_cdata_section_callback above — confirm intent.
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.stack.append('start_tag')
        ctx.pos = match.end()

    def pushstate_operator_order_callback(lexer, match, ctx):
        """``ordered {`` / ``unordered {``: lex body as root, resume operator."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    def pushstate_operator_root_validate(lexer, match, ctx):
        """``validate {``: lex body as root, resume operator afterwards."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    def pushstate_operator_root_validate_withmode(lexer, match, ctx):
        """``validate lax|strict``: keyword pair, then body as root."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Keyword, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
        """``<?`` in expression position: lex PI, then resume operator."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
        """``<?`` inside element content: lex PI, resume element content."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()

    def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
        """``<![CDATA[`` inside element content."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()

    def pushstate_operator_cdata_section_callback(lexer, match, ctx):
        """``<![CDATA[`` in expression position."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
        """``<!--`` inside element content."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()

    def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
        """``<!--`` in expression position."""
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    def pushstate_kindtest_callback(lexer, match, ctx):
        """Nested kind test inside a kind test (``element(...)``)."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('kindtest')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()

    def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
        """``processing-instruction(`` kind test; resume operator after."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtestforpi')
        ctx.pos = match.end()

    def pushstate_operator_kindtest_callback(lexer, match, ctx):
        """Node kind test (``element(``, ``text(`` ...); resume operator."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()

    def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
        """Kind test inside a sequence type; an occurrence indicator may follow."""
        yield match.start(), Name.Tag, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('occurrenceindicator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()

    def pushstate_operator_starttag_callback(lexer, match, ctx):
        """``<`` in operator position: direct constructor, resume operator."""
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()

    def pushstate_operator_root_callback(lexer, match, ctx):
        """``{`` enclosed expression: lex as root, resume operator at ``}``."""
        yield match.start(), Punctuation, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']#.append('root')
        ctx.pos = match.end()

    def pushstate_operator_root_construct_callback(lexer, match, ctx):
        """Computed constructor (``element {``, ``text {`` ...)."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']
        ctx.pos = match.end()

    def pushstate_root_callback(lexer, match, ctx):
        """``{`` anywhere else: save the current state and lex as root."""
        yield match.start(), Punctuation, match.group(1)
        cur_state = ctx.stack.pop()
        lexer.xquery_parse_state.append(cur_state)
        ctx.stack = ['root']#.append('root')
        ctx.pos = match.end()

    def pushstate_operator_attribute_callback(lexer, match, ctx):
        """Attribute reference; continue in operator state."""
        yield match.start(), Name.Attribute, match.group(1)
        ctx.stack.append('operator')
        ctx.pos = match.end()

    def pushstate_operator_callback(lexer, match, ctx):
        """Keyword + ``(``/``{``: remember to resume operator afterwards."""
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()

    tokens = {
        # XQuery ``(: ... :)`` comments, which may nest via '#push'
        'comment': [
            # xquery comments
            (r'(:\))', Comment, '#pop'),
            (r'(\(:)', Comment, '#push'),
            (r'[^:)]', Comment),
            # lone ':' or ')' not part of a delimiter is still comment text
            (r'([^:)]|:|\))', Comment),
        ],
        'whitespace': [
            (r'\s+', Text),
        ],
        # state after a complete expression: operators, keywords, joins
        'operator': [
            include('whitespace'),
            (r'(\})', popstate_callback),
            (r'\(:', Comment, 'comment'),

            (r'(\{)', pushstate_root_callback),
            (r'then|else|external|at|div|except', Keyword, 'root'),
            (r'order by', Keyword, 'root'),
            (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
            (r'and|or', Operator.Word, 'root'),
            (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
             Operator.Word, 'root'),
            (r'return|satisfies|to|union|where|preserve\s+strip',
             Keyword, 'root'),
            (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)',
             operator_root_callback),
            (r'(::|;|\[|//|/|,)',
             punctuation_root_callback),
            (r'(castable|cast)(\s+)(as)\b',
             bygroups(Keyword, Text, Keyword), 'singletype'),
            (r'(instance)(\s+)(of)\b',
             bygroups(Keyword, Text, Keyword), 'itemtype'),
            (r'(treat)(\s+)(as)\b',
             bygroups(Keyword, Text, Keyword), 'itemtype'),
            (r'(case|as)\b', Keyword, 'itemtype'),
            (r'(\))(\s*)(as)',
             bygroups(Punctuation, Text, Keyword), 'itemtype'),
            (r'\$', Name.Variable, 'varname'),
            (r'(for|let)(\s+)(\$)',
             bygroups(Keyword, Text, Name.Variable), 'varname'),
            #(r'\)|\?|\]', Punctuation, '#push'),
            (r'\)|\?|\]', Punctuation),
            (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
            (r'ascending|descending|default', Keyword, '#push'),
            (r'external', Keyword),
            (r'collation', Keyword, 'uritooperator'),
            # finally catch all string literals and stay in operator state
            (stringdouble, String.Double),
            (stringsingle, String.Single),

            (r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
        ],
        # a URI literal after 'collation'; return to the caller afterwards
        'uritooperator': [
            (stringdouble, String.Double, '#pop'),
            (stringsingle, String.Single, '#pop'),
        ],
        # body of a namespace / module declaration up to the ';'
        'namespacedecl': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
            (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r',', Punctuation),
            (r'=', Operator),
            (r';', Punctuation, 'root'),
            (ncname, Name.Namespace),
        ],
        # after 'import schema/module', 'declare default element' etc.
        'namespacekeyword': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (stringdouble, String.Double, 'namespacedecl'),
            (stringsingle, String.Single, 'namespacedecl'),
            (r'inherit|no-inherit', Keyword, 'root'),
            (r'namespace', Keyword, 'namespacedecl'),
            (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
            (r'preserve|no-preserve', Keyword),
            (r',', Punctuation),
        ],
        # the QName following a '$' variable sigil
        'varname': [
            (r'\(:', Comment, 'comment'),
            (qname, Name.Variable, 'operator'),
        ],
        # SingleType after 'cast as' / 'castable as'
        'singletype': [
            (r'\(:', Comment, 'comment'),
            (ncname + r'(:\*)', Name.Variable, 'operator'),
            (qname, Name.Variable, 'operator'),
        ],
        # SequenceType / ItemType after 'instance of', 'treat as', 'case', 'as'
        'itemtype': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'\$', Punctuation, 'varname'),
            (r'(void)(\s*)(\()(\s*)(\))',
             bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
            (r'(element|attribute|schema-element|schema-attribute|comment|text|'
             r'node|binary|document-node|empty-sequence)(\s*)(\()',
             pushstate_occurrenceindicator_kindtest_callback),
            # Marklogic specific type?
            (r'(processing-instruction)(\s*)(\()',
             bygroups(Keyword, Text, Punctuation),
             ('occurrenceindicator', 'kindtestforpi')),
            (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
             bygroups(Keyword, Text, Punctuation, Text, Punctuation),
             'occurrenceindicator'),
            (r'\(\#', Punctuation, 'pragma'),
            (r';', Punctuation, '#pop'),
            (r'then|else', Keyword, '#pop'),
            (r'(at)(\s+)(' + stringdouble + ')',
             bygroups(Keyword, Text, String.Double), 'namespacedecl'),
            (r'(at)(\s+)(' + stringsingle + ')',
             bygroups(Keyword, Text, String.Single), 'namespacedecl'),
            (r'except|intersect|in|is|return|satisfies|to|union|where',
             Keyword, 'root'),
            (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
            (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'),
            (r'external|at', Keyword, 'root'),
            (r'(stable)(\s+)(order)(\s+)(by)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
            (r'(castable|cast)(\s+)(as)',
             bygroups(Keyword, Text, Keyword), 'singletype'),
            (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
            (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
            (r'case|as', Keyword, 'itemtype'),
            (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
            (ncname + r':\*', Keyword.Type, 'operator'),
            (qname, Keyword.Type, 'occurrenceindicator'),
        ],
        # inside a kind test's parentheses, e.g. element(name, type)
        'kindtest': [
            (r'\(:', Comment, 'comment'),
            (r'{', Punctuation, 'root'),
            (r'(\))([*+?]?)', popstate_kindtest_callback),
            (r'\*', Name, 'closekindtest'),
            (qname, Name, 'closekindtest'),
            (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
        ],
        # inside processing-instruction(...)
        'kindtestforpi': [
            (r'\(:', Comment, 'comment'),
            (r'\)', Punctuation, '#pop'),
            (ncname, Name.Variable),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
        ],
        # after the node name inside a kind test
        'closekindtest': [
            (r'\(:', Comment, 'comment'),
            (r'(\))', popstate_callback),
            (r',', Punctuation),
            (r'(\{)', pushstate_operator_root_callback),
            (r'\?', Punctuation),
        ],
        # inside <!-- ... -->
        'xml_comment': [
            (r'(-->)', popstate_xmlcomment_callback),
            (r'[^-]{1,2}', Literal),
            (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        # inside <? target ... ?>
        'processing_instruction': [
            (r'\s+', Text, 'processing_instruction_content'),
            (r'\?>', String.Doc, '#pop'),
            (pitarget, Name),
        ],
        'processing_instruction_content': [
            (r'\?>', String.Doc, '#pop'),
            (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        # inside <![CDATA[ ... ]]>
        'cdata_section': [
            (r']]>', String.Doc, '#pop'),
            (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        # inside a direct-constructor start tag, before '>' or '/>'
        'start_tag': [
            include('whitespace'),
            (r'(/>)', popstate_tag_callback),
            (r'>', Name.Tag, 'element_content'),
            (r'"', Punctuation, 'quot_attribute_content'),
            (r"'", Punctuation, 'apos_attribute_content'),
            (r'=', Operator),
            (qname, Name.Tag),
        ],
        # attribute value delimited by double quotes; {...} is an enclosed expr
        'quot_attribute_content': [
            (r'"', Punctuation, 'start_tag'),
            (r'(\{)', pushstate_root_callback),
            (r'""', Name.Attribute),
            (quotattrcontentchar, Name.Attribute),
            (entityref, Name.Attribute),
            (charref, Name.Attribute),
            (r'\{\{|\}\}', Name.Attribute),
        ],
        # attribute value delimited by single quotes
        'apos_attribute_content': [
            (r"'", Punctuation, 'start_tag'),
            (r'\{', Punctuation, 'root'),
            (r"''", Name.Attribute),
            (aposattrcontentchar, Name.Attribute),
            (entityref, Name.Attribute),
            (charref, Name.Attribute),
            (r'\{\{|\}\}', Name.Attribute),
        ],
        # between a start tag's '>' and the matching end tag
        'element_content': [
            (r'</', Name.Tag, 'end_tag'),
            (r'(\{)', pushstate_root_callback),
            (r'(<!--)', pushstate_element_content_xmlcomment_callback),
            (r'(<\?)', pushstate_element_content_processing_instruction_callback),
            (r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
            (r'(<)', pushstate_element_content_starttag_callback),
            (elementcontentchar, Literal),
            (entityref, Literal),
            (charref, Literal),
            (r'\{\{|\}\}', Literal),
        ],
        'end_tag': [
            include('whitespace'),
            (r'(>)', popstate_tag_callback),
            (qname, Name.Tag),
        ],
        # after 'declare boundary-space'
        'xmlspace_decl': [
            (r'\(:', Comment, 'comment'),
            (r'preserve|strip', Keyword, '#pop'),
        ],
        # after 'declare ordering'
        'declareordering': [
            (r'\(:', Comment, 'comment'),
            include('whitespace'),
            (r'ordered|unordered', Keyword, '#pop'),
        ],
        # after 'xquery version', up to the ';'
        'xqueryversion': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r'encoding', Keyword),
            (r';', Punctuation, '#pop'),
        ],
        # pragma name and body: (# name content #)
        'pragma': [
            (qname, Name.Variable, 'pragmacontents'),
        ],
        'pragmacontents': [
            (r'#\)', Punctuation, 'operator'),
            (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
            (r'(\s+)', Text),
        ],
        # optional ?, *, + after a sequence type
        'occurrenceindicator': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'\*|\?|\+', Operator, 'operator'),
            (r':=', Operator, 'root'),
            # empty match: no indicator present, continue in operator state
            (r'', Text, 'operator'),
        ],
        # the QName after 'declare option'
        'option': [
            include('whitespace'),
            (qname, Name.Variable, '#pop'),
        ],
        # a standalone QName followed by '{' (constructor) or '(' (call)
        'qname_braren': [
            include('whitespace'),
            (r'(\{)', pushstate_operator_root_callback),
            (r'(\()', Punctuation, 'root'),
        ],
        'element_qname': [
            (qname, Name.Variable, 'root'),
        ],
        'attribute_qname': [
            (qname, Name.Variable, 'root'),
        ],
        # start of an expression
        'root': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),

            # handle operator state
            # order on numbers matters - handle most complex first
            (r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'),
            (r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'),
            (r'(\.\d+|\d+\.\d*)', Number, 'operator'),
            (r'(\d+)', Number.Integer, 'operator'),
            (r'(\.\.|\.|\))', Punctuation, 'operator'),
            (r'(declare)(\s+)(construction)',
             bygroups(Keyword, Text, Keyword), 'operator'),
            (r'(declare)(\s+)(default)(\s+)(order)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
            (ncname + ':\*', Name, 'operator'),
            ('\*:'+ncname, Name.Tag, 'operator'),
            ('\*', Name.Tag, 'operator'),
            (stringdouble, String.Double, 'operator'),
            (stringsingle, String.Single, 'operator'),

            (r'(\})', popstate_callback),

            #NAMESPACE DECL
            (r'(declare)(\s+)(default)(\s+)(collation)',
             bygroups(Keyword, Text, Keyword, Text, Keyword)),
            (r'(module|declare)(\s+)(namespace)',
             bygroups(Keyword, Text, Keyword), 'namespacedecl'),
            (r'(declare)(\s+)(base-uri)',
             bygroups(Keyword, Text, Keyword), 'namespacedecl'),

            #NAMESPACE KEYWORD
            (r'(declare)(\s+)(default)(\s+)(element|function)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
            (r'(import)(\s+)(schema|module)',
             bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
            (r'(declare)(\s+)(copy-namespaces)',
             bygroups(Keyword, Text, Keyword), 'namespacekeyword'),

            #VARNAMEs
            (r'(for|let|some|every)(\s+)(\$)',
             bygroups(Keyword, Text, Name.Variable), 'varname'),
            (r'\$', Name.Variable, 'varname'),
            (r'(declare)(\s+)(variable)(\s+)(\$)',
             bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),

            #ITEMTYPE
            (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),

            (r'(element|attribute|schema-element|schema-attribute|comment|'
             r'text|node|document-node|empty-sequence)(\s+)(\()',
             pushstate_operator_kindtest_callback),

            (r'(processing-instruction)(\s+)(\()',
             pushstate_operator_kindtestforpi_callback),

            (r'(<!--)', pushstate_operator_xmlcomment_callback),

            (r'(<\?)', pushstate_operator_processing_instruction_callback),

            (r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),

            # (r'</', Name.Tag, 'end_tag'),
            (r'(<)', pushstate_operator_starttag_callback),

            (r'(declare)(\s+)(boundary-space)',
             bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),

            (r'(validate)(\s+)(lax|strict)',
             pushstate_operator_root_validate_withmode),
            (r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
            (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
            (r'(element|attribute)(\s*)(\{)',
             pushstate_operator_root_construct_callback),

            (r'(document|text|processing-instruction|comment)(\s*)(\{)',
             pushstate_operator_root_construct_callback),
            #ATTRIBUTE
            (r'(attribute)(\s+)(?=' + qname + r')',
             bygroups(Keyword, Text), 'attribute_qname'),
            #ELEMENT
            (r'(element)(\s+)(?=' +qname+ r')',
             bygroups(Keyword, Text), 'element_qname'),
            #PROCESSING_INSTRUCTION
            (r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
             bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
             'operator'),

            (r'(declare|define)(\s+)(function)',
             bygroups(Keyword, Text, Keyword)),

            (r'(\{)', pushstate_operator_root_callback),

            (r'(unordered|ordered)(\s*)(\{)',
             pushstate_operator_order_callback),

            (r'(declare)(\s+)(ordering)',
             bygroups(Keyword, Text, Keyword), 'declareordering'),

            (r'(xquery)(\s+)(version)',
             bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),

            (r'(\(#)', Punctuation, 'pragma'),

            # sometimes return can occur in root state
            (r'return', Keyword),

            (r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
             'option'),

            #URI LITERALS - single and double quoted
            (r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
            (r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),

            (r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
             bygroups(Keyword, Punctuation)),

            (r'(descendant|following-sibling|following|parent|preceding-sibling'
             r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),

            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),

            (r'then|else', Keyword),

            # ML specific
            (r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
            (r'(catch)(\s*)(\()(\$)',
             bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),

            (r'(@'+qname+')', Name.Attribute),
            (r'(@'+ncname+')', Name.Attribute),
            (r'@\*:'+ncname, Name.Attribute),
            (r'(@)', Name.Attribute),

            (r'//|/|\+|-|;|,|\(|\)', Punctuation),

            # STANDALONE QNAMES
            (qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
            (qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
            (qname, Name.Tag, 'operator'),
        ]
    }
class DartLexer(RegexLexer):
    """
    For `Dart <http://dartlang.org/>`_ source code.

    *New in Pygments 1.5.*
    """

    name = 'Dart'
    aliases = ['dart']
    filenames = ['*.dart']
    mimetypes = ['text/x-dart']

    # DOTALL lets /* ... */ comments span lines; MULTILINE anchors the
    # shebang/EOL-sensitive rules per line.
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (r'#!(.*?)$', Comment.Preproc),
            (r'(#)(import|library|source)', bygroups(Text, Keyword)),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            # Class/interface declarations hand off to 'class' to tag the name.
            (r'(class|interface)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(assert|break|case|catch|continue|default|do|else|finally|for|'
             r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
             Keyword),
            (r'(abstract|const|extends|factory|final|get|implements|'
             r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
            (r'(bool|double|Dynamic|int|num|Object|String|void)', Keyword.Type),
            (r'(false|null|true)', Keyword.Constant),
            (r'@"(\\\\|\\"|[^"])*"', String.Double), # raw string
            (r"@'(\\\\|\\'|[^'])*'", String.Single), # raw string
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[(){}\[\],.;]', Punctuation),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # DIGIT+ (‘.’ DIGIT*)? EXPONENT?
            (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
            (r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
            (r'\n', Text)
            # pseudo-keyword negate intentionally left out
        ],
        'class': [
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        # Both string states support $name and ${expr} interpolation;
        # ${...} contents are re-lexed with this same lexer via using(this).
        'string_double': [
            (r'"', String.Double, '#pop'),
            (r'[^"$]+', String.Double),
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol)),
            (r'\$+', String.Double)
        ],
        'string_single': [
            (r"'", String.Single, '#pop'),
            (r"[^'$]+", String.Single),
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol)),
            (r'\$+', String.Single)
        ]
    }
class LassoLexer(RegexLexer):
    """
    For `Lasso <http://www.lassosoft.com/>`_ source code, covering both
    Lasso 9 syntax and LassoScript for Lasso 8.6 and earlier. For Lasso
    embedded in HTML, use the `LassoHtmlLexer`.

    Additional options accepted:

    `builtinshighlighting`
        If given and ``True``, highlight builtin tags, types, traits, and
        methods (default: ``True``).
    `requiredelimiters`
        If given and ``True``, only highlight code between delimiters as Lasso
        (default: ``False``).

    *New in Pygments 1.6.*
    """

    name = 'Lasso'
    aliases = ['lasso', 'lassoscript']
    filenames = ['*.lasso', '*.lasso[89]']
    alias_filenames = ['*.incl', '*.inc', '*.las']
    mimetypes = ['text/x-lasso']
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE

    tokens = {
        # 'root' classifies text as plain output vs delimited Lasso code.
        'root': [
            (r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
            (r'\s+', Other),
            (r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
            (r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc,
             ('delimiters', 'anglebrackets')),
            (r'<', Other, 'delimiters'),
            include('lasso'),
        ],
        'delimiters': [
            (r'\[noprocess\]', Comment.Preproc, 'noprocess'),
            (r'\[', Comment.Preproc, 'squarebrackets'),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
            (r'<', Other),
            (r'[^[<]+', Other),
        ],
        'noprocess': [
            (r'\[/noprocess\]', Comment.Preproc, '#pop'),
            (r'\[', Other),
            (r'[^[]', Other),
        ],
        'squarebrackets': [
            (r'\]', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'anglebrackets': [
            (r'\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        # 'lasso' holds the language rules shared by every delimiter style.
        'lasso': [
            # whitespace/comments
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*\*!.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),

            # names
            (r'\$[a-z_][\w\.]*', Name.Variable),
            (r'(#[a-z_][\w\.]*|#\d+)', Name.Variable.Instance),
            (r"\.'[a-z_][\w\.]*'", Name.Variable.Class),
            (r"(self)(->)('[a-z_][\w\.]*')",
             bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
            (r'(self|void)\b', Name.Builtin.Pseudo),
            (r'-[a-z_][\w\.]*', Name.Attribute),
            (r'(::)([a-z_][\w\.]*)', bygroups(Punctuation, Name.Label)),
            (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
             r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
             r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
             r'Error_InvalidDatabase|Error_InvalidPassword|'
             r'Error_InvalidUsername|Error_ModuleNotFound|'
             r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
             r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
             r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
             r'Error_UpdateError)\b', Name.Exception),

            # definitions
            (r'(parent)(\s+)([a-z_][\w\.]*)',
             bygroups(Keyword.Declaration, Text, Name.Class)),
            (r'(define)(\s+)([a-z_][\w\.]*)(\s*)(=>)(\s*)(type|trait|thread)',
             bygroups(Keyword.Declaration, Text, Name.Class, Text, Operator,
                      Text, Keyword)),
            (r'(define)(\s+)([a-z_][\w\.]*)(->)([a-z_][\w\.]*=?)',
             bygroups(Keyword.Declaration, Text, Name.Class, Operator,
                      Name.Function)),
            (r'(define)(\s+)([a-z_][\w\.]*=?)',
             bygroups(Keyword.Declaration, Text, Name.Function)),
            (r'(public|protected|private)(\s+)([a-z_][\w\.]*)(\s*)(=>)',
             bygroups(Keyword, Text, Name.Function, Text, Operator)),
            (r'(public|protected|private|provide)(\s+)([a-z_][\w\.]*=?)(\s*)(\()',
             bygroups(Keyword, Text, Name.Function, Text, Punctuation)),

            # keywords
            (r'\.\.\.', Keyword.Pseudo),
            (r'(true|false|null|[+\-]?infinity|\+?NaN)\b', Keyword.Constant),
            (r'(local|var|variable|global|data)\b', Keyword.Declaration),
            (r'(array|date|decimal|duration|integer|map|pair|string|tag|'
             r'xml)\b', Keyword.Type),
            (r'(/?)(Cache|Database_Names|Database_SchemaNames|'
             r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
             r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
             r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
             r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
             r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
             r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
             r'Namespace_Using|NoProcess|Output_None|Portal|Private|Protect|'
             r'Records|Referer|Referrer|Repeating|ResultSet|Rows|Search_Args|'
             r'Search_Arguments|Select|Sort_Args|Sort_Arguments|Thread_Atomic|'
             r'Value_List|While|Abort|Case|Else|If_Empty|If_False|If_Null|'
             r'If_True|Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|'
             r'Return|Return_Value|Run_Children|SOAP_DefineTag|'
             r'SOAP_LastRequest|SOAP_LastResponse|Tag_Name)\b',
             bygroups(Punctuation, Keyword)),
            (r'(and|ascending|average|by|case|define|descending|do|else|'
             r'equals|frozen|group|import|in|inherited|into|join|let|match|'
             r'max|min|not|on|or|order|params|parent|private|protected|'
             r'provide|public|require|return|select|skip|sum|take|thread|to|'
             r'trait|type|where|with)\b', Keyword),

            # literals
            (r'([+\-]?\d*\.\d+(e[+\-]?\d+)?)', Number.Float),
            (r'0x[\da-f]+', Number.Hex),
            (r'[+\-]?\d+', Number.Integer),
            (r"'", String.Single, 'singlestring'),
            (r'"', String.Double, 'doublestring'),
            (r'`[^`]*`', String.Backtick),

            # other
            (r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
             bygroups(Operator, Operator.Word)),
            (r'([=\+\-\*/%<>&|!\?\.\\]+|:=)', Operator),
            (r'[{}():;,@^]', Punctuation),
            # Fallback: any bare word; may be re-tagged as a builtin below.
            (r'(/?)([\w\.]+)', bygroups(Punctuation, Name.Other)),
        ],
        'singlestring': [
            (r"'", String.Single, '#pop'),
            (r"[^'\\]+", String.Single),
            include('escape'),
            (r"\\+", String.Single),
        ],
        'doublestring': [
            (r'"', String.Double, '#pop'),
            (r'[^"\\]+', String.Double),
            include('escape'),
            (r'\\+', String.Double),
        ],
        'escape': [
            (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
             r'[abefnrtv\"\'\?\\]|$)', String.Escape),
        ],
    }

    def __init__(self, **options):
        # Parse the two lexer-specific options documented in the docstring.
        self.builtinshighlighting = get_bool_opt(
            options, 'builtinshighlighting', True)
        self.requiredelimiters = get_bool_opt(
            options, 'requiredelimiters', False)

        self._builtins = set()
        if self.builtinshighlighting:
            from pygments.lexers._lassobuiltins import BUILTINS
            # NOTE(review): dict.iteritems() is Python 2 only — this module
            # appears to predate Python 3 support; confirm target version.
            for key, value in BUILTINS.iteritems():
                self._builtins.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Start inside 'delimiters' so that undelimited text is left alone
        # when the requiredelimiters option is set.
        stack = ['root']
        if self.requiredelimiters:
            stack.append('delimiters')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            # Post-pass: promote bare words that match known builtins.
            if token is Name.Other:
                if value.lower() in self._builtins:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # Heuristic confidence score for lexer auto-detection; each Lasso
        # fingerprint adds weight.
        rv = 0.0
        if 'bin/lasso9' in text:
            rv += 0.8
        if re.search(r'<\?(=|lasso)', text, re.I):
            rv += 0.4
        if re.search(r'local\(', text, re.I):
            rv += 0.4
        if re.search(r'(\[\n|\?>)', text):
            rv += 0.4
        return rv
| StarcoderdataPython |
11391173 | <reponame>jhutchings1/pyright
# This sample tests the type checker's handling of generic protocol types.
# NOTE: the "should generate an error" comments below are *expected static
# type-checker diagnostics*; running this file raises nothing at runtime,
# since the flagged lines are plain assignments between annotated names.

from typing import TypeVar, Protocol

T = TypeVar('T')
T_co = TypeVar('T_co', covariant=True)
T_contra = TypeVar('T_contra', contravariant=True)


class Box(Protocol[T_co]):
    # Covariant: Box[int] is assignable to Box[float].
    def content(self) -> T_co:
        ...


box: Box[float]
second_box: Box[int]

# This should not generate an error due to the covariance of 'Box'.
box = second_box


class Sender(Protocol[T_contra]):
    # Contravariant: Sender[float] is assignable to Sender[int].
    def send(self, data: T_contra) -> int:
        ...


sender: Sender[float]
new_sender: Sender[int]

# This should not generate an error because 'Sender' is contravariant.
new_sender = sender


class Proto(Protocol[T]):
    # Mutable attribute forces T to be invariant.
    attr: T


class NotProto2:
    attr: int


var: Proto[float]
another_var: Proto[int]

# This should generate an error because T is invariant.
var = another_var

another_var2: NotProto2

# This should generate an error because T is invariant.
var = another_var2
| StarcoderdataPython |
3352341 | from . import flavors
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and an optional payload.

    ``to_dict()`` produces the JSON-serializable body for the error
    response, with the message merged into any caller-supplied payload.
    """

    # Default HTTP status; instances may override it via the constructor.
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        self.payload = payload
        # Only shadow the class-level default when a code was supplied.
        if status_code is not None:
            self.status_code = status_code

    def to_dict(self):
        """Return the response body: payload entries plus the message."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
class UnknownFlavor(Exception):
    """Raised when a requested flavor is not one of the registered flavors."""

    def __init__(self, *args, **kwargs):
        # List every acceptable flavor name in the error message, each
        # wrapped in single quotes.
        allowed = ', '.join(f"'{name}'" for name in flavors.allowed_flavors())
        super().__init__('Allowed flavors are ' + allowed + '.', *args, **kwargs)
class FlavorNotSet(InvalidUsage):
    """400-level API error (via InvalidUsage) raised when a request omits the flavor."""

    def __init__(self, *args, **kwargs):
        # Fixed message; extra args/kwargs pass through to InvalidUsage.
        super().__init__(message='No flavor has been set.', *args, **kwargs)
| StarcoderdataPython |
4892023 | <gh_stars>0
from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_uuidtools(RubyGemsTestUtils):
    """Smoke tests for the 'uuidtools' gem: installed and loadable.

    Both helpers come from RubyGemsTestUtils — presumably they shell out
    to `gem`/`ruby`; confirm in the base class.
    """

    def test_gem_list_rubygems_uuidtools(self):
        # The gem must appear as installed.
        self.gem_is_installed("uuidtools")

    def test_load_uuidtools(self):
        # The gem must be requirable from Ruby.
        self.gem_is_loadable("uuidtools")
| StarcoderdataPython |
4901835 | <reponame>TrabalhoAPC2021-02/Trabalho_Final<gh_stars>0
from click import style
import dash
import dash_core_components as dcc
from dash import html
from dash.dependencies import Output, Input
import dash_bootstrap_components as dbc
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import numpy as np
import json
# Load the CLT-vs-outsourced salary spreadsheet straight from GitHub.
url = 'https://raw.githubusercontent.com/TrabalhoAPC2021-02/Trabalho_Final/main/arquivos/02.1_salarios_CLT_Terceirizado.xlsx'
df = pd.read_excel(url)

# NOTE(review): the result of value_counts() is discarded — likely a
# leftover notebook inspection line.
df['Cargo'].value_counts()

# Build one dataframe per most-frequent job title.
programador = df[df['Cargo'] == 'Programador']
anpro = df[df['Cargo'] == 'Analista Programador']
ansup = df[df['Cargo'] == 'Analista de Suporte']
ansis = df[df['Cargo'] == 'Analista de sistemas']

# Extract the outsourced ("Terceiro") salary column as plain lists.
t_programador = programador['Terceiro'].tolist()
t_anpro = anpro['Terceiro'].tolist()
t_ansup =ansup['Terceiro'].tolist()
t_ansis = ansis['Terceiro'].tolist()
# Outsourced ("Terceiro") salaries: total and mean per job title.
# The original index-based accumulation loops are replaced by the built-in
# sum(); iteration order (and thus the float results) is unchanged.
# NOTE(review): assumes each list is non-empty — an empty filter upstream
# would raise ZeroDivisionError here, exactly as the original code did.
soma_t_pro = sum(t_programador)
media_pro = soma_t_pro / len(t_programador)
print('Terc_programador',soma_t_pro, media_pro)

soma_t_anpro = sum(t_anpro)
media_anpro = soma_t_anpro / len(t_anpro)
print('Terc_analista_programador',soma_t_anpro, media_anpro)

soma_t_ansup = sum(t_ansup)
media_ansup = soma_t_ansup / len(t_ansup)
print('Terc_analista_suporte', soma_t_ansup, media_ansup)

soma_t_ansis = sum(t_ansis)
media_ansis = soma_t_ansis / len(t_ansis)
print('Terc_analista_sistemas',soma_t_ansis, media_ansis)

# Small display dataframe holding the per-title means (blank index labels).
column = ['Cargo', 'Terceiro']
line = ['', '', '', '']
dados = [['Analista de Suporte', media_ansup], ['Programador', media_pro],
         ['Analista programador', media_anpro], ['Analista de sistemas', media_ansis]]
tb_terceiro = pd.DataFrame(data=dados, index=line, columns=column)
print(tb_terceiro)
# CLT (formal employment) salaries: same aggregation for the 'CLT' column.
clt_programador = programador['CLT'].tolist()
clt_anpro = anpro['CLT'].tolist()
clt_ansup = ansup['CLT'].tolist()
clt_ansis = ansis['CLT'].tolist()

# sum() replaces the original manual index loops; results are identical.
# NOTE(review): assumes non-empty lists, as before (ZeroDivisionError otherwise).
soma_clt_pro = sum(clt_programador)
media_clt_pro = soma_clt_pro / len(clt_programador)
print('CLT_programador', soma_clt_pro, media_clt_pro)

soma_clt_anpro = sum(clt_anpro)
media_clt_anpro = soma_clt_anpro / len(clt_anpro)
print('CLT_analista_programador', soma_clt_anpro, media_clt_anpro)

soma_clt_ansup = sum(clt_ansup)
media_clt_ansup = soma_clt_ansup / len(clt_ansup)
print('CLT_analista_suporte', soma_clt_ansup, media_clt_ansup)

soma_clt_ansis = sum(clt_ansis)
media_clt_ansis = soma_clt_ansis / len(clt_ansis)
print('CLT_analista_sistemas',soma_clt_ansis, media_clt_ansis)

# Display dataframe with the CLT means per job title (blank index labels).
column = ['Cargo', 'CLT']
line = ['', '', '', '']
dados = [['Analista de Suporte', media_clt_ansup], ['Programador', media_clt_pro],
         ['Analista programador', media_clt_anpro], ['Analista de sistemas', media_clt_ansis]]
tb_clt = pd.DataFrame(data=dados, index=line, columns=column)
print(tb_clt)
# Dash application: a dark-themed dashboard comparing outsourced vs CLT pay.
app =dash.Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])

# Funnel chart with one trace per contract type, fed by the mean tables.
fig = go.Figure()
fig.add_trace(go.Funnel(name='Terceiro', y=tb_terceiro['Cargo'], x=tb_terceiro['Terceiro']))
fig.add_trace(go.Funnel(name='CLT', orientation='h', y=tb_clt['Cargo'], x=tb_clt['CLT']))
fig.update_layout(
    template='plotly_dark',
    autosize=True,
    # NOTE(review): go.Margin is a legacy alias (go.layout.Margin in newer
    # plotly releases) — confirm against the pinned plotly version.
    margin=go.Margin(l=0, r=0, t=0, b=0),
)

#============================================
# Layout — left column: header, job dropdown, summary cards; right: chart.
app.layout = dbc.Container(
    dbc.Row([
        dbc.Col([
            html.Div([
                html.Img(id='logo', src=app.get_asset_url("logo.png"), height=50),
                html.H5("Salário em R$ Terceirizados e CLT"),
                dbc.Button("MERCADO DE TI", color='primary', id='cargos_botoes', size='lg'),
            ], style={}),
            html.P('Informe qual cargo deseja visualizar', style={'margin-top': '40px'}),
            html.Div(id='test_div', children=[
                dcc.Dropdown(
                    # TODO: pass a list containing only the 4 job titles
                    df['Cargo'].unique(),
                    'Cargos',
                    id='test_botao'
                )
            ]),
            dbc.Row([
                dbc.Col([
                    dbc.Card([
                        dbc.CardBody([
                            html.Span('Maior salário CLT'),
                            html.H3(style={'color': '#04bf3c'}, id='maior_salario_clt'),
                            html.Span('Menor salário CLT'),
                            html.H5(id='menor_clt'),
                        ])
                    ], color='light', outline=True, style={'margin-top': '10px', 'box-shadow': '0 4px 4px 0 rgba(0,0,0,0.15), 0 4px 20px 0 rgba(0,0,0,0.19)', 'color': '#FFFFFF'})
                ], md=6),
                dbc.Col([
                    dbc.Card([
                        dbc.CardBody([
                            html.Span('Maior salário Terceiro'),
                            html.H3(style={'color': '#0413bf'}, id='maior_salario_terceiro'),
                            html.Span('Menor salário Terceiro'),
                            html.H5(id='menor_terceiro'),
                        ])
                    ], color='light', outline=True, style={'margin-top': '10px', 'box-shadow': '0 4px 4px 0 rgba(0,0,0,0.15), 0 4px 20px 0 rgba(0,0,0,0.19)', 'color': '#FFFFFF'})
                ], md=6),
            ]),
        ], md=5, style={'padding': '25px', 'background-color': '#242424'}),
        dbc.Col([
            dcc.Loading(id='loading_1', type='default'
            ,children=[
                dcc.Graph(id='Funil', figure=fig, style={'height' : '100vh', 'margin-right' : '10px'})
            ])
        ]),
    ])
, fluid=True)

#=================================
# Interactivity — run the dev server when executed directly.
if __name__ == '__main__':
    app.run_server(debug=True)
4865095 | from typing import List
class Solution:
    """Two-pointer solution to the classic "trapping rain water" problem."""

    def trap(self, bars: List[int]) -> int:
        """Return the total water volume trapped between the given bars.

        Walks inward from both ends, always advancing the side whose
        running maximum is lower; that maximum bounds the water level at
        the current position, so each step's contribution is exact.
        """
        # Fewer than three bars can never hold water.
        if not bars or len(bars) < 3:
            return 0
        lo, hi = 0, len(bars) - 1
        best_left = bars[lo]
        best_right = bars[hi]
        total = 0
        while lo < hi:
            best_left = max(best_left, bars[lo])
            best_right = max(best_right, bars[hi])
            if best_left <= best_right:
                total += best_left - bars[lo]
                lo += 1
            else:
                total += best_right - bars[hi]
                hi -= 1
        return total
# Quick manual check: the expected trapped volume for this profile is 6.
s = Solution()
ip = [0,1,0,2,1,0,1,3,2,1,2,1]
ans = s.trap(ip)
print(ans)
6442836 | # Natural Language Toolkit: An Crubadan N-grams Reader
#
# Copyright (C) 2001-2021 NLTK Project
# Author: <NAME> <<EMAIL>>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
An NLTK interface for the n-gram statistics gathered from
the corpora for each language using An Crubadan.
There are multiple potential applications for the data but
this reader was created with the goal of using it in the
context of language identification.
For details about An Crubadan, this data, and its potential uses, see:
http://borel.slu.edu/crubadan/index.html
"""
import re
from os import path
from nltk.corpus.reader import CorpusReader
from nltk.data import ZipFilePathPointer
from nltk.probability import FreqDist
class CrubadanCorpusReader(CorpusReader):
    """
    A corpus reader used to access language An Crubadan n-gram files.
    """

    _LANG_MAPPER_FILE = "table.txt"
    # Class-level cache: ISO 639-3 code -> FreqDist of 3-grams.
    # NOTE(review): shared across *all* reader instances — confirm intended.
    _all_lang_freq = {}

    def __init__(self, root, fileids, encoding="utf8", tagset=None):
        # Fix: forward the caller-supplied `encoding` instead of the
        # hard-coded "utf8" literal that previously ignored the parameter.
        # `tagset` is accepted for signature compatibility but unused here.
        super().__init__(root, fileids, encoding=encoding)
        self._lang_mapping_data = []
        self._load_lang_mapping_data()

    def lang_freq(self, lang):
        """Return n-gram FreqDist for a specific language
        given ISO 639-3 language code"""
        # Lazy-load and cache per language.
        if lang not in self._all_lang_freq:
            self._all_lang_freq[lang] = self._load_lang_ngrams(lang)

        return self._all_lang_freq[lang]

    def langs(self):
        """Return a list of supported languages as ISO 639-3 codes"""
        return [row[1] for row in self._lang_mapping_data]

    def iso_to_crubadan(self, lang):
        """Return internal Crubadan code based on ISO 639-3 code"""
        # Returns None when the code is unknown.
        for i in self._lang_mapping_data:
            if i[1].lower() == lang.lower():
                return i[0]

    def crubadan_to_iso(self, lang):
        """Return ISO 639-3 code given internal Crubadan code"""
        # Returns None when the code is unknown.
        for i in self._lang_mapping_data:
            if i[0].lower() == lang.lower():
                return i[1]

    def _load_lang_mapping_data(self):
        """Load language mappings between codes and description from table.txt"""
        if isinstance(self.root, ZipFilePathPointer):
            raise RuntimeError(
                "Please install the 'crubadan' corpus first, use nltk.download()"
            )

        mapper_file = path.join(self.root, self._LANG_MAPPER_FILE)
        if self._LANG_MAPPER_FILE not in self.fileids():
            raise RuntimeError("Could not find language mapper file: " + mapper_file)

        with open(mapper_file, encoding="utf-8") as raw:
            strip_raw = raw.read().strip()

        # Each row is "<crubadan_code>\t<iso_code>[\t...]".
        self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")]

    def _load_lang_ngrams(self, lang):
        """Load single n-gram language file given the ISO 639-3 language code
        and return its FreqDist"""
        if lang not in self.langs():
            raise RuntimeError("Unsupported language.")

        crubadan_code = self.iso_to_crubadan(lang)
        ngram_file = path.join(self.root, crubadan_code + "-3grams.txt")

        if not path.isfile(ngram_file):
            raise RuntimeError("No N-gram file found for requested language.")

        counts = FreqDist()
        with open(ngram_file, encoding="utf-8") as f:
            # Each line is "<count> <ngram>".
            for line in f:
                data = line.split(" ")
                ngram = data[1].strip("\n")
                freq = int(data[0])
                counts[ngram] = freq
        return counts
| StarcoderdataPython |
11258066 | <filename>httptest2/httptest2/testmodule/models.py<gh_stars>0
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class TestModule(models.Model):
    """One status record for a test module."""

    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    testmodule_id = models.IntegerField()  # external id of the test module
    status = models.BooleanField()  # pass/fail flag

    class Meta:
        # Default queryset ordering: oldest first.
        ordering=('created',)
| StarcoderdataPython |
8000435 | import time
import morpfw
import pytest
# lets setup a skeleton app
class App(morpfw.BaseApp):
    """Skeleton application; the scheduler hooks below register against it."""
    pass
class Root(object):
    """Empty root resource published at the application root path."""
    pass
@App.path(model=Root, path='')
def get_root(request):
    # Path directive: serve a fresh Root instance at ''.
    return Root()
# lets hook up some scheduled job

# run this code every 5 seconds
@App.periodic(name='myproject.every-5-seconds', seconds=5)
def run_5_secs(request_options):
    # Interval job: invoked by the scheduler every 5 seconds.
    print('periodic tick!')
# run this code every 1 minute, using cron style scheduling
@App.cron(name='myproject.minutely', minute='*')
def run_every_1_minute(request_options):
    # Cron job: fires once per minute (minute='*').
    print('cron tick!')
| StarcoderdataPython |
265940 | <gh_stars>10-100
from six.moves import queue
import gym
import threading
import uuid
from typing import Optional
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils.typing import EnvActionType, EnvObsType, EnvInfoDict
@PublicAPI
class ExternalEnv(threading.Thread):
    """An environment that interfaces with external agents.

    Unlike simulator envs, control is inverted: The environment queries the
    policy to obtain actions and in return logs observations and rewards for
    training. This is in contrast to gym.Env, where the algorithm drives the
    simulation through env.step() calls.

    You can use ExternalEnv as the backend for policy serving (by serving HTTP
    requests in the run loop), for ingesting offline logs data (by reading
    offline transitions in the run loop), or other custom use cases not easily
    expressed through gym.Env.

    ExternalEnv supports both on-policy actions (through self.get_action()),
    and off-policy actions (through self.log_action()).

    This env is thread-safe, but individual episodes must be executed serially.

    Examples:
        >>> register_env("my_env", lambda config: YourExternalEnv(config))
        >>> trainer = DQNTrainer(env="my_env")
        >>> while True:
        >>>     print(trainer.train())
    """

    @PublicAPI
    def __init__(self,
                 action_space: gym.Space,
                 observation_space: gym.Space,
                 max_concurrent: int = 100):
        """Initializes an ExternalEnv instance.

        Args:
            action_space: Action space of the env.
            observation_space: Observation space of the env.
            max_concurrent: Max number of active episodes to allow at
                once. Exceeding this limit raises an error.
        """
        threading.Thread.__init__(self)

        # Daemon thread: the user-defined run loop must not keep the
        # process alive on shutdown.
        self.daemon = True
        self.action_space = action_space
        self.observation_space = observation_space
        self._episodes = {}  # episode_id -> _ExternalEnvEpisode (active)
        self._finished = set()  # ids of completed episodes; reuse is an error
        # Shared with every episode object; notified when step data arrives.
        self._results_avail_condition = threading.Condition()
        # NOTE(review): stored but not enforced anywhere in this class —
        # presumably checked by the consumer of this env; confirm.
        self._max_concurrent_episodes = max_concurrent

    @PublicAPI
    def run(self):
        """Override this to implement the run loop.

        Your loop should continuously:
            1. Call self.start_episode(episode_id)
            2. Call self.[get|log]_action(episode_id, obs, [action]?)
            3. Call self.log_returns(episode_id, reward)
            4. Call self.end_episode(episode_id, obs)
            5. Wait if nothing to do.

        Multiple episodes may be started at the same time.
        """
        raise NotImplementedError

    @PublicAPI
    def start_episode(self,
                      episode_id: Optional[str] = None,
                      training_enabled: bool = True) -> str:
        """Record the start of an episode.

        Args:
            episode_id: Unique string id for the episode or
                None for it to be auto-assigned and returned.
            training_enabled: Whether to use experiences for this
                episode to improve the policy.

        Returns:
            Unique string id for the episode.
        """
        if episode_id is None:
            episode_id = uuid.uuid4().hex

        if episode_id in self._finished:
            raise ValueError(
                "Episode {} has already completed.".format(episode_id))

        if episode_id in self._episodes:
            raise ValueError(
                "Episode {} is already started".format(episode_id))

        self._episodes[episode_id] = _ExternalEnvEpisode(
            episode_id, self._results_avail_condition, training_enabled)

        return episode_id

    @PublicAPI
    def get_action(self, episode_id: str,
                   observation: EnvObsType) -> EnvActionType:
        """Record an observation and get the on-policy action.

        Args:
            episode_id: Episode id returned from start_episode().
            observation: Current environment observation.

        Returns:
            Action from the env action space.
        """
        episode = self._get(episode_id)
        # Blocks until the policy produces an action for this observation.
        return episode.wait_for_action(observation)

    @PublicAPI
    def log_action(self, episode_id: str, observation: EnvObsType,
                   action: EnvActionType) -> None:
        """Record an observation and (off-policy) action taken.

        Args:
            episode_id: Episode id returned from start_episode().
            observation: Current environment observation.
            action: Action for the observation.
        """
        episode = self._get(episode_id)
        episode.log_action(observation, action)

    @PublicAPI
    def log_returns(self,
                    episode_id: str,
                    reward: float,
                    info: Optional[EnvInfoDict] = None) -> None:
        """Records returns (rewards and infos) from the environment.

        The reward will be attributed to the previous action taken by the
        episode. Rewards accumulate until the next action. If no reward is
        logged before the next action, a reward of 0.0 is assumed.

        Args:
            episode_id: Episode id returned from start_episode().
            reward: Reward from the environment.
            info: Optional info dict.
        """
        episode = self._get(episode_id)
        # Rewards accumulate between actions; infos replace the previous one.
        episode.cur_reward += reward

        if info:
            episode.cur_info = info or {}

    @PublicAPI
    def end_episode(self, episode_id: str, observation: EnvObsType) -> None:
        """Records the end of an episode.

        Args:
            episode_id: Episode id returned from start_episode().
            observation: Current environment observation.
        """
        episode = self._get(episode_id)
        self._finished.add(episode.episode_id)
        episode.done(observation)

    def _get(self, episode_id: str) -> "_ExternalEnvEpisode":
        """Get a started episode by its ID or raise an error."""
        if episode_id in self._finished:
            raise ValueError(
                "Episode {} has already completed.".format(episode_id))

        if episode_id not in self._episodes:
            raise ValueError("Episode {} not found.".format(episode_id))

        return self._episodes[episode_id]
class _ExternalEnvEpisode:
    """Tracked state for each active episode.

    Bridges the user's env thread and the sampler: step data flows out
    through ``data_queue`` and actions flow back through ``action_queue``.
    """

    def __init__(self,
                 episode_id: str,
                 results_avail_condition: threading.Condition,
                 training_enabled: bool,
                 multiagent: bool = False):
        self.episode_id = episode_id
        # Condition owned by the parent ExternalEnv; notified from _send().
        self.results_avail_condition = results_avail_condition
        self.training_enabled = training_enabled
        self.multiagent = multiagent
        self.data_queue = queue.Queue()    # step data -> sampler
        self.action_queue = queue.Queue()  # actions -> env thread
        # Per-step buffers; the *_dict variants are used in multiagent mode.
        if multiagent:
            self.new_observation_dict = None
            self.new_action_dict = None
            self.cur_reward_dict = {}
            self.cur_done_dict = {"__all__": False}
            self.cur_info_dict = {}
        else:
            self.new_observation = None
            self.new_action = None
            self.cur_reward = 0.0
            self.cur_done = False
            self.cur_info = {}

    def get_data(self):
        # Non-blocking: returns None when no step data is pending.
        if self.data_queue.empty():
            return None
        return self.data_queue.get_nowait()

    def log_action(self, observation, action):
        # Off-policy path: publish obs + chosen action, then block (up to
        # 60s) until an action is queued back; that action is discarded.
        if self.multiagent:
            self.new_observation_dict = observation
            self.new_action_dict = action
        else:
            self.new_observation = observation
            self.new_action = action
        self._send()
        self.action_queue.get(True, timeout=60.0)

    def wait_for_action(self, observation):
        # On-policy path: publish the observation and block (up to 300s)
        # for the policy's action.
        if self.multiagent:
            self.new_observation_dict = observation
        else:
            self.new_observation = observation
        self._send()
        return self.action_queue.get(True, timeout=300.0)

    def done(self, observation):
        # Mark the episode terminal ("__all__" in multiagent mode) and
        # publish the final observation.
        if self.multiagent:
            self.new_observation_dict = observation
            self.cur_done_dict = {"__all__": True}
        else:
            self.new_observation = observation
            self.cur_done = True
        self._send()

    def _send(self):
        # Package the pending step into a dict, reset the per-step buffers,
        # and publish it under the shared condition so the sampler wakes up.
        if self.multiagent:
            if not self.training_enabled:
                for agent_id in self.cur_info_dict:
                    self.cur_info_dict[agent_id]["training_enabled"] = False
            item = {
                "obs": self.new_observation_dict,
                "reward": self.cur_reward_dict,
                "done": self.cur_done_dict,
                "info": self.cur_info_dict,
            }
            if self.new_action_dict is not None:
                item["off_policy_action"] = self.new_action_dict
            self.new_observation_dict = None
            self.new_action_dict = None
            self.cur_reward_dict = {}
        else:
            item = {
                "obs": self.new_observation,
                "reward": self.cur_reward,
                "done": self.cur_done,
                "info": self.cur_info,
            }
            if self.new_action is not None:
                item["off_policy_action"] = self.new_action
            self.new_observation = None
            self.new_action = None
            self.cur_reward = 0.0
            if not self.training_enabled:
                item["info"]["training_enabled"] = False

        with self.results_avail_condition:
            self.data_queue.put_nowait(item)
            self.results_avail_condition.notify()
| StarcoderdataPython |
4900270 | from collections import namedtuple
from mock import patch
import re
import unittest
from urllib import urlencode
from wtforms.validators import ValidationError
import pytest
from fixtures import standard_graph, graph, users, groups, session, permissions # noqa
from fixtures import fe_app as app # noqa
from util import get_group_permissions, get_user_permissions, grant_permission
from grouper.constants import (
ARGUMENT_VALIDATION,
AUDIT_MANAGER,
PERMISSION_ADMIN,
PERMISSION_AUDITOR,
PERMISSION_GRANT,
PERMISSION_VALIDATION,
)
from grouper.fe.forms import ValidateRegex
import grouper.fe.util
from grouper.models import AsyncNotification, Group, Permission, User
from grouper.permissions import (
get_grantable_permissions,
get_owner_arg_list,
get_owners_by_grantable_permission,
get_requests_by_owner,
)
from url_util import url
@pytest.fixture
def grantable_permissions(session, standard_graph):
    """Create PERMISSION_GRANT plus three 'grantable*' permissions for tests."""
    perm_grant, _ = Permission.get_or_create(session, name=PERMISSION_GRANT, description="")
    perm0, _ = Permission.get_or_create(session, name="grantable", description="")
    perm1, _ = Permission.get_or_create(session, name="grantable.one", description="")
    perm2, _ = Permission.get_or_create(session, name="grantable.two", description="")
    session.commit()
    return perm_grant, perm0, perm1, perm2
def _get_unsent_and_mark_as_sent_emails(session):
    """Helper to count unsent emails and then mark them as sent."""
    # `== False` is a SQLAlchemy column expression, not a Python comparison;
    # do not "fix" it to `is False`.
    emails = session.query(AsyncNotification).filter(AsyncNotification.sent == False).all()
    for email in emails:
        email.sent = True
    session.commit()
    return emails
def test_basic_permission(standard_graph, session, users, groups, permissions):  # noqa
    """ Test adding some permissions to various groups and ensuring that the permissions are all
    implemented as expected. This also tests permissions inheritance in the graph. """
    graph = standard_graph  # noqa

    # Direct group grants plus grants inherited through group membership.
    assert sorted(get_group_permissions(graph, "team-sre")) == ["audited:", "ssh:*", "sudo:shell"]
    assert sorted(get_group_permissions(graph, "tech-ops")) == [
        "audited:", "ssh:shell", "sudo:shell"]
    assert sorted(get_group_permissions(graph, "team-infra")) == ["sudo:shell"]
    assert sorted(get_group_permissions(graph, "all-teams")) == []

    # User permissions are the union over all of the user's groups.
    # (email addresses were redacted to <EMAIL> in this dataset copy)
    assert sorted(get_user_permissions(graph, "<EMAIL>")) == [
        "audited:", "ssh:*", "ssh:shell", "sudo:shell"]
    assert sorted(get_user_permissions(graph, "<EMAIL>")) == [
        "audited:", "ssh:*", "ssh:shell", "sudo:shell"]
    assert sorted(get_user_permissions(graph, "<EMAIL>")) == [
        "audited:", AUDIT_MANAGER + ":", PERMISSION_AUDITOR + ":", "ssh:*", "sudo:shell"]
    assert sorted(get_user_permissions(graph, "<EMAIL>")) == []
    assert sorted(get_user_permissions(graph, "<EMAIL>")) == [
        "sudo:shell"]
def test_has_permission(standard_graph, users):  # noqa
    """ Tests the has_permission method of a user object. """
    # In our setup, zorkian has 'audited' with no arguments, so only the
    # argument-less check should succeed.
    assert users["<EMAIL>"].has_permission("audited"), "zorkian has permission audited"
    assert not users["<EMAIL>"].has_permission("audited", argument='foo'), \
        "zorkian has permission audited:foo"
    assert not users["<EMAIL>"].has_permission("audited", argument='*'), \
        "zorkian has permission audited:*"

    # zay has ssh:*, which should match any requested argument.
    assert users["<EMAIL>"].has_permission("ssh"), "zay has permission ssh"
    assert users["<EMAIL>"].has_permission("ssh", argument='foo'), "zay has permission ssh:foo"
    assert users["<EMAIL>"].has_permission("ssh", argument='*'), "zay has permission ssh:*"
class PermissionTests(unittest.TestCase):
    """Validation tests for permission and argument naming rules."""

    def test_reject_bad_permission_names(self):
        """Reserved name prefixes are rejected, and the regex validators
        accept valid permission/argument strings while raising
        ValidationError on invalid ones."""
        # `assertEquals` is a deprecated alias of `assertEqual` (removed
        # from modern unittest); use the canonical spelling.
        self.assertEqual(len(grouper.fe.util.test_reserved_names("permission_lacks_period")), 1)
        self.assertEqual(len(grouper.fe.util.test_reserved_names("grouper.prefix.reserved")), 1)
        self.assertEqual(len(grouper.fe.util.test_reserved_names("admin.prefix.reserved")), 1)
        self.assertEqual(len(grouper.fe.util.test_reserved_names("test.prefix.reserved")), 1)

        Field = namedtuple("field", "data")

        def eval_permission(perm):
            # ValidateRegex raises ValidationError on mismatch and returns
            # None on success, so assertIsNone doubles as "does not raise".
            ValidateRegex(PERMISSION_VALIDATION)(form=None, field=Field(data=perm))

        self.assertIsNone(eval_permission('foo.bar'))
        self.assertIsNone(eval_permission('foobar'))
        self.assertIsNone(eval_permission('foo.bar_baz'))
        self.assertRaises(ValidationError, eval_permission, 'foo__bar')
        self.assertRaises(ValidationError, eval_permission, 'foo.bar.')
        self.assertRaises(ValidationError, eval_permission, 'foo._bar')

        def eval_argument(arg):
            ValidateRegex(ARGUMENT_VALIDATION)(form=None, field=Field(data=arg))

        self.assertIsNone(eval_argument('foo.bar'))
        self.assertIsNone(eval_argument('foobar'))
        self.assertIsNone(eval_argument('underscore_'))
        self.assertIsNone(eval_argument('equals='))
        self.assertIsNone(eval_argument('plus+'))
        self.assertIsNone(eval_argument('slash/'))
        self.assertIsNone(eval_argument('dot.'))
        self.assertIsNone(eval_argument('colon:'))
        self.assertIsNone(eval_argument('hyphen-'))
        self.assertIsNone(eval_argument('dollar_sign$'))
        self.assertIsNone(eval_argument('dollar$sign'))
        self.assertIsNone(eval_argument('left_bracket['))
        self.assertIsNone(eval_argument('right_bracket]'))
        self.assertIsNone(eval_argument('caret^'))
        self.assertIsNone(eval_argument('underscore_equals=plus+slash/dot.color:hyphen-ok'))
        self.assertRaises(ValidationError, eval_argument, 'whitespace invalid')
        self.assertRaises(ValidationError, eval_argument, 'question?mark')
        self.assertRaises(ValidationError, eval_argument, 'exclaimation!point')
def test_grantable_permissions(session, standard_graph, users, groups, grantable_permissions):
    """Granting 'grouper.permission.grant' makes matching permissions grantable."""
    perm_grant, perm0, perm1, _ = grantable_permissions
    assert not users["<EMAIL>"].my_grantable_permissions(), "start with none"

    # A grant on a permission that doesn't exist yields nothing grantable.
    grant_permission(groups["auditors"], perm_grant, argument="notgrantable.one")
    assert not users["<EMAIL>"].my_grantable_permissions(), "grant on non-existent is fine"

    # A grant naming one specific permission exposes exactly that permission.
    grant_permission(groups["auditors"], perm_grant, argument=perm0.name)
    grants = users["<EMAIL>"].my_grantable_permissions()
    assert len(grants) == 1, "only specific permission grant"
    assert grants[0][0].name == perm0.name, "only specific permission grant"

    # A wildcard grant exposes every matching permission.
    # Fix: typo "appropriat" -> "appropriate" in the assertion message.
    grant_permission(groups["auditors"], perm_grant, argument="grantable.*")
    grants = users["<EMAIL>"].my_grantable_permissions()
    assert len(grants) == 3, "wildcard grant should grab appropriate amount"
    assert sorted([x[0].name for x in grants]) == ["grantable", "grantable.one", "grantable.two"]

    args_by_perm = get_grantable_permissions(session, None)
    assert args_by_perm[perm1.name] == ["*"], "wildcard grant reflected in list of grantable"

    # A restricted-argument grant only narrows the listing when the
    # permission is named in the restricted set.
    grant_permission(groups["auditors"], perm_grant, argument="{}/single_arg".format(perm1.name))
    args_by_perm = get_grantable_permissions(session, None)
    assert args_by_perm[perm1.name] == ["*"], "wildcard grant reflected cause no restricted perms"
    args_by_perm = get_grantable_permissions(session, [perm1.name])
    assert args_by_perm[perm1.name] == ["single_arg"], \
        "least permissive argument shown cause of restricted perms"
def test_permission_grant_to_owners(session, standard_graph, groups, grantable_permissions):
    """Test we're getting correct owners according to granted
    'grouper.permission.grant' permissions."""
    perm_grant, _, perm1, perm2 = grantable_permissions
    assert not get_owners_by_grantable_permission(session), 'nothing to begin with'

    # grant a grant on a non-existent permission
    grant_permission(groups["auditors"], perm_grant, argument="notgrantable.one")
    assert not get_owners_by_grantable_permission(session), 'ignore grants for non-existent perms'

    # grant a wildcard grant -- make sure all permissions are represented and
    # the grant isn't inherited
    grant_permission(groups["all-teams"], perm_grant, argument="grantable.*")
    owners_by_arg_by_perm = get_owners_by_grantable_permission(session)
    expected = [groups['all-teams']]
    assert owners_by_arg_by_perm[perm1.name]['*'] == expected, 'grants are not inherited'
    assert len(owners_by_arg_by_perm) == 2
    assert len(owners_by_arg_by_perm[perm1.name]) == 1
    assert len(owners_by_arg_by_perm[perm2.name]) == 1

    # grant on argument substring
    grant_permission(groups["team-sre"], perm_grant, argument="{}/somesubstring*".format(
        perm1.name))
    owners_by_arg_by_perm = get_owners_by_grantable_permission(session)
    expected = [groups['all-teams']]
    assert owners_by_arg_by_perm[perm1.name]['*'] == expected
    expected = [groups["team-sre"]]
    assert owners_by_arg_by_perm[perm1.name]['somesubstring*'] == expected

    # make sure get_owner() respect substrings
    res = [o for o, a in get_owner_arg_list(session, perm1, "somesubstring",
                                            owners_by_arg_by_perm=owners_by_arg_by_perm)]
    # BUG FIX: the original wrapped the comparison and its message in
    # parentheses -- `assert (expr, "msg")` asserts a non-empty tuple, which
    # is always true, so this check never actually ran.
    assert sorted(res) == sorted([groups["all-teams"], groups["team-sre"]]), \
        "should include substring wildcard matches"
    res = [o for o, a in get_owner_arg_list(session, perm1, "othersubstring",
                                            owners_by_arg_by_perm=owners_by_arg_by_perm)]
    assert sorted(res) == [groups["all-teams"]], "negative test of substring wildcard matches"

    # permission admins have all the power
    perm_admin, _ = Permission.get_or_create(session, name=PERMISSION_ADMIN, description="")
    session.commit()
    grant_permission(groups["security-team"], perm_admin)
    owners_by_arg_by_perm = get_owners_by_grantable_permission(session)
    all_permissions = Permission.get_all(session)
    for perm in all_permissions:
        assert perm.name in owners_by_arg_by_perm, 'all permission should be represented'
        assert groups["security-team"] in owners_by_arg_by_perm[perm.name]["*"], \
            'permission admin should be wildcard owners'
def _load_permissions_by_group_name(session, group_name):
    """Return the permission names currently granted to the named group."""
    group_permissions = Group.get(session, name=group_name).my_permissions()
    return [perm_name for _, perm_name, _, _, _ in group_permissions]
@pytest.mark.gen_test
def test_permission_request_flow(session, standard_graph, groups, grantable_permissions,
                                 http_client, base_url):
    """Test that a permission request gets into the system correctly and
    notifications are sent correctly."""
    perm_grant, _, perm1, perm2 = grantable_permissions
    grant_permission(groups["all-teams"], perm_grant, argument="grantable.*")

    # REQUEST: 'grantable.one', 'some argument' for 'serving-team'
    groupname = "serving-team"
    username = "<EMAIL>"
    fe_url = url(base_url, "/groups/{}/permission/request".format(groupname))
    resp = yield http_client.fetch(fe_url, method="POST",
            body=urlencode({"permission_name": "grantable.one", "argument": "some argument",
                "reason": "blah blah black sheep", "argument_type": "text"}),
            headers={'X-Grouper-User': username})
    assert resp.code == 200

    emails = _get_unsent_and_mark_as_sent_emails(session)
    assert len(emails) == 1, "only one user (and no group) should receive notification for request"
    perms = _load_permissions_by_group_name(session, 'serving-team')
    assert len(perms) == 1
    assert "grantable.one" not in perms, "requested permission shouldn't be granted immediately"

    # Only a member of a group holding the matching grant sees the request.
    user = User.get(session, name='<EMAIL>')
    request_tuple, total = get_requests_by_owner(session, user, "pending", 10, 0)
    assert len(request_tuple.requests) == 0, "random user shouldn't have a request"
    user = User.get(session, name='<EMAIL>')
    request_tuple, total = get_requests_by_owner(session, user, "pending", 10, 0)
    assert len(request_tuple.requests) == 1, "user in group with grant should have a request"

    # APPROVE grant: have '<EMAIL>' action this request as owner of
    # 'all-teams' which has the grant permission for the requested permission
    request_id = request_tuple.requests[0].id
    fe_url = url(base_url, "/permissions/requests/{}".format(request_id))
    resp = yield http_client.fetch(fe_url, method="POST",
            body=urlencode({"status": "actioned", "reason": "lgtm"}),
            headers={'X-Grouper-User': user.name})
    assert resp.code == 200

    perms = _load_permissions_by_group_name(session, 'serving-team')
    assert len(perms) == 2
    # Fix: the assertion message was copy-pasted from the pre-approval check
    # ("shouldn't be granted immediately") and contradicted the condition.
    assert "grantable.one" in perms, "requested permission should be granted after approval"
    emails = _get_unsent_and_mark_as_sent_emails(session)
    assert len(emails) == 1, "requester should receive email as well"

    # (re)REQUEST: 'grantable.one', 'some argument' for 'serving-team'
    groupname = "serving-team"
    username = "<EMAIL>"
    fe_url = url(base_url, "/groups/{}/permission/request".format(groupname))
    resp = yield http_client.fetch(fe_url, method="POST",
            body=urlencode({"permission_name": "grantable.one", "argument": "some argument",
                "reason": "blah blah black sheep", "argument_type": "text"}),
            headers={'X-Grouper-User': username})
    assert resp.code == 200

    user = User.get(session, name='<EMAIL>')
    request_tuple, total = get_requests_by_owner(session, user, "pending", 10, 0)
    assert len(request_tuple.requests) == 0, "request for existing perm should fail"

    # REQUEST: 'grantable.two', 'some argument' for 'serving-team'
    groupname = "serving-team"
    username = "<EMAIL>"
    fe_url = url(base_url, "/groups/{}/permission/request".format(groupname))
    resp = yield http_client.fetch(fe_url, method="POST",
            body=urlencode({"permission_name": "grantable.two", "argument": "some argument",
                "reason": "blah blah black sheep", "argument_type": "text"}),
            headers={'X-Grouper-User': username})
    assert resp.code == 200

    emails = _get_unsent_and_mark_as_sent_emails(session)
    assert len(emails) == 1, "only one user (and no group) should receive notification for request"
    perms = _load_permissions_by_group_name(session, 'serving-team')
    assert len(perms) == 2
    assert "grantable.two" not in perms, "requested permission shouldn't be granted immediately"

    user = User.get(session, name='<EMAIL>')
    request_tuple, total = get_requests_by_owner(session, user, "pending", 10, 0)
    assert len(request_tuple.requests) == 0, "random user shouldn't have a request"
    user = User.get(session, name='<EMAIL>')
    request_tuple, total = get_requests_by_owner(session, user, "pending", 10, 0)
    assert len(request_tuple.requests) == 1, "user in group with grant should have a request"

    # CANCEL request: have '<EMAIL>' cancel this request
    request_id = request_tuple.requests[0].id
    fe_url = url(base_url, "/permissions/requests/{}".format(request_id))
    resp = yield http_client.fetch(fe_url, method="POST",
            body=urlencode({"status": "cancelled", "reason": "heck no"}),
            headers={'X-Grouper-User': user.name})
    assert resp.code == 200

    emails = _get_unsent_and_mark_as_sent_emails(session)
    assert len(emails) == 1, "rejection email should be sent"
    perms = _load_permissions_by_group_name(session, 'serving-team')
    assert len(perms) == 2
    assert "grantable.two" not in perms, "no new permissions should be granted for this"
@pytest.mark.gen_test
def test_limited_permissions(session, standard_graph, groups, grantable_permissions,
                             http_client, base_url):
    """Test that notifications are not sent to wildcard grant owners unless necessary."""
    perm_grant, _, perm1, _ = grantable_permissions
    # one super wildcard, one wildcard grant and one specific grant
    grant_permission(groups["sad-team"], perm_grant, argument="*")
    grant_permission(groups["all-teams"], perm_grant, argument="grantable.*")
    grant_permission(groups["security-team"], perm_grant,
                     argument="{}/specific_arg".format(perm1.name))
    # Collect the usernames of security-team's direct User members.
    security_team_members = {name for (t, name) in groups['security-team'].my_members().keys()
                             if t == 'User'}
    # SPECIFIC REQUEST: 'grantable.one', 'specific_arg' for 'sad-team'
    groupname = "sad-team"
    username = "<EMAIL>"
    fe_url = url(base_url, "/groups/{}/permission/request".format(groupname))
    resp = yield http_client.fetch(fe_url, method="POST",
            body=urlencode({"permission_name": perm1.name, "argument": "specific_arg",
                "reason": "blah blah black sheep", "argument_type": "text"}),
            headers={'X-Grouper-User': username})
    assert resp.code == 200
    # NOTE(review): two emails are expected here -- presumably one per
    # security-team member; confirm the fixture has exactly two members.
    emails = _get_unsent_and_mark_as_sent_emails(session)
    assert len(emails) == 2, "email only sent to security-team"
    assert not security_team_members.difference(e.email for e in emails), \
        "only security-team members get notification"
| StarcoderdataPython |
# Enable the Vault secrets plugin in NetBox.
PLUGINS = ["netbox_vault_secrets"]

PLUGINS_CONFIG = {
    "netbox_vault_secrets": {
        # Base URL of the Vault API endpoint.
        "api_url": "http://localhost:8082/",
        # Mount point of the KV secrets engine.
        "kv_mount_path": "/secret",
        # Path prefix under which NetBox-managed secrets live.
        "secret_path_prefix": "/netbox",
        # Vault authentication methods offered at login.
        "login_methods": ["token", "oidc"],
        "oidc": {
            "roles": {
                # Presumably maps an OIDC role name to a display label --
                # confirm against the plugin's documentation.
                "demo": "Google",
            },
        },
    }
}
| StarcoderdataPython |
1941530 | # -*- coding: utf-8 -*-
import gzip
import json
import logging
import multiprocessing
import sys
from datetime import datetime
from os.path import basename, join
import gensim
from textlytics.sentiment.document_preprocessing import DocumentPreprocessor
import spacy
# Load the English spaCy pipeline for tokenization only (no parser/NER).
parser = spacy.load('en', parser=False, entity=False)

# Configure the root logger to emit INFO-level messages to stdout.
log = logging.getLogger()
log.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
# ch.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class Word2VecAmazonReviews(object):
    """Iterable over tokenized Amazon review texts from a gzipped JSON-lines file.

    Each iteration yields one cleaned, tokenized review (a list of strings),
    suitable for streaming into gensim's Word2Vec.
    """

    def __init__(self, path):
        # Path to the gzipped JSON-lines file of reviews.
        self.path = path
        # Stop words to drop from every review (also passed to the
        # DocumentPreprocessor).
        self.stop_words = [
            u'all', u'just', u'over', u'both', u'through', u'its', u'before',
            u'herself', u'should', u'to', u'only', u'under', u'ours', u'then',
            u'them', u'his', u'they', u'during', u'now', u'him', u'nor',
            u'these', u'she', u'each', u'further', u'where', u'few',
            u'because', u'some', u'our', u'ourselves', u'out', u'what',
            u'for', u'while', u're', u'above', u'between', u'be', u'we',
            u'who', u'wa', u'here', u'hers', u'by', u'on', u'about',
            u'theirs', u'against', u'or', u'own', u'into', u'yourself',
            u'down', u'your', u'from', u'her', u'their', u'there', u'whom',
            u'too', u'themselves', u'until', u'more', u'himself', u'that',
            u'but', u'don', u'with', u'than', u'those', u'he', u'me',
            u'myself', u'this', u'up', u'will', u'below', u'can', u'of',
            u'my', u'and', u'do', u'it', u'an', u'as', u'itself', u'at',
            u'have', u'in', u'any', u'if', u'again', u'when', u'same',
            u'how', u'other', u'which', u'you', u'after', u'most', u'such',
            u'why', u'a', u'off', u'i', u'so', u'the', u'yours', u'once',
            '"\'"', '\'', 'quot']

    # TODO move to preprocessing module!!
    def clean_text(self, document):
        """Strip HTML/URLs, lower-case, tokenize and drop stop words.

        Returns a list of token strings.
        """
        dp = DocumentPreprocessor(self.stop_words)
        document = dp.clean_html(document)
        document = dp.remove_urls(document)
        document = document.strip()
        # document = dp.remove_numbers(document)
        # document = dp.remove_punctuation_and_multi_spaces_document(document)
        document = parser(unicode(document.lower()))
        # document = [t.lemma_.encode('utf-8') for t in document]
        # BUG FIX: the original compared spaCy Token objects against plain
        # strings (`w not in self.stop_words`), which never matches, so the
        # stop-word filter was a no-op and Token objects (not strings) were
        # yielded downstream. Compare and return the token text instead.
        document = [w.text for w in document if w.text not in self.stop_words]
        return document

    def __iter__(self):
        """Yield one token list per review line in the gzipped corpus."""
        for n_line, line in enumerate(gzip.open(self.path, 'r'), start=1):
            # for n_line, line in enumerate(open(self.path, 'r'), start=1):
            j = json.loads(line)
            toks = self.clean_text(j['reviewText'])
            yield toks
            # break
            # toks = self.clean_text(j['summary'])
            # yield toks
def w2v_train(amazon_domain_paths, output_path):
    """
    Word 2 Vec training:

    Trains one Word2Vec model per input dataset and saves each model to
    `output_path` in binary word2vec format.

    Parameters
    ----------
    amazon_domain_paths : list
        List of paths to the files with reviews (as default they are
        tar.gz files).
    output_path : string
        Path to the directory where all word_vectorization models will be saved.
    """
    # Per-dataset start/stop timestamps.
    # NOTE(review): `results` is filled in but never returned or persisted --
    # confirm whether these timings should be saved or logged.
    results = {'start': datetime.now()}
    for amazon_domain_path in amazon_domain_paths:
        size = 300
        cores = multiprocessing.cpu_count()
        f_name = basename(amazon_domain_path)
        log.info('Dataset is starting: {}'.format(f_name))
        results['{}-start'.format(f_name)] = datetime.now()
        # FIXME: embedding size is hard-coded for Word2Vec
        model = gensim.models.Word2Vec(min_count=3, window=10, size=size, workers=cores)
        # Two passes over the corpus: vocabulary build, then training.
        model.build_vocab(Word2VecAmazonReviews(amazon_domain_path))
        model.train(Word2VecAmazonReviews(amazon_domain_path))
        model.save_word2vec_format(join(output_path, '{}-size-{}.model'.format(f_name, size)), binary=True)
        results['{}-stop'.format(f_name)] = datetime.now()
    results['stop'] = datetime.now()
| StarcoderdataPython |
1723297 | import pygame
class Zombie(pygame.sprite.Sprite):
    """A zombie sprite that marches horizontally across the screen."""

    def __init__(self, direction):
        """Create a zombie travelling in *direction* ('left' or 'right')."""
        # Call the parent class (Sprite) constructor
        super().__init__()
        # Placeholder surface; replaced below for a recognized direction.
        self.image = pygame.Surface([4, 10])
        if direction == 'left':
            sprite_image = pygame.image.load('zombie_right.png')
        elif direction == 'right':
            sprite_image = pygame.image.load('zombie_left.png')
        else:
            sprite_image = None
        if sprite_image is not None:
            self.image = sprite_image.convert_alpha()
        self.rect = self.image.get_rect()
        self.direction = direction

    def update(self):
        """Advance the zombie 50 px per tick along its travel direction."""
        step_by_direction = {'left': 50, 'right': -50}
        step = step_by_direction.get(self.direction)
        if step is not None:
            self.rect.x += step
| StarcoderdataPython |
11222603 | """Coroutine of downloading media in parallel."""
import asyncio
from asyncio import Semaphore
from typing import Iterable, List, Optional
import aiohttp
from parallelmediadownloader.media_download_coroutine import DownloadOrder, MediaDownloadCoroutine
from parallelmediadownloader.media_filter import MediaFilter
from parallelmediadownloader.media_save_coroutine import MediaSaveCoroutine
from parallelmediadownloader.modeia_download_result import MediaDownloadResult
__all__ = ["ParallelMediaDownloadCoroutine"]
class ParallelMediaDownloadCoroutine:
    """Coroutine of downloading media in parallel."""

    @staticmethod
    async def execute(
        list_download_order: Iterable[DownloadOrder],
        *,
        limit: int = 5,
        media_filter: Optional[MediaFilter] = None,
        allow_http_status: List[int] = None
    ) -> List[MediaDownloadResult]:
        """Downloads media in parallel."""
        save_coroutine = MediaSaveCoroutine(media_filter=media_filter)
        downloader = MediaDownloadCoroutine(save_coroutine, allow_http_status=allow_http_status)
        # Cap the number of simultaneous downloads.
        concurrency_gate = Semaphore(limit)
        async with aiohttp.ClientSession() as client_session:
            pending = [
                downloader.execute(concurrency_gate, client_session, order)
                for order in list_download_order
            ]
            return await asyncio.gather(*pending)  # type: ignore
| StarcoderdataPython |
1799179 | <reponame>NovaSBE-DSKC/predict-campaing-sucess-rate
from dskc import exploration as dskc_exploration
from dskc import clean as dskc_clean
from dskc import modeling as dskc_modeling
from dskc import _util as dskc_util
from dskc.visualization import graphs as dskc_graphs
from dskc.visualization import terminal as dskc_terminal
from dskc.stats import dskc_stats
from dskc._settings import colors
from dskc.io import get_root_path
| StarcoderdataPython |
5057770 | from __future__ import division, print_function
# Multicut Pipeline implemented with luigi
# Taksks for Feature Calculation
import luigi
from .customTargets import VolumeTarget
from .dataTasks import InputData, StackedRegionAdjacencyGraph, ExternalSegmentation
from .defectHandlingTasks import ModifiedAdjacency
from .pipelineParameter import PipelineParameter
from .tools import config_logger, run_decorator
import logging
import os
import numpy as np
import vigra
from concurrent import futures
# import the proper nifty version
try:
import nifty.graph.rag as nrag
except ImportError:
try:
import nifty_with_cplex.graph.rag as nrag
except ImportError:
import nifty_with_gurobi.graph.rag as nrag
# init the workflow logger
# Module-level logger, configured via the project's shared helper.
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
# FIXME need to adjust chunks for parallel n5 writing
class RegionNodeFeatures(luigi.Task):
    """Luigi task computing per-node region statistics.

    For every superpixel (RAG node) it accumulates vigra region statistics of
    the raw data, slice by slice, and writes an (n_nodes, 20) float32 table.
    """
    pathToInput = luigi.Parameter()   # path to the raw data volume
    pathToSeg = luigi.Parameter()     # path to the oversegmentation volume
    keyToInput = luigi.Parameter(default='data')
    keyToSeg = luigi.Parameter(default='data')

    def requires(self):
        """Depends on the raw data, the segmentation and its stacked RAG."""
        return {"data": InputData(self.pathToInput),
                "seg": ExternalSegmentation(self.pathToSeg),
                "rag": StackedRegionAdjacencyGraph(self.pathToSeg, self.keyToSeg)}

    @run_decorator
    def run(self):
        """Extract region statistics per slice, in parallel over slices."""
        inp = self.input()
        data = inp["data"]
        seg = inp["seg"]
        data.open(self.keyToInput)
        seg.open(self.keyToSeg)
        shape = data.shape(self.keyToInput)
        assert shape == seg.shape(self.keyToSeg), str(shape) + " , " + str(seg.shape())
        # (n_slices, 2) table of min/max node label per slice -- used to map
        # slice-local extractor output to global node rows.
        min_max_node = inp['rag'].readKey('minMaxLabelPerSlice').astype('uint32')
        n_nodes = inp['rag'].readKey('numberOfNodes')
        n_feats = 20
        # list of the region statistics, that we want to extract
        # drop the Histogram, because it blows up the feature space...
        # TODO also use Mean and add Histogram if needed
        statistics = ["Count", "Kurtosis",  # Histogram
                      "Maximum", "Minimum", "Quantiles",
                      "RegionRadii", "Skewness", "Sum",
                      "Variance", "Weighted<RegionCenter>", "RegionCenter"]
        out = self.output()
        out_shape = (n_nodes, n_feats)
        chunk_shape = (min(5000, n_nodes), out_shape[1])
        out.open("data", shape=out_shape, chunks=chunk_shape, dtype='float32')

        # get region statistics with the vigra region feature extractor for a single slice
        def extract_stats_slice(z):
            start, end = [z, 0, 0], [z + 1, shape[1], shape[2]]
            min_node, max_node = min_max_node[z, 0], min_max_node[z, 1]
            data_slice = data.read(start, end, self.keyToInput).squeeze().astype('float32', copy=False)
            # Shift labels so the extractor sees slice-local labels from 0.
            seg_slice = seg.read(start, end, self.keyToSeg).squeeze() - min_node
            extractor = vigra.analysis.extractRegionFeatures(data_slice,
                                                             seg_slice.astype('uint32', copy=False),
                                                             features=statistics)
            # Collect all statistics into one (n_slice_nodes, n_feats) array,
            # promoting 1-d stats to column vectors.
            region_stats_slice = []
            for stat_name in statistics:
                stat = extractor[stat_name]
                if stat.ndim == 1:
                    region_stats_slice.append(stat[:, None])
                else:
                    region_stats_slice.append(stat)
            region_stats_slice = np.nan_to_num(np.concatenate(region_stats_slice,
                                                              axis=1).astype('float32',
                                                                             copy=False))
            assert region_stats_slice.shape[0] == max_node + 1 - min_node
            # Write at the global node offset of this slice.
            out.write((min_node, 0), region_stats_slice)
            return True

        # parallel over slices (thread pool; vigra releases the GIL)
        n_workers = min(shape[0], PipelineParameter().nThreads)
        # n_workers = 1
        with futures.ThreadPoolExecutor(max_workers=n_workers) as tp:
            tasks = [tp.submit(extract_stats_slice, z) for z in range(shape[0])]
            [task.result() for task in tasks]
        out.close()

    def output(self):
        """Volume target in the pipeline cache, named after the seg file."""
        # NOTE(review): `[:-3]` strips a 3-character extension (e.g. '.h5')
        # from the seg file name -- confirm for other file endings.
        seg_file = os.path.split(self.pathToSeg)[1][:-3]
        save_path = os.path.join(PipelineParameter().cache, "RegionNodeFeatures_%s" % seg_file)
        save_path += VolumeTarget.file_ending()
        return VolumeTarget(save_path)
# FIXME need to adjust chunks for parallel n5 writing
class RegionFeatures(luigi.Task):
    """Luigi task computing per-edge region features.

    Combines the per-node statistics from RegionNodeFeatures over the two
    endpoints of every RAG edge (min, max, absdiff, sum of the stats plus a
    squared center distance), separately for in-slice (xy) and between-slice
    (z) edges -- and for skip edges when the defect pipeline is active.
    """
    pathToInput = luigi.Parameter()
    pathToSeg = luigi.Parameter()
    keyToInput = luigi.Parameter(default='data')
    keyToSeg = luigi.Parameter(default='data')

    # TODO have to rethink this if we include lifted multicut
    def requires(self):
        """Depends on the RAG and node features; plus the modified adjacency
        when the defect pipeline is enabled."""
        required_tasks = {"rag": StackedRegionAdjacencyGraph(self.pathToSeg, self.keyToSeg),
                          "node_feats": RegionNodeFeatures(pathToInput=self.pathToInput,
                                                           pathToSeg=self.pathToSeg,
                                                           keyToInput=self.keyToInput,
                                                           keyToSeg=self.keyToSeg)}
        if PipelineParameter().defectPipeline:
            required_tasks['modified_adjacency'] = ModifiedAdjacency(self.pathToSeg)
        return required_tasks

    @run_decorator
    def run(self):
        """Load node features and dispatch to standard or defect-aware path."""
        inp = self.input()
        out = self.output()
        if not os.path.exists(out.path):
            os.mkdir(out.path)
        node_feats_file = inp["node_feats"]
        node_feats_file.open()
        node_feats = node_feats_file.read([0, 0], node_feats_file.shape())
        if PipelineParameter().defectPipeline:
            modified_adjacency = inp['modified_adjacency']
            # Only take the defect path if defects were actually found.
            if modified_adjacency.read('has_defects'):
                self._compute_modified_feats(node_feats, inp, out)
            else:
                self._compute_standard_feats(node_feats, inp, out)
        else:
            self._compute_standard_feats(node_feats, inp, out)
        node_feats_file.close()
        out.close()

    def _compute_feats_from_uvs(self,
                                node_feats,
                                uv_ids,
                                key,
                                out,
                                skip_ranges=None):
        """Combine node features over edge endpoints and write them to `key`.

        If `skip_ranges` is given (defect pipeline), it is appended as one
        extra feature column.
        """
        if not isinstance(skip_ranges, np.ndarray):
            assert skip_ranges is None
        workflow_logger.info("RegionFeatures: _compute_feats_from_uvs called with key: %s" % key)
        n_edges = uv_ids.shape[0]
        # magic 16 = number of regionStatistics that are combined by min, max, sum and absdiff
        nStatFeats = 16
        n_feats = 4 * nStatFeats + 4
        if isinstance(skip_ranges, np.ndarray):
            n_feats += 1
        # we open the out file for this features
        out_shape = (n_edges, n_feats)
        chunk_shape = (2500, out_shape[1])
        out.open(key, dtype='float32', shape=out_shape, chunks=chunk_shape)
        # the statistic features that are combined by min, max, sum and absdiff
        stats = node_feats[:, :nStatFeats]
        # the center features that are combined by quadratic euclidean distance
        centers = node_feats[:, nStatFeats:]

        def quadratic_euclidean_dist(x, y):
            return np.square(np.subtract(x, y))

        def absdiff(x, y):
            return np.abs(np.subtract(x, y))
        combine = (np.minimum, np.maximum, absdiff, np.add)

        # Compute and write the combined features for one contiguous
        # sub-range of edges, starting at row `edge_offset`.
        def feats_for_subset(uvs_sub, edge_offset):
            fU = stats[uvs_sub[:, 0], :]
            fV = stats[uvs_sub[:, 1], :]
            feats_sub = [comb(fU, fV) for comb in combine]
            sU = centers[uvs_sub[:, 0], :]
            sV = centers[uvs_sub[:, 1], :]
            feats_sub.append(quadratic_euclidean_dist(sU, sV))
            feats_sub = np.concatenate(feats_sub, axis=1)
            out.write((edge_offset, 0), feats_sub, key)
            return True

        # TODO maybe some tweeking can speed this up further
        # we should tune nSplits s.t. edgeStart - edgeStop is a multiple of chunks!
        # maybe less threads could also help ?!
        n_workers = PipelineParameter().nThreads
        # n_workers = 10
        # we split the edges in 500 blocks
        n_splits = 500
        with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:
            tasks = []
            for ii in range(n_splits):
                edge_start = int(float(ii) / n_splits * n_edges)
                edge_stop = n_edges if ii == n_splits - 1 else int(float(ii + 1) / n_splits * n_edges)
                tasks.append(executor.submit(feats_for_subset,
                                             uv_ids[edge_start:edge_stop, :],
                                             edge_start))
            [t.result() for t in tasks]
        workflow_logger.info("RegionFeatures: _compute_feats_from_uvs done.")

        if isinstance(skip_ranges, np.ndarray):
            assert skip_ranges.shape == (n_edges,)
            # NOTE(review): this uses `writeSubarray` while the rest of the
            # class writes via `write` -- confirm both exist on the target
            # and behave consistently.
            out.writeSubarray((0, n_feats - 1), skip_ranges[:, None], key)

    def _compute_standard_feats(self, node_feats, inp, out):
        """Compute xy- and z-edge features for the unmodified adjacency."""
        rag = inp['rag']
        uv_ids = rag.readKey('uvIds')
        # Edges [0, transition_edge) are in-slice (xy); the rest are z.
        transition_edge = rag.readKey('totalNumberOfInSliceEdges')
        # xy-feature
        self._compute_feats_from_uvs(node_feats, uv_ids[:transition_edge], "features_xy", out)
        # z-feature
        self._compute_feats_from_uvs(node_feats, uv_ids[transition_edge:], "features_z", out)

    # calculate and insert region features for the skip_edges
    # and delete the delete_edges
    def _compute_modified_feats(self, node_feats, inp, out):
        """Defect-aware variant: drop deleted z-edges, add skip-edge features."""
        rag = inp['rag']
        modified_adjacency = inp['modified_adjacency']
        uv_ids = rag.readKey('uvIds')
        transition_edge = rag.readKey('totalNumberOfInSliceEdges')
        # compute the standard xy-features with additional ranges
        self._compute_feats_from_uvs(node_feats, uv_ids[:transition_edge], 'features_xy', out)
        # compute the z-features with proper edges deleted from uv-ids
        delete_edges = modified_adjacency.read('delete_edges')
        uvs_z = uv_ids[transition_edge:]
        if delete_edges.size:
            assert delete_edges.min() >= transition_edge
            # delete_edges are global edge ids; re-base them to the z-edge range.
            delete_edges -= transition_edge
            uvs_z = np.delete(uvs_z, delete_edges, axis=0)
        self._compute_feats_from_uvs(node_feats, uvs_z, 'features_z', out)
        skip_edges = modified_adjacency.read('skip_edges')
        skip_ranges = modified_adjacency.read('skip_ranges')
        assert skip_ranges.shape[0] == skip_edges.shape[0]
        # if we have skip edges, compute features for them
        if skip_edges.size:
            self._compute_feats_from_uvs(node_feats,
                                         skip_edges,
                                         'features_skip',
                                         out,
                                         skip_ranges)

    def output(self):
        """Volume target in the pipeline cache; name encodes the defect mode."""
        seg_file = os.path.split(self.pathToSeg)[1][:-3]
        save_path = os.path.join(PipelineParameter().cache, "RegionFeatures_")
        if PipelineParameter().defectPipeline:
            save_path += "modified_%s" % seg_file
        else:
            save_path += "standard_%s" % seg_file
        save_path += VolumeTarget.file_ending()
        return VolumeTarget(save_path)
class EdgeFeatures(luigi.Task):
# input over which filters are calculated and features accumulated
pathToInput = luigi.Parameter()
keyToInput = luigi.Parameter(default='data')
# current oversegmentation
pathToSeg = luigi.Parameter()
keyToSeg = luigi.Parameter(default='data')
# optional parameters
keepOnlyXY = luigi.BoolParameter(default=False)
keepOnlyZ = luigi.BoolParameter(default=False)
simpleFeatures = luigi.BoolParameter(default=False)
zDirection = luigi.Parameter(default=0)
# For now we can't set these any more, needs to be passed to C++ somehow
# filterNames = luigi.ListParameter(
# default=[
# "gaussianSmoothing",
# "hessianOfGaussianEigenvalues",
# "laplacianOfGaussian"]
# )
# sigmas = luigi.ListParameter(default = [1.6, 4.2, 8.3] )
def requires(self):
required_tasks = {'rag': StackedRegionAdjacencyGraph(self.pathToSeg, self.keyToSeg),
'data': InputData(self.pathToInput)}
if PipelineParameter().defectPipeline:
required_tasks['modified_adjacency'] = ModifiedAdjacency(self.pathToSeg)
return required_tasks
@run_decorator
def run(self):
assert not(self.keepOnlyXY and self.keepOnlyZ)
inp = self.input()
rag = inp['rag'].read()
data_file = inp['data']
data_file.open(self.keyToInput)
data = data_file.get(self.keyToInput)
out = self.output()
if not os.path.exists(out.path):
os.mkdir(out.path)
has_defects = False
if PipelineParameter().defectPipeline:
modified_adjacency = inp['modified_adjacency']
if modified_adjacency.read('has_defects'):
has_defects = True
if has_defects:
self._compute_modified_feats(data, rag, modified_adjacency, out)
else:
self._compute_standard_feats(data, rag, out)
out.close()
data_file.close()
if PipelineParameter().useN5Backend:
self._postprocess_output_n5(out, has_defects)
else:
self._postprocess_output_h5(out, has_defects)
# we delete the old features_z and then rename the keep features
def _postprocess_output_n5(self, out, has_defects):
from shutil import move, rmtree
if has_defects:
z_path = os.path.join(out.path, 'features_z')
rmtree(z_path)
z_path_new = os.path.join(out.path, 'features_z_new')
move(z_path_new, z_path)
# if we only compute features for one of the edge-types
# remove the features of the other type
if self.keepOnlyXY:
rmtree(os.path.join(out.path, 'features_z'))
if self.keepOnlyZ:
rmtree(os.path.join(out.path, 'features_xy'))
def _postprocess_output_h5(self, out, has_defects):
import h5py
if has_defects:
with h5py.File(out.path) as f:
f['features_z_new'] = f['features_z']
del f['features_z_new']
# if we only compute features for one of the edge-types
# remove the features of the other type
if self.keepOnlyXY:
with h5py.File(out.path) as f:
del f['features_z']
if self.keepOnlyZ:
with h5py.File(out.path) as f:
del f['features_xy']
def _compute_standard_feats(self, data, rag, out):
workflow_logger.info("EdgeFeatures: _compute_standard_feats called.")
n_edges_xy = rag.totalNumberOfInSliceEdges if not self.keepOnlyZ else 1
n_edges_z = rag.totalNumberOfInBetweenSliceEdges if not self.keepOnlyXY else 1
# as minimum chunk size, we choose the minimum number of edges
# of a given type per slice
min_edges_xy = min(np.min(rag.numberOfInSliceEdges()), n_edges_xy)
min_edges_z = min(np.min(rag.numberOfInBetweenSliceEdges()[:-1]), n_edges_z)
# number of features:
# 9 * 12 for features from filter accumulation
# 9 for simple features
# TODO would be nice not to hard code this here...
n_feats = 9 if self.simpleFeatures else 9 * 12
# max chunk size s.t. n_feats * max_chunk_size ~ 64**3
max_chunk_size = 30000 if self.simpleFeatures else 2500
out_shape_xy = (n_edges_xy, n_feats)
# we choose the min in-slice edge number as minimum chunk size
chunk_shape_xy = (min(max_chunk_size, min_edges_xy), n_feats)
out_shape_z = (n_edges_z, n_feats)
chunk_shape_z = (min(max_chunk_size, min_edges_z), n_feats)
# open the output files
out.open('features_xy', dtype='float32', shape=out_shape_xy, chunks=chunk_shape_xy)
out.open('features_z', dtype='float32', shape=out_shape_z, chunks=chunk_shape_z)
if self.simpleFeatures:
workflow_logger.info("EdgeFeatures: computing standard features.")
nrag.accumulateEdgeStandardFeatures(rag, data,
out.get('features_xy'), out.get('features_z'),
self.keepOnlyXY, self.keepOnlyZ,
self.zDirection,
PipelineParameter().nThreads)
else:
workflow_logger.info("EdgeFeatures: computing features from filers.")
nrag.accumulateEdgeFeaturesFromFilters(rag, data,
out.get('features_xy'), out.get('features_z'),
self.keepOnlyXY, self.keepOnlyZ,
self.zDirection,
PipelineParameter().nThreads)
workflow_logger.info("EdgeFeatures: _compute_standard_feats done.")
# TODO implement simpler feature computation in nifty
def _compute_modified_feats(self, data, rag, modified_adjacency, out):
    """Compute edge features for a rag whose adjacency was modified by the
    defect pipeline: standard features first, then drop features of deleted
    z-edges and accumulate features for the skip edges.

    :param modified_adjacency: source providing ``read`` for the arrays
        'delete_edges', 'skip_edges', 'skip_ranges', 'skip_starts'
    """
    workflow_logger.info("EdgeFeatures: _compute_modified_feats called.")
    # first, compute the standard feats
    self._compute_standard_feats(data, rag, out)
    # first z-edge id: edge ids below this are in-slice (xy) edges
    transition_edge = rag.totalNumberOfInSliceEdges
    # copy the z-features we keep and delete the ones that are not needed
    delete_edges = modified_adjacency.read('delete_edges')
    has_delete_edges = delete_edges.size and not self.keepOnlyXY
    if has_delete_edges:
        # deleted edges must all be z-edges
        assert delete_edges.min() >= transition_edge
        # we substract the transition edge, because we count from the begin of z edges
        delete_edges -= transition_edge
        # read the original z-features
        standard_feat_shape = out['features_z'].shape()
        n_modified = standard_feat_shape[0] - delete_edges.shape[0]
        n_feats = standard_feat_shape[1]
        # open a new file for the modified edges
        out_shape = (n_modified, n_feats)
        chunk_shape = (min(2500, n_modified), n_feats)
        out.open('features_z_new', dtype='float32', shape=out_shape, chunks=chunk_shape)
        # find all edges with continuous indices that will be deleted
        consecutive_deletes = np.split(delete_edges,
                                       np.where(np.diff(delete_edges) != 1)[0] + 1)
        prev_edge, total_copied = 0, 0
        # find the interval of edges to keep
        keep_edge_intervals = []
        if prev_edge != consecutive_deletes[0][0]:
            keep_edge_intervals.append([prev_edge, consecutive_deletes[0][0]])
        # intervals between consecutive runs of deleted edges
        keep_edge_intervals.extend([[consecutive_deletes[i][-1] + 1, consecutive_deletes[i + 1][0]]
                                    for i in range(len(consecutive_deletes) - 1)])
        if consecutive_deletes[-1][-1] != standard_feat_shape[0] - 1:
            keep_edge_intervals.append([consecutive_deletes[-1][-1] + 1, standard_feat_shape[0]])
        # copy the kept feature rows block-wise into the new dataset
        for keep_start, keep_stop in keep_edge_intervals:
            n_copy = keep_stop - keep_start
            assert n_copy > 0, str(n_copy)
            out.write([total_copied, 0],
                      out.readSubarray([keep_start, 0], [keep_stop, n_feats], 'features_z'),
                      'features_z_new')
            total_copied += n_copy
        # sanity check: everything except the deleted rows was copied
        assert total_copied == standard_feat_shape[0] - delete_edges.shape[0], "%i, %i" % (
            total_copied,
            standard_feat_shape[0] - delete_edges.shape[0])
    skip_edges = modified_adjacency.read('skip_edges')
    skip_ranges = modified_adjacency.read('skip_ranges')
    skip_starts = modified_adjacency.read('skip_starts')
    assert skip_ranges.shape[0] == skip_edges.shape[0]
    assert skip_starts.shape[0] == skip_edges.shape[0]
    # modify the features only if we have skip edges
    if skip_edges.size and not self.keepOnlyXY:
        # TODO simple feats for skip features
        # TODO i/o in nifty to speed up calculation
        skip_feats = nrag.accumulateSkipEdgeFeaturesFromFilters(rag,
                                                                data,
                                                                # skip_edges need to be passed as a list of pairs!
                                                                [(int(skip_e[0]), int(skip_e[1]))
                                                                 for skip_e in skip_edges],
                                                                list(skip_ranges),
                                                                list(skip_starts),
                                                                self.zDirection,
                                                                PipelineParameter().nThreads)
        assert skip_feats.shape[0] == skip_edges.shape[0]
        # TODO reactivate check once we have simple skip feats
        # assert skip_feats.shape[1] == n_feats, "%i, %i" % (skip_feats.shape[1], n_feats)
        # open file for the skip edges
        vigra.writeHDF5(skip_feats,
                        os.path.join(out.path, 'features_skip.h5'), 'data',
                        chunks=(min(2500, skip_feats.shape[0]), skip_feats.shape[1]))
def output(self):
    """Return the cache target whose file name encodes the input files,
    defect mode, and every feature option, so distinct configurations
    never collide in the cache."""
    # Strip the directory and the 3-char file extension from both inputs.
    seg_name = os.path.split(self.pathToSeg)[1][:-3]
    inp_name = os.path.split(self.pathToInput)[1][:-3]
    mode = 'modified' if PipelineParameter().defectPipeline else 'standard'
    name_parts = ["EdgeFeatures_%s_%s_%s" % (seg_name, inp_name, mode)]
    if self.keepOnlyXY:
        name_parts.append('_xy')
    if self.keepOnlyZ:
        name_parts.append('_z')
    if self.simpleFeatures:
        name_parts.append('_simple')
    if self.zDirection != 0:
        name_parts.append('_zDir%i' % self.zDirection)
    name_parts.append(VolumeTarget.file_ending())
    save_path = os.path.join(PipelineParameter().cache, ''.join(name_parts))
    return VolumeTarget(save_path)
| StarcoderdataPython |
9737957 | <reponame>vuiseng9/nncf_pytorch
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from typing import Any
from typing import Dict
from typing import List
from nncf import NNCFConfig
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SingleElasticityBuilder
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import create_elasticity_builder_from_config
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_controller import ElasticityController
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.multi_elasticity_handler import MultiElasticityHandler
from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS
from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder
from nncf.torch.graph.transformations.layout import PTTransformationLayout
from nncf.torch.nncf_network import NNCFNetwork
class EBuilderStateNames:
    # Keys used when (de)serializing the ElasticityBuilder state dict.
    AVAILABLE_ELASTICITY_DIMS = 'available_elasticity_dims'
    BUILDER_STATES = 'builder_states'
@PT_COMPRESSION_ALGORITHMS.register('elasticity')
class ElasticityBuilder(PTCompressionAlgorithmBuilder):
    """
    Determines which modifications should be made to the original FP32 model in order to introduce elasticity
    to the model.
    """
    _state_names = EBuilderStateNames
    # NOTE: This is the order of activation elasticity dimensions when multiple of them are enabled.
    # Don't confuse with the order of adding elasticity dimension on training stages (progressiveness of
    # elasticity). For vanilla progressive shrinking the stages order is the following:
    #   1st stage: kernel
    #   2nd stage: kernel + depth
    #   3rd stage: kernel + depth + width
    # The execution order is orthogonal to this.
    # Though the order of kernel/width operations shouldn't lead to a different result mathematically,
    # there may be a minor floating-point error in the 6th sign. To make behavior stable, elastic kernel is always
    # applied after width.
    # Depth goes after width, because currently only the depth handler knows about shapes after the width handler
    # has set its config, and it doesn't skip blocks whose boundary shapes don't match. Potentially, we could
    # support an alternative strategy where the width handler knows about skipped blocks,
    # but currently it's not supported.
    ALL_DIMS_IN_EXECUTION_ORDER = [ElasticityDim.WIDTH, ElasticityDim.KERNEL, ElasticityDim.DEPTH]

    def __init__(self, nncf_config: NNCFConfig, should_init: bool = True):
        super().__init__(nncf_config, should_init)
        self._multi_elasticity_handler = None
        # TODO(nlyalyus): ignored/target scope is not supported (ticket 68052)
        self._ignored_scopes = self.config.get('ignored_scopes', None)
        self._target_scopes = self.config.get('target_scopes', None)
        self._multi_elasticity_handler_state = None
        # By default every known elasticity dimension is available.
        all_elasticity_dims = {e.value for e in ElasticityDim}
        available_elasticity_dims_str = self._algo_config.get('available_elasticity_dims', all_elasticity_dims)
        self._available_elasticity_dims = list(map(ElasticityDim.from_str, available_elasticity_dims_str))
        self._elasticity_builders = OrderedDict()  # type: Dict[ElasticityDim, SingleElasticityBuilder]
        # Per-dimension builder states restored from a compression checkpoint, if any.
        self._builder_states = None

    def initialize(self, model: NNCFNetwork) -> None:
        """
        Initialize model parameters before training
        :param model: The model with additional modifications necessary to enable
            algorithm-specific compression during fine-tuning.
        """

    def get_available_elasticity_dims(self) -> List[ElasticityDim]:
        """
        :return: list of available elasticity dimensions
        """
        return self._available_elasticity_dims

    def _get_algo_specific_config_section(self) -> Dict:
        # Elasticity settings live under bootstrapNAS/training/elasticity in the NNCF config.
        return self.config.get('bootstrapNAS', {}).get('training', {}).get('elasticity', {})

    def _build_controller(self, model: NNCFNetwork) -> 'ElasticityController':
        """
        Simple implementation of building controller without setting builder state and loading controller's one.
        :param model: The model with additional modifications necessary to enable
            algorithm-specific compression during fine-tuning.
        :return: The instance of the `ElasticityController`.
        """
        return ElasticityController(model, self._algo_config, self._multi_elasticity_handler)

    def _get_transformation_layout(self, target_model: NNCFNetwork) -> PTTransformationLayout:
        """Create per-dimension elasticity builders/handlers (in execution order) and
        collect their model transformation commands into a single layout."""
        # Keep only the enabled dimensions, in the canonical execution order.
        sorted_elasticity_dims = list(
            filter(lambda x: x in self._available_elasticity_dims, self.ALL_DIMS_IN_EXECUTION_ORDER))
        ignored_scopes = self._ignored_scopes
        target_scopes = self._target_scopes
        for elasticity_dim in sorted_elasticity_dims:
            elasticity_config = self._algo_config.get(elasticity_dim.value, {})
            elasticity_builder = create_elasticity_builder_from_config(elasticity_config,
                                                                       elasticity_dim,
                                                                       ignored_scopes,
                                                                       target_scopes)
            self._elasticity_builders[elasticity_dim] = elasticity_builder
        # Restore builder states from a checkpoint before building the handlers.
        if self._builder_states is not None:
            for dim_str, builder_state in self._builder_states.items():
                dim = ElasticityDim.from_str(dim_str)
                if dim in self._elasticity_builders:
                    self._elasticity_builders[dim].load_state(builder_state)
        elasticity_handlers = OrderedDict()
        for dim, builder in self._elasticity_builders.items():
            handler = builder.build(target_model)
            elasticity_handlers[dim] = handler
        self._multi_elasticity_handler = MultiElasticityHandler(elasticity_handlers, target_model)
        layout = PTTransformationLayout()
        for handler in elasticity_handlers.values():
            commands = handler.get_transformation_commands()
            for command in commands:
                layout.register(command)
        return layout

    def _get_state_without_name(self) -> Dict[str, Any]:
        """
        Implementation of get_state that returns state without builder name.
        :return: Returns a dictionary with Python data structures
            (dict, list, tuple, str, int, float, True, False, None) that represents state of the object.
        """
        builder_states = {dim.value: builder.get_state() for dim, builder in self._elasticity_builders.items()}
        available_elasticity_dims_state = list(map(lambda x: x.value, self.get_available_elasticity_dims()))
        return {
            self._state_names.BUILDER_STATES: builder_states,
            self._state_names.AVAILABLE_ELASTICITY_DIMS: available_elasticity_dims_state
        }

    def _load_state_without_name(self, state_without_name: Dict[str, Any]):
        """
        Implementation of load state that takes state without builder name.
        :param state_without_name: Output of `_get_state_without_name()` method.
        """
        self._builder_states = state_without_name[self._state_names.BUILDER_STATES]
        available_elasticity_dims_state = state_without_name[self._state_names.AVAILABLE_ELASTICITY_DIMS]
        # No conflict resolving with the related config options, parameters are overridden by compression state
        self._available_elasticity_dims = list(map(ElasticityDim.from_str, available_elasticity_dims_state))
| StarcoderdataPython |
3394136 | from typing import Dict, Optional
class MockTracerToInversion:
    """Test double that pairs a tracer with a canned sparse image-plane
    grid list, returned unconditionally by the lookup method."""

    def __init__(
        self,
        tracer,
        sparse_image_plane_grid_pg_list=None,
        profiling_dict: Optional[Dict] = None,
    ):
        # No computation here: every argument is stored verbatim.
        self.profiling_dict = profiling_dict
        self.sparse_image_plane_grid_pg_list = sparse_image_plane_grid_pg_list
        self.tracer = tracer

    def sparse_image_plane_grid_pg_list_from(self, grid):
        """Ignore *grid* and return the list given at construction time."""
        return self.sparse_image_plane_grid_pg_list
| StarcoderdataPython |
5186221 | <reponame>fcgtyg/SEAS
# -*-coding:utf-8-*-
import json
import os
import threading
import subprocess32 as subprocess
import sys
from Password import Password
from External_Functions.passwordGenerator import passwordGenerator
from External_Functions.sendEmail import send_mail_password_reset
from mysql.connector import IntegrityError, InterfaceError
class User:
    """A member account backed by the organization's MySQL schema (Python 2).

    Wraps lookups and mutations on one row of the ``members`` table.
    NOTE(review): queries interpolate values straight into SQL strings;
    parameterize them if the db wrapper supports bound parameters.
    """

    def __init__(self, db, organization, username):
        self.db = db
        self.execute = db.execute
        self.execute("USE %s" % organization)
        self.username = username
        self.organization = organization
        self.allowed_extensions = {'png', 'jpg', 'jpeg'}
        self.pass_word = Password()
        # Defaults so attribute access is safe even when the lookup fails.
        self.user_id = None
        self.role = None
        self.name = None
        self.surname = None
        self.hashed_pass = None
        self.email = None
        self.department = None
        self.profile_pic_path = None
        self.role_name = None
        self.get = self.get_user_info()

    def get_user_info(self):
        """
        Populate the instance from the members table.
        :return: [username, name, surname, personID, role name, email, department],
                 or the string "No such a person!" when the user does not exist.
        """
        try:
            self.user_id, self.role, self.name, self.surname, self.username, \
                self.hashed_pass, self.email, self.department, self.profile_pic_path = \
                self.execute("SELECT * FROM members WHERE Username='%s'" % self.username)[0]
            self.role_name = self.execute("SELECT Role FROM roles WHERE roleID = %s" % self.role)[0][0]
            return [self.username, self.name, self.surname, self.user_id, self.role_name, self.email, self.department]
        except InterfaceError:
            # NOTE(review): callers must handle a string return on failure.
            return "No such a person!"

    def change_password_or_email(self, old_password, new_val, email=False):
        """Change the e-mail (email=True) or the password after verifying the old password."""
        if self.pass_word.verify_password_hash(old_password, self.hashed_pass):
            if email:
                self.execute(
                    "UPDATE members SET Email='%s' WHERE Username = '%s'" % (new_val, self.username))
                return "Mail Changed"
            # Fix: removed debug `print new_val`, which leaked the new
            # plaintext password to stdout/logs.
            password = self.pass_word.hash_password(new_val)
            self.execute("UPDATE members SET Password='%s' WHERE Username = '%s'" % (password, self.username))
            return "Password Changed"
        return "Not Authorized"

    def allowed_file(self, filename):
        """True when the filename carries an allowed image extension."""
        return '.' in filename and filename.rsplit('.', 1)[1].lower() in self.allowed_extensions

    def upload_profile_pic(self, pic):
        """Store the uploaded picture on disk and record its path for this user."""
        if pic and self.allowed_file(pic.filename):
            extension = "." + pic.filename.rsplit('.', 1)[1].lower()
            base_path = "/var/www/SEAS/uploads/%s/profile_pictures/" % self.organization
            path = base_path + str(self.user_id) + extension
            if not os.path.exists(base_path):
                os.makedirs(base_path)
            with open(path, "wb") as f:
                # read() with no size returns the whole upload; the original
                # looped needlessly on an empty-string sentinel.
                f.write(pic.read())
            self.execute("update members set ProfilePic = '%s' where PersonID = '%s';" % (path, self.user_id))
            return "Done"
        return "Not allowed extension."

    def get_profile_picture(self):
        return self.profile_pic_path

    def verify_password(self, password):
        return self.pass_word.verify_password_hash(password, self.hashed_pass)

    def reset_password(self):
        """Create a temporary password (auto-expires in 30 minutes) and mail it."""
        # Fix: restored the generator call that had been replaced by
        # placeholder text; passwordGenerator is imported at module top.
        password = passwordGenerator(8)
        try:
            password_ = self.pass_word.hash_password(password)
            self.execute("INSERT INTO temporary_passwords (UserID, Password)"
                         "VALUES (%d, '%s');" % (int(self.user_id), password_))
            # MySQL event deletes the temporary password after 30 minutes.
            self.execute("CREATE EVENT user_%d ON SCHEDULE AT date_add(now(), INTERVAL 30 MINUTE) "
                         "DO DELETE FROM temporary_passwords WHERE UserID = %d;"
                         % (int(self.user_id), int(self.user_id)))
            auth = ["%s %s" % (self.name, self.surname), self.email, password, self.username]
            threading.Thread(target=send_mail_password_reset, args=(auth,)).start()
            return "Check your mail address for credentials."
        except IntegrityError:
            return "Your account has been reset already."

    def check_and_change_password(self, temp_pass, new_pass):
        """Validate the temporary password and install *new_pass* as the account password."""
        try:
            password = self.execute("SELECT Password FROM temporary_passwords WHERE UserID = %d;"
                                    % (int(self.user_id)))[0][0]
        except IndexError:
            # Fix: the empty-result IndexError is raised by this SELECT's
            # [0][0] indexing, not by the DELETE the original wrapped.
            return "There is not any reset request for the user!"
        if self.pass_word.verify_password_hash(temp_pass, password):
            self.execute("DELETE FROM temporary_passwords WHERE UserID = %d;" % (int(self.user_id)))
            # Fix: hash the caller-supplied new password (the argument had
            # been replaced by placeholder text).
            new_pass = self.pass_word.hash_password(new_pass)
            return self.execute("UPDATE members SET members.Password = '%s' WHERE PersonID = %d;"
                                % (new_pass, int(self.user_id)))
        return "Wrong Temporary Password!"

    def get_last_activity(self, endpoint):
        """Return the five most recent activities ('last_login' restricts to sign-ins),
        padded with empty strings up to five entries."""
        if endpoint == "last_login":
            rtn = self.execute("SELECT Api_Endpoint, Time, IP FROM istanbul_sehir_university.last_activities "
                               "where username = '%s' and Api_Endpoint = 'sign_in' order by Time DESC limit 5;"
                               % self.username)
        else:
            rtn = self.execute("SELECT Api_Endpoint, Time, IP FROM istanbul_sehir_university.last_activities "
                               "where username = '%s' order by Time DESC limit 5;" % self.username)
        while len(rtn) < 5:
            rtn.append("")
        return rtn
class Lecturer(User):
    """Lecturer-role user: adds taught-course listing and answer grading."""

    def __init__(self, db, organization, username):
        # Fix: delegate to User.__init__ instead of duplicating it -- the
        # copied version skipped the attribute defaults (user_id, role, ...)
        # that User.__init__ establishes before the lookup.
        User.__init__(self, db, organization, username)

    def get_lecturer_courses(self):
        """Return (name, code) rows for every course this lecturer teaches."""
        return self.execute("SELECT courses.Name, courses.CODE FROM lecturers JOIN courses ON "
                            "lecturers.CourseID = courses.CourseID JOIN members ON "
                            "members.PersonID = lecturers.LecturerID WHERE members.Username = '%s';" % self.username)

    def grade_answer(self, question_id, student, grade):
        """Set (or overwrite) the grade of *student*'s answer to a question.

        NOTE(review): values are interpolated into the SQL string; sanitize
        or parameterize if the inputs can come from untrusted clients.
        """
        return self.execute("INSERT INTO answers(questionID, studentID, grade) VALUES "
                            "('%s', (SELECT PersonID from members where Username = '%s') , '%s') "
                            "ON DUPLICATE KEY UPDATE grade=VALUES(grade)" % (str(question_id), student, str(grade)))
class Student(User):
    """Student-role user (Python 2): course listing, answer submission and
    automatic grading of multiple-choice and programming questions.

    NOTE(review): check_answer executes the student's submitted code via
    subprocess and eval()s its output -- this trusts submissions completely
    and should run sandboxed; confirm the deployment isolates it.
    """
    def __init__(self, db, organization, username):
        self.db = db
        self.execute = db.execute
        self.execute("USE %s" % organization)
        self.username = username
        self.organization = organization
        self.allowed_extensions = {'png', 'jpg', 'jpeg'}
        self.pass_word = Password()
        self.get = self.get_user_info()
    def get_student_courses(self):
        """Return (name, code) rows for every course this student is registered in."""
        return self.execute("SELECT courses.Name, courses.CODE FROM registrations JOIN courses ON "
                            "registrations.CourseID = courses.CourseID JOIN members ON "
                            "members.PersonID = registrations.studentID WHERE members.Username = '%s';"
                            % self.username)
    def add_answer(self, question_id, answer):
        """Store (or update) the student's answer, auto-grading it when possible."""
        # Escape single quotes for SQL; grade is None when auto-grading is unsupported.
        answer = answer.replace("'", "''").replace('""', '"')
        grade = self.check_answer(question_id, answer)
        if grade is None:
            command = "INSERT INTO answers(examID, questionID, studentID, answer) values " \
                      "((select examID from questions q where q.questionID = %d),%d, %d, '%s') " \
                      "ON DUPLICATE KEY UPDATE answer = '%s';" \
                      % (int(question_id),int(question_id), int(self.user_id), answer, answer)
        else:
            command = "INSERT INTO answers(examID, questionID, studentID, answer, grade) values " \
                      "((select examID from questions q where q.questionID = %d),%d, %d, '%s', '%f') " \
                      "ON DUPLICATE KEY UPDATE answer = '%s', grade='%f';" \
                      % (int(question_id), int(question_id), int(self.user_id), answer, grade, answer, grade)
        return self.execute(command)
    def check_answer(self, question_id, answer):
        """Auto-grade *answer*: compare text for multiple choice, or run the
        submitted code against stored test cases for programming questions.
        Returns the numeric grade, or None for ungradable question types."""
        question_type, true_answer, value, tags, test_cases = self.execute("SELECT Type, Answer, Value, Tags, Test_Cases"
                                                                           " FROM questions WHERE QuestionID = %d"
                                                                           %int(question_id))[0]
        # The submission is written to temp.py so it can be executed/imported.
        # NOTE(review): fixed filename -- concurrent gradings would race on it.
        with open("temp.py", "w") as script:
            script.write(answer)
        if question_type == "multiple_choice":
            # Case-insensitive exact match: full credit or zero.
            if true_answer.lower() == answer.lower():
                return value
            else:
                return 0
            pass
        elif question_type == "programming":
            # Test cases are stored as JSON with doubled quotes; undo the escaping.
            try:
                test_cases = self.parse_outputs(json.loads(test_cases.replace("''", "'").replace("u'", "u''"), strict=False))
            except SyntaxError:
                test_cases = self.parse_outputs(json.loads(test_cases.replace("''", "'"), strict=False))
            python_path = sys.executable
            # Special key "(u'',)" means: run the script with no input and
            # compare its whole stdout (trailing newline stripped).
            if "(u'',)" in test_cases:
                try:
                    output = subprocess.check_output(("%s temp.py" %python_path), stderr=subprocess.STDOUT, timeout=5)[:-1]
                except subprocess.TimeoutExpired:
                    output = "TimeoutError"
                except subprocess.CalledProcessError:
                    output = "CodeIntegrityError"
                return value if output == test_cases["(u'',)"][0].decode("utf-8") else 0
            # Otherwise build {input: expected_output} from the first entry.
            test_case_dict = dict()
            for i, j in test_cases.items():
                in_ = eval("(%s)" % eval(i)[0].decode("utf-8"))
                out_ = eval("(%s)" % j[0].decode("utf-8"))
                test_case_dict = dict(zip(in_, out_))
                break
            test_score = 0
            # Call the tagged function once per test case in a subprocess.
            for test_input, test_output in test_case_dict.items():
                command = """%s -c "from temp import *; print %s%s" """ % (python_path, tags[:-1],repr(test_input))
                try:
                    output = subprocess.check_output(command, timeout=5)[:-1]
                    if len(output.split("\n"))>1:
                        output = output.replace("\nNone", "")
                    if len(test_output) == 1:
                        check = test_output[0] == eval(output)
                    else:
                        check = test_output == eval(output)
                    if check:
                        test_score += 1
                except subprocess.CalledProcessError:
                    output="CodeIntegrityError"
                except subprocess.TimeoutExpired:
                    output="TimeoutError"
            # Partial credit: fraction of passing test cases times the question value.
            grade = value*(float(test_score)/len(test_case_dict))
            print grade
            return grade
            pass
        else:
            return None
    @staticmethod
    def parse_outputs(question):
        """Normalize the raw test-case JSON into {repr(input_tuple): outputs}."""
        parsed_outputs = {}
        for key, value in question.items():
            a = tuple(eval(key))
            try:
                b = tuple(value)
            except TypeError:
                b = value
            parsed_outputs[str(a)] = b
        return parsed_outputs
| StarcoderdataPython |
9731866 | ## 백트래킹
# Read input: N is the largest usable value, M the sequence length.
N, M = map(int, input().split())
# Current candidate sequence, one slot per position.
orders = [0] * M


def back(idx, order_list):
    """Print every non-decreasing length-M sequence over 1..N, one per line.

    :param idx: position currently being filled (M means the sequence is done)
    :param order_list: choices made so far for positions 0..idx-1
    """
    seq = order_list[:]
    if idx >= M:
        # Sequence complete: print space-separated on a single line.
        print(*seq)
    else:
        # Start at the previous element's value so the sequence never decreases.
        start = 1 if idx == 0 else order_list[idx - 1]
        for value in range(start, N + 1):
            seq[idx] = value
            back(idx + 1, seq)


back(0, orders)
4995524 | <reponame>VVKot/leetcode-solutions<filename>leetcode/python/1065_index_pairs_of_a_string.py
"""
T: O(N**3)
S: O(N) -> does not consider output space
We move through the text and collect prefixes that are present in words.
When we seen an actual word, we add its position to the result.
"""
from typing import List
class Trie:
    """Prefix tree over strings supporting exact-word and prefix queries."""

    WORD_MARK = '*'  # sentinel child key marking that a complete word ends here

    def __init__(self):
        self.trie = {}

    def insert(self, word: str) -> None:
        """Add *word* to the trie."""
        node = self.trie
        for ch in word:
            node = node.setdefault(ch, {})
        node[self.WORD_MARK] = {}

    def _find(self, prefix: str):
        """Return the node reached by walking *prefix*, or None if absent.

        Shared by full_match/starts_with, which previously duplicated
        this traversal loop.
        """
        node = self.trie
        for ch in prefix:
            node = node.get(ch)
            if node is None:
                return None
        return node

    def full_match(self, word: str) -> bool:
        """True iff *word* was inserted as a complete word."""
        node = self._find(word)
        return node is not None and self.WORD_MARK in node

    def starts_with(self, prefix: str) -> bool:
        """True iff some inserted word starts with *prefix*."""
        return self._find(prefix) is not None
class Solution:
    def indexPairs(self, text: str, words: List[str]) -> List[List[int]]:
        """Return all [i, j] pairs (in ascending order) such that
        text[i..j] is one of *words*.

        Walks the trie node-by-node from each start index instead of
        re-slicing and re-traversing the substring for every (i, j),
        cutting the original O(N**3) scan down to O(N**2).
        """
        trie = self.build_trie(words)
        index_pairs = []
        n = len(text)
        for left in range(n):
            node = trie.trie
            for right in range(left, n):
                node = node.get(text[right])
                if node is None:
                    break  # no inserted word continues with this prefix
                if Trie.WORD_MARK in node:
                    index_pairs.append([left, right])
        return index_pairs

    def build_trie(self, words: List[str]) -> Trie:
        """Build a Trie containing every word."""
        trie = Trie()
        for word in words:
            trie.insert(word)
        return trie
| StarcoderdataPython |
9785533 | """Delete a block storage snapshot."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
@click.command()
@click.argument('snapshot_id')
@environment.pass_env
def cli(env, snapshot_id):
    """Deletes a snapshot on a given volume"""
    # Docstring above doubles as the click help text, so it is kept verbatim.
    manager = SoftLayer.BlockStorageManager(env.client)
    if manager.delete_snapshot(snapshot_id):
        # Only report success; failures stay silent, matching prior behavior.
        click.echo('Snapshot %s deleted' % snapshot_id)
| StarcoderdataPython |
4929648 | # coding=utf-8
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import codecs
import json
import numpy as np
import torch
from collections import defaultdict
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from util import *
from metric import *
# import clf_distill_loss_functions
from scipy.special import softmax
from transformers.file_utils import PYTORCH_TRANSFORMERS_CACHE
from transformers.modeling_bert import BertForSequenceClassification, BertConfig, BertForNextSentencePrediction
# from bert_distill import *
from transformers.tokenization_bert import BertTokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def cal_accuracy(entail_probs, examples, hypo_type_list, gold_label_list):
    """Pick, for each text, the hypothesis type with the highest entailment
    probability and score the predictions against the gold labels.

    entail_probs holds len(hypo_type_list) consecutive probabilities per
    text, in hypothesis order; ties resolve to the earliest hypothesis.

    :return: (predicted label per text, accuracy in [0, 1])
    """
    assert entail_probs.shape[0] == len(examples)
    num_types = len(hypo_type_list)
    text_num = int(len(examples) / num_types)
    pred_label_list = []
    hit_size = 0
    for i in range(text_num):
        base = i * num_types
        # argmax over this text's contiguous probability block; max() keeps
        # the first index on ties, matching a strict '>' scan.
        best_idx = max(range(num_types), key=lambda j: entail_probs[base + j])
        predicted = hypo_type_list[best_idx][1]
        pred_label_list.append(predicted)
        if predicted == gold_label_list[i]:
            hit_size += 1
    return pred_label_list, hit_size / text_num
def eval(model, dataloader, device, num_labels, return_logits=False):
    """Run the model over *dataloader* and return entailment probabilities
    (softmax column 0), optionally together with the raw logits.

    NOTE: shadows the builtin ``eval``; kept for caller compatibility.
    """
    model.eval()
    logger.info("***** Running evaluation *****")
    # NOTE(review): eval_loss / nb_eval_steps are never updated below.
    eval_loss = 0
    nb_eval_steps = 0
    preds = []
    print('Evaluating...')
    for idx, (input_ids, input_mask, segment_ids, label_ids) in enumerate(tqdm(dataloader)):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)
        with torch.no_grad():
            inputs = {
                "input_ids": input_ids,
                "attention_mask": input_mask,
                "token_type_ids": segment_ids,
            }
            logits = model(**inputs)
        # HF models may return a tuple; the logits come first.
        if isinstance(logits, tuple):
            logits = logits[0]
        # Accumulate batch logits into one array (first batch seeds it).
        if len(preds) == 0:
            preds.append(logits.detach().cpu().numpy())
        else:
            preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)
    preds = preds[0]
    #entailment prob
    # print(preds.shape)
    # print(preds[0])
    # Column 0 is assumed to be the entailment/isNext class -- TODO confirm.
    pred_probs = softmax(preds, axis=1)[:, 0]
    if return_logits:
        return preds, pred_probs
    return pred_probs
def load_model(pretrain_model_dir, use_nsp, use_distill, num_labels=2):
    """Load a pretrained BERT model (NSP head when use_nsp, otherwise a
    sequence classifier) plus its tokenizer, in eval mode.

    NOTE(review): ``use_distill`` is currently ignored -- the distill
    branch below is commented out.
    """
    logger.info("load pretrained model from {}".format(pretrain_model_dir))
    if use_nsp:
        model = BertForNextSentencePrediction.from_pretrained(pretrain_model_dir)
        logger.info("use next sentence prediction model")
    # elif use_distill:
    #     loss_fn = clf_distill_loss_functions.Plain()
    #     model = BertDistill.from_pretrained(pretrain_model_dir, loss_fn=loss_fn)
    #     logger.info("use distill model")
    else:
        model = BertForSequenceClassification.from_pretrained(pretrain_model_dir, num_labels=num_labels)
    model.eval()
    tokenizer = BertTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=True)
    return model, tokenizer
def main():
    """Evaluate a (NSP or classification) BERT model on an entailment-style
    zero-shot task and write predictions plus the task metric to output_dir."""
    parser = argparse.ArgumentParser()
    def str2bool(bstr):
        # argparse-compatible boolean parser: only the string 'true' is True.
        if bstr.lower() == 'true':
            return True
        return False
    ## Required parameters
    parser.add_argument("--pretrain_model_dir", default=r"bert-base-uncased", type=str)
    parser.add_argument("--thred", type=float, default=0.5)
    parser.add_argument("--use_nsp", type=str2bool, default=False)
    parser.add_argument("--use_distill", type=str2bool, default=False)
    parser.add_argument("--reverse", type=str2bool, default=False)
    parser.add_argument("--random_input", type=str2bool, default=False)
    parser.add_argument("--label_single", type=str2bool, default=True)
    parser.add_argument("--output_dir", default=None, type=str)
    parser.add_argument("--input_fn", default=None, type=str)
    parser.add_argument("--label_fn", default=None, type=str)
    parser.add_argument("--max_seq_length", default=128, type=int)
    parser.add_argument("--eval_batch_size", default=64, type=int, help="Total batch size for eval.")
    parser.add_argument('--seed', type=int, default=0, help="random seed for initialization")
    args = parser.parse_args()
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    # Seed every RNG for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # Optionally shuffle the input text as a randomization baseline.
    if args.random_input:
        rand_fn = os.path.join(args.output_dir, "rand.txt")
        random_text_fn(args.input_fn, rand_fn)
        args.input_fn = rand_fn
    # Persist the run configuration next to the results.
    argsDict = args.__dict__
    with open(os.path.join(args.output_dir, "setting.txt"), "w", encoding="utf-8") as fp:
        for eachArg, value in argsDict.items():
            fp.writelines(eachArg + ' : ' + str(value) + '\n')
    processor = NSPData(args.label_fn, input_fn=args.input_fn, reverse=args.reverse)
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Prepare model
    model, tokenizer = load_model(args.pretrain_model_dir, args.use_nsp, args.use_distill, num_labels)
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    '''load test set'''
    test_examples, test_label_list, test_hypo_type_list = processor.get_examples(label_single=args.label_single)
    print(test_hypo_type_list)
    # lines = ["\t".join(['index', 'sentence1', 'sentence2', 'label']) + "\n"]
    # for i, ex in enumerate(test_examples):
    #     sent1 = ex.text_a.replace("\t", " ")
    #     sent2 = ex.text_b.replace("\t", " ")
    #     label = 'entailment' if ex.label == 'isNext' else 'not_entailment'
    #     lines.append('\t'.join([str(i), sent1, sent2, label]) + "\n")
    # with open(os.path.join(args.output_dir, "test.tsv"), mode="w", encoding="utf-8") as fp:
    #     fp.writelines(lines)
    test_features, _ = convert_examples_to_features(test_examples, label_list, args.max_seq_length, tokenizer)
    test_all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
    test_all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
    test_all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
    test_all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
    test_data = TensorDataset(test_all_input_ids, test_all_input_mask, test_all_segment_ids, test_all_label_ids)
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
    logits, pred_probs = eval(model, test_dataloader, device, num_labels, return_logits=True)
    # np.savetxt(os.path.join(args.output_dir, "final_test_logits.txt"), logits, delimiter=',')
    # np.savetxt(os.path.join(args.output_dir, "final_test_log.txt"), pred_probs, delimiter=',')
    text_num = len(test_label_list)
    # Dataset-specific decoding + metric, dispatched on the input file name.
    if ("topic" in args.input_fn) or ("sst" in args.input_fn) or ("agnews" in args.input_fn) or ("snips" in args.input_fn):
        #single label, without none
        test_pred_label_list = predict_single(pred_probs, text_num, test_hypo_type_list, thred=-100)
        acc = cal_acc(test_label_list, test_pred_label_list)
        print(acc)
        with open(os.path.join(args.output_dir, "metric.txt"), "w", encoding="utf-8") as fp:
            fp.writelines(["accuracy: {}\n".format(acc)])
    elif "emotion" in args.input_fn:
        #single label, with none
        if args.use_nsp:
            test_pred_label_list = predict_single(pred_probs, text_num, test_hypo_type_list, thred=args.thred)
            wf1 = cal_wf1(test_pred_label_list, test_label_list, processor.classes)
            print(wf1)
        else:
            test_pred_label_list = predict_single(pred_probs, text_num, test_hypo_type_list, thred=0.5)
            wf1 = cal_wf1(test_pred_label_list, test_label_list, processor.classes)
            print(wf1)
        with open(os.path.join(args.output_dir, "metric.txt"), "w", encoding="utf-8") as fp:
            fp.writelines(["weighted f1: {}\n".format(wf1)])
    elif "situation" in args.input_fn:
        # multi-label task
        if args.use_nsp:
            test_pred_label_list = predict_multi(pred_probs, text_num, test_hypo_type_list, thred=args.thred)
            # test_pred_label_list = predict_multi_baseline(pred_probs, text_num, test_hypo_type_list)
            wf1 = cal_wf1(test_pred_label_list, test_label_list, processor.classes)
            print(wf1)
        else:
            test_pred_label_list = predict_multi(pred_probs, text_num, test_hypo_type_list, thred=0.5)
            wf1 = cal_wf1(test_pred_label_list, test_label_list, processor.classes)
            print(wf1)
        with open(os.path.join(args.output_dir, "metric.txt"), "w", encoding="utf-8") as fp:
            fp.writelines(["weighted f1: {}\n".format(wf1)])
    # Predictions: multi-label rows are tab-joined, single labels one per line.
    with open(os.path.join(args.output_dir, "final_test_pred.txt"), mode="w", encoding="utf-8") as fp:
        if isinstance(test_label_list[0], list):
            fp.writelines(["\t".join(x) + "\n" for x in test_pred_label_list])
        else:
            fp.writelines([x + "\n" for x in test_pred_label_list])
    print(args.pretrain_model_dir)


if __name__ == "__main__":
    main()
9773244 | # Generated by Django 3.1.3 on 2020-11-19 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('akastories', '0002_auto_20201119_1303'),
]
operations = [
migrations.AlterField(
model_name='storyuser',
name='username',
field=models.CharField(max_length=20, unique=True),
),
]
| StarcoderdataPython |
3445065 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Train the disambiguator """
from quantulum3.classifier import train_classifier
import argparse
arguments = [
{
'dest': 'store',
'help': 'store resulting classifier in quantulum3 project folder',
'type': bool,
'default': True
},
{
'dest': 'lang',
'help': 'language in which to train the classifier, default \'en_US\'',
'type': str,
'default': 'en_US'
},
]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
'train',
description='Train unit disambiguator based on data in quantulum '
'project folder')
for arg in arguments:
parser.add_argument('--{}'.format(arg['dest']), **arg)
args = parser.parse_args()
print('Start training for language {}, {} storing the classifier'.format(
args.lang, '' if args.store else 'not'))
train_classifier(store=args.store, lang=args.lang)
print('Done')
| StarcoderdataPython |
1695659 | from django.contrib.auth import get_user_model
from django.db.models import fields
from rest_framework import serializers
from rest_framework.relations import StringRelatedField, SlugRelatedField
from rest_framework.fields import CurrentUserDefault
from users.models import Profile
from polls.models import Poll, Question, Answer, Choice
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
    """Read-only representation of a user account.

    ``role`` is rendered via the related object's ``role`` attribute
    instead of its primary key.
    """
    role = serializers.SlugRelatedField(
        many=False,
        read_only=True,
        slug_field='role'
    )

    class Meta:
        model = User
        fields = ['id', 'username', 'date_joined', 'role']
class ChoiceSerializer(serializers.ModelSerializer):
    """Serializes a single answer choice, including its correctness flag."""

    class Meta:
        model = Choice
        fields = ['id', 'text', 'is_correct']
class QuestionSerializer(serializers.ModelSerializer):
    """Question with its nested read-only choices.

    The parent ``poll`` is rendered by its ``title`` rather than its pk.
    """
    choices = ChoiceSerializer(many=True, read_only=True)
    poll = serializers.SlugRelatedField(
        many=False,
        read_only=True,
        slug_field='title'
    )

    class Meta:
        model = Question
        fields = ['id', 'pub_date', 'question_text', 'choices', 'choice_type', 'poll']
class PollSerializer(serializers.ModelSerializer):
    """Poll with its nested read-only questions (which nest choices)."""
    id = serializers.ReadOnlyField()
    questions = QuestionSerializer(many=True, read_only=True)

    class Meta:
        model = Poll
        fields = ['id', 'title', 'start_date', 'end_date', 'questions']
class AnswerSerializer(serializers.ModelSerializer):
    """A user's answer to a question.

    ``user`` is rendered by username; ``choices`` and ``question`` are left
    as default (pk-based) relations.
    """
    id = serializers.ReadOnlyField()
    user = serializers.SlugRelatedField(
        slug_field='username', read_only=True)

    class Meta:
        model = Answer
        fields = ['id', 'user', 'answer_text', 'created', 'choices', 'question']
48109 | """
爬取原网页的html,过滤新闻内容并重新拼接,保留原网页样式。
"""
import pymysql
import datetime
import requests
from lxml import etree
import pdfkit
import os
import time
import json
import re
# 敏感词过滤类,AC自动机
import Ac_auto
# Task id for this crawler job.
task_id = 2
# Root URL and display name of the site being crawled.
spider_url = 'https://news.cqu.edu.cn/newsv2/'
spider_name = '重大新闻网'
# Home/index pages are only crawled on these months/days (Jan 1 and Jul 1).
spider_month = [1, 7]
spider_day = [1]
# Delay between requests, in seconds.
sleep_time = 0.1
# MySQL connection (NOTE(review): opened at import time with hard-coded
# credentials -- consider moving to a config file / environment variables).
conn = pymysql.connect(
    host='localhost',
    port=3307,
    user='root',
    passwd='<PASSWORD>',
    db='spider_test',
    use_unicode=True,
    charset="utf8mb4"
)
# INSERT statement for the crawl-result table.
insert_result = '''
INSERT INTO t_spider_result VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NULL)
'''
# Global de-duplication map persisted to disk: key = crawled URL, value = title.
dict_data = dict()
# pdfkit configuration (path to the wkhtmltopdf binary) and render options.
confg = pdfkit.configuration(wkhtmltopdf=r'/usr/local/bin/wkhtmltopdf')
options = {
    'page-size': 'A4',
    # BUG FIX: was ``1920*1080`` which evaluates to the integer 2073600;
    # wkhtmltopdf's --viewport-size expects a "<width>x<height>" string.
    'viewport-size': '1920x1080'
}
# Browser-like User-Agent so requests are not rejected as a bot.
headers = {
    'User-Agent':
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;'
}
# Fetch an XPath expression from the DB config table and evaluate it on a page.
def get_xpath_content(html, xpath_name):
    """Look up the XPath stored under ``xpath_name`` in t_spider_config_xpath,
    evaluate it against ``html`` (an lxml element tree) and normalise the
    result.

    Returns a stripped string when the match has fewer than two elements,
    otherwise a list with empty/whitespace-only entries removed.

    Raises ``etree.XPathEvalError`` when no config row exists for
    ``xpath_name``; every call site already catches this exception and
    reports the missing record.
    """
    def not_empty(s):
        # Drop entries that are empty or whitespace-only.
        return s and s.strip()
    cur.execute("SELECT xpath FROM t_spider_config_xpath WHERE name = %s", xpath_name)
    row = cur.fetchone()
    if row is None:
        # BUG FIX: fetchone() returns None for a missing config record; the
        # old code then crashed with an uncaught TypeError (``None[0]``)
        # instead of the XPathEvalError that callers handle.
        raise etree.XPathEvalError('no xpath configured for {!r}'.format(xpath_name))
    xpath = row[0]
    content = html.xpath(xpath)
    # Fewer than two matches: collapse to a single stripped string;
    # otherwise keep the list, filtering out blank entries.
    if len(content) < 2:
        content = ''.join(content)
        content = content.strip()
    else:
        content = list(filter(not_empty, content))
    return content
# Resolve a module's id in the crawler config table (t_spider_conf); the id
# is later written into the result table rows.
def get_conf_id(module_name=None):
    """Return the t_spider_conf id whose moduleName contains ``module_name``.

    Returns None (implicitly, as before) when ``module_name`` is empty/None.
    """
    if not module_name:
        return None
    cur.execute("SELECT id FROM t_spider_conf WHERE moduleName like %s ", '%' + module_name + '%')
    row = cur.fetchone()
    return row[0]
# Collect the URLs of all sections (board URLs) and archive the home page.
def all_urls_list(f_data):
    """Return the list of section URLs of the news site.

    Side effects: on Jan 1 / Jul 1 only, snapshot the site's home page
    (filtered HTML to disk + DB row + PDF) unless already recorded in the
    persisted de-dup map. ``f_data`` is the open file holding the JSON
    de-dup dictionary (URL -> title).
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    heading = '新闻网'
    # Home pages are only crawled on the configured dates (Jan 1 / Jul 1).
    run_date = datetime.date.today()
    if run_date.month in spider_month and run_date.day in spider_day:
        print('正在爬取 {} 主页。'.format(spider_name))
        # Record the home page unless it is already in the de-dup map.
        judge = spider_url in dict_data.keys()
        if not judge:
            dict_data[spider_url] = heading
            # Create the output directory if it does not exist yet.
            now_dir = os.getcwd()
            new_dir = now_dir + '/' + heading + '首页'
            dir_judge = os.path.exists(new_dir)
            if not dir_judge:
                os.mkdir(new_dir)
            res = requests.get(spider_url, headers=headers)
            res.encoding = 'UTF-8'
            raw_html = res.text
            # Strip sensitive words and rewrite relative resource paths.
            html_filter = sensitive_word_filter(raw_html)
            html_filter = path_rewrite(html_filter)
            timestamp = round(time.time())
            html_file = new_dir + '/' + str(timestamp) + '.html'
            pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
            # Config-table id stored with the result row.
            conf_id = get_conf_id('所有栏目')
            time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            cur.execute(insert_result, (conf_id, 'index', spider_url, html_filter, html_file, pdf_file, time_now, heading, run_date, ''))
            conn.commit()
            # Persist the updated de-dup map back to disk.
            json_data = json.dumps(dict_data)
            f_data.seek(0, 0)
            f_data.write(json_data)
            try:
                with open(html_file, 'w+', encoding='UTF-8') as f1:
                    f1.write(html_filter)
                # Render the page to PDF via wkhtmltopdf.
                pdfkit.from_url(spider_url, pdf_file, configuration=confg, options=options)
                print('《{}》 的首页已储存,转换pdf格式已成功。'.format(heading))
                time.sleep(sleep_time)
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
        else:
            print('{} 首页记录已爬取过且保存在数据库中!'.format(heading))
    else:
        print('爬虫程序启动日期并非 ', end='')
        for month in spider_month:
            for day in spider_day:
                print('{} 月 {} 日, '.format(month, day), end='')
        print('{} 主页不做爬取!'.format(spider_name))
    r = requests.get(spider_url, headers=headers)
    r.encoding = 'UTF-8'
    html = etree.HTML(r.text)
    news_heading_url_list = []
    try:
        news_heading_url_list = get_xpath_content(html, '所有栏目URL的xpath')
        # Drop the home-page URL itself (first entry).
        news_heading_url_list.remove(news_heading_url_list[0])
        # Append the two extra boards: express news and special topics.
        news_heading_url_list.append('https://news.cqu.edu.cn/newsv2/list-15.html')
        news_heading_url_list.append('http://news.cqu.edu.cn/kjcd/')
    except IndexError:
        print("xpath配置错误!")
    except etree.XPathEvalError:
        print("数据库里未找到记录!")
    return news_heading_url_list
# Build the list of per-page (listing) URLs for one section, archiving the
# section's first page on the configured dates. Used for the news modules,
# media coverage, notices/bulletins, academic previews and express news.
def get_url_list(url, all_urls, f_data):
    """Return all listing-page URLs of the section at ``url``.

    ``all_urls`` is the full section list (needed to derive the section's
    title xpath by position); ``f_data`` holds the JSON de-dup map.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    url_list = []
    r = requests.get(url, headers=headers)
    r.encoding = 'UTF-8'
    html = etree.HTML(r.text)
    news_heading = ''
    # The express-news board ('快讯') uses a different title xpath and a
    # different pagination scheme (list-15-<n>.html instead of ?page=<n>).
    if url == 'https://news.cqu.edu.cn/newsv2/list-15.html':
        try:
            news_heading = get_xpath_content(html, '快讯类栏目标题xpath')
            news_heading = ''.join(news_heading)
        except IndexError:
            print("xpath配置错误!")
        except etree.XPathEvalError:
            print("数据库里未找到记录!")
        temp_url = url
    else:
        cur.execute("SELECT xpath from t_spider_config_xpath where name = %s", '新闻类栏目标题xpath')
        xpath = cur.fetchone()
        xpath = xpath[0]
        # The stored xpath contains a '?' placeholder that is replaced with
        # the section's position-based index (offset by 2) in the page DOM.
        index = all_urls.index(url)
        xpath = xpath.replace('?', str(index + 2))
        try:
            news_heading = html.xpath(xpath)
            news_heading = ''.join(news_heading)
        except IndexError:
            print("xpath配置错误!")
        except etree.XPathEvalError:
            print("数据库里未找到记录!")
        temp_url = url + '?page=1'
    # Read the maximum page number from the pager.
    # NOTE(review): relies on the pager always having 12 anchors (a[12]) and
    # on int() of that text succeeding -- verify against the live markup.
    page = html.xpath('/html/body/div[@class="row"]/div/div[@class="lists"]/div[@class="page"]/a[12]/text()')
    page = ''.join(page)
    max_page = int(page)
    # Section index pages are only crawled on the configured dates.
    run_date = datetime.date.today()
    if run_date.month in spider_month and run_date.day in spider_day:
        list_heading = '各级首页'
        print('正在爬取 {} 栏目首页。'.format(news_heading))
        # Record the first listing page unless already in the de-dup map.
        judge = temp_url in dict_data.keys()
        if not judge:
            dict_data[temp_url] = list_heading
            # Create the output directory if it does not exist yet.
            now_dir = os.getcwd()
            new_dir = now_dir + '/' + news_heading
            dir_judge = os.path.exists(new_dir)
            if not dir_judge:
                os.mkdir(new_dir)
            res = requests.get(temp_url, headers=headers)
            res.encoding = 'UTF-8'
            raw_html = res.text
            # Strip sensitive words and rewrite relative resource paths.
            html_filter = sensitive_word_filter(raw_html)
            html_filter = path_rewrite(html_filter)
            timestamp = round(time.time())
            html_file = new_dir + '/' + str(timestamp) + '.html'
            pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
            # Config-table id stored with the result row.
            conf_id = get_conf_id(news_heading)
            time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            cur.execute(insert_result, (conf_id, 'list', temp_url, html_filter, html_file, pdf_file, time_now, list_heading, run_date, ''))
            conn.commit()
            # Persist the updated de-dup map back to disk.
            json_data = json.dumps(dict_data)
            f_data.seek(0, 0)
            f_data.write(json_data)
            try:
                with open(html_file, 'w+', encoding='UTF-8') as f1:
                    f1.write(html_filter)
                # Render the page to PDF via wkhtmltopdf.
                pdfkit.from_url(temp_url, pdf_file, configuration=confg, options=options)
                print('栏目 《{}》 的首页已储存,转换pdf格式已成功。'.format(news_heading))
                time.sleep(sleep_time)
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
        else:
            print('{} 栏目 首页记录已爬取过且保存在数据库中!'.format(news_heading))
    else:
        print('爬虫程序启动日期并非 ', end='')
        for month in spider_month:
            for day in spider_day:
                print('{} 月 {} 日, '.format(month, day), end='')
        print('{} 栏目首页不做爬取!'.format(news_heading))
    # Enumerate every listing page of the section.
    if url == 'https://news.cqu.edu.cn/newsv2/list-15.html':
        # Express board: list-15.html -> list-15-<n>.html
        for i in range(1, max_page + 1):
            temp_url = url[:-5] + '-' + str(i) + '.html'
            url_list.append(temp_url)
    else:
        # Regular boards: <url>?page=<n>
        for i in range(1, max_page + 1):
            temp_url = url + '?page=' + str(i)
            url_list.append(temp_url)
    return url_list
# Collect the per-topic URLs of the special-topics ('专题') board and archive
# its index page; returns a dict mapping topic title -> topic URL.
def get_topic_url_list(url, f_data):
    """Return {topic title: topic URL} for the special-topics board.

    Also snapshots the board's index page (HTML + DB row + PDF) on the
    configured dates. ``f_data`` holds the JSON de-dup map.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    url_dict = dict()
    r = requests.get(url, headers=headers)
    r.encoding = 'UTF-8'
    html = etree.HTML(r.text)
    news_heading = '专题'
    # Board index pages are only crawled on the configured dates.
    run_date = datetime.date.today()
    if run_date.month in spider_month and run_date.day in spider_day:
        list_heading = '各级首页'
        print('正在爬取 {} 栏目首页。'.format(news_heading))
        # Record the index page unless already in the de-dup map.
        judge = url in dict_data.keys()
        if not judge:
            dict_data[url] = list_heading
            # Create the output directory if it does not exist yet.
            now_dir = os.getcwd()
            new_dir = now_dir + '/' + news_heading
            dir_judge = os.path.exists(new_dir)
            if not dir_judge:
                os.mkdir(new_dir)
            res = requests.get(url, headers=headers)
            res.encoding = 'UTF-8'
            raw_html = res.text
            # Strip sensitive words and rewrite relative resource paths.
            html_filter = sensitive_word_filter(raw_html)
            html_filter = path_rewrite(html_filter)
            timestamp = round(time.time())
            html_file = new_dir + '/' + str(timestamp) + '.html'
            pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
            # Config-table id stored with the result row.
            conf_id = get_conf_id(news_heading)
            time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            cur.execute(insert_result, (conf_id, 'list', url, html_filter, html_file, pdf_file, time_now, list_heading, run_date, ''))
            conn.commit()
            # Persist the updated de-dup map back to disk.
            json_data = json.dumps(dict_data)
            f_data.seek(0, 0)
            f_data.write(json_data)
            try:
                with open(html_file, 'w+', encoding='UTF-8') as f1:
                    f1.write(html_filter)
                # Render the page to PDF via wkhtmltopdf.
                pdfkit.from_url(url, pdf_file, configuration=confg, options=options)
                print('栏目 《{}》 的主页已储存,转换pdf格式已成功。'.format(news_heading))
                time.sleep(sleep_time)
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
        else:
            print('{} 栏目 主页记录已爬取过且保存在数据库中!'.format(news_heading))
    else:
        print('爬虫程序启动日期并非 ', end='')
        for month in spider_month:
            for day in spider_day:
                print('{} 月 {} 日, '.format(month, day), end='')
        print('{} 栏目首页不做爬取!'.format(news_heading))
    try:
        topic_urls_list = get_xpath_content(html, '专题网址xpath')
        topic_names_list = get_xpath_content(html, '专题标题xpath')
        # Add the four featured topics from the front page (ids 84-87) with
        # their hard-coded display names.
        topic_name = ['毕业季|青春不落幕 友谊不散场', '辉煌70年•追梦重大人', '不忘初心 牢记使命', '一带一路年会']
        for i in range(4, 8):
            topic_urls_list.append('http://news.cqu.edu.cn/newsv2/index.php?m=special&c=index&specialid=8' + str(i))
            topic_names_list.append(topic_name[(i - 4)])
        # Prefix every topic title with '专题_' to distinguish it.
        temp_list = []
        for each in topic_names_list:
            temp_list.append('专题_' + each)
        topic_names_list = temp_list
        url_dict = dict(zip(topic_names_list, topic_urls_list))
        # Dict key: topic title, value: topic link.
    except IndexError:
        print("xpath配置错误!")
    except etree.XPathEvalError:
        print("数据库里未找到记录!")
    return url_dict
# Walk every listing page of a news-module section, archive each article
# (filtered HTML + DB row + PDF) and record its archival metadata.
def get_news_info(url_list, module_url, all_urls, f_data):
    """Crawl all articles of the news-module section at ``module_url``.

    ``url_list`` is every listing page of that section, ``all_urls`` the full
    section list (used to derive the title xpath), ``f_data`` the open file
    with the JSON de-dup map.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    # Config-table id stored with each result row.
    conf_id = get_conf_id('新闻模块')
    # Running article counter across all pages.
    sum_i = 0
    # Current listing-page counter.
    page = 1
    # Resolve the section title.
    news_heading = ''
    dict_news = dict()
    dict_news = {'网站名称': spider_name, '网站域名': spider_url}
    r = requests.get(module_url, headers=headers)
    r.encoding = 'UTF-8'
    html = etree.HTML(r.text)
    cur.execute("SELECT xpath from t_spider_config_xpath where name = %s", '新闻类栏目标题xpath')
    xpath = cur.fetchone()
    xpath = xpath[0]
    # Replace the '?' placeholder with the section's position-based index.
    index = all_urls.index(module_url)
    xpath = xpath.replace('?', str(index + 2))
    try:
        news_heading = html.xpath(xpath)
        news_heading = ''.join(news_heading)
    except IndexError:
        print("xpath配置错误!")
    except etree.XPathEvalError:
        print("数据库里未找到记录!")
    # Create the output directory if it does not exist yet.
    now_dir = os.getcwd()
    new_dir = now_dir + '/' + news_heading
    dir_judge = os.path.exists(new_dir)
    if not dir_judge:
        os.mkdir(new_dir)
    # Iterate every listing page of the section.
    for url in url_list:
        r = requests.get(url, headers=headers)
        r.encoding = 'UTF-8'
        raw_html = r.text
        html = etree.HTML(raw_html)
        links_list = get_xpath_content(html, '新闻模块网址xpath')
        title_list = get_xpath_content(html, '新闻模块标题xpath')
        # Pair each article URL with its title.
        for each_url, title in zip(links_list, title_list):
            print('正在爬取 {} 栏目下,第 {} 页 总第 {} 条新闻。'.format(news_heading, page, sum_i + 1))
            # Skip articles already present in the de-dup map.
            judge = each_url in dict_data.keys()
            try:
                if not judge:
                    dict_data[each_url] = title
                    r = requests.get(each_url, headers=headers)
                    r.encoding = 'UTF-8'
                    raw_html = r.text
                    html = etree.HTML(raw_html)
                    # Strip sensitive words and rewrite resource paths.
                    html_filter = sensitive_word_filter(raw_html)
                    html_filter = path_rewrite(html_filter)
                    timestamp = round(time.time())
                    html_file = new_dir + '/' + str(timestamp) + '.html'
                    pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
                    dict_news['所属栏目'] = news_heading
                    # Pull every configured '新闻模块*' xpath and store the
                    # extracted value under the field name embedded in the
                    # config key ([4:-5] strips the prefix and 'xpath').
                    try:
                        cur.execute("SELECT name from t_spider_config_xpath where name like %s", '新闻模块' + '%')
                        xpath_name = cur.fetchall()
                        for each in xpath_name:
                            dict_news[each[0][4:-5]] = get_xpath_content(html, each[0])
                    except IndexError:
                        print("xpath配置错误!")
                    except etree.XPathEvalError:
                        print("数据库里未找到记录!")
                    dict_news['标题'] = title
                    dict_news['网址'] = each_url
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    dict_news['采集时间'] = time_now
                    dict_news['采集人'] = '档案馆'
                    if dict_news['发布时间']:
                        release_time = dict_news['发布时间']
                    else:
                        release_time = None
                    json_dict = json.dumps(dict_news, ensure_ascii=False, indent=4)
                    print(json_dict)
                    judge_identifier = not_found_judge(raw_html, r)
                    # Only archive pages that are not 404s.
                    if judge_identifier:
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, html_file, pdf_file,
                                                    time_now, news_heading, release_time, json_dict))
                        conn.commit()
                        # Persist the updated de-dup map back to disk.
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        sum_i += 1
                        with open(html_file, 'w+', encoding='UTF-8') as f1:
                            f1.write(html_filter)
                        # Render the page to PDF via wkhtmltopdf.
                        pdfkit.from_url(each_url, pdf_file, configuration=confg, options=options)
                        print('该新闻《{}》pdf格式已转换成功。'.format(title))
                        time.sleep(sleep_time)
                    else:
                        # Record dead pages with '404 not found' as content.
                        html_filter = '404 not found'
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, '', '',
                                                    time_now, news_heading, None, json_dict))
                        conn.commit()
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        print('该新闻《{}》网页不存在, 以‘404 not found’为网页内容存入数据库。'.format(title))
                        sum_i += 1
                else:
                    sum_i += 1
                    print('{} 栏目 的 第 {} 条新闻 已爬取过且保存在数据库中!'.format(news_heading, sum_i))
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
            except IndexError:
                print("该栏目《{}》下的新闻已全部爬取完!".format(news_heading))
                break
        print('第{}页已经爬取完'.format(page))
        page += 1
    print('{} 栏目下 共有{}页 {}条新闻'.format(news_heading, page - 1, sum_i))
# Walk every listing page of the media-coverage ('媒体重大') board, archive
# each article (filtered HTML + DB row + PDF) and record its metadata.
def get_media_info(url_list, f_data):
    """Crawl all articles of the media-coverage board.

    ``url_list`` contains every listing page; ``f_data`` is the open file
    holding the JSON de-dup map.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    # Running article counter across all pages.
    sum_i = 0
    # Current listing-page counter.
    page = 1
    # Index used while pairing month/day fragments of the release dates.
    i = 0
    news_heading = '媒体重大'
    # Config-table id stored with each result row.
    conf_id = get_conf_id(news_heading)
    dict_media = dict()
    dict_media = {'网站名称': spider_name, '网站域名': spider_url}
    # Create the output directory if it does not exist yet.
    now_dir = os.getcwd()
    new_dir = now_dir + '/' + news_heading
    dir_judge = os.path.exists(new_dir)
    if not dir_judge:
        os.mkdir(new_dir)
    # Iterate every listing page.
    for url in url_list:
        r = requests.get(url, headers=headers)
        r.encoding = 'UTF-8'
        raw_html = r.text
        html = etree.HTML(raw_html)
        links_list = get_xpath_content(html, '媒体重大网址xpath')
        title_list = get_xpath_content(html, '媒体重大标题xpath')
        release_time_list = get_xpath_content(html, '媒体重大发布时间xpath')
        # Normalise release dates: the xpath yields alternating month/day
        # fragments, which are stripped and joined as '<m>月<d>日'.
        temp_list = []
        for each in release_time_list:
            each = each.strip()
            temp_list.append(each)
        release_time_list = []
        while i < len(temp_list) - 1:
            release_time = temp_list[i] + '月' + temp_list[i + 1] + '日'
            release_time_list.append(release_time)
            i += 2
        # Reset the pairing index for the next page.
        i = 0
        # Pair each article URL with its release date and title.
        for each_url, release_time, title in zip(links_list, release_time_list, title_list):
            print('正在爬取 {} 栏目下,第 {} 页 总第 {} 条新闻。'.format(news_heading, page, sum_i + 1))
            # Skip articles already present in the de-dup map.
            judge = each_url in dict_data.keys()
            try:
                if not judge:
                    dict_data[each_url] = title
                    r = requests.get(each_url, headers=headers)
                    r.encoding = 'UTF-8'
                    raw_html = r.text
                    html = etree.HTML(raw_html)
                    # Strip sensitive words and rewrite resource paths.
                    html_filter = sensitive_word_filter(raw_html)
                    html_filter = path_rewrite(html_filter)
                    timestamp = round(time.time())
                    html_file = new_dir + '/' + str(timestamp) + '.html'
                    pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
                    resource = ''
                    dict_media['所属栏目'] = news_heading
                    # Pull every configured '媒体重大*' xpath; the field name
                    # is embedded in the config key ([4:-5] strips prefix
                    # and 'xpath').
                    try:
                        cur.execute("SELECT name from t_spider_config_xpath where name like %s", news_heading + '%')
                        xpath_name = cur.fetchall()
                        for each in xpath_name:
                            if each[0][4:-5] == '具体新闻内容':
                                # Last fragment of the body is the source line.
                                resource = get_xpath_content(html, each[0])[-1]
                            # NOTE(review): this `else` pairs only with the
                            # '作者所属单位' check, so '具体新闻内容' also falls
                            # through to the else branch and is stored --
                            # confirm this is intended.
                            if each[0][4:-5] == '作者所属单位':
                                department = get_xpath_content(html, each[0])[:-4]
                                dict_media[each[0][4:-5]] = department
                            else:
                                dict_media[each[0][4:-5]] = get_xpath_content(html, each[0])
                    except IndexError:
                        print("xpath配置错误!")
                    except etree.XPathEvalError:
                        print("数据库里未找到记录!")
                    dict_media['标题'] = title
                    dict_media['发布时间'] = release_time
                    dict_media['来源(转载来源)'] = resource
                    dict_media['网址'] = each_url
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    dict_media['采集时间'] = time_now
                    dict_media['采集人'] = '档案馆'
                    json_dict = json.dumps(dict_media, ensure_ascii=False, indent=4)
                    print(json_dict)
                    judge_identifier = not_found_judge(raw_html, r)
                    # Only archive pages that are not 404s.
                    if judge_identifier:
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, html_file, pdf_file,
                                                    time_now, news_heading, None, json_dict))
                        conn.commit()
                        # Persist the updated de-dup map back to disk.
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        sum_i += 1
                        with open(html_file, 'w+', encoding='UTF-8') as f1:
                            f1.write(html_filter)
                        # Render the page to PDF via wkhtmltopdf.
                        pdfkit.from_url(each_url, pdf_file, configuration=confg, options=options)
                        print('该新闻《{}》pdf格式已转换成功。'.format(title))
                        time.sleep(sleep_time)
                    else:
                        # Record dead pages with '404 not found' as content.
                        html_filter = '404 not found'
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, '', '',
                                                    time_now, news_heading, None, json_dict))
                        conn.commit()
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        print('该新闻《{}》网页不存在, 以‘404 not found’为网页内容存入数据库。'.format(title))
                        sum_i += 1
                else:
                    sum_i += 1
                    print('{} 栏目 的 第 {} 条新闻 已爬取过且保存在数据库中!'.format(news_heading, sum_i))
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
            except IndexError:
                print("该栏目《{}》下的媒体新闻已全部爬取完!".format(news_heading))
                break
        print('第{}页已经爬取完'.format(page))
        page += 1
    print('{} 栏目下 共有{}页 {}条媒体新闻'.format(news_heading, page - 1, sum_i))
# Walk every listing page of the notices/bulletins ('通知公告简报') board,
# archive each item (filtered HTML + DB row + PDF) and record its metadata.
def get_notice_info(url_list, f_data):
    """Crawl all items of the notices/bulletins board.

    ``url_list`` contains every listing page; ``f_data`` is the open file
    holding the JSON de-dup map.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    # Running notice counter across all pages.
    sum_i = 0
    # Current listing-page counter.
    page = 1
    news_heading = '通知公告简报'
    # Config-table id stored with each result row.
    conf_id = get_conf_id(news_heading)
    dict_notice = dict()
    dict_notice = {'网站名称': spider_name, '网站域名': spider_url}
    # Create the output directory if it does not exist yet.
    now_dir = os.getcwd()
    new_dir = now_dir + '/' + news_heading
    dir_judge = os.path.exists(new_dir)
    if not dir_judge:
        os.mkdir(new_dir)
    # Iterate every listing page.
    for url in url_list:
        r = requests.get(url, headers=headers)
        r.encoding = 'UTF-8'
        raw_html = r.text
        html = etree.HTML(raw_html)
        links_list = get_xpath_content(html, '通知公告简报网址xpath')
        title_list = get_xpath_content(html, '通知公告简报标题xpath')
        # Pair each notice URL with its title.
        for each_url, title in zip(links_list, title_list):
            print('正在爬取 {} 栏目下,第 {} 页 总第 {} 条通知公告。'.format(news_heading, page, sum_i + 1))
            # Skip notices already present in the de-dup map.
            judge = each_url in dict_data.keys()
            try:
                if not judge:
                    dict_data[each_url] = title
                    r = requests.get(each_url, headers=headers)
                    r.encoding = 'UTF-8'
                    raw_html = r.text
                    html = etree.HTML(raw_html)
                    # Strip sensitive words and rewrite resource paths.
                    html_filter = sensitive_word_filter(raw_html)
                    html_filter = path_rewrite(html_filter)
                    timestamp = round(time.time())
                    html_file = new_dir + '/' + str(timestamp) + '.html'
                    pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
                    # Links that redirect to WeChat public-account articles
                    # carry their real title in the article page itself.
                    if 'weixin' in each_url:
                        title = html.xpath('//h2[@class="rich_media_title"]/text()')
                        title = ''.join(title)
                        title = title.strip()
                    dict_notice['所属栏目'] = news_heading
                    # Pull every configured '通知公告简报*' xpath; the field
                    # name is embedded in the config key ([6:-5] strips the
                    # prefix and 'xpath').
                    try:
                        cur.execute("SELECT name from t_spider_config_xpath where name like %s",
                                    news_heading + '%')
                        xpath_name = cur.fetchall()
                        for each in xpath_name:
                            dict_notice[each[0][6:-5]] = get_xpath_content(html, each[0])
                    except IndexError:
                        print("xpath配置错误!")
                    except etree.XPathEvalError:
                        print("数据库里未找到记录!")
                    dict_notice['标题'] = title
                    dict_notice['网址'] = each_url
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    dict_notice['采集时间'] = time_now
                    dict_notice['采集人'] = '档案馆'
                    if dict_notice['发布时间']:
                        release_time = dict_notice['发布时间']
                    else:
                        release_time = None
                    json_dict = json.dumps(dict_notice, ensure_ascii=False, indent=4)
                    print(json_dict)
                    judge_identifier = not_found_judge(raw_html, r)
                    # Only archive pages that are not 404s.
                    if judge_identifier:
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, html_file, pdf_file,
                                                    time_now, news_heading, release_time, json_dict))
                        conn.commit()
                        # Persist the updated de-dup map back to disk.
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        sum_i += 1
                        with open(html_file, 'w+', encoding='UTF-8') as f1:
                            f1.write(html_filter)
                        # Render the page to PDF via wkhtmltopdf.
                        pdfkit.from_url(each_url, pdf_file, configuration=confg, options=options)
                        print('该通知《{}》pdf格式已转换成功。'.format(title))
                        time.sleep(sleep_time)
                    else:
                        # Record dead pages with '404 not found' as content.
                        html_filter = '404 not found'
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, '', '',
                                                    time_now, news_heading, None, json_dict))
                        conn.commit()
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        print('该通知《{}》网页不存在, 以‘404 not found’为网页内容存入数据库。'.format(title))
                        sum_i += 1
                else:
                    sum_i += 1
                    print('{} 栏目 的 第 {} 条通知 已爬取过且保存在数据库中!'.format(news_heading, sum_i))
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
            except IndexError:
                print("该栏目《{}》下的通知公告简报已全部爬取完!".format(news_heading))
                break
        print('第{}页已经爬取完'.format(page))
        page += 1
    print('{} 栏目下 共有{}页 {}条通知公告简报'.format(news_heading, page - 1, sum_i))
# Walk every listing page of the academic-preview ('学术预告') board, archive
# each lecture announcement (filtered HTML + DB row + PDF) and record its
# metadata.
def get_academic_info(url_list, f_data):
    """Crawl all lecture announcements of the academic-preview board.

    ``url_list`` contains every listing page; ``f_data`` is the open file
    holding the JSON de-dup map.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    # Running lecture counter across all pages.
    sum_i = 0
    # Current listing-page counter.
    page = 1
    news_heading = '学术预告'
    # Config-table id stored with each result row.
    conf_id = get_conf_id(news_heading)
    dict_academic = dict()
    dict_academic = {'网站名称': spider_name, '网站域名': spider_url}
    # Create the output directory if it does not exist yet.
    now_dir = os.getcwd()
    new_dir = now_dir + '/' + news_heading
    dir_judge = os.path.exists(new_dir)
    if not dir_judge:
        os.mkdir(new_dir)
    # Iterate every listing page.
    for url in url_list:
        r = requests.get(url, headers=headers)
        r.encoding = 'UTF-8'
        raw_html = r.text
        html = etree.HTML(raw_html)
        # Keep only absolute links ('http' in the href) among the lecture
        # URLs extracted from the listing.
        links_list = get_xpath_content(html, '学术预告网址xpath')
        temp = []
        for each in links_list:
            if 'http' in each:
                temp.append(each)
        links_list = temp
        title_list = get_xpath_content(html, '学术预告标题xpath')
        # Pair each lecture URL with its title.
        for each_url, title in zip(links_list, title_list):
            print('正在爬取 {} 栏目下,第 {} 页 总第 {} 条讲座。'.format(news_heading, page, sum_i + 1))
            # Skip lectures already present in the de-dup map.
            judge = each_url in dict_data.keys()
            try:
                if not judge:
                    dict_data[each_url] = title
                    r = requests.get(each_url, headers=headers)
                    r.encoding = 'UTF-8'
                    raw_html = r.text
                    html = etree.HTML(raw_html)
                    # Strip sensitive words and rewrite resource paths.
                    html_filter = sensitive_word_filter(raw_html)
                    html_filter = path_rewrite(html_filter)
                    timestamp = round(time.time())
                    html_file = new_dir + '/' + str(timestamp) + '.html'
                    pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
                    dict_academic['所属栏目'] = news_heading
                    # Pull every configured '学术预告*' xpath; the field name
                    # is embedded in the config key ([4:-5] strips the
                    # prefix and 'xpath').
                    try:
                        cur.execute("SELECT name from t_spider_config_xpath where name like %s", news_heading + '%')
                        xpath_name = cur.fetchall()
                        for each in xpath_name:
                            dict_academic[each[0][4:-5]] = get_xpath_content(html, each[0])
                    except IndexError:
                        print("xpath配置错误!")
                    except etree.XPathEvalError:
                        print("数据库里未找到记录!")
                    dict_academic['标题'] = title
                    dict_academic['网址'] = each_url
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    dict_academic['采集时间'] = time_now
                    dict_academic['采集人'] = '档案馆'
                    json_dict = json.dumps(dict_academic, ensure_ascii=False, indent=4)
                    print(json_dict)
                    # NOTE(review): other boards call not_found_judge(raw_html, r)
                    # with the response object as a second argument -- confirm
                    # this single-argument call is intentional.
                    judge_identifier = not_found_judge(raw_html)
                    # Only archive pages that are not 404s.
                    if judge_identifier:
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, html_file, pdf_file,
                                                    time_now, news_heading, None, json_dict))
                        conn.commit()
                        # Persist the updated de-dup map back to disk.
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        sum_i += 1
                        with open(html_file, 'w+', encoding='UTF-8') as f1:
                            f1.write(html_filter)
                        # Render the page to PDF via wkhtmltopdf.
                        pdfkit.from_url(each_url, pdf_file, configuration=confg, options=options)
                        print('该讲座预告《{}》pdf格式已转换成功。'.format(title))
                        time.sleep(sleep_time)
                    else:
                        # Record dead pages with '404 not found' as content.
                        html_filter = '404 not found'
                        cur.execute(insert_result, (conf_id, 'detail', each_url, html_filter, '', '',
                                                    time_now, news_heading, None, json_dict))
                        conn.commit()
                        json_data = json.dumps(dict_data)
                        f_data.seek(0, 0)
                        f_data.write(json_data)
                        print('该讲座预告《{}》网页不存在, 以‘404 not found’为网页内容存入数据库。'.format(title))
                        sum_i += 1
                else:
                    sum_i += 1
                    print('{} 栏目 的 第 {} 条讲座预告 已爬取过且保存在数据库中!'.format(news_heading, sum_i))
            except IOError:
                print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
            except IndexError:
                print("该栏目《{}》下的讲座预告已全部爬取完!".format(news_heading))
                break
        print('第{}页已经爬取完'.format(page))
        page += 1
    print('{} 栏目下 共有{}页 {}条讲座预告'.format(news_heading, page - 1, sum_i))
# Walk every listing page of the express-news ('快讯') board. Express items
# have no detail pages: each listing page is archived once as HTML/PDF, and
# one DB row is written per express item on the page.
def get_express_info(url_list, f_data):
    """Crawl all express-news items, page by page.

    ``url_list`` contains every listing page; ``f_data`` is the open file
    holding the JSON de-dup map. De-duplication here is per page, not per
    item.
    """
    global dict_data
    # Load previously crawled URL records from the persisted JSON file.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    # Running express-item counter across all pages.
    sum_i = 0
    # Current listing-page counter.
    page = 1
    # Index used while pairing month/day fragments of the release dates.
    i = 0
    news_heading = '快讯'
    # Config-table id stored with each result row.
    conf_id = get_conf_id(news_heading)
    dict_express = dict()
    dict_express = {'网站名称': spider_name, '网站域名': spider_url}
    # Create the output directory if it does not exist yet.
    now_dir = os.getcwd()
    new_dir = now_dir + '/' + news_heading
    dir_judge = os.path.exists(new_dir)
    if not dir_judge:
        os.mkdir(new_dir)
    for url in url_list:
        # Skip listing pages already present in the de-dup map.
        judge = url in dict_data.keys()
        try:
            if not judge:
                # Record the page URL under a synthetic '快讯第<n>页' title.
                express_title = '快讯第' + str(page) + '页'
                dict_data[url] = express_title
                json_data = json.dumps(dict_data)
                f_data.seek(0, 0)
                f_data.write(json_data)
                r = requests.get(url, headers=headers)
                r.encoding = 'UTF-8'
                raw_html = r.text
                html = etree.HTML(raw_html)
                # Sensitive-word filtering only; no path rewrite here.
                html_filter = sensitive_word_filter(raw_html)
                timestamp = round(time.time())
                html_file = new_dir + '/' + str(timestamp) + '.html'
                pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
                # Extract release times, titles and bodies of all express
                # items on this page via the configured '快讯*' xpaths
                # ([2:-5] strips the prefix and 'xpath').
                release_time_list, title_list, content_list = [], [], []
                try:
                    cur.execute("SELECT name from t_spider_config_xpath where name like %s", '快讯' + '%')
                    xpath_name = cur.fetchall()
                    for each in xpath_name:
                        dict_key = each[0][2:-5]
                        if dict_key == '标题':
                            title_list = get_xpath_content(html, each[0])
                        if dict_key == '发布时间':
                            release_time_list = get_xpath_content(html, each[0])
                        if dict_key == '具体内容':
                            content_list = get_xpath_content(html, each[0])
                        if dict_key != '类栏目标题':
                            dict_express[dict_key] = ''
                except IndexError:
                    print("xpath配置错误!")
                except etree.XPathEvalError:
                    print("数据库里未找到记录!")
                # Normalise release dates: alternating month/day fragments
                # are stripped and joined as '<m>月<d>日'.
                temp_list = []
                for each in release_time_list:
                    each = each.strip()
                    temp_list.append(each)
                release_time_list = []
                while i < len(temp_list)-1:
                    release_time = temp_list[i] + '月' + temp_list[i+1] + '日'
                    release_time_list.append(release_time)
                    i += 2
                # Reset the pairing index for the next page.
                i = 0
                # Strip whitespace from the item bodies.
                temp_list = []
                for each in content_list:
                    each = each.strip()
                    temp_list.append(each)
                content_list = temp_list
                # One DB row per express item on this page.
                for release_time, title, content in zip(release_time_list, title_list, content_list):
                    print('正在爬取 {} 栏目下,第 {} 页 总第 {} 条快讯。'.format(news_heading, page, sum_i + 1))
                    print('发布时间:{}, 快讯标题:{}, 快讯内容:{}'.format(release_time, title, content))
                    # Update the metadata dict and serialise it as JSON.
                    dict_express['所属栏目'] = news_heading
                    dict_express['标题'] = title
                    dict_express['发布时间'] = release_time
                    dict_express['具体内容'] = content
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    dict_express['采集时间'] = time_now
                    dict_express['采集人'] = '档案馆'
                    json_dict = json.dumps(dict_express, ensure_ascii=False, indent=4)
                    print(json_dict)
                    cur.execute(insert_result, (conf_id, 'detail', url, html_filter, html_file, pdf_file,
                                                time_now, news_heading, None, json_dict))
                    conn.commit()
                    sum_i += 1
                # Archive the whole listing page once (HTML + PDF).
                with open(html_file, 'w+', encoding='UTF-8') as f1:
                    f1.write(html_filter)
                # Render the filtered HTML string to PDF.
                pdfkit.from_string(html_filter, pdf_file, configuration=confg, options=options)
                print('快讯第 {} 页pdf格式已转换成功。'.format(page))
                time.sleep(sleep_time)
            else:
                print('{} 栏目 第{}页快讯 已爬取过且保存在数据库中!'.format(news_heading, page))
                # NOTE(review): assumes exactly 20 express items per already
                # crawled page -- confirm against the site's page size.
                sum_i += 20
        except IOError:
            print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
        except IndexError:
            print("该栏目《{}》下的新闻已全部爬取完!".format(news_heading))
            break
        print('第{}页已经爬取完'.format(page))
        page += 1
    print('{} 栏目下 共有{}页 {}条快讯'.format(news_heading, page - 1, sum_i))
# 获取专题的各个详细页面html,并转成pdf格式保存
def get_topic_info(url_dict, f_data):
    """Fetch each special-topic ("专题") detail page, record it in the DB and
    archive it as HTML + PDF.

    Args:
        url_dict: mapping of topic title -> topic URL.
        f_data: open file object holding the JSON dict of already-crawled
            URLs (used for de-duplication across runs).
    """
    global dict_data
    # Load the de-duplication dictionary persisted from previous runs.
    f_data.seek(0, 0)
    content = f_data.read()
    if content:
        dict_data = json.loads(content)
    # Counter of topics processed so far.
    sum_i = 0
    news_heading = '专题'
    # Look up the configuration-table id; stored with every result row.
    conf_id = get_conf_id(news_heading)
    dict_topic = dict()
    dict_topic = {'网站名称': spider_name, '网站域名': spider_url}
    # Create the output folder for this section if it does not exist yet.
    now_dir = os.getcwd()
    new_dir = now_dir + '/' + news_heading
    dir_judge = os.path.exists(new_dir)
    if not dir_judge:
        os.mkdir(new_dir)
    # key: topic title, value: topic URL
    for key, value in url_dict.items():
        print('正在爬取 {} 栏目下,第 {} 个专题。'.format(news_heading, sum_i + 1))
        # Skip URLs already present in the de-duplication dict / database.
        judge = value in dict_data.keys()
        try:
            if not judge:
                dict_data[value] = key
                res = requests.get(value, headers=headers)
                res.encoding = 'UTF-8'
                raw_html = res.text
                # Detect "404 not found" style pages before archiving.
                judge_identifier = not_found_judge(raw_html)
                if judge_identifier:
                    html_filter = sensitive_word_filter(raw_html)
                    timestamp = round(time.time())
                    html_file = new_dir + '/' + str(timestamp) + '.html'
                    pdf_file = new_dir + '/' + str(timestamp) + '.pdf'
                    # Update the record dict and serialise it as JSON.
                    dict_topic['所属栏目'] = news_heading
                    dict_topic['标题'] = key
                    dict_topic['网址'] = value
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    dict_topic['采集时间'] = time_now
                    json_dict = json.dumps(dict_topic, ensure_ascii=False, indent=4)
                    print(json_dict)
                    cur.execute(insert_result, (conf_id, 'detail', value, html_filter, html_file, pdf_file,
                                                time_now, news_heading, None, json_dict))
                    # Persist the updated de-duplication dict back to f_data.
                    json_data = json.dumps(dict_data)
                    f_data.seek(0, 0)
                    f_data.write(json_data)
                    conn.commit()
                    with open(html_file, 'w+', encoding='UTF-8') as f1:
                        f1.write(html_filter)
                    # Convert the live page to PDF for archival.
                    pdfkit.from_url(value, pdf_file, configuration=confg, options=options)
                    print('该专题《{}》pdf格式已转换成功。'.format(key))
                    time.sleep(sleep_time)
                else:
                    # Record the unreachable page as '404 not found' in the DB.
                    html_filter = '404 not found'
                    time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    cur.execute(insert_result,
                                (conf_id, 'detail', value, html_filter, '', '', time_now, news_heading, None, ''))
                    conn.commit()
                    json_data = json.dumps(dict_data)
                    f_data.seek(0, 0)
                    f_data.write(json_data)
                    print('该专题《{}》网页不存在, 以‘404 not found’为网页内容存入数据库。'.format(key))
            else:
                print('{} 栏目 {} 专题已爬取过且保存在数据库中!'.format(news_heading, key))
        except IOError:
            print("Warning: wkhtmltopdf读取文件失败, 可能是网页无法打开或者图片/css样式丢失。")
        except IndexError:
            print("该栏目《{}》下的新闻已全部爬取完!".format(news_heading))
            break
        sum_i += 1
    print('{} 栏目下 共有{}条专题'.format(news_heading, sum_i))
# Decide whether a page is a "404 not found" page; returns 0 for a missing page, 1 for a normal page.
def not_found_judge(html, r=None):
    """Return 1 if *html* looks like a real page, 0 if it is a "not found" page.

    Args:
        html: page source text.
        r: optional requests.Response; when given and the page declares a
            gb2312 charset, the response is re-decoded as gb2312 first.
    """
    judge_identifier = 1
    # Re-decode the response as gb2312 only when the page actually declares
    # that charset.  BUG FIX: str.find() returns -1 when absent, which is
    # truthy, so the old bare `if encode_judge:` re-decoded almost every page;
    # the explicit comparison against -1 is required.
    if r:
        if html.find('gb2312') != -1:
            r.encoding = 'gb2312'
            html = r.text
    # Any of these markers identifies an error / "page not found" page.
    not_found_markers = (
        '404 Not Found',
        '页面不存在',
        '页面未找到',
        'Page Not Found',
        '<div class="content guery" style="display:inline-block;display:-moz-inline-stack;zoom:1;*display:inline; max-width:280px">',
    )
    if any(html.find(marker) != -1 for marker in not_found_markers):
        judge_identifier = 0
        print('该网页目前无法访问!')
    return judge_identifier
# Sensitive-word filtering.
def sensitive_word_filter(content):
    """Replace sensitive words in *content* using an Aho-Corasick automaton.

    NOTE(review): the word list 'sensitive_words.txt' is re-parsed on every
    call — presumably cheap, but confirm for large lists.
    """
    ah = Ac_auto.ac_automation()
    path = 'sensitive_words.txt'
    ah.parse(path)
    content = ah.words_replace(content)
    # Example usage:
    # text1 = "新疆骚乱苹果新品发布会"
    # text2 = ah.words_replace(text1)
    # print(text1)
    # print(text2)
    return content
# Rewrite relative image paths in a page to absolute URLs.
# Takes raw HTML source and returns new HTML with image paths fixed up.
def path_rewrite(html):
    """Rewrite relative '/uploadfile/' references in *html* to absolute URLs."""
    relative_prefix = '="/uploadfile/'
    absolute_prefix = '="http://news.cqu.edu.cn/uploadfile/'
    return html.replace(relative_prefix, absolute_prefix)
def main():
    """Crawl every section of the site, in six passes.

    all_news_urls[0-4]: news sections (general news, teaching & research,
        admissions & employment, exchange & cooperation, campus life)
    all_news_urls[5]: media coverage
    all_news_urls[6]: notices / bulletins
    all_news_urls[7]: academic previews
    all_news_urls[8]: express news
    all_news_urls[9]: special topics
    """
    with open('dict_data.txt', 'r+') as f_data:
        all_news_urls = all_urls_list(f_data)
        # Pass 1: the five news sections.
        for url in all_news_urls[:5]:
            url_list = get_url_list(url, all_news_urls, f_data)
            get_news_info(url_list, url, all_news_urls, f_data)
            time.sleep(sleep_time)
        # Pass 2: media coverage.
        url = all_news_urls[5]
        url_list = get_url_list(url, all_news_urls, f_data)
        get_media_info(url_list, f_data)
        time.sleep(sleep_time)
        # Pass 3: notices / bulletins.
        url = all_news_urls[6]
        url_list = get_url_list(url, all_news_urls, f_data)
        get_notice_info(url_list, f_data)
        time.sleep(sleep_time)
        # Pass 4: academic previews.
        url = all_news_urls[7]
        url_list = get_url_list(url, all_news_urls, f_data)
        get_academic_info(url_list, f_data)
        time.sleep(sleep_time)
        # Pass 5: express news.
        url = all_news_urls[8]
        url_list = get_url_list(url, all_news_urls, f_data)
        get_express_info(url_list, f_data)
        time.sleep(sleep_time)
        # Pass 6: special topics (title -> URL dict rather than a page list).
        url = all_news_urls[9]
        url_dict = get_topic_url_list(url, f_data)
        get_topic_info(url_dict, f_data)
        time.sleep(sleep_time)
    print('{} {} 的爬虫任务已完成!'.format(spider_name, spider_url))
# Cursor shared by all crawl functions above (module-level global).
cur = conn.cursor()
if __name__ == '__main__':
    main()
    # Crawl finished: set the spider task status to -1 (stopped).
    cur.execute("UPDATE t_spider_task SET status = -1 WHERE id = %s", task_id)
    cur.close()
    conn.commit()
    conn.close()
| StarcoderdataPython |
6401584 | # Copyright 2021, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from rad.rest.client.exceptions import RADError
from rad.rest.client.util.print import print_table
from rad.rest.client.api.authentication_1 import Session
from rad.rest.client.api.zfsmgr_1 import ZfsDataset
class CmdZfsGetFilesystems:
    """CLI sub-command that lists ZFS filesystems via a RAD REST session."""

    # Sub-command name and aliases as registered with argparse.
    name = 'get-filesystems'
    aliases = []

    @staticmethod
    def init_parser(subparsers, parent_parser):
        """Register this sub-command's argparse parser on *subparsers*."""
        parser = subparsers.add_parser(CmdZfsGetFilesystems.name,
                                       aliases=CmdZfsGetFilesystems.aliases,
                                       parents=[parent_parser],
                                       formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                       description='Get ZFS filesystems',
                                       help='Get ZFS filesystems')
        parser.add_argument('-s', '--sort',
                            help='Sort the filesystems')
        parser.add_argument('poolname',
                            nargs='*',
                            help='Name of the pool')

    def __init__(self, options):
        """Open a RAD session and run the command.

        Raises:
            RADError: always — the listing itself is not yet implemented.
        """
        with Session(protocol=options.protocol, hostname=options.hostname, port=options.port) as session:
            raise RADError('NYI')
| StarcoderdataPython |
3262930 | from __future__ import annotations
import dataclasses
import logging
import os
import sys
import time
from typing import TYPE_CHECKING, Dict, Iterator, List, Tuple
import click
if TYPE_CHECKING:
import mypy_boto3_ec2.service_resource as ec2_resources
from mypy_boto3_cloudwatch.client import CloudWatchClient
from mypy_boto3_ec2 import EC2ServiceResource
from mypy_boto3_lambda import LambdaClient
from mypy_boto3_route53 import Route53Client
from mypy_boto3_sns import SNSClient
from mypy_boto3_sqs import SQSClient
from mypy_boto3_ssm.client import SSMClient
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(os.getenv("GRAPL_LOG_LEVEL", "INFO"))
LOGGER.addHandler(logging.StreamHandler(stream=sys.stdout))
IN_PROGRESS_STATUSES = {
"Pending",
"InProgress",
"Delayed",
}
def ticker(n: int) -> Iterator[None]:
    """Yield *n* times, sleeping one second before each yield."""
    remaining = n
    while remaining > 0:
        time.sleep(1)
        yield None
        remaining -= 1
@dataclasses.dataclass
class State:
    """Shared graplctl CLI state: deployment identity plus AWS clients."""

    grapl_region: str
    grapl_deployment_name: str
    grapl_version: str
    aws_profile: str
    ec2: EC2ServiceResource
    ssm: SSMClient
    cloudwatch: CloudWatchClient
    sns: SNSClient
    route53: Route53Client
    sqs: SQSClient
    lambda_: LambdaClient  # trailing underscore avoids the `lambda` keyword
# Click decorator that injects the shared State object into commands.
# Prefer this to `pass_obj`
pass_graplctl_state = click.make_pass_decorator(State)
@dataclasses.dataclass
class Tag:
    """An EC2 resource tag as a plain key/value pair."""

    key: str
    value: str

    def into_boto_tag_specification(self) -> Dict[str, str]:
        """Render this tag in the boto {"Key": ..., "Value": ...} shape."""
        spec = {}
        spec["Key"] = self.key
        spec["Value"] = self.value
        return spec

    @classmethod
    def from_boto_tag_specification(cls, tag_specification: Dict[str, str]) -> Tag:
        """Build a Tag from a boto tag-specification dict."""
        tag_key = tag_specification["Key"]
        tag_value = tag_specification["Value"]
        return cls(key=tag_key, value=tag_value)
@dataclasses.dataclass
class Ec2Instance:
    """Subset of EC2 instance attributes that graplctl operates on."""

    instance_id: str
    private_ip_address: str
    private_dns_name: str
    tags: List[Tag]

    @classmethod
    def from_boto_instance(cls, instance: ec2_resources.Instance) -> Ec2Instance:
        """Build an Ec2Instance from a boto3 EC2 Instance resource."""
        return cls(
            instance_id=instance.instance_id,
            private_ip_address=instance.private_ip_address,
            private_dns_name=instance.private_dns_name,
            tags=[Tag.from_boto_tag_specification(tag) for tag in instance.tags],
        )
def get_command_results(
    ssm: SSMClient, command_id: str, instance_ids: List[str]
) -> Iterator[Tuple[str, str]]:
    """Poll until the command result is available for the given
    command_id. Yields the tuple (instance_id, result) from each
    instance.

    Raises:
        Exception: if the command did not finish with Status "Success"
            on some instance.
    """
    LOGGER.info(f"waiting for ssm command {command_id} to complete")
    # Poll every 2s until the command leaves the in-progress states.
    while 1:
        commands = ssm.list_commands(CommandId=command_id)
        if (
            len(commands["Commands"]) < 1
            or commands["Commands"][0]["Status"] in IN_PROGRESS_STATUSES
        ):
            time.sleep(2)
        else:
            LOGGER.info(f"ssm command {command_id} is complete")
            break
    # Collect per-instance output; any failure aborts the generator.
    for instance_id in instance_ids:
        invocation = ssm.get_command_invocation(
            CommandId=command_id,
            InstanceId=instance_id,
            PluginName="runShellScript",
        )
        if invocation["Status"] == "Success":
            yield instance_id, invocation["StandardOutputContent"].strip()
        else:
            LOGGER.error(
                f"command {command_id} instance {instance_id}: {invocation['StandardErrorContent']}"
            )
            raise Exception(
                f"ssm command {command_id} failed on instance {instance_id} with Status: \"{invocation['Status']}\""
            )
| StarcoderdataPython |
167941 | <reponame>Staberinde/data-hub-api<gh_stars>1-10
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import GenericViewSet
from config.settings.types import HawkScope
from datahub.core.auth import PaaSIPAuthentication
from datahub.core.hawk_receiver import (
HawkAuthentication,
HawkResponseSigningMixin,
HawkScopePermission,
)
from datahub.metadata.registry import registry
def _create_metadata_view(mapping):
    """Build a Hawk-authenticated, list-only DRF view for one metadata model.

    *mapping* is a registry entry carrying the queryset, serializer and
    optional filter configuration for that model.
    """
    has_filters = mapping.filterset_fields or mapping.filterset_class
    model = mapping.queryset.model
    attrs = {
        'authentication_classes': (PaaSIPAuthentication, HawkAuthentication),
        'permission_classes': (HawkScopePermission,),
        'required_hawk_scope': HawkScope.metadata,
        # Only attach the filter backend when the mapping defines filters.
        'filter_backends': (DjangoFilterBackend,) if has_filters else (),
        'filterset_class': mapping.filterset_class,
        'filterset_fields': mapping.filterset_fields,
        'pagination_class': None,
        'queryset': mapping.queryset,
        'serializer_class': mapping.serializer,
        '__doc__': f'List all {model._meta.verbose_name_plural}.',
    }
    # Create the viewset class dynamically, then expose only GET -> list.
    view_set = type(
        f'{mapping.model.__name__}ViewSet',
        (HawkResponseSigningMixin, GenericViewSet, ListModelMixin),
        attrs,
    )
    return view_set.as_view({
        'get': 'list',
    })
# ((url_name, view), {'name': url_name}) tuples consumed by URL wiring.
urls_args = []

# programmatically generate metadata views
for name, mapping in registry.mappings.items():
    view = _create_metadata_view(mapping)
    urls_args.append(((name, view), {'name': name}))
| StarcoderdataPython |
6620344 | <gh_stars>0
# SRSI (Stochastic Relative Strength Index)
# https://school.stockcharts.com/doku.php?id=technical_indicators:stochrsi
# https://www.investopedia.com/terms/s/stochrsi.asp
# StochRSI osilatörü, genelleştirilmiş bir fiyat değişikliği analizinden ziyade
# belirli bir menkul kıymetin tarihsel performansına uyumlu daha hassas bir gösterge
# oluşturmak için her iki momentum göstergesinden de yararlanmak üzere geliştirilmiştir.
# Argümanlar:
# close(pandas.Series): veri kümesi 'Kapat' sütunu.
# window(int): n periyodu.
# smooth1(int): Stokastik RSI'nin hareketli ortalaması.
# smooth2(int): hareketli ortalama %K
# fillna(bool): True ise, nan değerlerini doldur.
import pandas as pd
from _utilities import IndicatorMixin
from momentum_rsi import RSIIndicator
class StochRSIIndicator(IndicatorMixin):
    """Stochastic RSI (SRSI).

    Applies the Stochastic oscillator formula to RSI values instead of raw
    prices, yielding an indicator tuned to the security's own RSI range.

    Args:
        close: 'Close' price series.
        window: n period for the RSI and the rolling min/max.
        smooth1: moving-average window for %K.
        smooth2: moving-average window for %D.
        fillna: if True, fill NaN values.
    """

    def __init__(
        self,
        close: pd.Series,
        window: int = 14,
        smooth1: int = 3,
        smooth2: int = 3,
        fillna: bool = False,
    ):
        self._close = close
        self._window = window
        self._smooth1 = smooth1
        self._smooth2 = smooth2
        self._fillna = fillna
        self._run()

    def _run(self):
        # StochRSI = (RSI - min(RSI, n)) / (max(RSI, n) - min(RSI, n))
        self._rsi = RSIIndicator(
            close=self._close, window=self._window, fillna=self._fillna).rsi()
        lowest_low_rsi = self._rsi.rolling(self._window).min()
        self._stochrsi = (self._rsi - lowest_low_rsi) / (
            self._rsi.rolling(self._window).max() - lowest_low_rsi)
        # %K is the smoothed StochRSI.
        self._stochrsi_k = self._stochrsi.rolling(self._smooth1).mean()

    def stochrsi(self):
        """Return the raw Stochastic RSI series."""
        stochrsi_series = self._check_fillna(self._stochrsi)
        return pd.Series(stochrsi_series, name="stochrsi")

    def stochrsi_k(self):
        """Return the %K line (smoothed StochRSI)."""
        stochrsi_k_series = self._check_fillna(self._stochrsi_k)
        return pd.Series(stochrsi_k_series, name="stochrsi_k")

    def stochrsi_d(self):
        """Return the %D line (moving average of %K)."""
        stochrsi_d_series = self._stochrsi_k.rolling(self._smooth2).mean()
        stochrsi_d_series = self._check_fillna(stochrsi_d_series)
        return pd.Series(stochrsi_d_series, name="stochrsi_d")
203543 | import falcon
import uuid
try:
import simplejson as json
except ImportError:
import json
from didery.routing import *
from didery.db.dbing import BaseSurveyDB, DB, DB_SURVEY_RESULTS_NAME
def testSurveyPost(client):
    """POSTing a survey result stores and echoes back the survey data."""
    surveyResult = {
        "ip_address": "127.0.0.1"
    }
    response = client.simulate_post(SURVEY_BASE_PATH, body=json.dumps(surveyResult).encode())
    resp_data = json.loads(response.content)
    # The response maps a single generated id to the stored record.
    resp_key = list(resp_data.keys())[0]
    assert len(resp_data) == 1
    assert resp_data[resp_key]["survey_data"] == surveyResult
def testSurveyGetAll(client):
    """GET on the collection returns every stored survey result."""
    surveyResult = {
        "ip_address": "127.0.0.1"
    }
    client.simulate_post(SURVEY_BASE_PATH, body=json.dumps(surveyResult).encode())
    response = json.loads(client.simulate_get(SURVEY_BASE_PATH).content)
    assert len(response["data"]) == 1
    for survey in response["data"].values():
        assert survey["survey_data"] == surveyResult
def testSurveyGet(client):
    """GET on /<id> returns the survey stored under that id."""
    surveyResult = {
        "ip_address": "127.0.0.1"
    }
    response = client.simulate_post(SURVEY_BASE_PATH, body=json.dumps(surveyResult).encode())
    # The POST response's single key is the generated record id.
    id = list(json.loads(response.content).keys())[0]
    response = client.simulate_get("{}/{}".format(SURVEY_BASE_PATH, id))
    assert json.loads(response.content)["survey_data"] == surveyResult
def testSurveyGetAllInvalidQueryString(client):
    """Query params without values yield a 400 Malformed Query String."""
    # Test that query params have values
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset&limit=10")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string missing value(s)."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=10&limit")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string missing value(s)."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
def testSurveyGetAllInvalidQueryValue(client):
    """Non-numeric offset/limit values yield a 400 Malformed Query String."""
    # Test that query params values are ints
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=a&limit=10")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string value must be a number."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=10&limit=d")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string value must be a number."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
def testSurveyGetAllNegativeQueryValue(client):
    """Negative offset/limit values yield a 400 Malformed Query String."""
    # Test that query params values are non-negative ints
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=-1&limit=10")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string value must be a positive number."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=0&limit=-10")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string value must be a positive number."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
def testSurveyGetAllEmptyQueryValue(client):
    """Empty offset/limit values yield a 400 Malformed Query String."""
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=10&limit=")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string value must be a number."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=&limit=10")
    exp_result = {
        "title": "Malformed Query String",
        "description": "url query string value must be a number."
    }
    assert response.status == falcon.HTTP_400
    assert json.loads(response.content) == exp_result
def testValidGetAllWithQueryString(client):
    """offset/limit paging returns the expected slices of stored records."""
    db = BaseSurveyDB(DB(DB_SURVEY_RESULTS_NAME))
    exp_result = {"data": {}}
    # Seed eleven records directly through the DB layer.
    for i in range(0, 11):
        history = {
            "id": "did:dad:NOf6ZghvGNbFc_wr3CC0tKZHz1qWAR4lD5aM-i0zSjw=",
            "changed": "2000-01-01T00:00:01+00:00",
            "signer": 1,
            "signers": [
                "NOf6ZghvGNbFc_wr3CC0tKZHz1qWAR4lD5aM-i0zSjw=",
                "NOf6ZghvGNbFc_wr3CC0tKZHz1qWAR4lD5aM-i0zSjw=",
                "NOf6ZghvGNbFc_wr3CC0tKZHz1qWAR4lD5aM-i0zSjw="
            ]
        }
        uid = str(uuid.uuid4())
        db.save(uid, history)
        exp_result["data"][uid] = history
    # A limit >= the record count returns everything.
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=0&limit=11")
    result = json.loads(response.content)
    assert response.status == falcon.HTTP_200
    assert result == exp_result
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=0&limit=20")
    result = json.loads(response.content)
    assert response.status == falcon.HTTP_200
    assert result == exp_result
    # limit=0 and an out-of-range offset both return an empty data dict.
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=0&limit=0")
    result = json.loads(response.content)
    exp_result = {"data": {}}
    assert response.status == falcon.HTTP_200
    assert result == exp_result
    response = client.simulate_get(SURVEY_BASE_PATH, query_string="offset=100&limit=10")
    assert response.status == falcon.HTTP_200
    assert json.loads(response.content) == exp_result
def testPostBodySize(client):
    """A survey payload larger than 1 KB is still accepted (201 Created)."""
    surveyResult = {
        "Name": "xyz",
        "Email": "<EMAIL>",
        "Response": {
            "Rank each of the five game concepts on ease of navigation.-SeedQuest": "1",
            "Rank each of the five game concepts on ease of navigation.-Cliffside": "1",
            "Rank each of the five game concepts on ease of navigation.-Laboratory": "1",
            "Rank each of the five game concepts on ease of navigation.-Mind Palace": "1",
            "Rank each of the five game concepts on ease of navigation.-Flatlands": "1",
            "Rank each of the five game concepts on how intuitive and enjoyable the gameplay is.-SeedQuest": "1",
            "Rank each of the five game concepts on how intuitive and enjoyable the gameplay is.-Laboratory": "1",
            "Rank each of the five game concepts on how intuitive and enjoyable the gameplay is.-Mind Palace": "1",
            "Rank each of the five game concepts on how intuitive and enjoyable the gameplay is.-Flatlands": "1",
            "Rank each of the five game concepts on how quickly you were able to learn the game path.-SeedQuest": "1",
            "Rank each of the five game concepts on how quickly you were able to learn the game path.-Cliffside": "1",
            "Rank each of the five game concepts on how quickly you were able to learn the game path.-Laboratory": "1",
            "Rank each of the five game concepts on how quickly you were able to learn the game path.-Mind Palace": "1",
            "Rank each of the five game concepts on how quickly you were able to learn the game path.-Flatlands": "1",
            "Rank each of the five game concepts on overall experience.-SeedQuest": "4th",
            "Rank each of the five game concepts on overall experience.-Cliffside": "3rd",
            "Rank each of the five game concepts on overall experience.-Laboratory": "4th",
            "Rank each of the five game concepts on overall experience.-Memory Palace": "5th",
            "Rank each of the five game concepts on overall experience.-Flatlands": "5th",
            "Do you have any other comments or suggestions about any of the game concepts-Game Navigation": "ewfsdcxcdsfewrfsdczxds",
            "Do you have any other comments or suggestions about any of the game concepts-Memorability": "1",
            "Do you have any other comments or suggestions about any of the game concepts-Art Style": "1"
        }
    }
    data = json.dumps(surveyResult)
    # Sanity check: the serialised payload really exceeds 1 KB.
    assert len(data) > 1000
    response = client.simulate_post(SURVEY_BASE_PATH, body=json.dumps(surveyResult).encode())
    assert response.status == falcon.HTTP_201
| StarcoderdataPython |
1770792 | <filename>rando/feedback/helpers.py<gh_stars>0
from rando import logger
from rando.core.helpers import GeotrekClient
def send_report(**data):
    """Forward a feedback report to the Geotrek server.

    Expects name/email/category/comment keys, plus optional
    latitude/longitude which are encoded as a GeoJSON point.

    Raises:
        Exception: if the server does not answer with a 302 redirect.
    """
    record = dict(
        name=data['name'],
        email=data['email'],
        category=data['category'],
        comment=data['comment'],
        geom=''
    )
    if data['latitude'] and data['longitude']:
        # Build a GeoJSON point string (longitude first, per GeoJSON).
        record['geom'] = '{"type": "Point", "coordinates":[%s, %s]}' % (
            data.pop('longitude'),
            data.pop('latitude'))
    client = GeotrekClient()
    client.login()
    # The Geotrek form view answers 302 on success; anything else is an error.
    reply = client.post('/report/add/', data=record, allow_redirects=False)
    if reply.status_code != 302:
        logger.error("Error at creating feedback report")
        logger.error(reply.content)
        raise Exception("Could not send record")
| StarcoderdataPython |
3342774 | #!/usr/bin/python
"""
xdomainserver.py -- a standalone (very) basic Python Web Server
used to serve up the mod_pubsub web applications from a different
port to the Python PubSub Server to support cross domain testing.
It was built by extending the Python SimpleHTTPServer.
It filters any html files it opens, replacing (at the moment)
'src="/kn' with 'src="http://PubSubServer:Port/kn" where the
"http://PubSubServer:Port" is obtained by reading the prologue.js
file used by the PubSub Server to setup cross domain scripting.
To run the cross domain test setup you need to :
1. Setup mod_pubsub/kn_apps/kn_lib/prologue.js as required
2. Start pubsub.py using the -a or --auto switch - this forces
it to read prologue.js to determine the port to run on.
You should not specify the port command line param, just the
document root and topic root (if required).
3. Start xdomainserver.py - which will also read prologue.js to
determine the server URL to use in its substitutions
You can specify a port on the command line, that is,
xdomainserver.py 8080
Note that the port needs to be different to where pubsub.py is
running
4. Goto http://localhost[:port]/ which will go straight to the
Demo Web Applications index.html page.
5. Run apps/test as required.
Contact Information:
http://mod-pubsub.sf.net/
<EMAIL>
"""
# Copyright 2000-2004 KnowNow, Inc. All rights reserved.
#
# @KNOWNOW_LICENSE_START@
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the KnowNow, Inc., nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @KNOWNOW_LICENSE_END@
#
#
# $Id: xdomainserver.py,v 1.6 2004/04/19 05:39:15 bsittler Exp $
import SimpleHTTPServer
import types
import sys
import mmap
import os
from StringIO import StringIO
from serverutils import *
# define an alias (to save my poor weary fingers!)
BaseClass = SimpleHTTPServer.SimpleHTTPRequestHandler
# define the strings to search for
searchStrList = ['src="/kn', "src='/kn"]
# define the replacement strings
# NOTE there needs to a 1-1 correlation between the search strings and the replace strings
replaceStrList = ['src="%s', "src='%s"]
# define the default mod_pubsub server location
defaultMpsServer = "http://localhost"
class CrossDomainRequestHandler(BaseClass):
    """SimpleHTTPServer handler (Python 2) that rewrites '/kn' script URLs in
    served HTML files to point at the configured PubSub server."""

    def translate_path(self, path):
        """ do a bit of clean up on the path to allow
        the base class to work in more situations
        """
        # strip off any query params
        path = path.split("?")[0]
        return BaseClass.translate_path(self, path)

    def copyfile(self, source, outputfile):
        """If source is an html file perform the cross-domain server search/replace
        prior to letting the base class do its work
        """
        # assume source is untouched
        copySource = source
        # if source is an html file then memory map it to make search/replace easier
        if type(source) == types.FileType:
            if source.name.find(".html") != -1:
                mmapSource = mmap.mmap(source.fileno(), 0, access=mmap.ACCESS_READ)
                strSource = mmapSource.read(mmapSource.size())
                idx = 0
                # Replace each search string with its PubSub-server equivalent
                # (searchStrList/replaceStrList are parallel module lists).
                for searchStr in searchStrList:
                    strSource = strSource.replace(searchStr, replaceStrList[idx])
                    idx = idx + 1
                copySource = StringIO(strSource)
                # print strSource
        # let the base class do the rest of the work
        return BaseClass.copyfile(self, copySource, outputfile)
def main(argv):
# check assumption that we are starting from the python_pubsub directory!
appsDir = os.path.join("..", "kn_apps")
if not os.access(appsDir, os.F_OK):
"Apps directory: %s is not an accessible directory, exiting...\n" % appsDir
# change to the kn_apps dir (as this basic HTTP server only references the
# current directory and below)
os.chdir(appsDir)
# start out using the default mod_pubsub server address
mpsServer = defaultMpsServer
# read the prologue.js file to get the actual server address
try:
prologuePath = os.path.join(appsDir, 'kn_lib', 'prologue.js')
mpsServer = readPubSubServerAddress (prologuePath)
except IOError, (errno, strerror):
sys.stderr.write(
"Warning, problem accessing %s, (%s): %s\n" % (prologuePath, errno, strerror)
)
sys.stderr.flush()
pass
# set the PubSub Server name in the replacement list
for i in range(0,len(replaceStrList)):
replaceStrList[i] = replaceStrList[i] % mpsServer
print "\nUsing mod_pubsub server: %s\n" % mpsServer
SimpleHTTPServer.test(CrossDomainRequestHandler)
if __name__ == "__main__": main(sys.argv)
# End of xdomainserver.py
| StarcoderdataPython |
9673270 | <gh_stars>1-10
"""The tests for the Buienradar sensor platform."""
from unittest.mock import patch
from homeassistant.components.buienradar.const import DOMAIN
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from tests.common import MockConfigEntry
CONDITIONS = ["stationname", "temperature"]
TEST_CFG_DATA = {CONF_LATITUDE: 51.5288504, CONF_LONGITUDE: 5.4002156}
async def test_smoke_test_setup_component(hass):
    """Smoke test for successfully set-up with default config."""
    mock_entry = MockConfigEntry(domain=DOMAIN, unique_id="TEST_ID", data=TEST_CFG_DATA)
    mock_entry.add_to_hass(hass)
    # Sensors are registry-disabled by default; force them enabled so that
    # the setup actually creates entity states we can check.
    with patch(
        "homeassistant.components.buienradar.sensor.BrSensor.entity_registry_enabled_default"
    ) as enabled_by_default_mock:
        enabled_by_default_mock.return_value = True
        await hass.config_entries.async_setup(mock_entry.entry_id)
        await hass.async_block_till_done()
    # No live data is fetched in the test environment, so states are unknown.
    for cond in CONDITIONS:
        state = hass.states.get(f"sensor.buienradar_{cond}")
        assert state.state == "unknown"
9635205 | """
Schema for config file
"""
# Expected structure of the config file: every leaf maps a settings key to
# the Python type its value must have.
CFG_SCHEMA = {
    'main': {
        'experiment_name_prefix': str,
        'seed': int,
        'num_workers': int,
        'parallel': bool,
        'gpus_to_use': str,
        'trains': bool,
        'image_shape': int,
        'paths': {
            'train': str,
            'validation': str,
            'logs': str,
        },
    },
    'train': {
        'num_epochs': int,
        'grad_clip': float,
        'dropout': float,
        'num_hid': int,
        'batch_size': int,
        'in_channel': int,
        'z_shape': int,
        'save_model': bool,
        # Separate learning rates for generator and descriptor/discriminator.
        'lr': {
            'lr_gen_value': float,
            'lr_des_value': float,
        },
    },
}
| StarcoderdataPython |
6400937 | <reponame>curiousrohan1/stockbot
from pandas_datareader import data as pdr
from datetime import datetime
import time
import yfinance as yf
import sqlite3
import configparser
yf.pdr_override()
# noinspection SpellCheckingInspection
tickers = ['HTZ', 'LK', 'OAS', 'XSPA', 'VAL', 'VISL', 'GNUS', 'NE', 'FRSX', 'GCI',
'SRNE', 'LTM', 'DGLY', 'TUES', 'BIOL', 'CBL', 'CIDM', 'MARK', 'CHAP', 'NSPR']
config = configparser.ConfigParser()
config.read('stockbot.properties')
# noinspection SpellCheckingInspection
conn = sqlite3.connect(config.get('main', 'dbFile'))
def get_data(ticker_list):
    """Fetch current quotes for *ticker_list* and append them to the QUOTES
    table, but only between 06:30 and 13:00 local time (presumably US
    Pacific market hours — confirm deployment timezone).
    """
    now = datetime.now()
    cur_date = now.strftime('%Y-%m-%d')
    cur_time = now.strftime('%H:%M')
    # Zero-padded HH:MM strings compare correctly lexicographically.
    if '06:30' <= cur_time <= '13:00':
        data = pdr.get_quote_yahoo(ticker_list)
        c = conn.cursor()
        # One row per symbol: (date, time, symbol, price).
        for symbol, price in data.get('price').items():
            c.execute("INSERT INTO QUOTES VALUES (?, ?, ?, ?)",
                      (cur_date, cur_time, symbol, price))
        conn.commit()
# Poll once a minute until interrupted; close the DB connection on Ctrl-C.
try:
    while True:
        get_data(tickers)
        time.sleep(60)
except KeyboardInterrupt:
    conn.close()
| StarcoderdataPython |
3403246 | <gh_stars>10-100
#!/usr/bin/python
PLOTTER='matplotlib'
#PLOTTER='gnuplot'
COLORS = {"broken": "#FF3F3F",
"good": "#7FFF7F",
"---": "#CFCFCF",
"test": "#FFFF7F",
"missing1": "#FFFF7F",
"missing2": "#FFFF7F",
"failed": "#FF3F3F",
"succeeded": "#7FFF7F",
"NIY": "#CFCFCF",
"undefined": "#CFCFCF",
"default": "#FFFFFF",
True:"#FFFFFF",
False:"#FF3F3F"}
# The format of simulators is a dict of dictionaries.
# The keys are the simulator identification's name. This is a unique
# name that is displayed as headline in the index table.
# The simulator references the simulator type required by the parttest.
# The command tells how to invoke it and the spice file syntax
# The values are either full paths or just the executable that is in
# the PATH.
# The command can contain spaces to facilitate the use of wrapper scripts
# e.g. '/usr/local/bin/spice-wrapper gnucap'
# The folder value defines the storage of the result files
# e.g. [PARTNAME]/[folder]/index.html
SIMULATORS = {
'ngspice_25': {'simulator':'ngspice',
'command': 'ngspice',
'options': '-r X -b', ## -r X suppresses the "no plot" error
'folder': 'ng25'},
'gnucap_0.35': {'simulator':'gnucap',
'command': '/usr/bin/gnucap',
'options': '-b',
'folder': 'gc0.35'}
}
| StarcoderdataPython |
def verifica(num):
    """Return "max min" of the space-separated integers in *num*.

    Preserves the original behaviour of returning '0 0' for an empty or
    whitespace-only input (the manual scan left both counters at 0).
    """
    values = [int(token) for token in num.split()]
    if not values:
        return '0 0'
    # Built-in max/min replace the hand-rolled first-element bookkeeping.
    return f'{max(values)} {min(values)}'
# return teste
# Quick manual checks; expected values shown as the second print argument
# or in the trailing comment.
print(verifica('4 5 29 54 4 0 -214 542 -64 1 -3 6 -6'), '542 -214')
print(verifica('1 2 3 4 5'))
print(verifica("1 2 -3 4 5")) # return "5 -3"
print(verifica("1 9 3 4 -5")) # return "9 -5"
8174574 | import os
from setuptools import setup, find_packages
import versioneer
# Runtime dependencies of dask-saturn.
install_requires = ["distributed", "requests", "cryptography"]

setup(
    name="dask-saturn",
    # Version and build commands are managed by versioneer from git tags.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    maintainer="Saturn Cloud Developers",
    maintainer_email="<EMAIL>",
    license="BSD-3-Clause",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: BSD License",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: System :: Distributed Computing",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    keywords="dask saturn cloud distributed cluster",
    description="Dask Cluster objects in Saturn Cloud",
    # README.md is optional (e.g. when building from an sdist without it).
    long_description=(open("README.md").read() if os.path.exists("README.md") else ""),
    long_description_content_type="text/markdown",
    url="https://saturncloud.io/",
    project_urls={
        "Documentation": "http://docs.saturncloud.io",
        "Source": "https://github.com/saturncloud/dask-saturn",
        "Issue Tracker": "https://github.com/saturncloud/dask-saturn/issues",
    },
    packages=find_packages(),
    # Ship the bundled YAML resources with the package.
    package_data={"dask_saturn": ["*.yaml"]},
    install_requires=install_requires,
    tests_require=["pytest"],
    zip_safe=False,
)
| StarcoderdataPython |
1834068 | <reponame>heltonricardo/URI
# For each of the given test cases read two ints a and b and print
# 26**a * 10**b (26 choices per letter position, 10 per digit position);
# the special case a == b == 0 prints 0.
for g in range(int(input())):
    a, b = [int(x) for x in input().split()]
    s = 26**a * 10**b if a or b else 0
    print(s)
| StarcoderdataPython |
6631892 | <gh_stars>1-10
from ...exceptions import BadRequestException
from ...utils import get_temp_dir
from biosimulators_utils.combine.data_model import (
CombineArchive,
CombineArchiveContent,
)
from biosimulators_utils.combine.io import (
CombineArchiveWriter,
)
from biosimulators_utils.sedml.data_model import (
SedDocument,
Model,
ModelAttributeChange,
OneStepSimulation,
SteadyStateSimulation,
UniformTimeCourseSimulation,
Algorithm,
AlgorithmParameterChange,
Task,
DataGenerator,
Variable,
Report,
Plot2D,
Plot3D,
DataSet,
Curve,
Surface,
AxisScale,
)
from biosimulators_utils.sedml.io import (
SedmlSimulationWriter,
)
import connexion
import flask
import os
import requests
import requests.exceptions
import src.utils
import werkzeug.datastructures # noqa: F401
import werkzeug.wrappers.response # noqa: F401
def handler(body, files=None):
    ''' Create a COMBINE/OMEX archive.

    Args:
        body (:obj:`dict`): dictionary with schema ``CombineArchiveSpecsAndFiles`` with the
            specifications of the COMBINE/OMEX archive to create
        files (:obj:`list` of :obj:`werkzeug.datastructures.FileStorage`, optional): files (e.g., SBML
            file)

    Returns:
        :obj:`werkzeug.wrappers.response.Response` or :obj:`str`: response with COMBINE/OMEX
            archive or a URL to a COMBINE/OMEX archive
    '''
    download = body.get('download', False)
    archive_specs = body['specs']

    # NOTE(review): the `files` parameter is immediately shadowed by re-reading
    # the uploads from the active connexion request -- confirm intentional.
    files = connexion.request.files.getlist('files')

    # create temporary working directory
    temp_dirname = get_temp_dir()

    # create temporary files for archive
    archive_dirname = os.path.join(temp_dirname, 'archive')
    archive_filename = os.path.join(temp_dirname, 'project.omex')

    # initialize archive
    archive = CombineArchive()

    # build map from model filenames to file objects
    filename_map = {
        file.filename: file
        for file in files
    }

    # add files to archive; each entry is either an inline SED-ML spec, an
    # uploaded file, or a URL to download
    for content in archive_specs['contents']:
        content_type = content['location']['value']['_type']
        if content_type == 'SedDocument':
            sed_doc = export_sed_doc(content['location']['value'])

            # save SED document to file
            try:
                SedmlSimulationWriter().run(
                    sed_doc,
                    os.path.join(archive_dirname, content['location']['path']),
                    validate_models_with_languages=False)
            except ValueError as exception:
                raise BadRequestException(
                    title='`{}` does not contain a configuration for a valid SED-ML document.'.format(
                        content['location']['value']),
                    instance=exception,
                )
        elif content_type == 'CombineArchiveContentFile':
            file = filename_map.get(
                content['location']['value']['filename'], None)
            if not file:
                raise BadRequestException(
                    title='File with name `{}` was not uploaded'.format(
                        content['location']['value']['filename']),
                    instance=ValueError(),
                )

            filename = os.path.join(archive_dirname,
                                    content['location']['path'])
            # make intermediate directories for nested archive paths
            if not os.path.isdir(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            file.save(filename)
        elif content_type == 'CombineArchiveContentUrl':
            filename = os.path.join(archive_dirname,
                                    content['location']['path'])
            if not os.path.isdir(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))

            content_url = content['location']['value']['url']
            try:
                response = requests.get(content_url)
                response.raise_for_status()
            except requests.exceptions.RequestException as exception:
                title = 'COMBINE/OMEX archive content could not be loaded from `{}`'.format(
                    content_url)
                raise BadRequestException(
                    title=title,
                    instance=exception,
                )
            with open(filename, 'wb') as file:
                file.write(response.content)
        else:
            raise BadRequestException(
                title='Content of type `{}` is not supported'.format(
                    content_type),
                instance=NotImplementedError('Invalid content')
            )  # pragma: no cover: unreachable due to schema validation

        content = CombineArchiveContent(
            location=content['location']['path'],
            format=content['format'],
            master=content['master'],
        )
        archive.contents.append(content)

    # package COMBINE/OMEX archive
    CombineArchiveWriter().run(archive, archive_dirname, archive_filename)

    if download:
        return flask.send_file(archive_filename,
                               mimetype='application/zip',
                               as_attachment=True,
                               attachment_filename='project.omex')

    else:
        # save COMBINE/OMEX archive to S3 bucket
        archive_url = src.utils.save_file_to_s3_bucket(archive_filename, public=True)

        # return URL for archive in S3 bucket
        return archive_url
def export_sed_doc(sed_doc_specs):
    """ Export the specifications of SED document to SED-ML

    Args:
        sed_doc_specs (``SedDocument``): dictionary conforming to the
            ``SedDocument`` API schema

    Returns:
        :obj:`SedDocument`

    Raises:
        :obj:`BadRequestException`: if a spec references an unknown
            model/simulation/task/data generator, or uses an unsupported type
    """
    sed_doc = SedDocument(
        level=sed_doc_specs['level'],
        version=sed_doc_specs['version'],
    )

    # add models to SED document
    model_id_map = {}
    for model_spec in sed_doc_specs['models']:
        model = Model(
            id=model_spec.get('id'),
            name=model_spec.get('name', None),
            language=model_spec.get('language'),
            source=model_spec.get('source'),
        )
        sed_doc.models.append(model)
        model_id_map[model.id] = model

        for change_spec in model_spec['changes']:
            change = ModelAttributeChange(
                target=change_spec.get('target').get('value'),
                new_value=change_spec.get('newValue'),
            )
            model.changes.append(change)
            for ns in change_spec.get('target').get('namespaces', []):
                change.target_namespaces[ns.get('prefix', None)] = ns['uri']

    # add simulations to SED document
    simulation_id_map = {}
    for sim_spec in sed_doc_specs['simulations']:
        if sim_spec['_type'] == 'SedOneStepSimulation':
            sim = OneStepSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
                step=sim_spec.get('step'),
            )
        elif sim_spec['_type'] == 'SedSteadyStateSimulation':
            sim = SteadyStateSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
            )
        elif sim_spec['_type'] == 'SedUniformTimeCourseSimulation':
            sim = UniformTimeCourseSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
                initial_time=sim_spec.get('initialTime'),
                output_start_time=sim_spec.get('outputStartTime'),
                output_end_time=sim_spec.get('outputEndTime'),
                number_of_steps=sim_spec.get('numberOfSteps'),
            )
        else:
            raise BadRequestException(
                title='Simulations of type `{}` are not supported'.format(
                    sim_spec['_type']),
                instance=NotImplementedError('Invalid simulation')
            )  # pragma: no cover: unreachable due to schema validation

        alg_spec = sim_spec.get('algorithm')
        sim.algorithm = Algorithm(kisao_id=alg_spec.get('kisaoId'))
        for change_spec in alg_spec.get('changes'):
            sim.algorithm.changes.append(
                AlgorithmParameterChange(
                    kisao_id=change_spec.get('kisaoId'),
                    new_value=change_spec.get('newValue'),
                )
            )

        sed_doc.simulations.append(sim)
        simulation_id_map[sim.id] = sim

    # add tasks to SED document; each task references a model + simulation
    # that must already have been declared above
    task_id_map = {}
    for task_spec in sed_doc_specs['tasks']:
        if task_spec['_type'] == 'SedTask':
            model_id = task_spec.get('model').get('id')
            sim_id = task_spec.get('simulation').get('id')
            model = model_id_map.get(model_id, None)
            sim = simulation_id_map.get(sim_id, None)

            if not model:
                raise BadRequestException(
                    title='Model `{}` for task `{}` does not exist'.format(
                        model_id, task_spec.get('id')),
                    instance=ValueError('Model does not exist'),
                )
            if not sim:
                raise BadRequestException(
                    title='Simulation `{}` for task `{}` does not exist'.format(
                        sim_id, task_spec.get('id')),
                    instance=ValueError('Simulation does not exist'),
                )

            task = Task(
                id=task_spec.get('id'),
                name=task_spec.get('name', None),
                model=model,
                simulation=sim,
            )
        else:
            raise BadRequestException(
                title='Tasks of type `{}` are not supported'.format(
                    task_spec['_type']),
                instance=NotImplementedError('Invalid task')
            )  # pragma: no cover: unreachable due to schema validation

        sed_doc.tasks.append(task)
        task_id_map[task.id] = task

    # add data generators to SED document
    data_gen_id_map = {}
    for data_gen_spec in sed_doc_specs['dataGenerators']:
        data_gen = DataGenerator(
            id=data_gen_spec.get('id'),
            name=data_gen_spec.get('name', None),
            math=data_gen_spec.get('math'),
        )

        for var_spec in data_gen_spec['variables']:
            task_id = var_spec.get('task').get('id')
            task = task_id_map.get(task_id, None)

            if not task:
                raise BadRequestException(
                    title='Task `{}` for variable `{}` does not exist'.format(
                        task_id, var_spec.get('id')),
                    instance=ValueError('Task does not exist'),
                )

            var = Variable(
                id=var_spec.get('id'),
                name=var_spec.get('name', None),
                task=task,
                symbol=var_spec.get('symbol', None),
            )
            target_spec = var_spec.get('target', None)
            if target_spec:
                var.target = target_spec['value']
                for ns in target_spec.get('namespaces', []):
                    var.target_namespaces[ns.get('prefix', None)] = ns['uri']
            data_gen.variables.append(var)

        sed_doc.data_generators.append(data_gen)
        data_gen_id_map[data_gen.id] = data_gen

    # add outputs to SED document
    for output_spec in sed_doc_specs['outputs']:
        if output_spec['_type'] == 'SedReport':
            output = Report(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for data_set_spec in output_spec['dataSets']:
                data_gen_id = data_set_spec['dataGenerator']['id']
                data_gen = data_gen_id_map.get(
                    data_gen_id, None)
                if not data_gen:
                    raise BadRequestException(
                        title='Data generator `{}` for output `{}` does not exist'.format(
                            data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                data_set = DataSet(
                    id=data_set_spec.get('id'),
                    name=data_set_spec.get('name', None),
                    label=data_set_spec.get('label', None),
                    data_generator=data_gen,
                )
                output.data_sets.append(data_set)

        elif output_spec['_type'] == 'SedPlot2D':
            output = Plot2D(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for curve_spec in output_spec['curves']:
                x_data_gen_id = curve_spec['xDataGenerator']['id']
                y_data_gen_id = curve_spec['yDataGenerator']['id']
                x_data_gen = data_gen_id_map.get(x_data_gen_id, None)
                y_data_gen = data_gen_id_map.get(y_data_gen_id, None)
                if not x_data_gen:
                    raise BadRequestException(
                        title='X data generator `{}` for curve `{}` does not exist'.format(
                            x_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not y_data_gen:
                    raise BadRequestException(
                        title='Y data generator `{}` for curve `{}` does not exist'.format(
                            y_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                # NOTE(review): axis scales are read from the plot-level spec
                # (`output_spec`), not per-curve (`curve_spec`) -- confirm
                # this matches the API schema.
                curve = Curve(
                    id=curve_spec.get('id'),
                    name=curve_spec.get('name', None),
                    x_data_generator=x_data_gen,
                    y_data_generator=y_data_gen,
                    x_scale=AxisScale[output_spec['xScale']],
                    y_scale=AxisScale[output_spec['yScale']],
                )
                output.curves.append(curve)

        elif output_spec['_type'] == 'SedPlot3D':
            output = Plot3D(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for surface_spec in output_spec['surfaces']:
                x_data_gen_id = surface_spec['xDataGenerator']['id']
                y_data_gen_id = surface_spec['yDataGenerator']['id']
                z_data_gen_id = surface_spec['zDataGenerator']['id']
                x_data_gen = data_gen_id_map.get(x_data_gen_id, None)
                y_data_gen = data_gen_id_map.get(y_data_gen_id, None)
                z_data_gen = data_gen_id_map.get(z_data_gen_id, None)
                if not x_data_gen:
                    raise BadRequestException(
                        title='X data generator `{}` for surface `{}` does not exist'.format(
                            x_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not y_data_gen:
                    raise BadRequestException(
                        title='Y data generator `{}` for surface `{}` does not exist'.format(
                            y_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not z_data_gen:
                    # Fixed copy-paste bug: this message previously said
                    # "X data generator" for the missing Z generator.
                    raise BadRequestException(
                        title='Z data generator `{}` for surface `{}` does not exist'.format(
                            z_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                # NOTE(review): scales come from the plot-level spec, not
                # per-surface -- confirm against the API schema.
                surface = Surface(
                    id=surface_spec.get('id'),
                    name=surface_spec.get('name', None),
                    x_data_generator=x_data_gen,
                    y_data_generator=y_data_gen,
                    z_data_generator=z_data_gen,
                    x_scale=AxisScale[output_spec['xScale']],
                    y_scale=AxisScale[output_spec['yScale']],
                    z_scale=AxisScale[output_spec['zScale']],
                )
                output.surfaces.append(surface)

        else:
            raise BadRequestException(
                title='Outputs of type `{}` are not supported'.format(
                    output_spec['_type']),
                instance=NotImplementedError('Invalid output')
            )  # pragma: no cover: unreachable due to schema validation

        sed_doc.outputs.append(output)

    return sed_doc
| StarcoderdataPython |
5010640 | <filename>core/base_train.py
import tensorflow as tf
from misc.utils import Summarizer
class BaseTrain:
    """Skeleton trainer for TF1 graph-mode models.

    Subclasses implement :meth:`train_epoch` and :meth:`test_epoch`; this base
    class wires up the session, the summary writer, and variable
    initialisation, and drives the epoch loop.
    """

    def __init__(self, sess, model, config, logger):
        self.model = model
        self.logger = logger
        self.summarizer = Summarizer(sess, config)
        self.config = config
        self.sess = sess
        # Initialise global AND local variables in a single grouped op.
        # Must run after the full model graph has been constructed.
        self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        self.sess.run(self.init)

    def train(self):
        # Run `config.epoch` rounds of training followed by evaluation.
        for cur_epoch in range(self.config.epoch):
            self.logger.info('epoch: {}'.format(int(cur_epoch)))
            self.train_epoch()
            self.test_epoch()

    def train_epoch(self):
        """
        implement the logic of one training epoch:
        - loop over the number of iterations in the config and call the train step
        - add any summaries you want using the summarizer
        """
        raise NotImplementedError

    def test_epoch(self):
        """
        implement the logic of one evaluation epoch:
        - run the tensorflow session on the evaluation data
        - summarize / log any metrics you need
        """
        raise NotImplementedError
| StarcoderdataPython |
9656277 | <filename>liBlog/visitor/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import Visitor, BannedIP, UntrackedUserAgent
class VisitorAdmin(admin.ModelAdmin):
    """Admin list view for Visitor records, searchable by IP, UA and URL."""
    search_fields = ['ip_address', 'user_agent', 'url']
    list_display = ['ip_address', 'user', 'user_agent', 'url', 'page_views',
                    'last_update', 'session_start']


# Register the visitor-tracking models so they appear in the Django admin.
admin.site.register(Visitor, VisitorAdmin)
admin.site.register(BannedIP)
admin.site.register(UntrackedUserAgent)
12835290 | from django.urls import path
from django.conf.urls import url
from django.urls import include
from api.seq import views
# URL routes for the seq API: the chipseq sub-application plus three flat
# data endpoints served directly by this app's views.
urlpatterns = [
    path('chipseq/', include('api.seq.chipseq.urls')),
    path('counts', views.counts, name='counts'),
    path('mapped', views.mapped, name='mapped'),
    path('type', views.data_type, name='type'),
]
| StarcoderdataPython |
5106170 | <gh_stars>0
# <NAME> U Department of Astronomy and Astrophysics NYC 2016
# <EMAIL>
#--[DESCRIPTION]---------------------------------------------------------#
'''
Date: May 2016
Handeler for twitter json text
'''
#--[PROGRAM-OPTIONS]------------------------------------------------------#
import json
import nltk
import os
import re
import socket
import string
import sys
from numpy.random import ranf
from datetime import datetime
from time import gmtime
from time import sleep
from time import strftime
from traceback import format_exc
from Udp_Client import Report
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Point NLTK at the shared corpus directory and define I/O paths.
nltk.data.path.append('/root/SHARED/nltk_data/')
OUT_PUT_PATH = '/root/SHARED/Tweet_Output/'
JSON_PATH = '/root/SHARED/Tweets/'
OUTPUT_LIST = []
#--[PROGRAM-OPTIONS]------------------------------------------------------#
# Hand-curated junk tokens (chat abbreviations, two-letter fragments, etc.)
# to discard during cleaning, in addition to the NLTK stop words.
general = ['gw', 'le', 'lo', 'll', 'lm', 'li', 'tn', 'tl', 'ls', 'th',
           'ti', 'te', 'do', 'dj', 'yo', 'ya', 'dg', 'yb', 'da', 'dy',
           'uy', 'ys', 'ahaha', 'dp', 'pffttt', 'l', 'btw', 'ea', 'et',
           'rt', 'ul', 'rf', 'rm', 'ro', 'rj', 'wd', 'omg', 'ba', 'wa',
           'ju', 'bn', 'wk', 'bi', 'wi', 'bk', 'wtf', 'bs', 'wy', 'om',
           'oa', 'uni', 'ck', 'vid', 'cl', 'xc', 'ca', 'cf', 'cr', 'pr',
           'pp', 'pa', 'pi', 'tk', 'hr', 'hi', 'ha', 'md', 'ma', 'ml',
           'mi', 'us', 'mt', 'mv', 'ms', 'mr', 'ue', 'ae', 'ad', 'ak', 'vn',
           'ay', 'vr', 'ar', 'ia', 'ie', 'ig', 'nb', 'ny', 'nt', 'fr', 'ft',
           'fu', 'fa', 'fd', 'fe', 'fi', 'fl', 'sfeh', 'ki', 'kn', 'sk', 'kp',
           'sn', 'sl', 'sf', 'nd', 'lk', 'gd', 'like', 'this']
punctuation = list(string.punctuation)
# Upper/Title-case variants of the junk and stop-word lists so matching is
# effectively case-insensitive.
general_upper = [word.upper() for word in general]
general_cap = [word.title() for word in general]
# Round-trip the NLTK stop-word list through JSON to flatten it into a plain
# comma-separated string, then split back into a list.
stop = json.dumps(stopwords.words('english'))
stop = stop.replace('[', '').replace('"', '').replace(']', '').replace(' ', '')
stop = stop.split(',')
stop_upper = [word.upper() for word in stop]
stop_cap = [word.title() for word in stop]
# Single letters are junk, except the words "a"/"A"/"I"/"i".
letters = [letter for letter in
           string.ascii_lowercase +
           string.ascii_uppercase
           if letter not in
           ['a', 'A', 'I', 'i']]
# Numeric tokens 0-9999 and their tenth-scaled decimal forms.
nums = []
for i in range(10000):
    nums.append(str(i))
    nums.append(str(i / 1e1))
# Master blacklist consulted by Clean_List_of_Sentence.
all_stops = general + general_upper + general_cap + letters + punctuation + stop + nums
# Regex fragment matching simple ASCII emoticons (eyes / optional nose / mouth).
emoticons_str = r"""
    (?:
        [:=;] # Eyes
        [oO\-]? # Nose (optional)
        [D\)\]\(\]/\\OpP] # Mouth
    )"""
# Alternatives for the tweet tokenizer, ordered so that structured tokens
# (HTML, mentions, hashtags, URLs, numbers) win over plain words.
regex_str = [
    emoticons_str,
    r'<[^>]+>',  # HTML tags
    r'(?:@[\w_]+)',  # @-mentions
    r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)",  # hash-tags
    # URLs
    r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+',
    r'(?:(?:\d+,?)+(?:\.?\d+)?)',  # numbers
    r"(?:[a-z][a-z'\-_]+[a-z])",  # words with - and '
    r'(?:[\w_]+)',  # other words
    r'(?:\S)'  # anything else
]
# Compile once at import: full tokenizer and an anchored emoticon matcher.
tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^' + emoticons_str + '$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
    """Split raw tweet text into word tokens via NLTK's default tokenizer."""
    return word_tokenize(s)
def preprocess(s, lowercase=True):
    """Tokenize *s* and (optionally) lower-case every token.

    Emoticon tokens are left untouched so that case-sensitive mouths
    (e.g. ":D" vs ":d") are preserved.
    """
    tokens = tokenize(s)
    if not lowercase:
        return tokens
    return [tok if emoticon_re.search(tok) else tok.lower() for tok in tokens]
def Clean_List_of_Sentence(tweet):
    """Reduce a list of tweet tokens to lower-cased "keeper" words.

    A token survives if it is not in the module-level ``all_stops``
    blacklist and if, after dropping every character that accounts for at
    least half of the token (a crude elongation/spam heuristic, e.g.
    "looool"), something non-empty remains that is still not a stop word.

    Args:
        tweet: list of token strings (typically the output of ``preprocess``).

    Returns:
        list of cleaned, lower-cased words.

    NOTE(review): the original version also contained disabled filters
    (punctuation stripping whose ``str.replace`` results were discarded --
    a no-op since strings are immutable -- and ``pass``-only branches for
    hashtags/mentions/URLs and digit characters).  That dead code is
    removed here; runtime behaviour is unchanged.  If punctuation
    stripping was meant to be active, it must be reinstated by assigning
    the ``replace`` result back to the token.
    """
    keepers = []
    for word in tweet:
        # Drop blacklisted tokens and empty strings outright.
        if word.lower() in all_stops:
            continue
        if word == '':
            continue
        # Rebuild the word, skipping any character that makes up at least
        # half of it (case-insensitively).  Note this silently drops all
        # 1- and 2-character words, matching the original behaviour.
        new_word = ''
        word_len = len(word)
        for letter in word:
            if word.lower().count(letter.lower()) >= word_len // 2:
                continue
            new_word += letter
        # The reduced form may itself be a stop word.
        if new_word in all_stops:
            continue
        if new_word:
            keepers.append(new_word.lower())
    return keepers
def getNetworkIp():
    """Return this machine's LAN IP address as a string.

    Creates a UDP socket with broadcast enabled and "connects" it to the
    broadcast address; the kernel then binds the socket to the outgoing
    interface, whose address is read back.  No packets are actually sent.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        s.connect(('<broadcast>', 0))
        return str(s.getsockname()[0])
    finally:
        # The original leaked the socket; always release the descriptor.
        s.close()
# ANSI colour indices; each is added to 30 to form an SGR foreground code.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)


def ct(text, colour=WHITE):
    """Return *text* wrapped in bold ANSI colour escape sequences."""
    return "\x1b[1;{0}m{1}\x1b[0m".format(30 + colour, text)
# Controller address this worker reports to / pulls work from.
host = "192.168.1.102"
port = 50000
server_address = (host, port)
hostname = socket.gethostname()
# jobs_done: lifetime counter; report_jobs: counter since the last
# successful Report() to the controller.
jobs_done = 0
report_jobs = 0
last_tweet = None
# Random 0-2 s stagger so a fleet of workers does not start simultaneously.
_slp_time = int(2. * ranf())
os.system("clear")
print ct(hostname, GREEN)
print ct("Starting after", GREEN), ct(str(_slp_time), YELLOW), ct("seconds...", GREEN)
# Visible countdown, one second per tick.
for i in range(_slp_time):
    sys.stdout.write(ct("\rStarting in : ", GREEN) + ct(str(_slp_time - i), YELLOW) + ct(" seconds", GREEN))
    sys.stdout.flush()
    sleep(1)
print ct("\nStarting Now\n", GREEN)
sleep(1 + ranf())
# Worker main loop (Python 2): connect to the controller, receive a tweet
# file name, clean each tweet's tokens, log buzzword hits, and report the
# processed count back over UDP.
while True:
    # Start job.
    os.system('clear')
    os.system('cat /root/SHARED/start.txt')
    _dots = '.'
    ### MAKE SOCKET ###
    # Load socket.
    print "Loading socket"
    s = socket.socket()
    try:
        s.connect(server_address)
    except Exception as e:
        # Controller not up yet: back off briefly and retry from the top.
        sleep(ranf())
        sys.stdout.write("\rwaiting for connection" + _dots)
        sys.stdout.flush()
        _dots += '.'
        continue
    else:
        print "Connecting to", server_address
        _dots = '.'
    finally:
        print "Done."
    print "\n"
    ### RECIEVE FILE NAME ###
    # Recieve filename.
    file_name = s.recv(512)
    # NOTE(review): this loop never calls recv() again, so an empty first
    # read spins forever printing dots -- confirm intended behaviour.
    while file_name == '':
        sys.stdout.write("\rWaiting for filename" + _dots)
        sys.stdout.flush()
        _dots += '.'
        sleep(ranf())
    print '\n\t--> [RECIEVED] : [', file_name, ']\n'
    print '\t--> [CHECKING FILE]\n'
    # Accept only existing files whose names start with "20" (date-stamped).
    if not os.path.isfile(JSON_PATH + file_name):
        print '\t--> [BAD FILE]\n'
        continue
    elif not file_name.startswith(('20')):
        print '\t--> [BAD FILE]\n'
        continue
    ### LOAD INPUT FILE ###
    # Read in raw file (one JSON tweet per non-empty line).
    print '\t--> [LOADING INPUT FILE]\n'
    raw_tweets = []
    with open(JSON_PATH + file_name, "r") as f:
        for raw_tweet in f.readlines():
            if len(raw_tweet) > 1:
                raw_tweets.append(json.loads(raw_tweet))
    print '\t--> [INPUT FILE LOADED] : [', len(raw_tweets), '] Tweets \n'
    #### DELETE INPUT FILE FROM INBOX ###
    # Delete old file from SHARED drive (retry until it is gone).
    print '\t--> [DELETING INPUT FILE FROM INBOX]\n'
    while os.path.isfile(JSON_PATH + file_name):
        try:
            os.remove(JSON_PATH + file_name)
        except OSError as e:
            print '\t\t--> [ERROR][FILE NOT DELETED]'
        else:
            print '\t--> [DELETED]\n'
    ### BUZZ WORDS ###
    # Get buzwords (reloaded every job so edits take effect live).
    print "Getting Buzzwords"
    buzzwords = []
    with open('/root/SHARED/Tweet_Code/buzzword.txt', 'r') as f:
        for word in f.readlines():
            buzzwords.append(word.replace('\n', ''))
    print "Done."
    # Misc markers & values
    last_tweet_id = 0
    finished_tweets = []
    # Main loop for each tweet.
    for tweet in raw_tweets:
        # Check to see if this is a repeat.
        if tweet['id_str'] == last_tweet_id:
            print ct("\t--> [ last_tweet ]", YELLOW)
        # Words to keep.
        keepers = []
        # This tweet.
        #tweet = json.loads(tweet)
        # Process and clean.
        token = preprocess(tweet['text'].encode('ascii', 'ignore'))
        cleaned = Clean_List_of_Sentence(token)
        # TODO
        # Check each word against language specific dictionary.
        for word in cleaned:
            # English words must appear in the per-letter dictionary files;
            # non-English words are kept unconditionally.
            if tweet['lang'] == 'en':
                if word[0].encode("ascii", 'ignore') in string.ascii_lowercase:
                    with open('/root/SHARED/Tweet_Code/Words_By_Alpha/' + word[0].lower(), 'r') as word_check:
                        if word in word_check.read():
                            keepers.append(word)
                            finished_tweets.append(word)
            else:
                keepers.append(word)
                finished_tweets.append(word)
            ### BUZZ WORDS ###
            # Check if buzzword (any of exact / upper / lower case).
            _BUZZ_WORD = False
            if word in buzzwords:
                _BUZZ_WORD = True
            elif word.upper() in buzzwords:
                _BUZZ_WORD = True
            elif word.lower() in buzzwords:
                _BUZZ_WORD = True
            # If it is a buzz word: append the cleaned tokens and the full
            # tweet JSON to the per-word buzzword logs.
            if _BUZZ_WORD:
                os.system('cat /root/SHARED/buzz.txt')
                _buzwrd_path = '/root/SHARED/Tweet_Output/Buzzwords/' + word + '.txt'
                if not os.path.isfile(_buzwrd_path):
                    _buzmode = "w"
                else:
                    _buzmode = "a"
                with open(_buzwrd_path, mode=_buzmode) as buzzword:
                    buzzword.write(json.dumps(cleaned) + '\n')
                with open('/root/SHARED/Tweet_Output/Buzzwords/fulltweet/' + word + '.txt', mode=_buzmode) as buzzword:
                    buzzword.write(json.dumps(tweet) + '\n')
            # Add one to counter.
            jobs_done += 1
            report_jobs += 1
        # Save last tweet.
        last_tweet_id = tweet['id_str']
        # If there are words:
        if len(keepers):
            print tweet['lang'], keepers
    ### LOG OUTPUT ###
    # Write all clean words to disc.
    with open(OUT_PUT_PATH + 'Clean_Words/' + hostname + '.txt', "a") as clean_words:
        clean_words.write('\n' + json.dumps(finished_tweets))
    ### REPORT ###
    # Report stats to Controller.
    try:
        Report(report_jobs)
    # If error then skip
    except:
        print ' --> ' + ct('[CAN NOT REPORT TO UDP_Server]', RED)
    # If report worked:
    else:
        # If Report works then reset report_jobs.
        # Controller keeps active count.
        report_jobs = 0
    # Banner message.
    os.system('cat ~/SHARED/finished.txt')
| StarcoderdataPython |
import socket
# NOTE(review): msvcrt is Windows-only and unused below -- confirm it can
# be removed on non-Windows deployments.
import msvcrt

# Listen for UDP datagrams on localhost:20002 and echo them to stdout.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 20002))
try:
    while True:
        data, addr = sock.recvfrom(1024)
        print(data.decode())
finally:
    # Bug fix: sock.close() was placed after the infinite loop and could
    # never run; the finally block releases the socket on interrupt/exit.
    sock.close()
9634311 | <filename>app/barometer/migrations/0001_initial.py
# Generated by Django 3.1.4 on 2020-12-06 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Barometer`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Barometer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Readings kept to 2 decimal places: temperature/humidity up
                # to 999.99, pressure up to 9999.99.
                ('temperature', models.DecimalField(decimal_places=2, max_digits=5)),
                ('humidity', models.DecimalField(decimal_places=2, max_digits=5)),
                ('pressure_pha', models.DecimalField(decimal_places=2, max_digits=6)),
            ],
            options={
                'verbose_name': 'Barometer',
                'verbose_name_plural': 'Barometer',
            },
        ),
    ]
| StarcoderdataPython |
class Struct(object):
    """Recursively wraps a mapping so that its keys become attributes.

    Nested dicts become nested ``Struct`` instances; tuples/lists/sets/
    frozensets are rebuilt with their elements wrapped.  Falsy keys (e.g.
    the empty string) are silently skipped.
    """

    def __init__(self, data=None, **kwds):
        # Apply *data* first, then keyword overrides, mirroring dict-merge
        # semantics: a key present in both ends up with the kwds value.
        for source in (data or {}, kwds):
            for name, value in source.items():
                if name:
                    setattr(self, name, self._wrap(value))

    def _wrap(self, value):
        """Recursively convert dicts to Structs inside common containers."""
        if isinstance(value, (tuple, list, set, frozenset)):
            return type(value)(self._wrap(item) for item in value)
        if isinstance(value, dict):
            return Struct(value)
        return value

    def clone(self, **kwds):
        """Deep-copy this Struct, optionally overriding attributes via *kwds*."""
        snapshot = self.__to_dict__()
        return Struct(snapshot, **kwds)

    def __repr__(self):
        return "Struct: {0!r}".format(self.__dict__)

    def __len__(self):
        return len(vars(self))

    def __to_dict__(self):
        """Convert back to plain dicts.

        Note: container values come back as plain *lists* (tuples/sets are
        not round-tripped), matching the original behaviour.
        """
        plain = dict(self.__dict__)
        for key in list(plain):
            value = plain[key]
            if isinstance(value, Struct):
                plain[key] = value.__to_dict__()
            elif isinstance(value, (tuple, list, set, frozenset)):
                plain[key] = [item.__to_dict__() if isinstance(item, Struct) else item
                              for item in value]
        return plain
class StructDefault(Struct):
    """Struct variant that returns a fallback value for missing attributes."""

    def __init__(self, data=None, **kwds):
        super(StructDefault, self).__init__(data, **kwds)

    def __getattr__(self, item):
        # __getattr__ only fires for names NOT found through normal lookup,
        # so real wrapped attributes still resolve first.
        # NOTE(review): if `_default_` itself was never set, this recurses
        # (__getattr__('_default_') -> self._default_ -> ...) and raises
        # RecursionError -- confirm callers always provide `_default_`.
        return self._default_
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.