id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
186160 | #-------------------------------------------------------------------------------
# The evolution of a uniform, magnetized conducting fluid.
#-------------------------------------------------------------------------------
from math import *
from Spheral import *
from SpheralTestUtilities import *
from SpheralVisitDump import dumpPhysicsState
from findLastRestart import *
# Load the mpi module if we're parallel.
import loadmpi
mpi, procID, numProcs = loadmpi.loadmpi()
from GenerateNodeDistribution3d import *
from CubicNodeGenerator import GenerateCubicNodeDistribution
title("Dedner magnetic divergence test")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
# Default run parameters; any of these may be overridden on the command line.
commandLine(seed = "lattice",              # node seeding pattern for the generator
            n = 20,                        # nodes per side in x and y
            rho0 = 1.0,                    # initial mass density
            V0 = Vector3d(1.0, 1.0, 0.0),  # uniform initial velocity
            Bz = 1.0/sqrt(4*pi),           # uniform z component of B
            P0 = 6.0,                      # initial pressure
            nPerh = 1.3,                   # nodes per smoothing scale
            mu0 = 1.0,                     # magnetic permeability
            gamma = 5.0/3.0,               # ratio of specific heats
            r0 = 1.0/sqrt(8),              # radius of the perturbed-Bx region
            divBCleaner = 'none',          # div B cleaning scheme (see MHD setup below)
            mu = 1.0,                      # mean molecular weight
            Qlimiter = True,
            balsaraCorrection = False,
            epsilon2 = 1e-2,
            negligibleSoundSpeed = 1e-5,
            csMultiplier = 1e-4,
            # Smoothing-scale (H) limits.
            hmin = 1e-5,
            hmax = 1.0,
            hminratio = 0.05,
            HsmoothFraction = 0.0,
            cfl = 0.25,
            XSPH = True,
            epsilonTensile = 0.0,
            nTensile = 8,
            HEvolution = Hydro3d.HEvolutionType.IdealH,
            compatibleEnergy = False,
            gradhCorrection = True,
            limitIdealH = False,
            # Neighbor-finding options.
            neighborSearchType = Neighbor3d.NeighborSearchType.GatherScatter,
            numGridLevels = 20,
            topGridCellSize = 2.0,
            origin = Vector3d(0.0, 0.0, 0.0),
            # Run control.
            goalTime = 1.0,
            maxSteps = None,
            statsStep = 10,
            smoothIters = 0,
            sumForMassDensity = Hydro3d.MassDensityType.RigorousSumDensity,
            restoreCycle = None,
            graphics = False,
            )
def plotField(x, F, titleStr, filename):
    """Render a z=0 slice of the field F sampled at node positions x.

    Vector fields are drawn as a quiver plot; scalar fields as a contour
    plot with a colorbar.  The figure is saved to *filename*.
    """
    import pylab as plt
    import griddata as gd
    import numpy
    plt.ion()
    plt.clf()
    ex = Vector3d(1, 0, 0)
    ey = Vector3d(0, 1, 0)
    # Restrict attention to nodes lying (numerically) in the z=0 plane.
    nInternal = len(x.internalValues())
    sliceIDs = [i for i in xrange(nInternal) if abs(x[i].z) < 1e-8]
    px = numpy.array([x[i].dot(ex) for i in sliceIDs])
    py = numpy.array([x[i].dot(ey) for i in sliceIDs])
    # Regular grid to interpolate the scattered SPH samples onto.
    ticks = plt.linspace(-0.5, 1.5, 50)
    gx, gy = plt.meshgrid(ticks, ticks)
    if isinstance(F, VectorField3d) or isinstance(F[0], Vector3d):
        u = numpy.array([F[i].dot(ex) for i in sliceIDs])
        v = numpy.array([F[i].dot(ey) for i in sliceIDs])
        plt.quiver(gx, gy,
                   gd.griddata(px, py, u, gx, gy),
                   gd.griddata(px, py, v, gx, gy))
    else:
        vals = numpy.array([F[i] for i in sliceIDs])
        plt.contour(gx, gy, gd.griddata(px, py, vals, gx, gy), 30)
        plt.colorbar()
    plt.title(titleStr)
    plt.savefig(filename)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel3d(BSplineKernel3d(), 1000)
WTPi = TableKernel3d(BSplineKernel3d(), 1000)
output("WT")
output("WTPi")
kernelExtent = WT.kernelExtent()
#-------------------------------------------------------------------------------
# A few derived variables.
#-------------------------------------------------------------------------------
nx = ny = n
# Make the z extent just thick enough to support the kernel footprint.
nz = int(2 * 2 * kernelExtent * nPerh)
nzx = 1.0*nz/nx
xmin = (-0.5, -0.5, -0.5*nzx)
xmax = (1.5, 1.5, 1.5*nzx)
# Specific thermal energy consistent with the gamma-law EOS.
u0 = P0 / ((gamma-1.0)*rho0)
# NOTE(review): this value is overwritten by the simName/dataDir assignments
# further below; this first assignment appears to be dead.
dataDir = "Dedner-divB-%ix%ix%i" % (n, n, n)
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist.  If not, create them.
#-------------------------------------------------------------------------------
# NOTE(review): nothing follows this banner here; the directory creation
# actually happens after the node setup below.
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS3d(gamma, mu)
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
# Perfectly conducting node list.
nodes = ConductingFluidNodeList("nodes", eos, WT, WTPi)
output("nodes")
nodes.HsmoothFraction = HsmoothFraction
nodes.XSPH = XSPH
nodes.nodesPerSmoothingScale = nPerh
nodes.epsilonTensile = epsilonTensile
nodes.nTensile = nTensile
nodes.hmin = hmin
nodes.hmax = hmax
nodes.hminratio = hminratio
output("nodes.HsmoothFraction")
output("nodes.nodesPerSmoothingScale")
output("nodes.epsilonTensile")
output("nodes.nTensile")
output("nodes.XSPH")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.hminratio")
#-------------------------------------------------------------------------------
# Construct the neighbor object.
#-------------------------------------------------------------------------------
neighbor1 = NestedGridNeighbor3d(nodes,
                                 neighborSearchType,
                                 numGridLevels,
                                 topGridCellSize,
                                 origin,
                                 kernelExtent)
nodes.registerNeighbor(neighbor1)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
x = nodes.positions()
v = nodes.velocity()
B = nodes.magneticInduction()
# Only generate and initialize nodes on a fresh start; a restart restores them.
if restoreCycle is None:
    from ParMETISDistributeNodes import distributeNodes3d
    generator = GenerateNodeDistribution3d(nx, ny, nz, rho0, seed,
                                           xmin = xmin,
                                           xmax = xmax,
                                           nNodePerh = nPerh,
                                           SPH = True)
    distributeNodes3d((nodes, generator))
    output("mpi.reduce(nodes.numInternalNodes, mpi.MIN)")
    output("mpi.reduce(nodes.numInternalNodes, mpi.MAX)")
    output("mpi.reduce(nodes.numInternalNodes, mpi.SUM)")
    # Set node specific thermal energies
    nodes.specificThermalEnergy(ScalarField3d("tmp", nodes, u0))
    # Set nodal magnetic inductions.
    # Bx is a compactly supported bump in cylindrical radius r/r0 < 1.
    r = [sqrt(xi.x**2 + xi.y**2) for xi in x.internalValues()]
    for nodeID in xrange(nodes.numInternalNodes):
        ri = r[nodeID]/r0
        if ri < 1.0:
            Bx = (ri**8 - 2*ri**4 + 1)/sqrt(4*pi)
        else:
            Bx = 0.0
        B[nodeID] = Vector3d(Bx, 0, Bz)
        v[nodeID] = V0
# Plot the B field configuration "before."
#plotField(x, B, 'B before div cleaning', 'B-before.png')
# plotField(x, [Bi.x for Bi in B.internalValues()], 'Bx before div cleaning', 'Bx-before.png')
# Jot down the analytic maximum divergence of B.  The expression for
# div B = dBx/dx + dBy/dy + dBz/dz is (16*x*r**2/r0**4)*((r/r0)**4 - 1).
#proj = Vector3d(1., 1., 0)
#rs = [xi.dot(proj) for xi in x.internalValues()]
#divBs = [(16*x[i].x*rs[i]**2/r0**4)*((rs[i]/r0)**4 - 1) for i in xrange(len(x.internalValues()))]
#maxDivB0 = max(divBs)
# Plot div B "before."
#plotField(x, divBs, 'div B before div cleaning', 'divB-before.png')
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist.  If not, create them.
#-------------------------------------------------------------------------------
simName = 'Dedner-divB-%ix%ix%i'%(n, n, nz)
dataDir = '/p/lscratcha/jnjohnso/' + simName
visitDir = dataDir + "/visit"
restartDir = dataDir + "/restart"
import os, sys
# Only rank 0 touches the filesystem; everyone else waits at the barrier.
if mpi.rank == 0:
    # On a fresh start, wipe any stale output from a previous run.
    if restoreCycle is None:
        import shutil
        if os.path.exists(visitDir):
            shutil.rmtree(visitDir)
        if os.path.exists(restartDir):
            shutil.rmtree(restartDir)
    if not os.path.exists(visitDir):
        os.makedirs(visitDir)
    if not os.path.exists(restartDir):
        os.makedirs(restartDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase3d()
output("db")
output("db.appendNodeList(nodes)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosities for the problem.
#-------------------------------------------------------------------------------
q = MonaghanGingoldViscosity3d(1.0, 0.75)
#q = PriceMonaghanDissipation(1.0, 1.0, 1.0, 0.75, 1.0)
##-------------------------------------------------------------------------------
## Construct the hydro physics object.
##-------------------------------------------------------------------------------
hydro = Hydro3d(WT, WTPi, q, compatibleEnergy, gradhCorrection)
hydro.cfl = cfl
hydro.HEvolution = HEvolution
hydro.sumForMassDensity = sumForMassDensity
hydro.HsmoothMin = hmin
hydro.HsmoothMax = hmax
#output("hydro")
#output("hydro.cfl")
#output("hydro.HEvolution")
#output("hydro.sumForMassDensity")
#output("hydro.HsmoothMin")
#output("hydro.HsmoothMax")
#output("hydro.kernel()")
#output("hydro.PiKernel()")
#output("hydro.valid()")
#-------------------------------------------------------------------------------
# Construct an MHD object.
#-------------------------------------------------------------------------------
mhd = MHD(WT, mu0)
# Map the command-line cleaner name to the MHD enumeration.  A dict lookup
# replaces the old if/elif chain and keeps the valid names in one place.
_cleanerTypes = {
    'none':       MHD.BDivergenceCleanerType.noCleaner,
    'hyperbolic': MHD.BDivergenceCleanerType.hyperbolicCleaner,
    'GreensFn':   MHD.BDivergenceCleanerType.GreensFnProjCleaner,
    'BiotSavart': MHD.BDivergenceCleanerType.BiotSavartProjCleaner,
}
try:
    mhd.divBCleaner = _cleanerTypes[divBCleaner]
except KeyError:
    # Fixed: the old message advertised 'hyperBolic', but the accepted
    # spelling is 'hyperbolic'.  Also use the py2/3-compatible raise form.
    raise ValueError("divBCleaner must be 'hyperbolic', 'GreensFn', 'BiotSavart', or 'none'.")
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
# The box is periodic in all three directions; each PeriodicBoundary pairs
# two opposing planes.
xPlane1 = Plane3d(Vector3d(-0.5, 0.0, 0.0), Vector3d( 1.0, 0.0, 0.0))
xPlane2 = Plane3d(Vector3d( 1.5, 0.0, 0.0), Vector3d(-1.0, 0.0, 0.0))
yPlane1 = Plane3d(Vector3d( 0.0,-0.5, 0.0), Vector3d( 0.0, 1.0, 0.0))
yPlane2 = Plane3d(Vector3d( 0.0, 1.5, 0.0), Vector3d( 0.0,-1.0, 0.0))
zPlane1 = Plane3d(Vector3d( 0.0, 0.0,-0.5*nzx), Vector3d( 0.0, 0.0, 1.0))
zPlane2 = Plane3d(Vector3d( 0.0, 0.0, 1.5*nzx), Vector3d( 0.0, 0.0,-1.0))
xbc = PeriodicBoundary3d(xPlane1, xPlane2)
ybc = PeriodicBoundary3d(yPlane1, yPlane2)
zbc = PeriodicBoundary3d(zPlane1, zPlane2)
hydro.appendBoundary(xbc)
hydro.appendBoundary(ybc)
hydro.appendBoundary(zbc)
mhd.appendBoundary(xbc)
mhd.appendBoundary(ybc)
mhd.appendBoundary(zbc)
#-------------------------------------------------------------------------------
# Construct a time integrator.
#-------------------------------------------------------------------------------
integrator = SynchronousRK2Integrator3d(db)
integrator.appendPhysicsPackage(hydro)
integrator.appendPhysicsPackage(mhd)
integrator.verbose = True
integrator.rigorousBoundaries = True
integrator.lastDt = 1e-3
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.havePhysicsPackage(mhd)")
output("integrator.valid()")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
#raw_input()
restartBaseName = '%s/%s'%(restartDir, simName)
control = SpheralController(integrator, WT,
                            statsStep = statsStep,
                            initializeMassDensity = True,
                            restartBaseName = restartBaseName)
output("control")
#print 'max |div B| (0):', maxDivB0
# Restore if desired.
if restoreCycle is not None:
    # restoreCycle == -1 means "resume from the most recent restart dump."
    if restoreCycle == -1:
        restoreCycle = findLastRestart(simName)
    control.loadRestartFile(restoreCycle)
else:
    dumpPhysicsState(integrator, simName, visitDir, dumpDerivatives = True)
output("integrator.dtGrowth")
# If we're using a projection scheme to clean div B, advance one step and
# read off our diagnostics.
if mhd.divBCleaner == MHD.BDivergenceCleanerType.GreensFnProjCleaner or \
   mhd.divBCleaner == MHD.BDivergenceCleanerType.BiotSavartProjCleaner:
    control.advance(control.time() + 1e-10, 1)
    maxDivB1 = max(mhd.maxDivB(), abs(mhd.minDivB()))
# Otherwise, go get 'em!
else:
    # Advance to goalTime in tenths, dumping restart/visit state each step.
    while control.time() < goalTime:
        dt = goalTime/10
        control.advance(min(goalTime, control.time() + dt), maxSteps)
        control.dropRestartFile()
        dumpPhysicsState(integrator, simName, visitDir, dumpDerivatives = True)
    maxDivB1 = max(mhd.maxDivB(), abs(mhd.minDivB()))
print 'max |div B| (1):', maxDivB1
# Plot the final field configuration (and its divergence).
#plotField(x, B, 'B after div cleaning', 'B-after.png')
#plotField(x, [Bi.x for Bi in B.internalValues()], 'Bx after div cleaning', 'Bx-after.png')
#plotField(x, nodes.magneticDivergence(), 'div B after div cleaning', 'divB-after.png')
| StarcoderdataPython |
35359 | <filename>code/src/main.py<gh_stars>100-1000
import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
def print_network(net):
    """Print the network structure followed by its trainable-parameter count."""
    total = sum(p.numel() for p in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
def print_setting(net, args):
    """Print a human-readable summary of the network and training options."""
    print('init this train:')
    print_network(net)
    # Each row is the positional arguments of one print() call, so the
    # output is byte-for-byte identical to spelling the calls out.
    summary = (
        ('training model:', args.model),
        ('scale:', args.scale),
        ('resume from ', args.resume),
        ('output patch size', args.patch_size),
        ('model setting: n_resblocks:', args.n_resblocks,
         'n_feats:', args.n_feats, 'block_feats:', args.block_feats),
        ('optimization setting: ', args.optimizer),
        ('total epochs:', args.epochs),
        ('lr:', args.lr, 'lr_decay at:', args.decay_type, 'decay gamma:', args.gamma),
        ('train loss:', args.loss),
        ('save_name:', args.save),
    )
    for row in summary:
        print(*row)
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)
if checkpoint.ok:
    # Fixed: the original rebound the imported modules `model` and `loss`
    # to instances, shadowing the imports; use distinct local names so the
    # modules remain accessible.
    loader = data.Data(args)
    net = model.Model(args, checkpoint)
    print_setting(net, args)
    # No loss function is needed when only evaluating.
    criterion = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, net, criterion, checkpoint)
    # Alternate train/test epochs until the trainer signals completion.
    while not t.terminate():
        t.train()
        t.test()
    checkpoint.done()
| StarcoderdataPython |
1729051 | # -*- coding: utf-8 -*-
# @Author : jjxu
# @time: 2019/1/17 10:23
from app.libs.redprint import Redprint
from app.validators.user_form import UserEmailForm
# Red-print (blueprint-like) grouping for client endpoints.
api = Redprint("client")

@api.route("/register", methods=["POST"])
def register():
    """Validate the posted registration form and acknowledge it."""
    # 1/0
    form = UserEmailForm().validate_for_api()
    print(form)
    return "register"
| StarcoderdataPython |
148003 | <reponame>globus-gladier/kanzus_client
from .flow_data_transfer import TransferFlow
from .flow_data_block_transfer import BlockTransferFlow
from .flow_stills import StillsFlow
from .flow_publish import PublishFlow
from .flow_prime import PrimeFlow
# Public flow classes re-exported by this package.
__all__ = ['TransferFlow',
           'BlockTransferFlow',
           'StillsFlow',
           'PublishFlow',
           'PrimeFlow']
| StarcoderdataPython |
4829316 | from __future__ import unicode_literals
import hashlib
import hmac
import re
import time
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
js_to_json,
int_or_none,
parse_iso8601,
try_get,
unescapeHTML,
update_url_query,
)
class ABCIE(InfoExtractor):
    """Extractor for news videos/audio embedded on abc.net.au/news pages."""
    IE_NAME = 'abc.net.au'
    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
        'info_dict': {
            'id': '5868334',
            'ext': 'mp4',
            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
        },
        'skip': 'this video has expired',
    }, {
        'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
        'md5': 'db2a5369238b51f9811ad815b69dc086',
        'info_dict': {
            'id': 'NvqvPeNZsHU',
            'ext': 'mp4',
            'upload_date': '20150816',
            'uploader': 'ABC News (Australia)',
            'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
            'uploader_id': 'NewsOnABC',
            'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
        },
        'add_ie': ['Youtube'],
        'skip': 'Not accessible from Travis CI server',
    }, {
        'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
        'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
        'info_dict': {
            'id': '6880080',
            'ext': 'mp3',
            'title': 'NAB lifts interest rates, following Westpac and CBA',
            'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
        },
    }, {
        'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Media metadata is embedded as an inline<Type>Data.push({...}) JS
        # call; the group 'type' distinguishes Video/Audio/YouTube embeds.
        mobj = re.search(
            r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
            webpage)
        if mobj is None:
            # No inline data: either the item expired (report its message)
            # or the page layout is unrecognized.
            expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
            if expired:
                raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
            raise ExtractorError('Unable to extract video urls')

        urls_info = self._parse_json(
            mobj.group('json_data'), video_id, transform_source=js_to_json)

        # Normalize: a single embedded object becomes a one-element list.
        if not isinstance(urls_info, list):
            urls_info = [urls_info]

        # YouTube embeds are delegated to the YouTube extractor.
        if mobj.group('type') == 'YouTube':
            return self.playlist_result([
                self.url_result(url_info['url']) for url_info in urls_info])

        formats = [{
            'url': url_info['url'],
            # Audio items carry no video codec.
            'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
            'width': int_or_none(url_info.get('width')),
            'height': int_or_none(url_info.get('height')),
            'tbr': int_or_none(url_info.get('bitrate')),
            'filesize': int_or_none(url_info.get('filesize')),
        } for url_info in urls_info]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
class ABCIViewIE(InfoExtractor):
    """Extractor for ABC iview (iview.abc.net.au) programs and livestreams."""
    IE_NAME = 'abc.net.au:iview'
    _VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'
    _GEO_COUNTRIES = ['AU']

    # ABC iview programs are normally available for 14 days only.
    _TESTS = [{
        'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',
        'md5': '67715ce3c78426b11ba167d875ac6abf',
        'info_dict': {
            'id': 'LE1927H001S00',
            'ext': 'mp4',
            'title': "Series 11 Ep 1",
            'series': "Gruen",
            'description': 'md5:52cc744ad35045baf6aded2ce7287f67',
            'upload_date': '20190925',
            'uploader_id': 'abc1',
            'timestamp': 1569445289,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_params = self._download_json(
            'https://iview.abc.net.au/api/programs/' + video_id, video_id)
        title = unescapeHTML(video_params.get('title') or video_params['seriesTitle'])
        # First playlist entry that is an actual program or livestream.
        stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))

        # Stream URLs must carry an 'hdnea' auth token, signed with
        # HMAC-SHA256 using the key of the Android tablet client.
        house_number = video_params.get('episodeHouseNumber') or video_id
        path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(
            int(time.time()), house_number)
        sig = hmac.new(
            b'android.content.res.Resources',
            path.encode('utf-8'), hashlib.sha256).hexdigest()
        token = self._download_webpage(
            'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)

        def tokenize_url(url, token):
            return update_url_query(url, {
                'hdnea': token,
            })

        # Probe qualities best-first and stop at the first one that yields
        # usable formats.  Fix: initialize `formats` so a playlist with no
        # matching HLS entry produces a clean "no formats" error from
        # _sort_formats instead of a NameError.
        formats = []
        for sd in ('1080', '720', 'sd', 'sd-low'):
            sd_url = try_get(
                stream, lambda x: x['streams']['hls'][sd], compat_str)
            if not sd_url:
                continue
            formats = self._extract_m3u8_formats(
                tokenize_url(sd_url, token), video_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
            if formats:
                break
        self._sort_formats(formats)

        subtitles = {}
        src_vtt = stream.get('captions', {}).get('src-vtt')
        if src_vtt:
            subtitles['en'] = [{
                'url': src_vtt,
                'ext': 'vtt',
            }]

        is_live = video_params.get('livestream') == '1'
        if is_live:
            title = self._live_title(title)

        return {
            'id': video_id,
            'title': title,
            'description': video_params.get('description'),
            'thumbnail': video_params.get('thumbnail'),
            'duration': int_or_none(video_params.get('eventDuration')),
            'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
            'series': unescapeHTML(video_params.get('seriesTitle')),
            'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
            'season_number': int_or_none(self._search_regex(
                r'\bSeries\s+(\d+)\b', title, 'season number', default=None)),
            'episode_number': int_or_none(self._search_regex(
                r'\bEp\s+(\d+)\b', title, 'episode number', default=None)),
            'episode_id': house_number,
            'uploader_id': video_params.get('channel'),
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
        }
| StarcoderdataPython |
189382 | # 字符串 str= 'reverse this string', 请使用三种方法翻转字符串。
# Exercise: reverse the string 'reverse this string' in three different ways.
# Fixed: the original bound the builtin name `str`, shadowing the type.

# Method 1: slicing with a negative step.
text = 'reverse this string'
print(text[::-1])

# Method 2: walk the indices from the end back to the start.
text = 'reverse this string'
length = len(text)
result = ''
for i in range(length, 0, -1):
    result += text[i - 1]
print(result)

# Method 3: concatenate the characters yielded by reversed().
text = 'reverse this string'
result = ''
for ch in reversed(text):
    result += ch
print(result)

# Reviewer feedback (translated from the original Chinese):
# The last method could simply use ''.join(...) -- even simpler.
# The other two methods are handled well too.
| StarcoderdataPython |
49077 | <filename>TAO/Firewall/BUZZDIRECTION/BUZZ_1120/LP/Scripts/Lp_UserInterface.py
import cmd
import os
import Lp_FrontEndFunctions
import Lp_CursesDriver
import Lp_XmlParser
import Lp_RpcDispatcher
import string
import sys
import socket
import textwrap
import time
import subprocess
import signal
import platform
import threading
from datetime import datetime
# Loopback UDP ports used to talk between the LP front end and back end.
BLOCKER_PORT = 1340        # printBlocker socket: pause/resume backend printing
RPC_DISPATCH_PORT = 1339   # RPC dispatcher: receives 'RECV'/control strings
PRINT_PORT = 1338          # PrintThread listens here for backend output
BACKEND_PORT = 1337        # backend command port (e.g. '!stat' requests)
FRONTEND_PORT = 1336       # front-end port handed to the '!port' command
# Prints anything received from the backend on port 1338.
class PrintThread(threading.Thread):
    """Background thread that echoes backend output to stdout and the log.

    'RECV' messages are forwarded to the RPC dispatcher instead of being
    printed; the sentinel '!!#QUIT' terminates the thread.
    """
    def __init__(self,processor,lFile):
        self.sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        self.sock.bind(('127.0.0.1',PRINT_PORT))
        # Short timeout so the loop can poll rather than block forever.
        self.sock.settimeout(.1)
        self.cmdLoop=processor
        self.log=lFile
        threading.Thread.__init__(self)
    def run(self):
        while 1:
            try:
                stringIn,addr=self.sock.recvfrom(1024)
                if stringIn.find('RECV')>=0:
                    # RPC results go to the dispatcher, not the screen.
                    self.sock.sendto(stringIn,('127.0.0.1',RPC_DISPATCH_PORT))
                elif stringIn == '!!#QUIT':
                    break
                else:
                    print stringIn,
                    self.log.write(stringIn)
                    sys.stdout.flush()
            # NOTE(review): bare except — presumably meant for the 0.1s
            # socket.timeout, but it swallows everything; confirm intent.
            except:
                continue
        self.sock.close()
#Parses input and executes the appropriate command.
class LpInputProcessing(cmd.Cmd):
    def __init__(self,functionsIn,lFile,lark,lsock):
        """Set up the LP command loop state and the print-blocker socket."""
        self.prompt="LP> "
        # Front-end helper object whose cmdGeneric/cmdCall do the real work.
        self.functions=functionsIn
        # Per-module help text, keyed by module name.
        self.helpDict={}
        # Per-module function dictionaries, keyed by module name.
        self.Modules={}
        # Implant architecture string — populated elsewhere (TODO confirm).
        self.architecture=''
        self.fMaps={}
        # Session log file object.
        self.logFile=lFile
        # LP-side architecture identifier.
        self.lpArch=lark
        # Default output directory; filled in by setDefaultOutDir().
        self.defaultOutDir=''
        # UDP socket shared with the backend.
        self.lpSock=lsock
        # Socket used to suspend/resume backend printing while waiting on results.
        self.printBlocker = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.printBlocker.bind(('127.0.0.1',BLOCKER_PORT))
        cmd.Cmd.__init__(self)
    def setDefaultOutDir(self):
        # Cache the default output directory from the XML configuration.
        self.defaultOutDir = Lp_XmlParser.parseDefaultDir()
    def preloop(self):
        # Runs once before the command loop: log the prompt and show the
        # available commands.
        self.logFile.write(self.prompt)
        self.do_help("")
#This function is executed immediatly before the command entered by the user is handled by the
#cmd class. For a command that is a key in the function dictionary, the user input is handled
#by the genericCmd function.
    def precmd(self,line):
        """Intercept each command before cmd.Cmd dispatches it.

        Commands that name a function in a loaded module are executed here
        via genericCmd; 'burn' and 'upgrade' additionally wait on the
        backend socket for confirmation/progress.  Returns the (possibly
        rewritten) line for cmd.Cmd — '' to suppress further dispatch,
        'quit'/'abort' to end the loop.
        """
        usrIn=line.split(" ")
        fName=self.__resolveFuncName(usrIn[0])
        if fName=='quit':
            return 'quit'
        for mod in self.Modules:
            #Is the commanded function a key in the dictionary for the module 'mod'?
            if fName in self.Modules[mod]:
                # Snapshot the argument dict so it can be restored afterwards.
                oldDict={}
                oldDict=self.Modules[mod][fName].copy()
                cmdRes=self.genericCmd(mod,usrIn)
                if cmdRes>=0 and fName=='burn':
                    # Wait up to 60s for the backend to confirm the burn RPC.
                    self.lpSock.settimeout(60)
                    try:
                        burnConf = self.lpSock.recv(1024)
                        while burnConf.find('RECV RPC')<0:
                            burnConf = self.printBlocker.recv(1024)
                        # Return code is embedded as 'rc=<n>' in the reply.
                        retCode = burnConf[burnConf.find('rc=')+3:len(burnConf)-1]
                        if retCode == '0':
                            print "Burn Successful"
                            self.logFile.write("Burn Successful.\n")
                            return 'abort'
                        else:
                            print "Burn rpc returned an error: %s"%retCode
                            print "The implant may have burned."
                            self.lpSock.setblocking(1)
                            return ''
                    except socket.timeout:
                        print "Did not receive confirmation of burn. The implant may have burned."
                        self.logFile.write("Did not receive confirmation of burn. The implant may have burned.\n")
                        self.lpSock.setblocking(1)
                        return 'abort'
                elif cmdRes>=0 and fName=="upgrade":
                    # Poll the backend with '!stat' and print transfer progress
                    # until the upgrade-file RPC completes, times out, or the
                    # user interrupts.
                    stillActive=1
                    rpcRes=-1
                    # Temporarily redirect backend output to the front end.
                    ret=self.functions.cmdGeneric('port',
                                                  {'command':'!port','outport':str(FRONTEND_PORT),
                                                   'endOnString':'DONE'},
                                                  {})
                    self.lpSock.settimeout(60)
                    while stillActive==1:
                        try:
                            self.lpSock.sendto('!stat\n',('127.0.0.1',BACKEND_PORT))
                            stillActive=0
                            lineIn=self.lpSock.recv(1024)
                            while lineIn.find('DONE')<0:
                                if lineIn.find('RECV')>=0:
                                    rpcRes=int(lineIn[lineIn.find('rc=')+3:len(lineIn)])
                                elif lineIn.find('RPC ID')>=0:
                                    newLine=lineIn.find('\n')
                                    rpcN=int(lineIn[7:newLine])
                                    # Only report progress for our own RPC.
                                    if rpcN==cmdRes:
                                        prog = lineIn[lineIn.find('Sent:')+6:
                                                      lineIn.find('\nTotal')]
                                        totalPkts = lineIn[lineIn.find('RPC:')+5:]
                                        print "\rProgress: %.1f%s"%((float(prog)/float(totalPkts)*
                                                                     100),'%'),
                                        sys.stdout.flush()
                                        stillActive=1
                                lineIn=self.lpSock.recv(1024)
                            line=''
                            time.sleep(.5)
                        except socket.timeout:
                            line='abort'
                            print 'Socket timed out while receiving status. Backend is not responding.'
                            self.logFile.write('Socket timed out while receiving status. Backend is not responding.\n')
                            break
                        except KeyboardInterrupt:
                            print "\nUpfile transfer aborted."
                            self.logFile.write('\nUpfile transfer aborted.\n')
                            return 'abort'
                    if rpcRes == 0:
                        print "Upgrade file sent."
                        self.logFile.write("Upgrade file sent.\n")
                        return 'abort'
                    else:
                        # Restore backend printing and report the RPC result.
                        self.functions.cmdGeneric('port',{'command':'!port','outport':str(PRINT_PORT),
                                                          'endOnString':'DONE'},{})
                        self.lpSock.setblocking(1)
                        cmdRes = rpcRes
                if cmdRes == -2:
                    line='abort'
                    break
                # Restore the pre-call argument dict and suppress dispatch.
                self.Modules[mod][fName]=oldDict
                return ""
        return line
def deleteMod(self,toUnload,okToDelete):
if okToDelete>0:
return
keys=self.Modules.keys()
for key in keys:
if self.Modules[key]['iface']==toUnload[0]:
del self.Modules[key]
del self.helpDict[key]
    def printTunnelCmd(self,tunnelCommand,okToPrint):
        # Echo the equivalent command-line invocation for a tunnel,
        # unless suppressed (okToPrint != 0).
        if okToPrint==0:
            print tunnelCommand[0]
#Handles user input if the commanded function has an entry in the function dictionary.
#Arguments:
# mod: the module that contains this function
# usrIn: raw input that the user entered at the command prompt
#Return:
# An integer representing the result of the function call
    def genericCmd(self,mod,usrIn):
        """Execute a command defined in module *mod*'s function dictionary.

        Gathers the function's arguments from (in order of precedence):
        a curses form, a directory listing, an interactive switch prompt,
        the default output directory, or key/value pairs on the command
        line — then dispatches via self.functions.cmdGeneric.  Returns the
        dispatcher's result, or a negative value on error/cancel.
        """
        fName=self.__resolveFuncName(usrIn[0])
        newIn=[]
        #Strip empty strings from usrIn
        for el in usrIn:
            if el!='':
                newIn.append(el)
        usrIn=newIn
        try:
            #Check if this function takes input arguments
            if self.Modules[mod][fName]['noargs']=="true":
                if len(usrIn)>1:
                    print "The command %s does not accept input arguments."%fName
                    self.logFile.write("The command %s does not accept input arguments.\n"%fName)
                    return
        except KeyError:
            print "No functions found for module '%s'"%mod
            self.logFile.write("No functions found for module '%s'"%mod)
            return -1
        # Optional y/N confirmation before executing.
        if self.Modules[mod][fName]['confirm']=='1':
            try:
                confirmation=raw_input("Are you sure you want to execute %s?\nY\N: "%fName)
            except KeyboardInterrupt:
                print
                return -1
            self.logFile.write("Are you sure you want to execute %s?"%fName)
            self.logFile.write((confirmation+'\n'))
            if confirmation!='y' and confirmation!='Y':
                return -1
        #Check if this function should use a curses form.  If so, then extract the necessary arguments
        #from the curses return and call the function.
        if self.Modules[mod][fName]['curses']=="true" and len(usrIn)==1:
            self.logFile.write('Curses form entered.\n')
            arguments=[]
            formType=''
            if fName=='redirtunnel':
                formType='redir'
            elif fName=='outtunnel':
                formType='out'
            else:
                formType='default'
            form=Lp_CursesDriver.CursesDriver(
                self.Modules[mod][fName]['cursesPrompts'],
                len(self.Modules[mod][fName]['cursesPrompts']),
                formType
                )
            arguments=form.runCurses()
            self.logFile.write('***Raw output from curses form***\n')
            self.logFile.write('%s\n%s\n******\n'%(arguments[0],arguments[1]))
            #clear the screen to eliminate any oddities that result from the terminal being resized.
            os.system('clear')
            res=0
            for arg in arguments:
                validArg=self.__checkArg(arg)
                if arg!=[] and validArg==1:
                    argDict=dict(arg)
                    # Translate a symbolic transport protocol into its IP
                    # protocol number (udp -> 17, tcp -> 6).
                    if 'transprot' in argDict:
                        protocol=argDict['transprot']
                        if protocol=='udp' or protocol=='UDP':
                            argDict['transprot']='17'
                        elif protocol=='tcp' or protocol=='TCP':
                            argDict['transprot']='6'
                        else:
                            print "Bad Protocol Selection."
                            self.logFile.write("Bad Protocol Selection.\n")
                            return -1
                    argList=argDict.items()
                    #build string for creating this tunnel on command line
                    if fName=='redirtunnel' or fName=='outtunnel':
                        cmdString=fName+' netprot 2048'
                        for el in argList:
                            cmdString+=' '+el[0]+' '+el[1]
                        tunnelCmd='\nCommand to create this tunnel via command line:\n%s\n'%cmdString
                    # Copy the form's values into the function dictionary.
                    keys=argDict.keys()
                    for key in keys:
                        try:
                            self.Modules[mod][fName][key]=argDict[key].rstrip()
                        except KeyError:
                            print "assignment failed"
                            self.logFile.write("assignment failed\n")
                            return
                    res=self.functions.cmdGeneric(fName,self.Modules[mod][fName],{})
                    if fName=='redirtunnel' or fName=='outtunnel':
                        self.printTunnelCmd([tunnelCmd],0)
                elif arg!=[] and validArg!=0:
                    print "Error. Input not provided for %s."%validArg
                    self.logFile.write("Error. Input not provided for %s.\n"%validArg)
            return res
        # Argument selection from a directory listing (e.g. picking a module
        # file to load).
        if self.Modules[mod][fName]['useDirList']=='1' and len(usrIn)==1:
            try:
                dirListParams=self.Modules[mod][fName]['dirListParams']
                directory=dirListParams['baseDir']
                if dirListParams['prependCWD']=='1':
                    directory='%s%s'%(os.getcwd(),directory)
                if dirListParams['appendImplantArch']=='1':
                    directory='%s/%s'%(directory,self.architecture)
                # Keep only entries matching the configured file extension.
                dirListing=[]
                initialList=os.listdir(directory)
                for item in initialList:
                    if item.find(dirListParams['fileEx'])>=0:
                        dirListing.append(item)
                moduleIndexes={}
                while 1:
                    print dirListParams['prePrint']
                    try:
                        if dirListParams['showIfaceNumbers']=='1':
                            for i in range(0, len(dirListing),1):
                                modName=dirListing[i].split(dirListParams['modNameSplitChar'])[0]
                                ifaceArgs=[modName,self.architecture, self.lpArch]
                                iface=Lp_XmlParser.parseIface(ifaceArgs)
                                if iface<0:
                                    print "Unable to find iface number for %s."%modName
                                    print "Ensure the xml file for this module is located in the proper directory."
                                else:
                                    print "%d: %s"%(iface,dirListing[i])
                                    #Create the mapping of iface number to position in list
                                    #This allows user to enter iface number to select mod
                                    moduleIndexes[iface]=i
                            input=raw_input(dirListParams['listPrompt'])
                            input=int(input)
                            selection=moduleIndexes[input]
                            self.Modules[mod][fName][dirListParams['promptToSet']]='%s/%s'%(directory,
                                                                                            dirListing[int(selection)])
                            break
                        else:
                            for i in range(0, len(dirListing),1):
                                print "%d: %s"%(i,dirListing[i])
                            selection=raw_input(dirListParams['listPrompt'])
                            self.Modules[mod][fName][dirListParams['promptToSet']]='%s/%s'%(directory,
                                                                                            dirListing[int(selection)])
                            break
                    except (IndexError, ValueError, KeyError):
                        print "\nInvalid selection.\n"
                # Optionally require an XML configuration for the selection.
                if dirListParams['requireXml']=='1':
                    modStripped=dirListing[int(selection)].split(dirListParams['modNameSplitChar'])[0]
                    modFd=open('%s/%s.xml'%(directory,modStripped))
                    modFd.close()
            except IOError:
                print 'No xml configuration file found for this module.'
                return -1
            except OSError:
                print "%s not found."%directory
                self.logFile.write("%s not found.\n"%directory)
            except KeyboardInterrupt:
                print
                return -1
        # Interactive switch prompt: a single response selects a preset
        # bundle of argument assignments.
        if self.Modules[mod][fName]['useSwitch']=='1' and len(usrIn)==1:
            try:
                switchParams=self.Modules[mod][fName]['switchParams']
                while 1:
                    response=raw_input(str(switchParams['prompt']))
                    possibleInputs=switchParams['switchOpts'].keys()
                    correctKey=0
                    for input in possibleInputs:
                        input=str(input)
                        res=input.find(response)
                        if res>=0:
                            correctKey=input
                    if correctKey==0:
                        print "Invalid selection."
                    else:
                        for argument in switchParams['switchOpts'][correctKey]:
                            try:
                                self.Modules[mod][fName][str(argument[0])]=str(argument[1])
                            except KeyError:
                                print "Internal Error: argument specified in switch not found as an argument for %s."%fName
                        break
            except KeyboardInterrupt:
                print
                return -1
        if self.Modules[mod][fName]['printFunc'] != [] and len(usrIn)==1:
            self.printFunctionOut(self.Modules[mod][fName]['printFunc'])
        if self.Modules[mod][fName]['useDefaultDir'] != [] and len(usrIn) == 1:
            argument = self.Modules[mod][fName]['useDefaultDir']
            self.Modules[mod][fName][argument] = self.defaultOutDir
        #Extract arguments from the entered command and assign values in the function dictionary
        #if no other method of obtaining arguments is defined for this command.
        if len(usrIn)>1:
            for i in range(1,len(usrIn)-1,2):
                try:
                    res=self.Modules[mod][fName][usrIn[i]]
                    self.Modules[mod][fName][usrIn[i]]=usrIn[i+1]
                except:
                    print "Incorrect argument: %s"%usrIn[i]
                    self.logFile.write("Incorrect argument: %s\n"%usrIn[i])
                    return
        if self.Modules[mod][fName]['useArgConfirm']=='1':
            res=self.functions.cmdGeneric(fName, self.Modules[mod][fName],
                                          self.Modules[mod][fName]['argConfirmParams'])
        else:
            res=self.functions.cmdGeneric(fName, self.Modules[mod][fName],{})
        return res
#Used to print the result of a function when the lp needs to confirm the completion of the function that
#caused the print.
    def printFunctionOut(self,func):
        """Run *func* ('mods' or 'listtunnels') and wait for its output.

        Printing is suspended on the RPC dispatcher while the command runs,
        a block is registered on the command id, and the method waits until
        the dispatcher confirms all output was delivered before re-enabling
        printing.
        """
        self.printBlocker.sendto('!!#TURN_OFF_PRINTING',('127.0.0.1',RPC_DISPATCH_PORT))
        if func=='mods':
            print "******************Loaded Modules*****************"
            self.logFile.write("******************Loaded Modules*****************\n")
            res = self.functions.cmdGeneric('mods',{'command':'!mods'},{})
            # Block until the dispatcher signals completion of command `res`.
            self.printBlocker.sendto('!!#REG_BLOCK%d'%res,('127.0.0.1',RPC_DISPATCH_PORT))
            result = self.printBlocker.recv(1024)
        elif func=='listtunnels':
            # NOTE(review): ciface/cfunc/cprov are magic ids -- confirm against
            # the module XML definitions.
            res = self.functions.cmdGeneric('listtunnels',{'command':'!call','ciface':'34',
                                                           'cfunc':'2','cprov':'1'},{})
            self.printBlocker.sendto('!!#REG_BLOCK%d'%res,('127.0.0.1',RPC_DISPATCH_PORT))
            result = self.printBlocker.recv(1024)
        self.printBlocker.sendto('!!#TURN_ON_PRINTING',('127.0.0.1',RPC_DISPATCH_PORT))
    def parseXml(self, mod, requireLpEx):
        """Parse module *mod*'s XML definition and merge it into the command
        tables (helpDict, Modules, fMaps) via Lp_XmlParser."""
        Lp_XmlParser.parseMod([self.helpDict, self.Modules, mod, self.architecture, self.lpArch, self.fMaps,
                               requireLpEx, self.functions], 0)
#The call command can be used to browse loaded modules and call any function from any loaded module.
#It is intended as an advanced/developer command, and therefore will not be shown by help.
#It can be called by entering 'call' at the lp prompt.
def do_call(self,line):
usrIn=line.split(" ")
if len(usrIn)!=1:
return
#print "Incorrect number of arguments. Enter \'help call\' for usage information."
elif usrIn[0]=="":
res=self.functions.cmdCall(self.printBlocker)
else:
return
#print "Incorrect number of arguments. Enter \'help call\' for usage information."
#Prints all of the functions available from each module by going through helpDict
    def do_help(self,line):
        """Print help.

        With no argument, list the functions of every loaded module.
        Otherwise the argument may be a module name, a module iface number,
        or a function name/number; the first match wins.
        """
        usrIn=line.split(" ")
        if line=="":
            print "****Available Commands****"
            self.logFile.write("****Available Commands****\n")
            keys=self.helpDict.keys()
            for key in keys:
                self.__printModuleFunctions(key)
        else:
            #Check if user entered a module name
            if usrIn[0] in self.helpDict:
                self.__printModuleFunctions(usrIn[0])
                return
            keys=self.helpDict.keys()
            #Check if user entered module iface number
            for key in keys:
                try:
                    modStr='padding/%s_debug.mo'%key
                    ifaceNum=Lp_XmlParser.parseIface([modStr, self.architecture, self.lpArch])
                    if str(ifaceNum)==usrIn[0]:
                        self.__printModuleFunctions(key)
                        return
                # NOTE(review): a KeyError aborts the whole iface scan (break)
                # instead of skipping just this module -- confirm intended.
                except KeyError:
                    break
            fName=self.__resolveFuncName(usrIn[0])
            #Check if user entered a function name or number
            for key in keys:
                if fName in self.helpDict[key]:
                    use='\n%s\n'%textwrap.fill(self.helpDict[key][fName]['usage'])
                    text='%s\n'%textwrap.fill(self.helpDict[key][fName]['text'])
                    print use
                    self.logFile.write('%s\n'%use)
                    print text
                    self.logFile.write('%s\n'%text)
                    return
            print "No help information found for: %s"%fName
            self.logFile.write("No help information found for: %s\n"%fName)
#Binding for help command
    def do_h(self,line):
        """Alias for the help command."""
        self.do_help(line)
#Exits the LP without printing modules after receiving confirmation of burn.
    def do_abort(self,line):
        """Exit immediately without printing loaded modules or notifying the
        backend; returning True stops the cmd loop."""
        print "Goodbye"
        return True
#Prints modules loaded and then exits the LP
def do_exit(self,line):
#Display loaded modules on exit. Port is changed to 1336 so that the front end can make sure
#that the entire list is printed before exiting.
try:
self.printFunctionOut('mods')
res=functions.cmdGeneric('term',{'command':'!term','endOnString':'DONE'},{})
print "Goodbye"
self.logFile.write("Goodbye\n")
self.logFile.write('Session terminated at %s'%str(datetime.now()))
return True
except KeyboardInterrupt:
print "Goodbye"
self.logFile.write("Goodbye\n")
self.logFile.write('Session terminated at %s'%str(datetime.now()))
return True
#Command binding to exit LP
    def do_quit(self,line):
        """Alias for exit; always stops the cmd loop."""
        self.do_exit(line)
        return True
#Command binding to exit LP
    def do_logout(self,line):
        """Alias for exit; always stops the cmd loop."""
        self.do_exit(line)
        return True
#Command binding to exit LP
    def do_EOF(self,line):
        """Treat end-of-input (Ctrl-D) as exit."""
        self.do_exit(line)
        return True
    def emptyline(self):
        """Do nothing on an empty input line (overrides the cmd.Cmd default)."""
        pass
    def setArch(self,inArch):
        """Record the implant architecture string (e.g. 'x86_64', 'i386')."""
        self.architecture=inArch
def __sort(self,modKey,toSort):
toReturn=[]
fMaps={}
fNums=[]
for fName in toSort:
fNum=self.helpDict[modKey][fName]['fnum']
fMaps[int(str(fNum).split('.')[1])]=fName
fNums.append(int(str(fNum).split('.')[1]))
fNums=sorted(fNums)
for num in fNums:
toReturn.append(fMaps[num])
return toReturn
#Checks if there are any empty strings in a list returned by CursesDriver
def __checkArg(self,toCheck):
for element in toCheck:
if element[1]=='':
return element[0]
return 1
def __resolveFuncName(self,name):
try:
function=self.fMaps[name]
except KeyError:
function=name
return function
    def __printModuleFunctions(self,modName):
        """Print (and log) the numbered function list of one module, sorted by
        function number; entries flagged nodisplay are suppressed."""
        fKeys=self.helpDict[modName].keys()
        if len(fKeys)<=0:
            return
        print 'Module: %s'%modName
        self.logFile.write('Module: %s\n'%modName)
        sortedfKeys=self.__sort(modName,fKeys)
        for fKey in sortedfKeys:
            if self.helpDict[modName][fKey]['nodisplay']!="true":
                disp=" %s: %s"%(self.helpDict[modName][fKey]['fnum'],fKey)
                print disp
                self.logFile.write((disp+'\n'))
        print
#Forces backend to ignore TERM signals sent to the front end on ctrl-c
def preexec_fcn():
    """preexec_fn for the backend subprocess: ignore SIGINT so a Ctrl-C at the
    front end does not also kill the backend."""
    signal.signal(signal.SIGINT,signal.SIG_IGN)
#Connects to the implant and prints the currently loaded modules and implant uptime.
#Arguments:
# proc: the Lp Input Processing object
# sock: the socket used to communicate with backend
#Return:
# 1 on success
# -1 in fail
def showWelcome(proc,func,sock,outFile,lpArch):
currentDirectory=os.getcwd()
supportedArchs={'062':'x86_64','003':'i386','020':'ppc','021':'ppc64',
'002':'sparc','008':'mips_be','010':'mips_le',
'040':'arm','043':'sparcv9'}
openArgs={"command":"!open","dstip":sys.argv[1],"dstport":sys.argv[2],"srcip":sys.argv[3],
"srcport":sys.argv[4],"keyfile":sys.argv[5],'endOnString':'DONE'}
res = func.cmdGeneric('open',openArgs, {})
try:
line=sock.recv(1024)
except:
print "Failed to connect to implant."
return -1
lpexList = []
allArches = []
print "Loading Lp Extensions...",
sys.stdout.flush()
#load all lp extention files available
try:
allArches = os.listdir('%s/../Mods/App/Buzzdirection/'%(currentDirectory))
except OSError:
pass
for oneArch in allArches:
lpexDir = '%s/../Mods/App/Buzzdirection/%s/'%(currentDirectory,oneArch)
lpexList += os.listdir(lpexDir)
if lpArch == 'i386':
lpexExtension = '.lx32'
else:
lpexExtension = '.lx64'
for lpex in lpexList:
if lpex.find(lpexExtension)>=0:
fileLoc = '%s%s'%(lpexDir,lpex)
func.cmdGeneric('lpex',{'command':'!lpex','lpexfile':fileLoc,'endOnString':'DONE'},{})
print "\r",
#Parse xml files for preloaded modules and print currently loaded modules
func.cmdGeneric('port',{'command':'!port','outport':str(FRONTEND_PORT),'endOnString':'DONE'},{})
res=func.cmdGeneric('mods',{'command':'!mods','endOnString':'DONE'},{})
print "******************Loaded Modules*****************",
try:
line=sock.recv(1024)
#while line.find("Device ID")<0:
while line.find("RECV")<0:
print line,
outFile.write(line)
if line.find('name')<0 and line.find('--')<0 and line.find("Device")<0:
modName=line[8:25]
modName=modName.strip()
module=(modName+'.mo')
if len(module)>3:
proc.parseXml(module,0)
if modName=='PlatCore':
platCoreArch=line[46:len(line)].strip()
try:
proc.setArch(supportedArchs[platCoreArch])
except KeyError:
print "The reported implant architecture type of %s is not supported."%platCoreArch
return -1
line=sock.recv(1024)
except socket.timeout:
print "Failed to receive module list."
outFile.write("Failed to receive module list.")
except KeyboardInterrupt:
return -1
func.cmdGeneric('port',{'command':'!port','outport':str(PRINT_PORT),'endOnString':'DONE'},{})
res=func.cmdGeneric('uptime',{'command':'!call','ciface':'2','cprov':'0','cfunc':'15'},{})
proc.printBlocker.sendto('!!#REG_BLOCK%d'%res,('127.0.0.1',RPC_DISPATCH_PORT))
result = proc.printBlocker.recv(1024)
return 1
# Entry point: start the backend process, wire up the front-end objects and
# helper threads, run the interactive loop, then tear everything down.
if __name__=='__main__':
    try:
        functions=0
        processor=0
        printThread=0
        log=0
        lpSock=0
        out=0
        # Pick the backend binary directory from the local interpreter arch.
        ark=platform.architecture()[0]
        if ark=='32bit':
            lpArk='i386'
        else:
            lpArk='x86_64'
        curDir=os.getcwd()
        out=open((curDir+'/back.log'),'w+')
        try:
            logFiles=os.listdir('%s/Logs'%os.getcwd())
        except OSError:
            os.mkdir('%s/Logs'%curDir)
            logFiles=os.listdir('%s/Logs'%os.getcwd())
        numLogs=len(logFiles)
        logFiles.sort()
        #If there are more than 20 log files, delete the oldest one.
        # NOTE(review): sys.argv[6]=='1' gates log rotation -- confirm the
        # meaning of argv[6] with the launcher.
        if numLogs>20 and sys.argv[6]=='1':
            try:
                os.remove('%s/Logs/%s'%(curDir,logFiles[0]))
            except OSError:
                print "Unable to remove oldest logfile."
        # Session log named after the current date and time (seconds stripped
        # of their fractional part).
        date='%s'%datetime.date(datetime.now())
        cTime='%s'%datetime.time(datetime.now())
        cTime=cTime[:cTime.find('.')]
        logname='%s_%s_lp.log'%(date,cTime)
        log=open('%s/Logs/%s'%(curDir,logname),'w+')
        lpSock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        lpSock.bind(('127.0.0.1',FRONTEND_PORT))
        try:
            # Launch the backend; SIGINT is ignored in the child (preexec_fcn).
            subprocess.call([(curDir+'/'+lpArk+'/ThrowUser_LinuxUser'),(curDir+'/'+lpArk+'/blob.lp')],
                            stdout=out,preexec_fn=preexec_fcn)
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
        except:
            print "Unable to locate back end executatble. This should be located in Lp/<LP Architecture>"
            out.close()
            ThreadExit=0
            lpSock.close()
            sys.exit()
        time.sleep(1)
        functions=Lp_FrontEndFunctions.Lp_FrontEndFcns(lpSock,log)
        processor=LpInputProcessing(functions,log,lpArk,lpSock)
        processor.parseXml('Lp.mo',0)
        processor.setDefaultOutDir()
        functions.setProc(processor)
        printThread=PrintThread(processor,log)
        printThread.daemon=True
        printThread.start()
        rpcDispatch=Lp_RpcDispatcher.RpcDispatcher(processor)
        rpcDispatch.start()
        res=showWelcome(processor,functions,lpSock,log,lpArk)
        lpSock.sendto("!!#TURN_ON_PRINTING",('127.0.0.1',RPC_DISPATCH_PORT))
        if res>0:
            processor.cmdloop()
        # Shut down helper threads and the backend process.
        lpSock.sendto("!!#QUIT",('127.0.0.1',RPC_DISPATCH_PORT))
        lpSock.sendto("!!#QUIT",('127.0.0.1',PRINT_PORT))
        subprocess.call(['killall','ThrowUser_LinuxUser'])
        out.close()
        log.close()
        lpSock.close()
    except KeyboardInterrupt:
        # Ctrl-C anywhere above: best-effort teardown mirroring the normal path.
        if functions!=0:
            res=functions.cmdGeneric('term',{'command':'!term','endOnString':'DONE'},{})
        if out !=0:
            try:
                out.close()
            except IOError:
                pass
        print "Goodbye"
        if log!=0:
            log.write('Goodbye\n')
            log.write('Session terminated at %s'%str(datetime.now()))
            log.close()
        lpSock.sendto("!!#QUIT",('127.0.0.1',RPC_DISPATCH_PORT))
        lpSock.sendto("!!#QUIT",('127.0.0.1',PRINT_PORT))
        subprocess.call(['killall','ThrowUser_LinuxUser'])
        if lpSock != 0:
            lpSock.close()
| StarcoderdataPython |
import requests
from bs4 import BeautifulSoup

# Fetch a user-supplied URL and print every paragraph found inside the
# element with id="maincontent".
url = input()
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
main_content = soup.find(id="maincontent")
for paragraph in main_content.find_all("p"):
    print(paragraph.text, "\n")
| StarcoderdataPython |
153586 | <filename>pset6/hello.py
from cs50 import get_string

# Prompt the user for their name and greet them.
name = get_string("What is your name? ")
print("hello,", name)
| StarcoderdataPython |
99081 | <reponame>DanielSoaresFranco/Aulas.py
def mensagem(cor='', msg='', firula='', tamanho=0):
    """Print *msg* wrapped in the ANSI escape *cor*, optionally framed.

    Parameters:
        cor: ANSI escape prefix (e.g. '\\033[31m'); '' for no color.
        msg: text to print; for multi-line text the frame width follows
             the first line only.
        firula: character repeated to draw the top/bottom frame lines;
                '' disables the frame.
        tamanho: pre-built frame line; 0 (default) derives it as
                 firula * (first-line length + 4).
    """
    if '\n' in msg:
        linha = msg.find('\n')
    else:
        linha = len(msg)
    limpa = '\033[m'  # ANSI reset
    if tamanho == 0:
        tamanho = firula * (linha + 4)
    if firula == '':
        print(f'{cor} {msg} {limpa}')
    else:
        print(f'{cor}{tamanho}{limpa}')
        # Consistency fix: use `limpa` instead of repeating the raw escape.
        print(f'{cor} {msg} {limpa}')
        print(f'{cor}{tamanho}{limpa}')
3306224 | <filename>tools.py
import datetime
import functools
import io
import math
import pathlib
import pickle
import re
import uuid
import imageio
import gym
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import tensorflow_probability as tfp
from tensorflow.keras.mixed_precision import experimental as prec
from tensorflow_probability import distributions as tfd
import tfplot
import logging
class AttrDict(dict):
    """Dictionary whose items can also be read and written as attributes.

    Note: a missing attribute raises KeyError (not AttributeError) because
    lookups go straight to dict.__getitem__.
    """
    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__
class Module(tf.Module):
    """tf.Module with pickle checkpointing and lazy, named sub-modules."""

    def save(self, filename):
        """Write the numpy values of all variables to *filename* via pickle."""
        values = tf.nest.map_structure(lambda x: x.numpy(), self.variables)
        with pathlib.Path(filename).open('wb') as f:
            pickle.dump(values, f)

    def load(self, filename):
        """Assign variables in place from a checkpoint written by save().

        The variable structure must match the one that was saved. Only load
        trusted files: this uses pickle.
        """
        with pathlib.Path(filename).open('rb') as f:
            values = pickle.load(f)
        tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values)

    def get(self, name, actor, *args, **kwargs):
        # Create or get layer by name to avoid mentioning it in the constructor.
        if not hasattr(self, '_modules'):
            self._modules = {}
        if name not in self._modules:
            self._modules[name] = actor(*args, **kwargs)
        return self._modules[name]
def nest_summary(structure):
    """Summarize a nested structure of dicts/lists, replacing any object that
    has a .shape with a compact 'AxB' string and everything else with '?'."""
    if isinstance(structure, dict):
        return {key: nest_summary(value) for key, value in structure.items()}
    if isinstance(structure, list):
        return [nest_summary(item) for item in structure]
    if hasattr(structure, 'shape'):
        shape_text = str(structure.shape)
        return shape_text.replace(', ', 'x').strip('(), ')
    return '?'
def graph_summary(writer, fn, *args):
    """Invoke summary function *fn* from graph mode via tf.numpy_function.

    The current summary step is captured eagerly so the deferred call logs
    against the right step when it eventually runs.
    """
    step = tf.summary.experimental.get_step()
    def inner(*args):
        tf.summary.experimental.set_step(step)
        with writer.as_default():
            fn(*args)
    return tf.numpy_function(inner, args, [])
@tfplot.autowrap(figsize=(2, 2))
def plot_scatter(x: np.ndarray, y: np.ndarray, *, ax, min_v=-1, max_v=+1, color='red'):
    """Render an (x, y) scatter plot as an image tensor.

    *ax* is injected by tfplot.autowrap. Axes are hidden and both dimensions
    are clipped to [min_v, max_v] plus a small margin.
    """
    margin = .1
    ax.scatter(x, y, s=5, c=color)
    ax.set_xlim(min_v - margin, max_v + margin)
    ax.set_ylim(min_v - margin, max_v + margin)
    ax.axis('off')
@tfplot.autowrap(figsize=(2, 2))
def plot_step(x: np.ndarray, y: np.ndarray, *, ax, color='k', min_y=-1, max_y=1):
    """Render a step chart of *y* over *x* as an image tensor, annotated with
    the sum of *y* (labelled 'return'). *ax* is injected by tfplot.autowrap."""
    margin = 0.1
    ax.step(x, y, color=color)
    ax.text(x[0] + margin, min_y + margin, 'return={:.2f}'.format(np.sum(y)))
    ax.set_ylim(min_y - margin, max_y + margin)
def lidar_to_image(scan, min_v=-1, max_v=+1, color: str = "k"):
    """Render lidar range scans as a batch of scatter-plot video frames.

    *scan* is indexed as (batch, time, beams); the beams are assumed to span
    a 270-degree field of view -- confirm against the sensor config.
    """
    # shift pi/2 just to align for visualization
    angles = tf.linspace(math.pi / 2 - math.radians(270.0 / 2),
                         math.pi / 2 + math.radians(270.0 / 2),
                         scan.shape[-1])[::-1]
    batch_video = []
    for b in range(scan.shape[0]):
        single_episode = []
        for t in range(scan.shape[1]):
            # Polar ranges -> Cartesian coordinates per beam.
            x = scan[b, t, :] * tf.cos(angles)
            y = scan[b, t, :] * tf.sin(angles)
            data = plot_scatter(x, y, min_v=min_v, max_v=max_v, color=color)[:, :, :3]  # no alpha channel
            single_episode.append(data)
        video = tf.stack(single_episode)
        batch_video.append(video)
    return tf.stack(batch_video)
def reward_to_image(reward_data, min_y=-1, max_y=1):
    """Plot each batch row of *reward_data* as a step chart and stack the
    resulting RGB images into one tensor."""
    images = []
    for batch_index in range(reward_data.shape[0]):
        rewards = reward_data[batch_index, :]
        steps = range(rewards.shape[0])
        # plot_step yields RGBA; keep only the RGB channels.
        image = plot_step(steps, rewards, min_y=min_y, max_y=max_y)[:, :, :3]
        images.append(image)
    return tf.stack(images)
def flat_gif_summary(video, fps=10, name="lidar"):
    """Write the frames of *video* (tensor of images) to ./<name>.gif."""
    frames = [video[index].numpy().astype(np.uint8)
              for index in range(video.shape[0])]
    imageio.mimsave('./{}.gif'.format(name), frames, fps=fps)
def video_summary(name, video, step=None, fps=100):
    """Log a (B, T, H, W, C) video as a GIF summary, falling back to a tiled
    image grid when ffmpeg is unavailable. Float input is scaled to uint8."""
    name = name if isinstance(name, str) else name.decode('utf-8')
    if np.issubdtype(video.dtype, np.floating):
        video = np.clip(255 * video, 0, 255).astype(np.uint8)
    B, T, H, W, C = video.shape
    try:
        # Lay the batch out side by side: (T, H, B*W, C).
        frames = video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C))
        summary = tf1.Summary()
        # NOTE(review): the declared height is H*3 although frames are H
        # pixels tall -- confirm this is intended.
        image = tf1.Summary.Image(height=H * 3, width=W, colorspace=C)
        image.encoded_image_string = encode_gif(frames, fps)
        summary.value.add(tag=name + '/gif', image=image)
        tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
    except (IOError, OSError) as e:
        print('GIF summaries require ffmpeg in $PATH.', e)
        frames = video.transpose((0, 2, 1, 3, 4)).reshape((1, B * H, T * W, C))
        tf.summary.image(name + '/grid', frames, step)
def encode_gif(frames, fps):
    """Encode a sequence of (H, W, C) uint8 frames as GIF bytes via ffmpeg.

    C must be 1 (grayscale) or 3 (RGB). Raises IOError when ffmpeg exits
    non-zero (including when it is not on $PATH).
    """
    from subprocess import Popen, PIPE
    h, w, c = frames[0].shape
    pxfmt = {1: 'gray', 3: 'rgb24'}[c]
    cmd = ' '.join([
        f'ffmpeg -y -f rawvideo -vcodec rawvideo',
        f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
        f'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
        f'-r {fps:.02f} -f gif -'])
    proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    for image in frames:
        # Fix: ndarray.tostring() was deprecated and later removed from
        # numpy; tobytes() is the exact equivalent.
        proc.stdin.write(image.tobytes())
    out, err = proc.communicate()
    if proc.returncode:
        raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')]))
    del proc
    return out
def simulate(agents, env, config, datadir, writer, prefix='train', steps=0, episodes=0, sim_state=None,
             agents_ids=None):
    """Roll out *agents* in *env* for the given number of steps/episodes and
    log progress/return statistics for the first agent.

    Returns ((step, episode, dones, length, obs, agent_states), mean_return);
    the first element allows resuming the simulation via *sim_state*.
    """
    if agents_ids is None:
        agents_ids = ['A']
    n_agents = len(agents_ids)
    # these are used to collect statistic of the first agent only
    cum_reward = 0.0  # episode level
    episode_progresses = []  # episode level
    max_progresses = []  # collection level
    cum_rewards = []  # collection level
    main_id = agents_ids[0]  # the agent w.r.t. we collect statistics
    # Initialize or unpack simulation state.
    if sim_state is None:
        step, episode = 0, 0
        dones = {agent_id: True for agent_id in agents_ids}
        length = np.zeros(n_agents, np.int32)
        obs = {agent_id: None for agent_id in agents_ids}
        agent_states = {agent_id: None for agent_id in agents_ids}
    else:
        step, episode, dones, length, obs, agent_states = sim_state
    # Fix: a leftover `cum_reward = {id: 0.0 for id in agents_ids}` here
    # rebound the scalar accumulator to a dict, so the first
    # `cum_reward + rewards[main_id]` below raised TypeError.
    while (steps and step < steps) or (episodes and episode < episodes):
        # Reset envs if necessary.
        if any(dones.values()):
            obs = env.reset()
            if len(episode_progresses) > 0:  # at least 1 episode
                max_progresses.append(max(episode_progresses))
                cum_rewards.append(cum_reward)
            cum_reward = 0.0
        # Step agents.
        obs = {id: {k: np.stack([v]) for k, v in o.items()} for id, o in obs.items()}
        actions = dict()
        for i, agent_id in enumerate(agents_ids):
            actions[agent_id], agent_states[agent_id] = agents[i](obs[agent_id], np.stack([dones[agent_id]]),
                                                                  agent_states[agent_id])
            actions[agent_id] = np.array(actions[agent_id][0])
        assert len(actions) == len(agents_ids)
        # Step envs.
        obs, rewards, dones, infos = env.step(actions)
        # update episode-level information
        cum_reward = cum_reward + rewards[main_id]
        episode_progresses.append(infos[main_id]['lap'] + infos[main_id]['progress'] - 1)
        done = any(dones.values())
        episode += int(done)
        length += 1  # episode length until termination
        step += (int(done) * length).sum()  # num sim steps
        length *= (1 - done)
    # when the loop is over, write statistics for the 1st agent
    metrics_dict = {'progress': max_progresses,
                    'return': cum_rewards}
    summarize_collection(metrics_dict, config, datadir, writer, prefix)
    # Return new state to allow resuming the simulation.
    # NOTE(review): np.mean of an empty cum_rewards yields nan -- confirm
    # callers always run at least one full episode.
    return (step - steps, episode - episodes, dones, length, obs, agent_states), np.mean(cum_rewards)
def summarize_collection(metrics_dict, config, datadir, writer, prefix):
    """Log mean and std scalar summaries for every metric list in
    *metrics_dict*, tagged '<prefix>/<name>_mean' and '<prefix>/<name>_std'."""
    for metric_name, values in metrics_dict.items():
        # Step counts environment steps currently on disk.
        step = count_episodes(datadir)[1] * config.action_repeat
        with writer.as_default():  # Env might run in a different thread.
            tf.summary.experimental.set_step(step)
            tf.summary.scalar(f'{prefix}/{metric_name}_mean', np.mean(values))
            tf.summary.scalar(f'{prefix}/{metric_name}_std', np.std(values))
def count_videos(directory):
    """Count the .mp4 files anywhere under *directory* (a pathlib.Path)."""
    return sum(1 for _ in directory.glob('**/*.mp4'))
def count_episodes(directory):
    """Return (episodes, total_steps) for the .npz files in *directory*.

    Each filename encodes its length as '<name>-<length>.npz'; one step per
    episode is subtracted (the initial observation).
    """
    lengths = [int(path.stem.rsplit('-', 1)[-1]) - 1
               for path in directory.glob('*.npz')]
    return len(lengths), sum(lengths)
def count_steps(datadir, config):
    """Total environment steps on disk, scaled by the action repeat."""
    return count_episodes(datadir)[1] * config.action_repeat
def load_episodes(directory, rescan, length=None, balance=False, seed=0):
    """Infinite generator over episode dicts stored as .npz in *directory*.

    Each pass rescans the directory for new files (keeping loaded episodes in
    an in-memory cache) and yields *rescan* randomly chosen episodes. When
    *length* is given, a random window of that many steps is sliced from each
    episode; episodes shorter than *length* are skipped.
    """
    directory = pathlib.Path(directory).expanduser()
    random = np.random.RandomState(seed)
    cache = {}
    while True:
        for filename in directory.glob('*.npz'):
            if filename not in cache:
                try:
                    with filename.open('rb') as f:
                        episode = np.load(f)
                        # Materialize arrays while the file is open.
                        episode = {k: episode[k] for k in episode.keys()}
                except Exception as e:
                    print(f'Could not load episode: {e}')
                    continue
                cache[filename] = episode
        keys = list(cache.keys())
        for index in random.choice(len(keys), rescan):
            episode = cache[keys[index]]
            if length:
                total = len(next(iter(episode.values())))
                available = total - length
                if available < 1:
                    print(f'[Info] Skipped short episode of length {available}.')
                    continue
                if balance:
                    # Bias the window start toward the episode beginning.
                    index = min(random.randint(0, total), available)
                else:
                    index = int(random.randint(0, available + 1))  # +1 for include the last step in the sampled episode
                episode = {k: v[index: index + length] for k, v in episode.items()}
            yield episode
def preprocess(obs, config):
    """Cast and normalize an observation dict to the compute dtype.

    Images are scaled to [-0.5, 0.5]; lidar ranges are divided by 15.0
    (presumably the max sensor range -- confirm) and centered; occupancy
    grids stay in [0, 1]; rewards are clipped per config.clip_rewards.
    Returns a shallow copy; the input dict is not mutated.
    """
    dtype = prec.global_policy().compute_dtype
    obs = obs.copy()
    with tf.device('cpu:0'):
        if 'image' in obs:
            obs['image'] = tf.cast(obs['image'], dtype) / 255.0 - 0.5
        if 'lidar' in obs:
            obs['lidar'] = tf.cast(obs['lidar'], dtype) / 15.0 - 0.5
        if 'lidar_occupancy' in obs:
            # note: when using `lidar_occupancy` the reconstruction models return a Bernoulli distribution
            # for this reason, we don't center the observation in 0, but let it in [0, 1]
            obs['lidar_occupancy'] = tf.cast(obs['lidar_occupancy'], dtype)
        if 'reward' in obs:
            clip_rewards = dict(none=lambda x: x, tanh=tf.tanh,
                                clip=lambda x: tf.clip_by_value(x, config.clip_rewards_min, config.clip_rewards_max))[
                config.clip_rewards]
            obs['reward'] = clip_rewards(obs['reward'])
    return obs
def load_dataset(directory, config):
    """Build a batched, prefetched tf.data pipeline over stored episodes.

    Types and shapes are inferred from one sample episode; batches are
    (config.batch_size, config.batch_length, ...) after preprocess().
    """
    episode = next(load_episodes(directory, 1))
    types = {k: v.dtype for k, v in episode.items()}
    shapes = {k: (None,) + v.shape[1:] for k, v in episode.items()}
    generator = lambda: load_episodes(
        directory, config.train_steps, config.batch_length,
        config.dataset_balance)
    dataset = tf.data.Dataset.from_generator(generator, types, shapes)
    dataset = dataset.map(functools.partial(preprocess, config=config))
    dataset = dataset.batch(config.batch_size, drop_remainder=True)
    dataset = dataset.prefetch(10)
    return dataset
class SampleDist:
    """Wrap a distribution and estimate mean/mode/entropy by sampling.

    All other attributes are delegated to the wrapped distribution.
    """

    def __init__(self, dist, samples=100):
        self._dist = dist
        self._samples = samples

    @property
    def name(self):
        return 'SampleDist'

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped distribution.
        return getattr(self._dist, name)

    def mean(self):
        """Monte-Carlo estimate of the mean."""
        samples = self._dist.sample(self._samples)
        return tf.reduce_mean(samples, 0)

    def mode(self):
        """Approximate mode: the drawn sample with the highest log-prob."""
        sample = self._dist.sample(self._samples)
        logprob = self._dist.log_prob(sample)
        return tf.gather(sample, tf.argmax(logprob))[0]

    def entropy(self):
        """Monte-Carlo estimate of the entropy (mean negative log-prob)."""
        sample = self._dist.sample(self._samples)
        logprob = self.log_prob(sample)
        return -tf.reduce_mean(logprob, 0)
class OneHotDist:
    """Categorical distribution that samples one-hot vectors.

    prob/log_prob accept one-hot (or logit-argmax-able) events; other
    attributes are delegated to the underlying tfd.Categorical.
    """

    def __init__(self, logits=None, probs=None):
        self._dist = tfd.Categorical(logits=logits, probs=probs)
        self._num_classes = self.mean().shape[-1]
        self._dtype = prec.global_policy().compute_dtype

    @property
    def name(self):
        return 'OneHotDist'

    def __getattr__(self, name):
        return getattr(self._dist, name)

    def prob(self, events):
        # Convert a one-hot event back to its class index.
        indices = tf.argmax(events, axis=-1)
        return self._dist.prob(indices)

    def log_prob(self, events):
        indices = tf.argmax(events, axis=-1)
        return self._dist.log_prob(indices)

    def mean(self):
        return self._dist.probs_parameter()

    def mode(self):
        return self._one_hot(self._dist.mode())

    def sample(self, amount=None):
        amount = [amount] if amount else []
        indices = self._dist.sample(*amount)
        sample = self._one_hot(indices)
        probs = self._dist.probs_parameter()
        # Straight-through trick: value stays one-hot, but gradients flow
        # through the class probabilities.
        sample += tf.cast(probs - tf.stop_gradient(probs), self._dtype)
        return sample

    def _one_hot(self, indices):
        return tf.one_hot(indices, self._num_classes, dtype=self._dtype)
class TanhBijector(tfp.bijectors.Bijector):
    """tanh bijector with a numerically guarded inverse (atanh)."""

    def __init__(self, validate_args=False, name='tanh'):
        super().__init__(
            forward_min_event_ndims=0,
            validate_args=validate_args,
            name=name)

    def _forward(self, x):
        return tf.nn.tanh(x)

    def _inverse(self, y):
        # Clip values at the domain boundary so atanh does not return +-inf;
        # the computation runs in float32 and casts back afterwards.
        dtype = y.dtype
        y = tf.cast(y, tf.float32)
        y = tf.where(
            tf.less_equal(tf.abs(y), 1.),
            tf.clip_by_value(y, -0.99999997, 0.99999997), y)
        y = tf.atanh(y)
        y = tf.cast(y, dtype)
        return y

    @staticmethod
    def _forward_log_det_jacobian(x):
        # log|d tanh(x)/dx| = log(1 - tanh(x)^2) = 2*(log 2 - x - softplus(-2x)).
        log2 = tf.math.log(tf.constant(2.0, dtype=x.dtype))
        return 2.0 * (log2 - x - tf.nn.softplus(-2.0 * x))
def lambda_return(
        reward, value, pcont, bootstrap, lambda_, axis):
    """Compute lambda-returns along *axis*.

    *pcont* is the per-step continuation/discount factor (scalar or tensor),
    *bootstrap* the value used past the final step (zeros when None).
    """
    # Setting lambda=1 gives a discounted Monte Carlo return.
    # Setting lambda=0 gives a fixed 1-step return.
    assert reward.shape.ndims == value.shape.ndims, (reward.shape, value.shape)
    if isinstance(pcont, (int, float)):
        pcont = pcont * tf.ones_like(reward)
    # Move the time axis to the front, scan backwards, then restore layout.
    dims = list(range(reward.shape.ndims))
    dims = [axis] + dims[1:axis] + [0] + dims[axis + 1:]
    if axis != 0:
        reward = tf.transpose(reward, dims)
        value = tf.transpose(value, dims)
        pcont = tf.transpose(pcont, dims)
    if bootstrap is None:
        bootstrap = tf.zeros_like(value[-1])
    next_values = tf.concat([value[1:], bootstrap[None]], 0)
    inputs = reward + pcont * next_values * (1 - lambda_)
    returns = static_scan(
        lambda agg, cur: cur[0] + cur[1] * lambda_ * agg,
        (inputs, pcont), bootstrap, reverse=True)
    if axis != 0:
        returns = tf.transpose(returns, dims)
    return returns
class Adam(tf.Module):
    """Adam wrapper with mixed-precision loss scaling, optional gradient
    clipping, and optional pattern-matched weight decay over *modules*."""

    def __init__(self, name, modules, lr, clip=None, wd=None, wdpattern=r'.*'):
        self._name = name
        self._modules = modules
        self._clip = clip          # global-norm clip threshold, or None
        self._wd = wd              # weight decay factor, or None
        self._wdpattern = wdpattern  # regex selecting variables to decay
        self._opt = tf.optimizers.Adam(lr)
        self._opt = prec.LossScaleOptimizer(self._opt, 'dynamic')
        self._variables = None     # resolved lazily on first call

    @property
    def variables(self):
        return self._opt.variables()

    def __call__(self, tape, loss):
        """Apply one optimization step for scalar *loss*; returns the global
        gradient norm (before clipping)."""
        if self._variables is None:
            # Variables only exist after the modules have been built, so they
            # are collected on the first update.
            variables = [module.variables for module in self._modules]
            self._variables = tf.nest.flatten(variables)
            count = sum(np.prod(x.shape) for x in self._variables)
            print(f'[Init] Found {count} {self._name} parameters.')
        assert len(loss.shape) == 0, loss.shape
        with tape:
            loss = self._opt.get_scaled_loss(loss)
        grads = tape.gradient(loss, self._variables)
        grads = self._opt.get_unscaled_gradients(grads)
        norm = tf.linalg.global_norm(grads)
        if self._clip:
            grads, _ = tf.clip_by_global_norm(grads, self._clip, norm)
        if self._wd:
            context = tf.distribute.get_replica_context()
            context.merge_call(self._apply_weight_decay)
        self._opt.apply_gradients(zip(grads, self._variables))
        return norm

    def _apply_weight_decay(self, strategy):
        # NOTE(review): this multiplies matching variables by self._wd itself,
        # not by (1 - wd) -- confirm the intended decay convention.
        print('Applied weight decay to variables:')
        for var in self._variables:
            if re.search(self._wdpattern, self._name + '/' + var.name):
                print('- ' + self._name + '/' + var.name)
                strategy.extended.update(var, lambda v: self._wd * v)
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        """Log every non-empty line of *buf* at the configured level."""
        for line in buf.rstrip().splitlines():
            self.logger.log(self.log_level, line.rstrip())

    def flush(self):
        """No-op flush so the object can stand in for sys.stdout/sys.stderr.

        Writes are forwarded immediately, but callers (e.g.
        print(..., flush=True) or interpreter shutdown) expect the method.
        """
        pass
def args_type(default):
    """Return an argparse-compatible string parser matching *default*'s type.

    bool accepts exactly 'False'/'True'; int also accepts float syntax
    (decimal point or exponent); pathlib.Path is user-expanded; anything
    else falls back to its own type constructor.
    """
    def parse_bool(text):
        return bool(['False', 'True'].index(text))

    def parse_number(text):
        return float(text) if ('e' in text or '.' in text) else int(text)

    def parse_path(text):
        return pathlib.Path(text).expanduser()

    if isinstance(default, bool):
        return parse_bool
    if isinstance(default, int):
        return parse_number
    if isinstance(default, pathlib.Path):
        return parse_path
    return type(default)
def static_scan(fn, inputs, start, reverse=False):
    """Python-unrolled scan over the leading axis of (possibly nested) inputs.

    Applies fn(carry, input_t) for each step, starting from *start*, and
    returns the stacked per-step carries packed like *start*. With
    reverse=True the scan runs back-to-front and outputs are re-reversed.
    """
    last = start
    outputs = [[] for _ in tf.nest.flatten(start)]
    indices = range(len(tf.nest.flatten(inputs)[0]))
    if reverse:
        indices = reversed(indices)
    for index in indices:
        inp = tf.nest.map_structure(lambda x: x[index], inputs)
        last = fn(last, inp)
        [o.append(l) for o, l in zip(outputs, tf.nest.flatten(last))]
    if reverse:
        outputs = [list(reversed(x)) for x in outputs]
    outputs = [tf.stack(x, 0) for x in outputs]
    return tf.nest.pack_sequence_as(start, outputs)
def _mnd_sample(self, sample_shape=(), seed=None, name='sample'):
    """Replacement sampler for MultivariateNormalDiag: draw directly with
    tf.random.normal from the distribution's mean and (diagonal) stddev."""
    return tf.random.normal(
        tuple(sample_shape) + tuple(self.event_shape),
        self.mean(), self.stddev(), self.dtype, seed, name)


# Monkey-patch tfp's class to use the direct sampler -- presumably for
# speed/simplicity; confirm it matches the stock sampler's semantics.
tfd.MultivariateNormalDiag.sample = _mnd_sample
def _cat_sample(self, sample_shape=(), seed=None, name='sample'):
    """Replacement sampler for Categorical via tf.random.categorical.

    Only supports batched 2-D logits and an empty or length-1 sample_shape;
    with an empty sample_shape the singleton sample axis is squeezed.
    """
    assert len(sample_shape) in (0, 1), sample_shape
    assert len(self.logits_parameter().shape) == 2
    indices = tf.random.categorical(
        self.logits_parameter(), sample_shape[0] if sample_shape else 1,
        self.dtype, seed, name)
    if not sample_shape:
        indices = indices[..., 0]
    return indices


# Monkey-patch tfp's Categorical to use the direct sampler.
tfd.Categorical.sample = _cat_sample
class Every:
    """Callable rate limiter: True on the first call and then at most once
    per *every* steps (based on the step values passed in)."""

    def __init__(self, every):
        self._interval = every
        self._prev = None

    def __call__(self, step):
        if self._prev is None:
            self._prev = step
            return True
        due = step - self._prev >= self._interval
        if due:
            # Advance by exactly one interval so ticks stay on schedule even
            # when steps overshoot.
            self._prev += self._interval
        return due
class Once:
    """Callable that returns True exactly once, then False forever."""

    def __init__(self):
        self._pending = True

    def __call__(self):
        result = self._pending
        self._pending = False
        return result
| StarcoderdataPython |
4832402 | from __future__ import unicode_literals
from builtins import str
import six
@six.python_2_unicode_compatible
class Token:
    """A wrapped string together with a metadata dictionary."""

    def __init__(self, string="", metadata=None):
        self.string = string
        self.metadata = metadata or {}

    def __str__(self):
        return self.string

    def __repr__(self):
        return '<Token "{}">'.format(str(self))

    def update(self, fn):
        """Replace the wrapped string with fn(string, metadata); returns self.

        NOTE: fn must accept two parameters (string, metadata).
        """
        self.string = fn(self.string, self.metadata)
        return self

    def clone(self, fn=None):
        """Return a new Token with fn applied to the string (identity when fn
        is None); the metadata dict is shared, not copied."""
        transform = fn or (lambda s, m: s)
        return Token(transform(self.string, self.metadata), self.metadata)
| StarcoderdataPython |
3265753 | <filename>version_2/demo/software/example/overlay/usr/bin/axil2ipb.py
#!/usr/bin/python
# Open the IPB character device unbuffered for read/write.
f=open("/dev/ipb_0","r+b",0)
import struct
import mmap
import time
# Map the first 0x10 bytes of the device: four 32-bit registers.
regs=mmap.mmap(f.fileno(),0x10,mmap.MAP_SHARED,mmap.ACCESS_WRITE,offset=0x000)
def set_val(mm,pos,val):
    """Store *val* as a little-endian uint32 at 32-bit word index *pos*."""
    mm[pos * 4:(pos + 1) * 4] = struct.pack("<L", val)
def fset_val(mm,pos,val):
    """File-API variant of set_val: seek to word index *pos* and write *val*
    as a little-endian uint32."""
    encoded = struct.pack("<L", val)
    mm.seek(pos * 4, 0)
    mm.write(encoded)
def get_val(mm,pos):
    """Read the little-endian uint32 at 32-bit word index *pos*."""
    word = mm[pos * 4:(pos + 1) * 4]
    return struct.unpack("<L", word)[0]
# Dump the four mapped registers, then cycle register 1 through 0..7 forever
# (one step every half second).
for i in range(0,4):
    print i, hex(get_val(regs,i))
#fset_val(regs,0xc,0x123456)
i=0
while 1:
    set_val(regs,1,i)
    i=(i+1)%8
    time.sleep(0.5)
| StarcoderdataPython |
3347106 | <reponame>py-az-cli/py-az-cli
from ..... pyaz_utils import _call_az
def list(resource_group, workspace_name):
    '''
    List all data export rules for a given workspace.
    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Generated wrapper: locals() must exactly mirror the az CLI option names.
    return _call_az("az monitor log-analytics workspace data-export list", locals())
def show(name, resource_group, workspace_name):
    '''
    Show a data export rule for a given workspace.
    Required Parameters:
    - name -- Name of the data export rule
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Generated wrapper: locals() must exactly mirror the az CLI option names.
    return _call_az("az monitor log-analytics workspace data-export show", locals())
def create(destination, name, resource_group, tables, workspace_name, enable=None):
    '''
    Create a data export rule for a given workspace.
    Required Parameters:
    - destination -- The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If event hub namespace is provided, event hub would be created for each table automatically.
    - name -- Name of the data export rule
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - tables -- An array of tables to export.
    - workspace_name -- Name of the Log Analytics Workspace.
    Optional Parameters:
    - enable -- Enable this data export rule.
    '''
    # Generated wrapper: locals() must exactly mirror the az CLI option names.
    return _call_az("az monitor log-analytics workspace data-export create", locals())
def update(name, resource_group, tables, workspace_name, add=None, destination=None, enable=None, force_string=None, remove=None, set=None):
    '''
    Update a data export rule for a given workspace.
    Required Parameters:
    - name -- Name of the data export rule
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - tables -- An array of tables to export.
    - workspace_name -- Name of the Log Analytics Workspace.
    Optional Parameters:
    - add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
    - destination -- The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If event hub namespace is provided, event hub would be created for each table automatically.
    - enable -- Enable this data export rule.
    - force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
    - remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
    - set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
    '''
    # Generated wrapper: parameter names (including the builtin-shadowing
    # `set`) must exactly mirror the az CLI option names passed via locals().
    return _call_az("az monitor log-analytics workspace data-export update", locals())
def delete(name, resource_group, workspace_name, yes=None):
    '''
    Delete a data export rule for a given workspace.
    Required Parameters:
    - name -- Name of the data export rule
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- Name of the Log Analytics Workspace.
    Optional Parameters:
    - yes -- Do not prompt for confirmation.
    '''
    # locals() forwards all parameters verbatim; parameter names mirror the
    # az CLI flags and must not be renamed.
    return _call_az("az monitor log-analytics workspace data-export delete", locals())
| StarcoderdataPython |
1618381 | import os
import asyncio
import hashlib
import pathlib
import synapse.tests.utils as s_t_utils
import synapse.tools.pullfile as s_pullfile
class TestPullFile(s_t_utils.SynTest):
    """Exercise the pullfile tool against an in-test axon."""
    async def test_pullfile(self):
        """Pull known hashes into an output dir (and into the CWD when -o is
        omitted), and verify a missing hash is reported without failing.
        """
        async with self.getTestAxon() as axon:
            axonurl = axon.getLocalUrl()
            testhash = hashlib.sha256(b'test').hexdigest()
            visihash = hashlib.sha256(b'visi').hexdigest()
            nonehash = hashlib.sha256(b'none').hexdigest()
            testbash = hashlib.sha256(b'test').digest()
            visibash = hashlib.sha256(b'visi').digest()
            self.eq(((4, visibash), (4, testbash)), await axon.puts([b'visi', b'test']))
            with self.getTestDir() as wdir:
                outp = self.getTestOutp()
                self.eq(0, await s_pullfile.main(['-a', axonurl,
                                                 '-o', wdir,
                                                 '-l', testhash,
                                                 '-l', nonehash], outp))
                oldcwd = os.getcwd()
                os.chdir(wdir)
                # Fix: restore the previous working directory even if the
                # tool raises, so one failure cannot poison later tests.
                try:
                    self.eq(0, await s_pullfile.main(['-a', axonurl,
                                                     '-l', visihash], outp))
                finally:
                    os.chdir(oldcwd)
                with open(pathlib.Path(wdir, testhash), 'rb') as fd:
                    self.eq(b'test', fd.read())
                with open(pathlib.Path(wdir, visihash), 'rb') as fd:
                    self.eq(b'visi', fd.read())
                self.true(outp.expect(f'{nonehash} not in axon store'))
                self.true(outp.expect(f'Fetching {testhash} to file'))
                self.true(outp.expect(f'Fetching {visihash} to file'))
| StarcoderdataPython |
1650957 | # Generated by Django 3.0.9 on 2020-10-07 12:35
from django.db import migrations
class Migration(migrations.Migration):
    """Rename CustomerMore.collector to the plural 'collectors'.

    Auto-generated by Django 3.0.9; operations should not be edited by hand.
    """
    dependencies = [
        ('users', '0002_auto_20200923_1557'),
    ]
    operations = [
        migrations.RenameField(
            model_name='customermore',
            old_name='collector',
            new_name='collectors',
        ),
    ]
| StarcoderdataPython |
96506 | <reponame>levinsamuel/rand
from datetime import datetime
from pymongo import MongoClient
from bson.objectid import ObjectId
import pprint
import logging
import json
from people import Person
# Module-level logger: basicConfig installs a default stream handler, and
# DEBUG level makes the query helpers below verbose.
logging.basicConfig()
log = logging.getLogger('mongocl')
log.setLevel(logging.DEBUG)
def client(host='localhost', port=27017):
    """Return a new MongoClient for *host*:*port*.

    The defaults preserve the original hard-coded localhost connection, so
    existing ``client()`` callers are unaffected.
    """
    return MongoClient(host, port)
def post(prsn):
    """Insert the given person into the ``people`` collection.

    The docstring of the original mentions create-or-update, but only an
    insert is performed here.
    """
    with client() as connection:
        # Navigate database -> collection, then store the serialized person.
        people = connection.ppldb.people
        people.insert_one(prsn.to_dict())
# Create a handler for our read (GET) people
def read(id=None):
"""
This function responds to a request for /api/people
with the complete lists of people
:return: sorted list of people
"""
# Create the list of people from our data
with client() as mcl:
# Database
ppldb = mcl.ppldb
# collection (kind of like a table)
pplclxn = ppldb.people
log.debug(pplclxn)
if id is None:
ppl = [Person(p) for p in pplclxn.find()]
log.debug(ppl)
else:
p = pplclxn.find_one({'lname': id})
return Person(p)
return ppl
# return [PEOPLE[key] for key in sorted(PEOPLE.keys())]
| StarcoderdataPython |
145727 | from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.core.files.base import ContentFile
from django.db import transaction
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.generics import (
AddRemoveView, ConfirmView, FormView, SingleObjectCreateView,
SingleObjectDeleteView, SingleObjectDetailView,
SingleObjectDynamicFormCreateView, SingleObjectDynamicFormEditView,
SingleObjectDownloadView, SingleObjectEditView, SingleObjectListView
)
from mayan.apps.common.mixins import ExternalObjectMixin
from mayan.apps.documents.events import event_document_type_edited
from mayan.apps.documents.models import DocumentType
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.events.classes import EventType
from mayan.apps.events.models import StoredEventType
from ..classes import WorkflowAction
from ..events import event_workflow_edited
from ..forms import (
WorkflowActionSelectionForm, WorkflowForm, WorkflowPreviewForm,
WorkflowStateActionDynamicForm, WorkflowStateForm, WorkflowTransitionForm,
WorkflowTransitionTriggerEventRelationshipFormSet
)
from ..icons import (
icon_workflow_list, icon_workflow_state, icon_workflow_state_action,
icon_workflow_transition
)
from ..links import (
link_setup_workflow_create, link_setup_workflow_state_create,
link_setup_workflow_state_action_selection,
link_setup_workflow_transition_create
)
from ..models import (
Workflow, WorkflowState, WorkflowStateAction, WorkflowTransition
)
from ..permissions import (
permission_workflow_create, permission_workflow_delete,
permission_workflow_edit, permission_workflow_tools,
permission_workflow_view,
)
from ..tasks import task_launch_all_workflows
# Public API of this module; mirrors the view classes defined below.
__all__ = (
    'WorkflowImageView', 'WorkflowPreviewView',
    'SetupWorkflowListView', 'SetupWorkflowCreateView', 'SetupWorkflowEditView',
    'SetupWorkflowDeleteView', 'SetupWorkflowDocumentTypesView',
    'SetupWorkflowStateActionCreateView', 'SetupWorkflowStateActionDeleteView',
    'SetupWorkflowStateActionEditView', 'SetupWorkflowStateActionListView',
    'SetupWorkflowStateActionSelectionView', 'SetupWorkflowStateCreateView',
    'SetupWorkflowStateDeleteView', 'SetupWorkflowStateEditView',
    'SetupWorkflowStateListView', 'SetupWorkflowTransitionCreateView',
    'SetupWorkflowTransitionDeleteView', 'SetupWorkflowTransitionEditView',
    'SetupWorkflowTransitionListView',
    'SetupWorkflowTransitionTriggerEventListView', 'ToolLaunchAllWorkflows'
)
class SetupDocumentTypeWorkflowsView(AddRemoveView):
    """Assign or unassign workflows for a single document type."""
    main_object_permission = permission_document_type_edit
    main_object_model = DocumentType
    main_object_pk_url_kwarg = 'pk'
    secondary_object_model = Workflow
    secondary_object_permission = permission_workflow_edit
    list_available_title = _('Available workflows')
    list_added_title = _('Workflows assigned this document type')
    related_field = 'workflows'
    def get_actions_extra_kwargs(self):
        # Forward the acting user to action_add/action_remove for event
        # attribution.
        return {'_user': self.request.user}
    def get_extra_context(self):
        return {
            'object': self.main_object,
            'subtitle': _(
                'Removing a workflow from a document type will also '
                'remove all running instances of that workflow.'
            ),
            'title': _(
                'Workflows assigned the document type: %s'
            ) % self.main_object,
        }
    def action_add(self, queryset, _user):
        # One transaction: a document-type edit event plus one workflow edit
        # event per added workflow.
        with transaction.atomic():
            event_document_type_edited.commit(
                actor=_user, target=self.main_object
            )
            for obj in queryset:
                self.main_object.workflows.add(obj)
                event_workflow_edited.commit(
                    action_object=self.main_object, actor=_user, target=obj
                )
    def action_remove(self, queryset, _user):
        with transaction.atomic():
            event_document_type_edited.commit(
                actor=_user, target=self.main_object
            )
            for obj in queryset:
                self.main_object.workflows.remove(obj)
                event_workflow_edited.commit(
                    action_object=self.main_object, actor=_user,
                    target=obj
                )
                # Also delete running instances of the removed workflow for
                # documents of this document type.
                obj.instances.filter(
                    document__document_type=self.main_object
                ).delete()
class SetupWorkflowListView(SingleObjectListView):
    """List all workflows the user is allowed to view."""
    model = Workflow
    object_permission = permission_workflow_view
    def get_extra_context(self):
        return {
            'hide_object': True,
            'no_results_icon': icon_workflow_list,
            'no_results_main_link': link_setup_workflow_create.resolve(
                context=RequestContext(request=self.request)
            ),
            'no_results_text': _(
                'Workflows store a series of states and keep track of the '
                'current state of a document. Transitions are used to change the '
                'current state to a new one.'
            ),
            'no_results_title': _(
                'No workflows have been defined'
            ),
            'title': _('Workflows'),
        }
class SetupWorkflowCreateView(SingleObjectCreateView):
    """Create a new workflow, attributing the event to the acting user."""
    extra_context = {'title': _('Create workflow')}
    form_class = WorkflowForm
    model = Workflow
    post_action_redirect = reverse_lazy(
        viewname='document_states:setup_workflow_list'
    )
    view_permission = permission_workflow_create
    def get_save_extra_data(self):
        # Passed through to the model's save() for event attribution.
        return {'_user': self.request.user}
class SetupWorkflowDeleteView(SingleObjectDeleteView):
    """Confirm and delete a single workflow."""
    model = Workflow
    object_permission = permission_workflow_delete
    post_action_redirect = reverse_lazy(
        viewname='document_states:setup_workflow_list'
    )
    def get_extra_context(self):
        # NOTE(review): self.object is assumed to be set by the base delete
        # view before context rendering -- confirm against the base class.
        return {
            'title': _(
                'Delete workflow: %s?'
            ) % self.object,
        }
class SetupWorkflowEditView(SingleObjectEditView):
    """Edit a workflow's label and properties."""
    form_class = WorkflowForm
    model = Workflow
    object_permission = permission_workflow_edit
    post_action_redirect = reverse_lazy(
        viewname='document_states:setup_workflow_list'
    )
    def get_extra_context(self):
        return {
            'title': _(
                'Edit workflow: %s'
            ) % self.object,
        }
    def get_save_extra_data(self):
        # Passed through to the model's save() for event attribution.
        return {'_user': self.request.user}
class SetupWorkflowDocumentTypesView(AddRemoveView):
    """Assign or unassign document types for a single workflow.

    Mirror image of SetupDocumentTypeWorkflowsView with the roles of
    Workflow and DocumentType swapped.
    """
    main_object_permission = permission_workflow_edit
    main_object_model = Workflow
    main_object_pk_url_kwarg = 'pk'
    secondary_object_model = DocumentType
    secondary_object_permission = permission_document_type_edit
    list_available_title = _('Available document types')
    list_added_title = _('Document types assigned this workflow')
    related_field = 'document_types'
    def get_actions_extra_kwargs(self):
        # Forward the acting user for event attribution.
        return {'_user': self.request.user}
    def get_extra_context(self):
        return {
            'object': self.main_object,
            'subtitle': _(
                'Removing a document type from a workflow will also '
                'remove all running instances of that workflow for '
                'documents of the document type just removed.'
            ),
            'title': _(
                'Document types assigned the workflow: %s'
            ) % self.main_object,
        }
    def action_add(self, queryset, _user):
        with transaction.atomic():
            event_workflow_edited.commit(
                actor=_user, target=self.main_object
            )
            for obj in queryset:
                self.main_object.document_types.add(obj)
                event_document_type_edited.commit(
                    action_object=self.main_object, actor=_user, target=obj
                )
    def action_remove(self, queryset, _user):
        with transaction.atomic():
            event_workflow_edited.commit(
                actor=_user, target=self.main_object
            )
            for obj in queryset:
                self.main_object.document_types.remove(obj)
                event_document_type_edited.commit(
                    action_object=self.main_object, actor=_user,
                    target=obj
                )
                # Delete this workflow's running instances on documents of
                # the removed document type.
                self.main_object.instances.filter(
                    document__document_type=obj
                ).delete()
# Workflow state actions
class SetupWorkflowStateActionCreateView(SingleObjectDynamicFormCreateView):
    """Create an action of a given class for a workflow state.

    The action class is selected via the 'class_path' URL kwarg and drives
    the dynamic form schema.
    """
    form_class = WorkflowStateActionDynamicForm
    object_permission = permission_workflow_edit
    def get_class(self):
        # Resolve the WorkflowAction subclass named in the URL; unknown
        # class paths become a 404 instead of a server error.
        try:
            return WorkflowAction.get(name=self.kwargs['class_path'])
        except KeyError:
            raise Http404(
                '{} class not found'.format(self.kwargs['class_path'])
            )
    def get_extra_context(self):
        return {
            'navigation_object_list': ('object', 'workflow'),
            'object': self.get_object(),
            'title': _(
                'Create a "%s" workflow action'
            ) % self.get_class().label,
            'workflow': self.get_object().workflow
        }
    def get_form_extra_kwargs(self):
        return {
            'request': self.request,
            'action_path': self.kwargs['class_path']
        }
    def get_form_schema(self):
        # Instantiate the action class to obtain its per-request schema.
        return self.get_class()().get_form_schema(request=self.request)
    def get_instance_extra_data(self):
        return {
            'action_path': self.kwargs['class_path'],
            'state': self.get_object()
        }
    def get_object(self):
        # 'pk' here is the WorkflowState, not the action.
        return get_object_or_404(klass=WorkflowState, pk=self.kwargs['pk'])
    def get_post_action_redirect(self):
        return reverse(
            viewname='document_states:setup_workflow_state_action_list',
            kwargs={'pk': self.get_object().pk}
        )
class SetupWorkflowStateActionDeleteView(SingleObjectDeleteView):
    """Confirm and delete a workflow state action."""
    model = WorkflowStateAction
    object_permission = permission_workflow_edit
    def get_extra_context(self):
        return {
            'navigation_object_list': (
                'object', 'workflow_state', 'workflow'
            ),
            'object': self.get_object(),
            'title': _('Delete workflow state action: %s') % self.get_object(),
            'workflow': self.get_object().state.workflow,
            'workflow_state': self.get_object().state,
        }
    def get_post_action_redirect(self):
        # Back to the action list of the parent state.
        return reverse(
            viewname='document_states:setup_workflow_state_action_list',
            kwargs={'pk': self.get_object().state.pk}
        )
class SetupWorkflowStateActionEditView(SingleObjectDynamicFormEditView):
    """Edit an existing workflow state action via its dynamic form."""
    form_class = WorkflowStateActionDynamicForm
    model = WorkflowStateAction
    object_permission = permission_workflow_edit
    def get_extra_context(self):
        return {
            'navigation_object_list': (
                'object', 'workflow_state', 'workflow'
            ),
            'object': self.get_object(),
            'title': _('Edit workflow state action: %s') % self.get_object(),
            'workflow': self.get_object().state.workflow,
            'workflow_state': self.get_object().state,
        }
    def get_form_extra_kwargs(self):
        return {
            'request': self.request,
            'action_path': self.get_object().action_path,
        }
    def get_form_schema(self):
        # The stored action instance supplies its own per-request schema.
        return self.get_object().get_class_instance().get_form_schema(
            request=self.request
        )
    def get_post_action_redirect(self):
        return reverse(
            viewname='document_states:setup_workflow_state_action_list',
            kwargs={'pk': self.get_object().state.pk}
        )
class SetupWorkflowStateActionListView(SingleObjectListView):
    """List the actions attached to one workflow state."""
    object_permission = permission_workflow_edit
    def get_extra_context(self):
        return {
            'hide_object': True,
            'navigation_object_list': ('object', 'workflow'),
            'no_results_icon': icon_workflow_state_action,
            'no_results_main_link': link_setup_workflow_state_action_selection.resolve(
                context=RequestContext(
                    request=self.request, dict_={
                        'object': self.get_workflow_state()
                    }
                )
            ),
            'no_results_text': _(
                'Workflow state actions are macros that get executed when '
                'documents enters or leaves the state in which they reside.'
            ),
            'no_results_title': _(
                'There are no actions for this workflow state'
            ),
            'object': self.get_workflow_state(),
            'title': _(
                'Actions for workflow state: %s'
            ) % self.get_workflow_state(),
            'workflow': self.get_workflow_state().workflow,
        }
    def get_form_schema(self):
        # NOTE(review): this view defines no get_class(); calling this would
        # raise AttributeError.  Looks like a copy/paste leftover from the
        # create view -- confirm it is unused before removing.
        return {'fields': self.get_class().fields}
    def get_source_queryset(self):
        return self.get_workflow_state().actions.all()
    def get_workflow_state(self):
        return get_object_or_404(klass=WorkflowState, pk=self.kwargs['pk'])
class SetupWorkflowStateActionSelectionView(FormView):
    """Pick an action class, then redirect to the creation view for it."""
    form_class = WorkflowActionSelectionForm
    view_permission = permission_workflow_edit
    def form_valid(self, form):
        klass = form.cleaned_data['klass']
        # Chain into the dynamic-form create view with the chosen class.
        return HttpResponseRedirect(
            redirect_to=reverse(
                viewname='document_states:setup_workflow_state_action_create',
                kwargs={'pk': self.get_object().pk, 'class_path': klass}
            )
        )
    def get_extra_context(self):
        return {
            'navigation_object_list': (
                'object', 'workflow'
            ),
            'object': self.get_object(),
            'title': _('New workflow state action selection'),
            'workflow': self.get_object().workflow,
        }
    def get_object(self):
        # 'pk' refers to the WorkflowState the new action will belong to.
        return get_object_or_404(klass=WorkflowState, pk=self.kwargs['pk'])
# Workflow states
class SetupWorkflowStateCreateView(ExternalObjectMixin, SingleObjectCreateView):
    """Create a state inside the workflow referenced by the URL's pk."""
    external_object_class = Workflow
    external_object_permission = permission_workflow_edit
    external_object_pk_url_kwarg = 'pk'
    form_class = WorkflowStateForm
    def get_extra_context(self):
        return {
            'object': self.get_workflow(),
            'title': _(
                'Create states for workflow: %s'
            ) % self.get_workflow()
        }
    def get_instance_extra_data(self):
        # Bind the new state to its parent workflow.
        return {'workflow': self.get_workflow()}
    def get_source_queryset(self):
        return self.get_workflow().states.all()
    def get_success_url(self):
        return reverse(
            viewname='document_states:setup_workflow_state_list',
            kwargs={'pk': self.kwargs['pk']}
        )
    def get_workflow(self):
        return self.external_object
class SetupWorkflowStateDeleteView(SingleObjectDeleteView):
    """Confirm and delete a workflow state."""
    model = WorkflowState
    object_permission = permission_workflow_edit
    pk_url_kwarg = 'pk'
    def get_extra_context(self):
        return {
            'navigation_object_list': ('object', 'workflow_instance'),
            'object': self.get_object(),
            'title': _(
                'Delete workflow state: %s?'
            ) % self.object,
            'workflow_instance': self.get_object().workflow,
        }
    def get_success_url(self):
        # Return to the state list of the parent workflow.
        return reverse(
            viewname='document_states:setup_workflow_state_list',
            kwargs={'pk': self.get_object().workflow.pk}
        )
class SetupWorkflowStateEditView(SingleObjectEditView):
    """Edit a single workflow state."""
    form_class = WorkflowStateForm
    model = WorkflowState
    object_permission = permission_workflow_edit
    pk_url_kwarg = 'pk'
    def get_extra_context(self):
        return {
            'navigation_object_list': ('object', 'workflow_instance'),
            'object': self.get_object(),
            'title': _(
                'Edit workflow state: %s'
            ) % self.object,
            'workflow_instance': self.get_object().workflow,
        }
    def get_success_url(self):
        return reverse(
            viewname='document_states:setup_workflow_state_list',
            kwargs={'pk': self.get_object().workflow.pk}
        )
class SetupWorkflowStateListView(ExternalObjectMixin, SingleObjectListView):
    """List the states of one workflow."""
    external_object_class = Workflow
    external_object_permission = permission_workflow_view
    external_object_pk_url_kwarg = 'pk'
    object_permission = permission_workflow_view
    def get_extra_context(self):
        return {
            'hide_object': True,
            'no_results_icon': icon_workflow_state,
            'no_results_main_link': link_setup_workflow_state_create.resolve(
                context=RequestContext(
                    self.request, {'object': self.get_workflow()}
                )
            ),
            'no_results_text': _(
                'Create states and link them using transitions.'
            ),
            'no_results_title': _(
                'This workflow doesn\'t have any states'
            ),
            'object': self.get_workflow(),
            'title': _('States of workflow: %s') % self.get_workflow()
        }
    def get_source_queryset(self):
        return self.get_workflow().states.all()
    def get_workflow(self):
        return self.external_object
# Transitions
class SetupWorkflowTransitionCreateView(ExternalObjectMixin, SingleObjectCreateView):
    """Create a transition inside the workflow referenced by the URL's pk."""
    external_object_class = Workflow
    external_object_permission = permission_workflow_edit
    external_object_pk_url_kwarg = 'pk'
    form_class = WorkflowTransitionForm
    def get_extra_context(self):
        return {
            'object': self.get_workflow(),
            'title': _(
                'Create transitions for workflow: %s'
            ) % self.get_workflow()
        }
    def get_form_kwargs(self):
        # The form needs the workflow to limit the state choices.
        kwargs = super(
            SetupWorkflowTransitionCreateView, self
        ).get_form_kwargs()
        kwargs['workflow'] = self.get_workflow()
        return kwargs
    def get_instance_extra_data(self):
        return {'workflow': self.get_workflow()}
    def get_source_queryset(self):
        return self.get_workflow().transitions.all()
    def get_success_url(self):
        return reverse(
            viewname='document_states:setup_workflow_transition_list',
            kwargs={'pk': self.kwargs['pk']}
        )
    def get_workflow(self):
        return self.external_object
class SetupWorkflowTransitionDeleteView(SingleObjectDeleteView):
    """Confirm and delete a workflow transition."""
    model = WorkflowTransition
    object_permission = permission_workflow_edit
    pk_url_kwarg = 'pk'
    def get_extra_context(self):
        return {
            'object': self.get_object(),
            'navigation_object_list': ('object', 'workflow_instance'),
            'title': _(
                'Delete workflow transition: %s?'
            ) % self.object,
            'workflow_instance': self.get_object().workflow,
        }
    def get_success_url(self):
        return reverse(
            viewname='document_states:setup_workflow_transition_list',
            kwargs={'pk': self.get_object().workflow.pk}
        )
class SetupWorkflowTransitionEditView(SingleObjectEditView):
    """Edit a workflow transition."""
    form_class = WorkflowTransitionForm
    model = WorkflowTransition
    object_permission = permission_workflow_edit
    pk_url_kwarg = 'pk'
    def get_extra_context(self):
        return {
            'navigation_object_list': ('object', 'workflow_instance'),
            'object': self.get_object(),
            'title': _(
                'Edit workflow transition: %s'
            ) % self.object,
            'workflow_instance': self.get_object().workflow,
        }
    def get_form_kwargs(self):
        # The form needs the workflow to limit the state choices.
        kwargs = super(
            SetupWorkflowTransitionEditView, self
        ).get_form_kwargs()
        kwargs['workflow'] = self.get_object().workflow
        return kwargs
    def get_success_url(self):
        return reverse(
            viewname='document_states:setup_workflow_transition_list',
            kwargs={'pk': self.get_object().workflow.pk}
        )
class SetupWorkflowTransitionListView(ExternalObjectMixin, SingleObjectListView):
    """List the transitions of one workflow."""
    external_object_class = Workflow
    external_object_permission = permission_workflow_view
    external_object_pk_url_kwarg = 'pk'
    object_permission = permission_workflow_view
    def get_extra_context(self):
        return {
            'hide_object': True,
            'no_results_icon': icon_workflow_transition,
            'no_results_main_link': link_setup_workflow_transition_create.resolve(
                context=RequestContext(
                    self.request, {'object': self.get_workflow()}
                )
            ),
            # Fix: the adjacent literals previously concatenated to
            # 'from  one' (double space in the user-visible message).
            'no_results_text': _(
                'Create a transition and use it to move a workflow from '
                'one state to another.'
            ),
            'no_results_title': _(
                'This workflow doesn\'t have any transitions'
            ),
            'object': self.get_workflow(),
            'title': _(
                'Transitions of workflow: %s'
            ) % self.get_workflow()
        }
    def get_source_queryset(self):
        return self.get_workflow().transitions.all()
    def get_workflow(self):
        return self.external_object
class SetupWorkflowTransitionTriggerEventListView(ExternalObjectMixin, FormView):
    """Manage which events automatically trigger one workflow transition."""
    external_object_class = WorkflowTransition
    external_object_permission = permission_workflow_edit
    external_object_pk_url_kwarg = 'pk'
    form_class = WorkflowTransitionTriggerEventRelationshipFormSet
    def dispatch(self, *args, **kwargs):
        # Ensure the event type registry is materialized before building the
        # formset.
        EventType.refresh()
        return super(
            SetupWorkflowTransitionTriggerEventListView, self
        ).dispatch(*args, **kwargs)
    def form_valid(self, form):
        # Save each relationship row; report success or the first failure
        # via the messages framework.
        try:
            for instance in form:
                instance.save()
        except Exception as exception:
            messages.error(
                message=_(
                    'Error updating workflow transition trigger events; %s'
                ) % exception, request=self.request
            )
        else:
            messages.success(
                message=_(
                    'Workflow transition trigger events updated successfully'
                ), request=self.request
            )
        return super(
            SetupWorkflowTransitionTriggerEventListView, self
        ).form_valid(form=form)
    def get_extra_context(self):
        return {
            'form_display_mode_table': True,
            'navigation_object_list': ('object', 'workflow'),
            'object': self.get_object(),
            'subtitle': _(
                'Triggers are events that cause this transition to execute '
                'automatically.'
            ),
            'title': _(
                'Workflow transition trigger events for: %s'
            ) % self.get_object(),
            'workflow': self.get_object().workflow,
        }
    def get_initial(self):
        # One formset row per known event type, sorted for display.
        obj = self.get_object()
        initial = []
        # Return the queryset by name from the sorted list of the class
        event_type_ids = [event_type.id for event_type in EventType.all()]
        event_type_queryset = StoredEventType.objects.filter(
            name__in=event_type_ids
        )
        # Sort queryset in Python by namespace, then by label
        event_type_queryset = sorted(
            event_type_queryset, key=lambda x: (x.namespace, x.label)
        )
        for event_type in event_type_queryset:
            initial.append({
                'transition': obj,
                'event_type': event_type,
            })
        return initial
    def get_object(self):
        return self.external_object
    def get_post_action_redirect(self):
        return reverse(
            viewname='document_states:setup_workflow_transition_list',
            kwargs={'pk': self.get_object().workflow.pk}
        )
class ToolLaunchAllWorkflows(ConfirmView):
    """Confirm, then queue the background task that launches all workflows."""
    extra_context = {
        'title': _('Launch all workflows?'),
        'subtitle': _(
            'This will launch all workflows created after documents have '
            'already been uploaded.'
        )
    }
    view_permission = permission_workflow_tools
    def view_action(self):
        # Fire-and-forget: the actual work happens in the task queue.
        task_launch_all_workflows.apply_async()
        messages.success(
            message=_('Workflow launch queued successfully.'),
            request=self.request
        )
class WorkflowImageView(SingleObjectDownloadView):
    """Serve the rendered diagram of a workflow inline (not as attachment)."""
    attachment = False
    model = Workflow
    object_permission = permission_workflow_view
    def get_file(self):
        workflow = self.get_object()
        # render() output is wrapped in a ContentFile named after the label.
        return ContentFile(workflow.render(), name=workflow.label)
    def get_mimetype(self):
        # NOTE(review): bare 'image' is not a full MIME type (e.g.
        # 'image/png') -- confirm what the download view expects.
        return 'image'
class WorkflowPreviewView(SingleObjectDetailView):
    """Show a read-only preview form of one workflow."""
    form_class = WorkflowPreviewForm
    model = Workflow
    object_permission = permission_workflow_view
    def get_extra_context(self):
        return {
            'hide_labels': True,
            'title': _('Preview of: %s') % self.get_object()
        }
| StarcoderdataPython |
1623161 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import json
import sys
import os
import time
import tempfile
import re
import datetime
# Python 2 idiom: reload(sys) was paired with sys.setdefaultencoding
# (commented out below) to force a default encoding; under Python 3 reload
# lives in importlib and setdefaultencoding no longer exists.
reload(sys)
# sys.setdefaultencoding("utf-8")
# url = 'localhost'
# header = {"Accept": " application/json", "Content-Type": " application/json"}
# request = urllib2.Request(url, headers=header)
# response = urllib2.urlopen(request).read()
# temp = json.loads(response)['data']
# record = {}
# for dic in temp:
# if dic['name'] == "闵行":
# record = dic
# invalid_keys = ['sitenumber', 'rain', 'visibility']
# for key in invalid_keys:
# del record[key]
# print json.dumps(record, ensure_ascii=False)
# basedir = os.path.split(os.path.realpath(__file__))[0]
# def delete_file_folder(src):
# if os.path.isfile(src):
# try:
# os.remove(src)
# except:
# pass
# if os.path.isdir(src):
# for item in os.listdir(src):
# itemsrc = os.path.join(src, item)
# print itemsrc
# delete_file_folder(itemsrc)
# try:
# os.rmdir(src)
# except:
# pass
# l = ['阴', '多云']
# pic_str = '阴有的雨转多云多云有的雨'
# pic_buf = []
# reg = r'有.*?雨'
# pattern = re.compile(reg)
# special = re.findall(pattern, pic_str)
# temp = []
# print len(special)
# for a in special:
# temp.append((a.decode('utf8')[0]+ a.decode('utf8')[-1]).encode('utf8'))
# for n in temp:
# print n
# for f in l:
# if pic_str.find(f) != -1:
# pic_buf.append(f)
# for ele in pic_buf:
# if ele == '有雨':
# pic_buf[pic_buf.index(ele)] = '小雨'
# for ele in pic_buf:
# print ele
# s = "2016-01-08T17:50:00.000+08:00"
# pattern_d = re.compile('\d{4}-\d{2}-\d{2}.*?')
# pattern_h = re.compile('\d{2}:\d{2}:\d{2}.*?')
# d_temp = re.findall(pattern_d, s)[0]
# h_temp = re.findall(pattern_h, s)[0]
# print d_temp + ' ' + h_temp
# Scratch demo: rebinding buf to a fresh dict before the second append is
# what keeps the two list entries independent (appending the same dict
# twice would alias them).
buf = {}
L = []
buf['test'] = 3
L.append(buf)
print L
buf = {}
buf['test'] = 5
L.append(buf)
# def f4(seq):
# order preserving
# noDupes = []
# [noDupes.append(i) for i in seq if not noDupes.count(i)]
# return noDupes
# print f4(L)
# l = [1 ,2 ,3 ,3, 4 , 5,]
# print l.index(3)
# def strTotsp(arg):
# return int(time.mktime(time.strptime(arg, '%Y-%m-%d %H:%M:%S')))
# base_time = "2016-03-03 12:43:15"
# a = datetime.datetime.strptime(
# base_time, "%Y-%m-%d %H:%M:%S")
# d2 = a - datetime.timedelta(days=1)
# print strTotsp(str(d2))
# List = [1, 4, 5, 6, 5, 7]
# print List.index(5)
def DataCheck(data):
    """Return True when *data* is an int or a non-empty str.

    Note the asymmetry: every int passes (0 != '' is True), while only the
    empty string is rejected for str input.  The CamelCase name is kept
    as-is for existing callers.
    """
    # Direct boolean return replaces the verbose if/return True/return False.
    return isinstance(data, (str, int)) and data != ''
# Drive DataCheck over each value of a sample record (Python 2 prints).
data = {'datatime': '2010', 'tempe': '25', 'id': 34}
for key in data.keys():
    print DataCheck(data[key])
List = [1, 2, 3, 4]
try:
    List.index(5)
except:
    # NOTE(review): bare except hides everything; ValueError is what
    # list.index raises for a missing element.
    print 'out'
| StarcoderdataPython |
156902 | #!/usr/bin/env python3
import unittest
import torch
from torch.distributions import Distribution
from Lgpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from Lgpytorch.likelihoods import SoftmaxLikelihood
from Lgpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
class TestSoftmaxLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
    """Shape/contract tests for SoftmaxLikelihood (6 features, 4 classes)."""
    seed = 0
    def _create_conditional_input(self, batch_shape=torch.Size([])):
        # (..., 5 data points, 6 latent features)
        return torch.randn(*batch_shape, 5, 6)
    def _create_marginal_input(self, batch_shape=torch.Size([])):
        mat = torch.randn(*batch_shape, 6, 5, 5)
        # mat @ mat^T yields a positive semi-definite covariance per task.
        return MultitaskMultivariateNormal.from_batch_mvn(
            MultivariateNormal(torch.randn(*batch_shape, 6, 5), mat @ mat.transpose(-1, -2))
        )
    def _create_targets(self, batch_shape=torch.Size([])):
        # Uniform draw over the 4 class labels for 5 data points.
        return torch.distributions.Categorical(probs=torch.tensor([0.25, 0.25, 0.25, 0.25])).sample(
            torch.Size([*batch_shape, 5])
        )
    def create_likelihood(self):
        return SoftmaxLikelihood(num_features=6, num_classes=4)
    def _test_conditional(self, batch_shape):
        likelihood = self.create_likelihood()
        input = self._create_conditional_input(batch_shape)
        output = likelihood(input)
        self.assertIsInstance(output, Distribution)
        self.assertEqual(output.sample().shape, torch.Size([*batch_shape, 5]))
    def _test_log_prob(self, batch_shape):
        likelihood = self.create_likelihood()
        input = self._create_marginal_input(batch_shape)
        target = self._create_targets(batch_shape)
        output = likelihood.expected_log_prob(target, input)
        self.assertTrue(torch.is_tensor(output))
        self.assertEqual(output.shape, batch_shape + torch.Size([5]))
    def _test_marginal(self, batch_shape):
        likelihood = self.create_likelihood()
        input = self._create_marginal_input(batch_shape)
        output = likelihood(input)
        self.assertTrue(isinstance(output, Distribution))
        # Only the trailing (batch..., data) dims are checked; leading
        # sample dims are ignored.
        self.assertEqual(output.sample().shape[-len(batch_shape) - 1 :], torch.Size([*batch_shape, 5]))
class TestSoftmaxLikelihoodNoMixing(TestSoftmaxLikelihood):
    """Same suite with mixing weights disabled (features must equal classes)."""
    seed = 0
    def create_likelihood(self):
        return SoftmaxLikelihood(num_features=6, num_classes=6, mixing_weights=False)
| StarcoderdataPython |
68997 | import pandas as pd
def preprocess(train_path="datasets/adult/adult.data",
               test_path="datasets/adult/adult.test"):
    """Load the UCI Adult dataset and one-hot encode its categorical columns.

    Continuous columns are dropped; categorical features are one-hot encoded
    jointly across train and test so both splits share one column layout.
    Path parameters default to the original hard-coded locations, so the
    existing ``preprocess()`` call is unchanged.

    Returns:
        ((X_train, y_train), (X_test, y_test)) as numpy arrays, labels
        mapped to 0 (<=50K) / 1 (>50K).
    """
    names = ('age', 'workclass', 'fnlwgt', 'education', 'education-num',
             'marital-status', 'occupation', 'relationship', 'race', 'sex',
             'capital-gain', 'capital-loss', 'hours-per-week',
             'native-country', '>50K')
    # engine='python' is required by the multi-character ', ' separator;
    # pandas would otherwise warn and fall back to it anyway.
    train_data = pd.read_csv(train_path, sep=', ', header=None, names=names,
                             na_values='?', engine='python')
    # The test file has a junk first line (skiprows=1) and its labels carry
    # a trailing period ('<=50K.' / '>50K.').
    test_data = pd.read_csv(test_path, sep=', ', header=None, names=names,
                            na_values='?', skiprows=1, engine='python')
    cont_cols = ['age', 'fnlwgt', 'education-num', 'capital-gain',
                 'capital-loss', 'hours-per-week']
    label_col = ['>50K']
    cato_train_data = train_data.drop(cont_cols, axis=1)
    X_train, y_train = cato_train_data.drop(label_col, axis=1), cato_train_data[label_col]
    cato_test_data = test_data.drop(cont_cols, axis=1)
    X_test, y_test = cato_test_data.drop(label_col, axis=1), cato_test_data[label_col]
    # Encode train+test together so categories present in only one split
    # still produce identical dummy columns for both.
    X_total = pd.concat([X_train, X_test], axis=0)
    N = X_train.shape[0]
    OH_total = pd.get_dummies(X_total)
    # Explicit positional split (concat leaves a non-unique index).
    new_X_train = OH_total.iloc[:N].to_numpy()
    new_X_test = OH_total.iloc[N:].to_numpy()
    new_y_train = y_train['>50K'].map({'<=50K': 0, '>50K': 1}).astype(int).to_numpy()
    new_y_test = y_test['>50K'].map({'<=50K.': 0, '>50K.': 1}).astype(int).to_numpy()
    return (new_X_train, new_y_train), (new_X_test, new_y_test)
3319027 | from datetime import datetime
import offchain
from offchain import FundPullPreApprovalStatus
from flask import Response
from flask.testing import Client
from tests.wallet_tests.resources.seeds.one_funds_pull_pre_approval import TIMESTAMP
from wallet.services.offchain import (
offchain as offchain_service,
fund_pull_pre_approval as fppa_service,
)
# Fixture constants: pre-approval ids are arbitrary UUID strings; addresses
# are bech32-style testnet ('tdm1...') account addresses used by the tests.
FUNDS_PULL_PRE_APPROVAL_ID = "28992c81-e85a-4771-995a-af1d22bcaf63"
FUNDS_PULL_PRE_APPROVAL_ID_2 = "e1f7f846-f9e6-46f9-b184-c949f8d6b197"
BILLER_ADDRESS = "tdm1pzmhcxpnyns7m035ctdqmexxad8ptgazxhllvyscesqdgp"
BILLER_ADDRESS_2 = "tdm1pvjua68j72mhmp3n7jkuthmxlkj0g57gkpegq6qgkjfxwc"
ADDRESS = "tdm1pwm5m35ayknjr0s67pk9xdf5mwp3nwq6ef67s55gpjwrqf"
ADDRESS_2 = "tdm1pztdjx2z8wp0q25jakqeklk0nxj2wmk2kg9whu8c3fdm9u"
CURRENCY = "XUS"
def invent_preapproval(description):
    """Build a pending FPPAObject fixture with the given description.

    All other fields are fixed test values (monthly cumulative cap and a
    per-transaction cap of 111222333 XUS); timestamps are taken at call
    time.
    """
    return fppa_service.FPPAObject(
        my_actor_address=ADDRESS,
        funds_pull_pre_approval=offchain.FundPullPreApprovalObject(
            funds_pull_pre_approval_id=f"{BILLER_ADDRESS}_123",
            address=ADDRESS,
            biller_address=BILLER_ADDRESS,
            scope=offchain.FundPullPreApprovalScopeObject(
                type=offchain.FundPullPreApprovalType.consent,
                expiration_timestamp=TIMESTAMP,
                max_cumulative_amount=offchain.ScopedCumulativeAmountObject(
                    unit=offchain.TimeUnit.month,
                    value=1,
                    max_amount=offchain.CurrencyObject(
                        amount=111222333,
                        currency="XUS",
                    ),
                ),
                max_transaction_amount=offchain.CurrencyObject(
                    amount=111222333,
                    currency="XUS",
                ),
            ),
            status=FundPullPreApprovalStatus.pending,
            description=description,
        ),
        biller_name="Bond",
        created_timestamp=datetime.utcnow(),
        updated_at=datetime.utcnow(),
        inbound=False,
    )
class TestGetFundsPullPreApprovals:
    """GET /offchain/funds_pull_pre_approvals endpoint tests."""
    def test_get_funds_pull_pre_approvals(
        self, authorized_client: Client, mock_method
    ) -> None:
        """The endpoint returns the service's pre-approvals for the
        authenticated account, serialized in order.
        """
        expected_len = 3
        expected_preapprovals = [
            invent_preapproval(str(i)) for i in range(expected_len)
        ]
        # Fix: removed leftover debug print() calls.
        calls = mock_method(
            fppa_service, "get_funds_pull_pre_approvals", expected_preapprovals
        )
        rv: Response = authorized_client.get(
            "/offchain/funds_pull_pre_approvals",
        )
        assert rv.status_code == 200
        assert rv.get_data() is not None
        # The service must be queried exactly once.
        assert len(calls) == 1
        assert calls[0][0] == 1  # account_id
        funds_pull_pre_approvals = rv.get_json()["funds_pull_pre_approvals"]
        assert funds_pull_pre_approvals is not None
        assert len(funds_pull_pre_approvals) == expected_len
        for i in range(expected_len):
            # Each fixture's description round-trips through the API.
            assert funds_pull_pre_approvals[i]["description"] == str(i)
class TestUpdateFundPullPreApprovalStatus:
    """Tests for PUT /offchain/funds_pull_pre_approvals/<id>.

    The cases differ only in the requested status and the service method
    that must be invoked, so the request itself is factored into a single
    helper instead of being duplicated four times.
    """

    @staticmethod
    def _put_status(client: Client, status) -> Response:
        """Send a status-update request for FUNDS_PULL_PRE_APPROVAL_ID."""
        return client.put(
            f"/offchain/funds_pull_pre_approvals/{FUNDS_PULL_PRE_APPROVAL_ID}",
            json={
                "funds_pull_pre_approval_id": "1234",
                "status": status,
            },
        )

    def test_approve(self, authorized_client: Client, mock_method):
        calls = mock_method(fppa_service, "approve", will_return=None)
        rv = self._put_status(authorized_client, FundPullPreApprovalStatus.valid)
        assert rv.status_code == 204
        assert len(calls) == 1
        assert calls[0][0] == FUNDS_PULL_PRE_APPROVAL_ID

    def test_reject(self, authorized_client: Client, mock_method):
        calls = mock_method(fppa_service, "reject", will_return=None)
        rv = self._put_status(authorized_client, FundPullPreApprovalStatus.rejected)
        assert rv.status_code == 204
        assert len(calls) == 1
        assert calls[0][0] == FUNDS_PULL_PRE_APPROVAL_ID

    def test_close(self, authorized_client: Client, mock_method):
        calls = mock_method(fppa_service, "close", will_return=None)
        rv = self._put_status(authorized_client, FundPullPreApprovalStatus.closed)
        assert rv.status_code == 204
        assert len(calls) == 1
        assert calls[0][0] == FUNDS_PULL_PRE_APPROVAL_ID

    def test_failure(self, authorized_client: Client, mock_method):
        """An unknown pre-approval command maps to HTTP 404."""
        calls = mock_method(
            fppa_service,
            "approve",
            will_raise=fppa_service.FundsPullPreApprovalCommandNotFound,
        )
        rv = self._put_status(authorized_client, FundPullPreApprovalStatus.valid)
        assert rv.status_code == 404
        assert len(calls) == 1
        assert calls[0][0] == FUNDS_PULL_PRE_APPROVAL_ID
class TestCreateAndApprove:
    """Test for POST /offchain/funds_pull_pre_approvals."""

    def test_success(self, authorized_client: Client, mock_method):
        """Every field of the request body must be forwarded to the
        create_and_approve service call, with no extra arguments."""
        scope = {
            "type": "consent",
            "expiration_timestamp": TIMESTAMP,
            "max_cumulative_amount": {
                "unit": "week",
                "value": 1,
                "max_amount": {
                    "amount": 100,
                    "currency": CURRENCY,
                },
            },
            "max_transaction_amount": {
                "amount": 10,
                "currency": CURRENCY,
            },
        }
        request_body = {
            "biller_address": BILLER_ADDRESS,
            "funds_pull_pre_approval_id": FUNDS_PULL_PRE_APPROVAL_ID,
            "scope": scope,
            "description": "bla la la",
        }
        calls_to_create_and_approve = mock_method(fppa_service, "create_and_approve")
        rv: Response = authorized_client.post(
            "/offchain/funds_pull_pre_approvals",
            json=request_body,
        )
        assert rv.status_code == 200
        assert len(calls_to_create_and_approve) == 1
        call = calls_to_create_and_approve[0]
        cumulative = scope["max_cumulative_amount"]
        transaction = scope["max_transaction_amount"]
        expected_kwargs = {
            "account_id": 1,
            "biller_address": request_body["biller_address"],
            "funds_pull_pre_approval_id": request_body["funds_pull_pre_approval_id"],
            "funds_pull_pre_approval_type": scope["type"],
            "expiration_timestamp": scope["expiration_timestamp"],
            "max_cumulative_unit": cumulative["unit"],
            "max_cumulative_unit_value": cumulative["value"],
            "max_cumulative_amount": cumulative["max_amount"]["amount"],
            "max_cumulative_amount_currency": cumulative["max_amount"]["currency"],
            "max_transaction_amount": transaction["amount"],
            "max_transaction_amount_currency": transaction["currency"],
            "description": request_body["description"],
        }
        for name, expected in expected_kwargs.items():
            assert call.pop(name) == expected
        # Are there unexpected arguments?
        assert len(call) == 0
class TestOffchainV2View:
    """Test for POST /offchain/v2/command."""

    def test_success(self, authorized_client: Client, monkeypatch):
        """The view forwards the sender-address header and the raw request
        body to the offchain service and relays its response verbatim."""
        x_request_id = "f7ed63c3-eab9-4bd5-8094-497ba626e564"
        response_data = b"bond"

        def fake_process_inbound_command(sender_address, request_body):
            assert sender_address == ADDRESS
            assert request_body == b'{"dog": "gurki"}'
            return 200, response_data

        monkeypatch.setattr(
            offchain_service, "process_inbound_command", fake_process_inbound_command
        )
        rv: Response = authorized_client.post(
            "/offchain/v2/command",
            json={"dog": "gurki"},
            headers={"X-REQUEST-ID": x_request_id, "X-REQUEST-SENDER-ADDRESS": ADDRESS},
        )
        assert rv.status_code == 200
        assert rv.data == response_data
| StarcoderdataPython |
112860 | from typing import Optional, Union
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2_as_graph,
)
from tensorflow.keras import Sequential, Model
import keras_flops.flops_registory
def get_flops(model: Union[Model, Sequential], batch_size: Optional[int] = None) -> int:
    """
    Calculate FLOPS for tf.keras.Model or tf.keras.Sequential .
    Ignore operations used in only training mode such as Initialization.
    Use tf.profiler of tensorflow v1 api.

    Args:
        model: the Keras model to profile.
        batch_size: batch size assumed for the input specs; defaults to 1.

    Returns:
        Total number of float operations for one forward pass.

    Raises:
        KeyError: if ``model`` is not a Keras model.  (Kept as KeyError for
            backward compatibility, although TypeError would be more apt.)
    """
    if not isinstance(model, (Sequential, Model)):
        # Fix: corrected typo/grammar in the error message
        # ("arguments ... instanse" -> "argument ... instance").
        raise KeyError(
            "model argument must be a tf.keras.Model or tf.keras.Sequential instance"
        )
    if batch_size is None:
        batch_size = 1
    # Convert the model into a frozen graph so that only inference-time
    # operations are counted; the reported FLOPS scales with batch size.
    inputs = [
        tf.TensorSpec([batch_size] + inp.shape[1:], inp.dtype) for inp in model.inputs
    ]
    real_model = tf.function(model).get_concrete_function(inputs)
    frozen_func, _ = convert_variables_to_constants_v2_as_graph(real_model)
    # Calculate FLOPS with the tensorflow v1 profiler API.
    run_meta = tf.compat.v1.RunMetadata()
    opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.compat.v1.profiler.profile(
        graph=frozen_func.graph, run_meta=run_meta, cmd="scope", options=opts
    )
    # TODO: report a per-operation FLOPS breakdown as well.
    return flops.total_float_ops
| StarcoderdataPython |
3399005 | <gh_stars>1-10
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from PIL import Image
from unitorch.models.vit import ViTProcessor as _ViTProcessor
from unitorch.cli import cached_path
from unitorch.cli import (
add_default_section_for_init,
add_default_section_for_function,
register_process,
)
from unitorch.cli.models import (
BaseInputs,
BaseOutputs,
BaseTargets,
GenerationOutputs,
GenerationTargets,
)
from unitorch.cli.models.vit import pretrained_vit_infos
class ViTProcessor(_ViTProcessor):
    """CLI-facing wrapper around the core ViT processor."""

    def __init__(
        self,
        vision_config_path: str,
    ):
        super().__init__(
            vision_config_path=vision_config_path,
        )

    @classmethod
    @add_default_section_for_init("core/process/vit")
    def from_core_configure(cls, config, **kwargs):
        """Resolve the vision config path from the CLI configuration."""
        config.set_default_section("core/process/vit")
        pretrained_name = config.getoption("pretrained_name", "default-vit")
        name_or_path = config.getoption("vision_config_path", pretrained_name)
        # Known pretrained names are resolved through the registry; anything
        # else is treated as a direct path/URL to a config file.
        if name_or_path in pretrained_vit_infos:
            vision_config_path = pretrained_vit_infos[name_or_path]["vision_config"]
        else:
            vision_config_path = name_or_path
        return {
            "vision_config_path": cached_path(vision_config_path),
        }

    @register_process("core/process/vit_image_classification")
    def _processing_image_classifictaion(
        self,
        image: Union[Image.Image, str],
    ):
        """Accept a PIL image or a file path and produce model inputs.

        (The method name mirrors the misspelled parent-class method and is
        kept for interface compatibility.)
        """
        if isinstance(image, str):
            image = Image.open(image)
        processed = super().processing_image_classifictaion(image=image)
        return BaseInputs(pixel_values=processed.image)
| StarcoderdataPython |
3232212 | <reponame>noklam/blog<filename>_demo/leetcode/617.merge-two-binary-trees.py
#
# @lc app=leetcode id=617 lang=python3
#
# [617] Merge Two Binary Trees
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def mergeTrees(
        self, root1: Optional[TreeNode], root2: Optional[TreeNode]
    ) -> Optional[TreeNode]:
        """Merge two binary trees by summing overlapping node values.

        Overlapping nodes are summed in place into ``root1``; where only one
        tree has a subtree, that subtree is grafted directly (no copies).
        Iterative BFS; O(min(n1, n2)) time.

        Fixes: ``== None`` comparisons replaced with ``is None`` (PEP 8),
        the FIFO misleadingly named ``stack`` renamed to ``queue``, and the
        convoluted empty-root guard rewritten equivalently but clearly.
        """
        from collections import deque

        # If either root is missing there is nothing to merge: return
        # whichever tree exists (None when both are empty).
        if root1 is None or root2 is None:
            return root1 or root2
        queue = deque()
        queue.append((root1, root2))
        while queue:
            node1, node2 = queue.popleft()
            # Pairs with a missing partner were only enqueued for traversal
            # bookkeeping; nothing to merge there.
            if node1 is None or node2 is None:
                continue
            node1.val += node2.val
            if node1.left is None:
                # Only root2 has this subtree: attach it to root1 directly.
                node1.left = node2.left
            else:
                queue.append((node1.left, node2.left))
            if node1.right is None:
                node1.right = node2.right
            else:
                queue.append((node1.right, node2.right))
        return root1  # @lc code=end
| StarcoderdataPython |
3280600 | import numpy as np
from numpy import linalg as la
import matplotlib.pyplot as plt
import matplotlib.axes as ax
#matplotlib inline
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
from psyneulink.core.components.functions.learningfunctions import BackPropagation
# Vocabulary for the Rumelhart semantic-network task: 8 items, 3 relation
# types, and the attribute vocabulary associated with each relation.
nouns = ['oak', 'pine', 'rose', 'daisy', 'canary', 'robin', 'salmon', 'sunfish']
relations = ['is', 'has', 'can']
# NOTE(review): 'yellfsow' looks like a typo for 'yellow', but it is only a
# label string and does not affect training.
is_list = ['living', 'living thing', 'plant', 'animal', 'tree', 'flower', 'bird', 'fish', 'big', 'green', 'red',
           'yellfsow']
has_list = ['roots', 'leaves', 'bark', 'branches', 'skin', 'feathers', 'wings', 'gills', 'scales']
can_list = ['grow', 'move', 'swim', 'fly', 'breathe', 'breathe underwater', 'breathe air', 'walk', 'photosynthesize']
descriptors = [nouns, is_list, has_list, can_list]
# Ground-truth target matrices: row i is the binary attribute vector for
# nouns[i] under the corresponding relation.
truth_nouns = np.identity(len(nouns))  # item-identity targets (one-hot)
truth_is = np.zeros((len(nouns), len(is_list)))
truth_is[0, :] = [1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0]
truth_is[1, :] = [1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0]
truth_is[2, :] = [1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0]
truth_is[3, :] = [1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0]
truth_is[4, :] = [1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
truth_is[5, :] = [1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
truth_is[6, :] = [1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0]
truth_is[7, :] = [1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0]
truth_has = np.zeros((len(nouns), len(has_list)))
truth_has[0, :] = [1, 1, 1, 1, 0, 0, 0, 0, 0]
truth_has[1, :] = [1, 1, 1, 1, 0, 0, 0, 0, 0]
truth_has[2, :] = [1, 1, 0, 0, 0, 0, 0, 0, 0]
truth_has[3, :] = [1, 1, 0, 0, 0, 0, 0, 0, 0]
truth_has[4, :] = [0, 0, 0, 0, 1, 1, 1, 0, 0]
truth_has[5, :] = [0, 0, 0, 0, 1, 1, 1, 0, 0]
truth_has[6, :] = [0, 0, 0, 0, 0, 0, 0, 1, 1]
truth_has[7, :] = [0, 0, 0, 0, 0, 0, 0, 1, 1]
truth_can = np.zeros((len(nouns), len(can_list)))
truth_can[0, :] = [1, 0, 0, 0, 0, 0, 0, 0, 1]
truth_can[1, :] = [1, 0, 0, 0, 0, 0, 0, 0, 1]
truth_can[2, :] = [1, 0, 0, 0, 0, 0, 0, 0, 1]
truth_can[3, :] = [1, 0, 0, 0, 0, 0, 0, 0, 1]
truth_can[4, :] = [1, 1, 0, 1, 1, 0, 1, 1, 0]
truth_can[5, :] = [1, 1, 0, 1, 1, 0, 1, 1, 0]
truth_can[6, :] = [1, 1, 1, 0, 1, 1, 0, 0, 0]
truth_can[7, :] = [1, 1, 1, 0, 1, 1, 0, 0, 0]
# Grouped targets — apparently not referenced by the training loop below.
truths = [[truth_nouns], [truth_is], [truth_has], [truth_can]]
def gen_input_vals(nouns, relations):
    """Return one-hot input matrices for the nouns and the relations.

    Each row is a one-hot vector for one item, with an extra always-on
    bias unit appended as the last column.
    """
    def one_hot_with_bias(items):
        n = len(items)
        # identity -> one-hot rows; trailing column of ones -> bias unit
        return np.hstack((np.identity(n), np.ones((n, 1))))

    return (one_hot_with_bias(nouns), one_hot_with_bias(relations))
# Build the input matrices (with bias column) and record their shapes.
nouns_onehot, rels_onehot = gen_input_vals(nouns, relations)
r_1 = np.shape(nouns_onehot)[0]  # number of noun patterns
c_1 = np.shape(nouns_onehot)[1]  # noun input width (len(nouns) + 1 bias)
r_2 = np.shape(rels_onehot)[0]  # number of relation patterns
c_2 = np.shape(rels_onehot)[1]  # relation input width (len(relations) + 1 bias)
############### THIS IS THE PART WHERE WE START BUILDING TRANSFER MECHANISMS ########################
#In order to build in biases, add an extra node to every layer, including the inputs
nouns_in = pnl.TransferMechanism(name="nouns_input",
                                 default_variable=np.zeros(c_1)
                                 )
rels_in = pnl.TransferMechanism(name="rels_input",
                                default_variable=np.zeros(c_2)
                                )
# Hidden layers: a noun-representation layer (h1) feeding a mixed layer (h2)
# that also receives the relation input.
h1 = pnl.TransferMechanism(name="hidden_nouns",
                           size=9,
                           function=psyneulink.core.components.functions.transferfunctions.Logistic()
                           )
h2 = pnl.TransferMechanism(name="hidden_mixed",
                           size=16,
                           function=psyneulink.core.components.functions.transferfunctions.Logistic()
                           )
# Four sigmoid output layers: item identity, "is", "has" and "can" attributes.
out_sig_I = pnl.TransferMechanism(name="sig_outs_I",
                                  size=len(nouns),
                                  function=psyneulink.core.components.functions.transferfunctions.Logistic()
                                  )
out_sig_is = pnl.TransferMechanism(name="sig_outs_is",
                                   size=len(is_list),
                                   function=psyneulink.core.components.functions.transferfunctions.Logistic()
                                   )
out_sig_has = pnl.TransferMechanism(name="sig_outs_has",
                                    size=len(has_list),
                                    function=psyneulink.core.components.functions.transferfunctions.Logistic()
                                    )
out_sig_can = pnl.TransferMechanism(name="sig_outs_can",
                                    size=len(can_list),
                                    function=psyneulink.core.components.functions.transferfunctions.Logistic()
                                    )
###################### THIS IS THE PART WHERE I PUT IN THE FORCED RANDOM MATRICES #########################
#alla de maps
# Random initial weight matrices for every projection between layers.
map_nouns_h1 = pnl.MappingProjection(matrix=np.random.rand(c_1,c_1),
                                     name="map_nouns_h1"
                                     )
map_rel_h2 = pnl.MappingProjection(matrix=np.random.rand(c_2,16),
                                   name="map_relh2"
                                   )
map_h1_h2 = pnl.MappingProjection(matrix=np.random.rand(c_1,16),
                                  name="map_h1_h2"
                                  )
map_h2_I = pnl.MappingProjection(matrix=np.random.rand(16,len(nouns)),
                                 name="map_h2_I"
                                 )
map_h2_is = pnl.MappingProjection(matrix=np.random.rand(16,len(is_list)),
                                  name="map_h2_is"
                                  )
map_h2_has = pnl.MappingProjection(matrix=np.random.rand(16,len(has_list)),
                                   name="map_h2_has"
                                   )
map_h2_can = pnl.MappingProjection(matrix=np.random.rand(16,len(can_list)),
                                   name="map_h2_can"
                                   )
#################### THIS IS THE PART WHERE WE START BUILDING OUT ALL THE PROCESSES ########################
# Input pathways into the mixed hidden layer ...
p11 = pnl.Pathway(pathway=[nouns_in,
                           map_nouns_h1,
                           h1,
                           map_h1_h2,
                           h2])
p12 = pnl.Pathway(pathway=[rels_in,
                           map_rel_h2,
                           h2])
# ... and output pathways from it to each attribute layer.
p21 = pnl.Pathway(pathway=[h2,
                           map_h2_I,
                           out_sig_I])
p22 = pnl.Pathway(pathway=[h2,
                           map_h2_is,
                           out_sig_is])
p23 = pnl.Pathway(pathway=[h2,
                           map_h2_has,
                           out_sig_has])
p24 = pnl.Pathway(pathway=[h2,
                           map_h2_can,
                           out_sig_can])
############################# THIS IS WHERE WE BUILD OUT THE COMPOSITION ###################################
# All pathways are trained with backpropagation.
rumel_comp = pnl.Composition(pathways=[(p11, BackPropagation),
                                       (p12, BackPropagation),
                                       (p21, BackPropagation),
                                       (p22, BackPropagation),
                                       (p23, BackPropagation),
                                       (p24, BackPropagation),
                                       ],
                             learning_rate=.5)
rumel_comp.show_graph(output_fmt='jupyter')
############################## THIS IS WHERE WE SETUP THE LOOP VARIABLES #########################################
# THESE ARRAYS STORE THE ERROR VALUES FROM THE SIG OUTPUT AND BIN OUTPUT
delta_bin_array=[]
delta_sig_array=[]
# SET NUMBER OF EPOCHS:
epochs=1000
# CREATE CONDITIONAL:
# (progress is reported on 100 evenly spaced epochs)
div = epochs / 100
spits=np.arange(0,epochs,div)
#CREATE KILLSWITCH:
kill=0
############################## THIS IS WHERE WE RUN THE COMPOSITION #########################################
# One epoch = every noun x every relation = 8 * 3 = 24 learning trials,
# hence the "/ 24" averaging and "[-25:]" windows below.
for epoch in range(epochs):
    print("epoch number", epoch)
    for noun in range(len(nouns)):
        for rel_out in range(3):
            # K GIVES THE OUTPUT OF THE COMPOSITION
            k = rumel_comp.learn(inputs={nouns_in: nouns_onehot[noun],
                                         rels_in: rels_onehot[rel_out],
                                         },
                                 targets={out_sig_I: truth_nouns[noun],
                                          out_sig_is: truth_is[noun],
                                          out_sig_has: truth_has[noun],
                                          out_sig_can: truth_can[noun]
                                          },
                                 )
            # PUT K INTO AN ARRAY SO WE CAN MANIPULATE ITS VALUES
            k_array = np.array(k)
            # IT_K GIVES THE OUTPUT FROM THIS SPECIFIC RUN
            it_k = k[np.shape(k_array)[0] - 1]
            # THE DELTAS ADD UP THE SQUARED ERROR FROM EVERY OUTPUT OF K (I, IS, HAS, AND CAN)
            # delta: error of the thresholded (rounded) outputs; delta_sig:
            # error of the raw sigmoid outputs.  Both are normalized by the
            # total number of output units.
            delta = 0
            delta = np.sum((truth_nouns[noun] - np.round(it_k[0])) ** 2)
            delta = delta + np.sum((truth_is[noun] - np.round(it_k[1])) ** 2)
            delta = delta + np.sum((truth_has[noun] - np.round(it_k[2])) ** 2)
            delta = delta + np.sum((truth_can[noun] - np.round(it_k[3])) ** 2)
            delta = delta / (len(nouns) + len(is_list) + len(has_list) + len(can_list))
            delta_sig = 0
            delta_sig = np.sum((truth_nouns[noun] - (it_k[0])) ** 2)
            delta_sig = delta_sig + np.sum((truth_is[noun] - (it_k[1])) ** 2)
            delta_sig = delta_sig + np.sum((truth_has[noun] - (it_k[2])) ** 2)
            delta_sig = delta_sig + np.sum((truth_can[noun] - (it_k[3])) ** 2)
            delta_sig = delta_sig / (len(nouns) + len(is_list) + len(has_list) + len(can_list))
            # THE ARRAYS STORE THE ERROR FROM EVERY RUN. TO SMOOTH THESE, WE CAN AVERAGE THEM OVER EPOCHS.
            delta_bin_array = np.append(delta_bin_array, delta)
            delta_sig_array = np.append(delta_sig_array, delta_sig)
    # PRINT PROGRESS INFORMATION
    if np.isin(epoch, spits):
        print('the average sum squared error on sigmoids for this epoch was', np.sum(delta_sig_array[-25:]) / 24)
        print('the average sum squared error on binaries for this epoch was', np.sum(delta_bin_array[-25:]) / 24)
    # KILL THE LOOP ONCE THE LABELS CONVERGE TO ZERO ERROR FOR A CERTAIN NUMBER OF EPOCHS
    if (np.sum(delta_bin_array[-25:]) / 24) == 0.0:
        kill = kill + 1
    if kill >= 99:
        break
######################## SETUP THE LABEL ERRORS TO BE GRAPHED #######################################
# Average the per-trial binary errors into one value per epoch (24 trials
# per epoch); any trailing partial epoch is truncated before reshaping.
delta_bin_array=np.array(delta_bin_array)
delta_bin_array_trunc = np.array(delta_bin_array[0:int(np.floor(len(delta_bin_array) / 24) * 24)])
height = len(delta_bin_array_trunc) / 24
delta_bin_array_trunc=np.reshape(delta_bin_array_trunc,(int(height),24))
delta_bin_epochs = np.sum(delta_bin_array_trunc, axis=1) / 24
######################## SETUP THE SIGMOID ERRORS TO BE GRAPHED #######################################
delta_sig_array=np.array(delta_sig_array)
delta_sig_array_trunc = delta_sig_array[0:int(np.floor(len(delta_sig_array) / 24) * 24)]
delta_sig_array_trunc=np.reshape(delta_sig_array_trunc,(int(height),24))
delta_sig_epochs = np.sum(delta_sig_array_trunc, axis=1) / 24
######################## DO THE PLOTTING #######################################
plt.plot(delta_bin_epochs)
plt.title('Label error as a function of epochs')
plt.ylabel('Error')
plt.xlabel('Epoch')
plt.show()
plt.plot(delta_sig_epochs)
plt.title('sigmoid error as a function of epochs')
plt.ylabel('Error')
plt.xlabel('Epoch')
plt.show()
| StarcoderdataPython |
3393169 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import os
from glob import glob
from os.path import join
import torch
import torchvision
import transformers
import more_itertools
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
from tqdm.auto import tqdm
from PIL import Image
from einops import rearrange
from . import utils
def generate_images(text, tokenizer, dalle, vae, top_k, top_p, images_num, image_prompts=None, temperature=1.0, bs=8,
                    seed=None, use_cache=True):
    """Sample `images_num` images for `text` and rank them by perplexity.

    Images are generated autoregressively in chunks of at most `bs`,
    scored with one final forward pass, and returned sorted by ascending
    per-image perplexity together with the matching score list.
    """
    if seed is not None:
        utils.seed_everything(seed)
    vocab_size = dalle.get_param('vocab_size')
    text_seq_length = dalle.get_param('text_seq_length')
    image_seq_length = dalle.get_param('image_seq_length')
    total_seq_length = dalle.get_param('total_seq_length')
    device = dalle.get_param('device')
    text = text.lower().strip()
    input_ids = tokenizer.encode_text(text, text_seq_length=text_seq_length)
    pil_images, ppl_scores = [], []
    for chunk in more_itertools.chunked(range(images_num), bs):
        chunk_bs = len(chunk)
        with torch.no_grad():
            # Causal (lower-triangular) attention mask over the full sequence.
            attention_mask = torch.tril(torch.ones((chunk_bs, 1, total_seq_length, total_seq_length), device=device))
            out = input_ids.unsqueeze(0).repeat(chunk_bs, 1).to(device)
            has_cache = False
            if image_prompts is not None:
                prompts_idx, prompts = image_prompts.image_prompts_idx, image_prompts.image_prompts
                prompts = prompts.repeat(chunk_bs, 1)
            # Autoregressive sampling: one image token per step.
            for idx in tqdm(range(out.shape[1], total_seq_length)):
                idx -= text_seq_length
                if image_prompts is not None and idx in prompts_idx:
                    # Position pinned by the image prompt: copy it verbatim.
                    out = torch.cat((out, prompts[:, idx].unsqueeze(1)), dim=-1)
                else:
                    logits, has_cache = dalle(out, attention_mask,
                                              has_cache=has_cache, use_cache=use_cache, return_loss=False)
                    # Keep only image-token logits (text tokens occupy the
                    # first `vocab_size` slots), then top-k/top-p sample.
                    logits = logits[:, -1, vocab_size:]
                    logits /= temperature
                    filtered_logits = transformers.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
                    probs = torch.nn.functional.softmax(filtered_logits, dim=-1)
                    sample = torch.multinomial(probs, 1)
                    out = torch.cat((out, sample), dim=-1)
            codebooks = out[:, -image_seq_length:]
            # One more full forward pass to score the sampled sequences.
            logits, _ = dalle(out, attention_mask, has_cache=has_cache, use_cache=use_cache, return_loss=False)
            logits = rearrange(logits, 'b n c -> b c n')
            image_logits = logits[:, vocab_size:, -image_seq_length:- 1].contiguous().float()
            out = out.contiguous().long()
            ppl_scores.append(
                ce_to_ppl(F.cross_entropy(
                    image_logits,
                    out[:, -image_seq_length + 1:],
                    reduction='none',
                ))
            )
            # Decode the discrete codes back to pixel space with the VAE.
            images = vae.decode(codebooks)
            pil_images += utils.torch_tensors_to_pil_list(images)
    # Sort all generated images by ascending perplexity (most confident first).
    ppl_scores = torch.cat(ppl_scores)
    indexes = ppl_scores.argsort()
    sorted_pil_images = []
    for idx in indexes:
        sorted_pil_images.append(pil_images[idx.item()])
    return sorted_pil_images, ppl_scores[indexes].cpu().numpy().tolist()
def ce_to_ppl(ce):
    """Convert a padded cross-entropy matrix to per-row perplexity.

    Zero entries are treated as padding: each nonzero CE value is
    exponentiated in place, then every row is averaged over its number of
    nonzero entries.  Note: ``ce`` is modified in place.
    """
    nonzero = torch.where(ce)
    ce[nonzero] = torch.exp(ce[nonzero])
    counts = torch.unique(nonzero[0], return_counts=True)[1]
    return ce.sum(1) / counts
def super_resolution(pil_images, realesrgan, batch_size=4):
    """Upscale each PIL image with the RealESRGAN model.

    Returns the upscaled images (as produced by ``realesrgan.predict``)
    in the same order as the input list.
    """
    upscaled = []
    for pil_image in pil_images:
        with torch.no_grad():
            prediction = realesrgan.predict(np.array(pil_image), batch_size=batch_size)
        upscaled.append(prediction)
    return upscaled
def cherry_pick_by_ruclip(pil_images, text, clip_predictor, count=4):
    """Select the `count` images best matching `text` (expects ruclip models).

    Returns the chosen PIL images (best first) and their scores.
    """
    # Score every image against the text prompt.
    with torch.no_grad():
        text_latents = clip_predictor.get_text_latents([text])
        image_latents = clip_predictor.get_image_latents(pil_images)
        scores = torch.matmul(image_latents, text_latents.t()).view(-1)
    # Keep the highest-scoring images, in descending score order.
    best = scores.argsort(descending=True)[:count]
    picked = [pil_images[idx] for idx in best]
    return picked, scores[best].cpu().numpy().tolist()
def show(pil_images, nrow=4, size=14, save_dir=None, show=True):
    """
    :param pil_images: list of images in PIL
    :param nrow: number of images per grid row (passed to make_grid)
    :param size: size of the images
    :param save_dir: dir for separately saving of images, example: save_dir='./pics'
    :param show: if True, also display the grid with matplotlib
    """
    # Optionally save each image individually, continuing the numbering
    # from any img_*.png files already present in save_dir.
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
        count = len(glob(join(save_dir, 'img_*.png')))
        for i, pil_image in enumerate(pil_images):
            pil_image.save(join(save_dir, f'img_{count+i}.png'))
    pil_images = [pil_image.convert('RGB') for pil_image in pil_images]
    # Compose all images into a single grid tensor.
    imgs = torchvision.utils.make_grid(utils.pil_list_to_torch_tensors(pil_images), nrow=nrow)
    if not isinstance(imgs, list):
        imgs = [imgs.cpu()]
    fix, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=(size, size))
    for i, img in enumerate(imgs):
        img = img.detach()
        img = torchvision.transforms.functional.to_pil_image(img)
        # Optionally save the composed grid as well (group_*.png).
        if save_dir is not None:
            count = len(glob(join(save_dir, 'group_*.png')))
            img.save(join(save_dir, f'group_{count+i}.png'))
        if show:
            axs[0, i].imshow(np.asarray(img))
            axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    if show:
        fix.show()
        plt.show()
def classic_convert_emoji_to_rgba(np_image, lower_thr=240, upper_thr=255, width=2):
    """Heuristically add an alpha channel by treating near-white areas as
    background.

    Near-white pixels (every channel within [lower_thr, upper_thr]) are
    detected, the largest surrounding contours are filled as transparent,
    and the contour outlines are drawn ``width`` pixels thick.

    Args:
        np_image: HxWx3+ uint8 image; only the first three channels are used.
        lower_thr: lower per-channel bound of the "background white" range.
        upper_thr: upper per-channel bound of the "background white" range.
        width: contour outline thickness in pixels.

    Returns:
        HxWx4 uint8 RGBA image.
    """
    import cv2  # noqa
    img = np_image[:, :, :3].copy()
    lower = np.array([lower_thr, lower_thr, lower_thr], dtype='uint8')
    upper = np.array([upper_thr, upper_thr, upper_thr], dtype='uint8')
    mask = cv2.inRange(img, lower, upper)
    ret, thresh = cv2.threshold(mask, 0, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Fix: the alpha plane was hard-coded to 512x512; derive it from the
    # actual image size so non-512 inputs work too (512x512 inputs behave
    # exactly as before).
    a_channel = np.ones(img.shape[:2], dtype=np.uint8) * 255
    if len(contours) != 0:
        # Keep only the 7 largest contours (by point count) as background.
        contours = sorted(contours, key=lambda x: x.shape[0])[-7:]
        cv2.fillPoly(a_channel, contours, (0, 0, 0))
        cv2.drawContours(a_channel, contours, -1, (0, 0, 0), width)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2RGBA)
    img[:, :, 3] = a_channel
    return img
def convert_emoji_to_rgba(pil_images, emojich_unet, device='cpu', bs=1, score_thr=0.99):
    """Add an alpha channel to emoji images with a segmentation U-Net,
    falling back to the classic contour heuristic for low-confidence masks.

    Returns (list of RGBA PIL images, list of 'unet'/'classic' flags).
    """
    final_images, runs = [], []
    with torch.no_grad():
        for chunk in more_itertools.chunked(pil_images, bs):
            images = []
            for pil_image in chunk:
                # NOTE(review): the model appears to expect 512x512 RGB input
                # scaled to [0, 1], channels-first — confirm against training.
                image = np.array(pil_image.resize((512, 512)))[:, :, :3]
                image = image.astype(np.float32) / 255.0
                image = torch.from_numpy(image).permute(2, 0, 1)
                images.append(image)
            images = torch.nn.utils.rnn.pad_sequence(images, batch_first=True)
            pred_masks = emojich_unet(images.to(device))
            pred_masks = torch.softmax(pred_masks, 1)
            # Per-pixel confidence and predicted class index.
            scores, pred_masks = torch.max(pred_masks, 1)
            pred_masks = pred_masks.int().cpu().numpy()
            pred_masks = (pred_masks * 255).astype(np.uint8)
            for pil_image, pred_mask, score in zip(chunk, pred_masks, scores):
                score = score.mean().item()
                final_image = np.zeros((512, 512, 4), np.uint8)
                final_image[:, :, :3] = np.array(pil_image.resize((512, 512)))[:, :, :3]
                if score > score_thr:
                    # Confident prediction: use the U-Net mask as alpha.
                    run = 'unet'
                    final_image[:, :, -1] = pred_mask
                else:
                    # Low mean confidence: fall back to the contour heuristic.
                    run = 'classic'
                    final_image = classic_convert_emoji_to_rgba(final_image)
                final_image = Image.fromarray(final_image)
                final_images.append(final_image)
                runs.append(run)
    return final_images, runs
def show_rgba(rgba_pil_image):
    """Visualize an RGBA image as three panels: the RGB content, the alpha
    channel, and the RGB content with transparent pixels painted green."""
    img = np.array(rgba_pil_image)
    fig, ax = plt.subplots(1, 3, figsize=(10, 10), dpi=100)
    ax[0].imshow(img[:, :, :3])
    ax[1].imshow(img[:, :, -1])
    # Pixels with alpha below 128 are treated as transparent.
    mask = np.repeat(np.expand_dims(img[:, :, -1] < 128, -1), 3, axis=-1)
    img = img[:, :, :3]
    img[mask[:, :, 0], 0] = 64
    img[mask[:, :, 0], 1] = 255
    img[mask[:, :, 0], 2] = 64
    ax[2].imshow(img)
| StarcoderdataPython |
3331734 | <gh_stars>1-10
import argparse
def get_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: optional list of argument strings.  Defaults to None, in which
            case argparse reads ``sys.argv[1:]`` — identical to the previous
            behavior, so existing ``get_args()`` callers are unaffected.
            Passing a list makes the parser testable without mutating
            ``sys.argv``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", help="path to the config file directory")
    # Folder settings
    parser.add_argument("--prefix", help="experiment prefix, if given creates subfolder in experiment directory")
    parser.add_argument('--new_dir', default=False, type=int, help='If True, concat datetime string to exp_dir.')
    parser.add_argument('--dont_save', default=False, type=int,
                        help="if True, nothing is saved to disk. Note: this doesn't work")  # TODO this doesn't work
    # Running protocol
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--mode', default='train', type=str,
                        choices=['train', 'val', 'rollout'],
                        help='mode of the program (training, validation, or generate rollout)')
    # Misc
    parser.add_argument('--seed', default=-1, type=int,
                        help='overrides config/default seed for more convenient seed setting.')
    parser.add_argument('--gpu', default=-1, type=int,
                        help='will set CUDA_VISIBLE_DEVICES to selected value')
    parser.add_argument('--strict_weight_loading', default=True, type=int,
                        help='if True, uses strict weight loading function')
    parser.add_argument('--deterministic', default=False, type=int,
                        help='if True, sets fixed seeds for torch and numpy')
    parser.add_argument('--n_val_samples', default=10, type=int,
                        help='number of validation episodes')
    parser.add_argument('--save_dir', type=str,
                        help='directory for saving the generated rollouts in rollout mode')
    parser.add_argument('--config_override', default='', type=str,
                        help='override to config file in format "key1.key2=val1,key3=val2"')
    # Debug
    parser.add_argument('--debug', default=False, type=int,
                        help='if True, runs in debug mode')
    # Note
    parser.add_argument('--notes', default='', type=str,
                        help='Notes for the run')
    return parser.parse_args(argv)
| StarcoderdataPython |
50099 | <filename>LeetCode/Problems/15. 3Sum.py
class Solution(object):
    def threeSum(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Return all unique triplets summing to zero.  Classic sort +
        two-pointer approach: O(n^2) time, O(1) extra space beyond the
        output.  Duplicates are skipped at both the anchor and the left
        pointer so each triplet appears exactly once.

        (The commented-out O(n^3) brute-force version that previously
        trailed this method has been removed as dead code.)
        """
        nums.sort()
        res = []
        for i, a in enumerate(nums):
            # Skip duplicate anchors: an identical previous value already
            # produced every triplet starting with `a`.
            if i > 0 and a == nums[i - 1]:
                continue
            # Two pointers scan the remainder of the sorted array.
            l, r = i + 1, len(nums) - 1
            while l < r:
                threeSum = a + nums[l] + nums[r]
                if threeSum > 0:
                    r -= 1
                elif threeSum < 0:
                    l += 1
                else:
                    res.append([a, nums[l], nums[r]])
                    l += 1
                    # Advance past duplicates of the left value so the same
                    # triplet is not emitted twice.
                    while nums[l] == nums[l - 1] and l < r:
                        l += 1
        return res
| StarcoderdataPython |
1602703 | # -*- coding: utf-8 -*-
# Copyright 2011 Nelen & Schuurmans
from django.conf import settings
from django.core.management.base import BaseCommand
from lizard_auth_server.models import Token
import datetime
import logging
import pytz
# Module-level logger for this management command.
logger = logging.getLogger(__name__)
# Tokens created earlier than (now - TOKEN_TIMEOUT) are considered expired.
TOKEN_TIMEOUT = datetime.timedelta(minutes=settings.SSO_TOKEN_TIMEOUT_MINUTES)
class Command(BaseCommand):
    """Management command that deletes expired SSO tokens."""

    args = ""
    help = "Clear expired SSO tokens from the database."

    def handle(self, *args, **options):
        # Anything created before the cutoff has outlived its timeout.
        cutoff = datetime.datetime.now(tz=pytz.UTC) - TOKEN_TIMEOUT
        Token.objects.filter(created__lt=cutoff).delete()
| StarcoderdataPython |
193923 | from .dataclass import frequency
# Base URL of the App Store Connect analytics API (v1).
ANALYTICS_API_URL = "https://appstoreconnect.apple.com/analytics/api/v1"
# TODO: use Config class Instead of config dict.
class Config():
    """Base query configuration for the analytics API.

    Attribute names are camelCase — presumably mirroring the API's JSON
    payload field names (TODO confirm against the request builder).
    """

    def __init__(self, app_id):
        self.startTime = None  # query window start (None = unset)
        self.endTime = None  # query window end (None = unset)
        self.adamId = [app_id]  # app identifier(s) the query targets
        self.group = None  # optional grouping descriptor
        self.frequency = frequency.days  # reporting granularity (daily by default)
        self.dimensionFilters = []  # dimension filter entries
        self.measures = []  # metric names to request
class MeasuresConfig(Config):
    """Config variant for measures queries.

    Bug fix: the override previously skipped ``Config.__init__``, leaving
    instances without any of the base attributes (startTime, adamId,
    frequency, ...).  The base initializer is now invoked first, and the
    ``group`` default is overridden afterwards, preserving this class's
    own behavior.
    """

    def __init__(self, app_id):
        super().__init__(app_id)
        self.group = {}
class SourcesConfig(Config):
    """Config variant for sources queries.

    Bug fix: call the base initializer so base attributes exist; the
    class's own ``measures``/``dimension`` fields are then (re)assigned.
    """

    def __init__(self, app_id):
        super().__init__(app_id)
        self.measures: list = []
        self.dimension: str = None
| StarcoderdataPython |
144906 | <filename>snipping/main.py
"""Main
"""
import sys
from snipping import application
from snipping import prompt_toolkit
def main():
    """Entry point: build the application — optionally from an init file
    given as the first CLI argument — and run it."""
    init_file = sys.argv[1] if len(sys.argv) > 1 else None
    app = application.get_application(init_file=init_file)
    return prompt_toolkit.run(app)
| StarcoderdataPython |
3331359 | # a * x + b * y = gcd(a, b)
def egcd(a: int, b: int) -> "tuple[int, int, int]":
    """Extended Euclidean algorithm.

    Returns (g, x, y) such that a * x + b * y = g = gcd(a, b).
    For non-negative inputs g is the non-negative gcd.

    Fix: the return annotation ``(int, int, int)`` was a tuple of types,
    not a valid type; replaced with a string forward annotation that works
    on every Python 3 version.  Also removed the redundant ``else`` after
    ``return`` and added documentation.
    """
    if a == 0:
        # Base case: gcd(0, b) = b, witnessed by 0*0 + b*1 = b.
        return b, 0, 1
    gcd, x, y = egcd(b % a, a)
    # Back-substitute using b % a = b - (b // a) * a.
    return gcd, y - (b // a) * x, x


if __name__ == '__main__':
    print(egcd(50, 30))
| StarcoderdataPython |
1653434 | from process_json import *
import numpy as np
# Presumably a batch size used by downstream processing — TODO confirm usage.
BS = 32
"""def create_tags(text,a):
#Text includes entities marked as BEG__w1 w2 w3__END. Transform to a tags list.
mya = a.lower()
a = mya.split()
tags = []
inside = False
for w in text.split():
w_stripped = w.strip()
if w_stripped== 'BEG____END':
continue
if w_stripped.startswith("BEG__") and w_stripped.endswith("__END"):
concept = w_stripped.split("_")[2]
if concept.lower() in a:
tags.append('B-ans')
if inside: # something went wrong, leave as is
print("Inconsistent markup.")
else:
tags.append('O')
elif w_stripped.startswith("BEG__"):
assert not inside
inside = True
concept = [w_stripped.split("_", 2)[-1]]
elif w_stripped.endswith("__END"):
if not inside:
if w_stripped[:-5].lower() in a:
tags.append('I') #might be B
else:
tags.append('O')
else:
concept.append(w_stripped.rsplit("_", 2)[0])
if any(c.lower() in a for c in concept):
tags.append('B-ans')
for w in concept:
tags.append('I-ans')
tags.pop(-1)
else:
for w in concept:
tags.append('O')
inside = False
else:
if inside:
concept.append(w_stripped)
else:
tags.append('O')
return ' '.join(tags)"""
def create_tags(text,a):
    """
    Text includes entities marked as BEG__w1 w2 w3__END. Transform to a tags list.

    Emits exactly one tag per (non-marker) token of `text`: 'B-ans' where
    answer string `a` begins inside a marked entity, 'I-ans' for its
    continuation, 'O' elsewhere.  Matching is a case-insensitive substring
    test of `a` against each marked concept.  Returns the tags joined by
    spaces, so callers can assert len(tags) == len(tokens).
    """
    a = a.lower()
    tags = []
    inside = False  # True while collecting the words of a multi-token entity
    for w in text.split():
        w_stripped = w.strip()
        if w_stripped == 'BEG____END':
            # Empty entity marker: contributes no token and no tag.
            continue
        if w_stripped.startswith("BEG__") and w_stripped.endswith("__END"):
            # Single-token entity: BEG__word__END.
            concept = w_stripped.split("_")[2]
            if a in concept.lower():
                tags.append('B-ans')
                if inside:  # something went wrong, leave as is
                    print("Inconsistent markup.")
            else:
                tags.append('O')
        elif w_stripped.startswith("BEG__"):
            # Opening marker of a multi-token entity.
            assert not inside
            inside = True
            concept = [w_stripped.split("_", 2)[-1]]
        elif w_stripped.endswith("__END"):
            if not inside:
                # Stray closing marker on a lone token.
                if a in w_stripped[:-5].lower():
                    tags.append('I')  # might be B
                else:
                    tags.append('O')
            else:
                # Closing token of the currently open entity.
                concept.append(w_stripped.rsplit("_", 2)[0])
                if a in ' '.join(concept).lower():
                    # Tag the whole entity: B-ans then I-ans for the
                    # remaining words (the pop trims the surplus tag so the
                    # count matches the number of entity tokens).
                    tags.append('B-ans')
                    for w in concept:
                        tags.append('I-ans')
                    tags.pop(-1)
                else:
                    for w in concept:
                        tags.append('O')
                inside = False
        else:
            if inside:
                # Interior word of an open entity: collect, no tag yet.
                concept.append(w_stripped)
            else:
                tags.append('O')
    return ' '.join(tags)
class JsonData(JsonDataset):
    """Dataset wrapper that converts CLICR-style JSON into plain-text
    question/answer instances with BIO answer tags."""
    def __init__(self, dataset_file):
        super().__init__(dataset_file)
        # Number of instances yielded so far by json_to_plain().
        self.dataset_counter = 0
    def json_to_plain(self, remove_notfound=False, stp="no-ent", include_q_cands=False):
        """
        :param remove_notfound: skip instances whose answer (dataset or UMLS
            variant) is not among the passage candidates
        :param stp: no-ent | ent; whether to mark entities in passage; if ent, a multiword entity is treated as 1 token
        :param include_q_cands: also harvest entity candidates from the query
        :return: generator of dicts with keys "id", "p", "q", "a", "c"
            (plus "p_tags"/"q_tags"/"p_beg_end" in no-ent mode)
        """
        for datum in self.dataset[DATA_KEY]:
            for qa in datum[DOC_KEY][QAS_KEY]:
                fields = {}
                qa_txt_option = (" " + qa[QUERY_KEY]) if include_q_cands else ""
                cand = [w for w in to_entities(datum[DOC_KEY][TITLE_KEY] + " " +
                                               datum[DOC_KEY][CONTEXT_KEY]).lower().split() if w.startswith('@entity')]
                cand_q = [w for w in to_entities(qa_txt_option).lower().split() if w.startswith('@entity')]
                if stp == "no-ent":
                    c = {ent_to_plain(e) for e in set(cand)}
                    # Prefer the "dataset" answer; fall back to a UMLS
                    # variant when the dataset one is not a candidate.
                    a = ""
                    for ans in qa[ANS_KEY]:
                        if ans[ORIG_KEY] == "dataset":
                            a = ans[TXT_KEY].lower()
                    if remove_notfound:
                        if a not in c:
                            found_umls = False
                            for ans in qa[ANS_KEY]:
                                if ans[ORIG_KEY] == "UMLS":
                                    umls_answer = ans[TXT_KEY].lower()
                                    if umls_answer in c:
                                        found_umls = True
                                        a = umls_answer
                            if not found_umls:
                                continue
                    fields["c"] = list(c)
                    assert a
                    fields["a"] = a
                    document = remove_entity_marks(
                        datum[DOC_KEY][TITLE_KEY] + " " + datum[DOC_KEY][CONTEXT_KEY]).replace(
                        "\n", " ").lower()
                    doc_tags = create_tags(datum[DOC_KEY][TITLE_KEY] + " " + datum[DOC_KEY][CONTEXT_KEY], a).split()
                    fields["p"] = document
                    # One tag per token of the cleaned passage.
                    assert len(doc_tags) == len(fields["p"].split())
                    fields["p_beg_end"] = (datum[DOC_KEY][TITLE_KEY] + " " + datum[DOC_KEY][CONTEXT_KEY]).replace(
                        "\n", " ")
                    fields["p_tags"] = doc_tags
                    fields["q"] = remove_entity_marks(qa[QUERY_KEY]).replace("\n", " ").lower()
                    q_tags = create_tags(qa[QUERY_KEY], a).split()
                    assert len(q_tags) == len(fields["q"].split())
                    fields["q_tags"] = q_tags
                    self.dataset_counter += 1
                # IGNORE THIS OPTION, WE ARE ONLY WORKING WITH THE no-ent OPTION
                elif stp == "ent":
                    c = set(cand)
                    c_q = set(cand_q)
                    a = ""
                    for ans in qa[ANS_KEY]:
                        if ans[ORIG_KEY] == "dataset":
                            a = plain_to_ent(ans[TXT_KEY].lower())
                    if remove_notfound:
                        if a not in c:
                            found_umls = False
                            for ans in qa[ANS_KEY]:
                                if ans[ORIG_KEY] == "UMLS":
                                    umls_answer = plain_to_ent(ans[TXT_KEY].lower())
                                    if umls_answer in c:
                                        found_umls = True
                                        a = umls_answer
                            if not found_umls:
                                continue
                    fields["c"] = list(c) + list(c_q)
                    assert a
                    fields["a"] = a
                    document = to_entities(datum[DOC_KEY][TITLE_KEY] + " " + datum[DOC_KEY][CONTEXT_KEY]).replace(
                        "\n", " ").lower()
                    fields["p"] = document
                    fields["q"] = to_entities(qa[QUERY_KEY]).replace("\n", " ").lower()
                else:
                    raise NotImplementedError
                fields["id"] = qa[ID_KEY]
                yield fields
    def get_dataset_counter(self):
        """Number of instances produced so far by json_to_plain()."""
        return self.dataset_counter
class MyDataReader:
    """Thin batching wrapper around :class:`JsonData`.

    Keeps a cursor (``sample_counter``) so successive ``send_batches``
    calls return successive slices of the dataset.
    """
    def __init__(self, data_path='/Users/ahmedkoptanmacbook/Imp/ASU/Course Content/Spring 2020/CSE576NLP/Project/clicr_dataset/' + 'dev1.0.json', bs=None):
        # Index of the next instance to deliver.
        self.sample_counter = 0
        self.d = JsonData(data_path)
        # Batch size; None means "return everything remaining".
        self.bs = bs
    def send_batches(self, remove_notfound=True):
        """Return the next batch of instances.

        Re-iterates the dataset from the start, skipping the
        ``sample_counter`` instances already delivered, and collects up to
        ``bs`` new ones (or all remaining when ``bs`` is None).
        """
        data = []
        stream = self.d.json_to_plain(remove_notfound=remove_notfound, stp='no-ent')
        for i, inst in enumerate(stream):
            if i < self.sample_counter:
                continue  # already delivered by a previous call
            data.append(inst)
            self.sample_counter += 1
            if self.bs is not None and self.sample_counter % self.bs == 0:
                return data
        return data
    def get_data_size(self):
        """Total number of instances materialized so far by the reader."""
        return self.d.get_dataset_counter()
| StarcoderdataPython |
1677732 | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the base of all availability checker classes.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import multiprocessing
from datetime import datetime
from typing import Dict, List, Optional
from sqlalchemy.orm import Session
import PyFunceble.checker.utils.whois
import PyFunceble.facility
import PyFunceble.factory
import PyFunceble.storage
from PyFunceble.checker.availability.params import AvailabilityCheckerParams
from PyFunceble.checker.availability.status import AvailabilityCheckerStatus
from PyFunceble.checker.base import CheckerBase
from PyFunceble.checker.syntax.domain import DomainSyntaxChecker
from PyFunceble.checker.syntax.ip import IPSyntaxChecker
from PyFunceble.checker.syntax.url import URLSyntaxChecker
from PyFunceble.helpers.regex import RegexHelper
from PyFunceble.query.dns.query_tool import DNSQueryTool
from PyFunceble.query.http_status_code import HTTPStatusCode
from PyFunceble.query.netinfo.address import AddressInfo
from PyFunceble.query.netinfo.hostbyaddr import HostByAddrInfo
from PyFunceble.query.whois.query_tool import WhoisQueryTool
class AvailabilityCheckerBase(CheckerBase):
"""
Provides the base of all our availability checker classes.
:param str subject:
Optional, The subject to work with.
:param bool use_extra_rules:
Optional, Activates/Disables the usage of our own set of extra rules.
:param bool use_whois_lookup:
Optional, Activates/Disables the usage of the WHOIS lookup to gather
the status of the given :code:`subject`.
:param bool use_dns_lookup:
Optional, Activates/Disables the usage of the DNS lookup to gather the
status of the given :code:`subject`.
:param bool use_netinfo_lookup:
Optional, Activates/Disables the usage of the network information
lookup module to gather the status of the given :code:`subject`.
:param bool use_http_code_lookup:
Optional, Activates/Disables the usage of the HTTP status code lookup
to gather the status of the given :code:`subject`.
:param bool use_reputation_lookup:
Optional, Activates/Disables the usage of the reputation dataset
lookup to gather the status of the given :code:`subject`.
:param bool do_syntax_check_first:
Optional, Activates/Disables the check of the status before the actual
status gathering.
:param bool use_whois_db:
Optional, Activates/Disable the usage of a local database to store the
WHOIS datasets.
"""
# pylint: disable=too-many-public-methods, too-many-instance-attributes
STD_USE_EXTRA_RULES: bool = True
STD_USE_WHOIS_LOOKUP: bool = True
STD_USE_DNS_LOOKUP: bool = True
STD_USE_NETINFO_LOOKUP: bool = True
STD_USE_HTTP_CODE_LOOKUP: bool = True
STD_USE_REPUTATION_LOOKUP: bool = False
STD_USE_WHOIS_DB: bool = True
dns_query_tool: Optional[DNSQueryTool] = None
whois_query_tool: Optional[WhoisQueryTool] = None
addressinfo_query_tool: Optional[AddressInfo] = None
hostbyaddr_query_tool: Optional[HostByAddrInfo] = None
http_status_code_query_tool: Optional[HTTPStatusCode] = None
domain_syntax_checker: Optional[DomainSyntaxChecker] = None
ip_syntax_checker: Optional[IPSyntaxChecker] = None
url_syntax_checker: Optional[URLSyntaxChecker] = None
_use_extra_rules: bool = False
_use_whois_lookup: bool = False
_use_dns_lookup: bool = False
_use_netinfo_lookup: bool = False
_use_http_code_lookup: bool = False
_use_reputation_lookup: bool = False
_use_whois_db: bool = False
status: Optional[AvailabilityCheckerStatus] = None
params: Optional[AvailabilityCheckerParams] = None
    def __init__(
        self,
        subject: Optional[str] = None,
        *,
        use_extra_rules: Optional[bool] = None,
        use_whois_lookup: Optional[bool] = None,
        use_dns_lookup: Optional[bool] = None,
        use_netinfo_lookup: Optional[bool] = None,
        use_http_code_lookup: Optional[bool] = None,
        use_reputation_lookup: Optional[bool] = None,
        do_syntax_check_first: Optional[bool] = None,
        db_session: Optional[Session] = None,
        use_whois_db: Optional[bool] = None,
    ) -> None:
        # Build every query/syntax helper first: the status object below
        # keeps live references to their lookup records.
        self.dns_query_tool = DNSQueryTool().guess_all_settings()
        self.whois_query_tool = WhoisQueryTool()
        self.addressinfo_query_tool = AddressInfo()
        self.hostbyaddr_query_tool = HostByAddrInfo()
        self.http_status_code_query_tool = HTTPStatusCode()
        self.domain_syntax_checker = DomainSyntaxChecker()
        self.ip_syntax_checker = IPSyntaxChecker()
        self.url_syntax_checker = URLSyntaxChecker()
        self.db_session = db_session
        self.params = AvailabilityCheckerParams()
        self.status = AvailabilityCheckerStatus()
        self.status.params = self.params
        self.status.dns_lookup_record = self.dns_query_tool.lookup_record
        self.status.whois_lookup_record = self.whois_query_tool.lookup_record
        # For each switch: honor the explicit argument when given, otherwise
        # guess it from the loaded configuration (or the STD_* default).
        if use_extra_rules is not None:
            self.use_extra_rules = use_extra_rules
        else:
            self.guess_and_set_use_extra_rules()
        if use_whois_lookup is not None:
            self.use_whois_lookup = use_whois_lookup
        else:
            self.guess_and_set_use_whois_lookup()
        if use_dns_lookup is not None:
            self.use_dns_lookup = use_dns_lookup
        else:
            self.guess_and_set_dns_lookup()
        if use_netinfo_lookup is not None:
            self.use_netinfo_lookup = use_netinfo_lookup
        else:
            self.guess_and_set_use_netinfo_lookup()
        if use_http_code_lookup is not None:
            self.use_http_code_lookup = use_http_code_lookup
        else:
            self.guess_and_set_use_http_code_lookup()
        if use_reputation_lookup is not None:
            self.use_reputation_lookup = use_reputation_lookup
        else:
            self.guess_and_set_use_reputation_lookup()
        if use_whois_db is not None:
            self.use_whois_db = use_whois_db
        else:
            self.guess_and_set_use_whois_db()
        # The parent constructor runs last: it may propagate the subject,
        # which requires the helpers above to be fully initialized.
        super().__init__(
            subject, do_syntax_check_first=do_syntax_check_first, db_session=db_session
        )
@property
def use_extra_rules(self) -> bool:
"""
Provides the current value of the :code:`_use_extra_rules` attribute.
"""
return self._use_extra_rules
@use_extra_rules.setter
def use_extra_rules(self, value: bool) -> None:
"""
Sets the value which authorizes the usage of the special rule.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._use_extra_rules = self.params.use_extra_rules = value
def set_use_extra_rules(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the special rule.
:param value:
The value to set.
"""
self.use_extra_rules = value
return self
@property
def use_whois_lookup(self) -> bool:
"""
Provides the current value of the :code:`_use_whois_lookup` attribute.
"""
return self._use_whois_lookup
@use_whois_lookup.setter
def use_whois_lookup(self, value: bool) -> None:
"""
Sets the value which authorizes the usage of the WHOIS lookup.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._use_whois_lookup = self.params.use_whois_lookup = value
def set_use_whois_lookup(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the WHOIS lookup.
:param value:
The value to set.
"""
self.use_whois_lookup = value
return self
@property
def use_dns_lookup(self) -> bool:
"""
Provides the current value of the :code:`_use_dns_lookup` attribute.
"""
return self._use_dns_lookup
@use_dns_lookup.setter
def use_dns_lookup(self, value: bool) -> None:
"""
Sets the value which authorizes the usage of the DNS Lookup.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._use_dns_lookup = self.params.use_dns_lookup = value
def set_use_dns_lookup(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the DNS Lookup.
:param value:
The value to set.
"""
self.use_dns_lookup = value
return self
@property
def use_netinfo_lookup(self) -> bool:
"""
Provides the current value of the :code:`_use_netinfo_lookup` attribute.
"""
return self._use_netinfo_lookup
@use_netinfo_lookup.setter
def use_netinfo_lookup(self, value: bool) -> None:
"""
Sets the value which authorizes the usage of the network information
lookup.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._use_netinfo_lookup = self.params.use_netinfo_lookup = value
def set_use_netinfo_lookup(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the network information
lookup.
:param value:
The value to set.
"""
self.use_netinfo_lookup = value
return self
@property
def use_http_code_lookup(self) -> None:
"""
Provides the current value of the :code:`_use_http_code_lookup` attribute.
"""
return self._use_http_code_lookup
@use_http_code_lookup.setter
def use_http_code_lookup(self, value: bool) -> None:
"""
Sets the value which authorizes the usage of the HTTP status code
lookup.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._use_http_code_lookup = self.params.use_http_code_lookup = value
def set_use_http_code_lookup(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the HTTP status code
lookup.
:param value:
The value to set.
"""
self.use_http_code_lookup = value
return self
@property
def use_reputation_lookup(self) -> bool:
"""
Provides the current value of the :code:`_use_reputation_lookup` attribute.
"""
return self._use_reputation_lookup
@use_reputation_lookup.setter
def use_reputation_lookup(self, value: bool) -> None:
"""
Sets the value which authorizes the usage of the reputation
lookup.
:param value:
The value to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`bool`.
"""
if not isinstance(value, bool):
raise TypeError(f"<value> should be {bool}, {type(value)} given.")
self._use_reputation_lookup = self.params.use_reputation_lookup = value
def set_use_reputation_lookup(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the reputation
lookup.
:param value:
The value to set.
"""
self.use_reputation_lookup = value
return self
@property
def use_whois_db(self) -> bool:
"""
Provides the current value of the :code:`_use_whois_db` attribute.
"""
return self._use_whois_db
    @use_whois_db.setter
    def use_whois_db(self, value: bool) -> None:
        """
        Sets the value which authorizes the usage of the WHOIS DB.
        :param value:
            The value to set.
        :raise TypeError:
            When the given :code:`value` is not a :py:class:`bool`.
        """
        if not isinstance(value, bool):
            raise TypeError(f"<value> should be {bool}, {type(value)} given.")
        self._use_whois_db = self.params.use_whois_db = value
def set_use_whois_db(self, value: bool) -> "AvailabilityCheckerBase":
"""
Sets the value which authorizes the usage of the WHOIS DB.
:param value:
The value to set.
"""
self.use_whois_db = value
return self
    def subject_propagator(self) -> "CheckerBase":
        """
        Propagate the currently set subject.
        .. warning::
            You are not invited to run this method directly.
        """
        # Push the (IDNA) subject into every query tool and syntax checker.
        self.dns_query_tool.set_subject(self.idna_subject)
        self.whois_query_tool.set_subject(self.idna_subject)
        self.addressinfo_query_tool.set_subject(self.idna_subject)
        self.hostbyaddr_query_tool.set_subject(self.idna_subject)
        self.http_status_code_query_tool.set_subject(self.idna_subject)
        self.domain_syntax_checker.subject = self.idna_subject
        self.ip_syntax_checker.subject = self.idna_subject
        self.url_syntax_checker.subject = self.idna_subject
        # A new subject invalidates any previous result: rebuild the status
        # object, rebound to the current lookup records.
        self.status = AvailabilityCheckerStatus()
        self.status.params = self.params
        self.status.dns_lookup_record = self.dns_query_tool.lookup_record
        self.status.whois_lookup_record = self.whois_query_tool.lookup_record
        self.status.subject = self.subject
        self.status.idna_subject = self.idna_subject
        self.status.status = None
        # Syntax flags are always (re)computed for the new subject.
        self.query_syntax_checker()
        return self
def should_we_continue_test(self, status_post_syntax_checker: str) -> bool:
"""
Checks if we are allowed to continue a standard testing.
"""
return bool(
not self.status.status
or status_post_syntax_checker == PyFunceble.storage.STATUS.invalid
)
def guess_and_set_use_extra_rules(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_extra_rules` attribute
from the configuration file.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_extra_rules = PyFunceble.storage.CONFIGURATION.lookup.special
else:
self.use_extra_rules = self.STD_USE_EXTRA_RULES
return self
def guess_and_set_use_whois_lookup(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_whois` attribute
from the configuration file.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_whois_lookup = PyFunceble.storage.CONFIGURATION.lookup.whois
else:
self.use_whois_lookup = self.STD_USE_WHOIS_LOOKUP
return self
def guess_and_set_dns_lookup(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_dns_lookup` attribute
from the configuration file.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_dns_lookup = PyFunceble.storage.CONFIGURATION.lookup.dns
else:
self.use_dns_lookup = self.STD_USE_DNS_LOOKUP
return self
def guess_and_set_use_netinfo_lookup(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_netinfo_lookup` attribute
from the configuration file.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_netinfo_lookup = PyFunceble.storage.CONFIGURATION.lookup.netinfo
else:
self.use_netinfo_lookup = self.STD_USE_NETINFO_LOOKUP
return self
def guess_and_set_use_http_code_lookup(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_http_code_lookup` attribute
from the configuration file.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_http_code_lookup = (
PyFunceble.storage.CONFIGURATION.lookup.http_status_code
)
else:
self.use_http_code_lookup = self.STD_USE_HTTP_CODE_LOOKUP
return self
def guess_and_set_use_reputation_lookup(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_reputation_lookup` attribute
from the configuration file.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_reputation_lookup = (
PyFunceble.storage.CONFIGURATION.lookup.reputation
)
else:
self.use_reputation_lookup = self.STD_USE_REPUTATION_LOOKUP
return self
def guess_and_set_use_whois_db(self) -> "AvailabilityCheckerBase":
"""
Try to guess and set the value of the :code:`use_whois_db` attribute.
"""
if PyFunceble.facility.ConfigLoader.is_already_loaded():
self.use_whois_db = PyFunceble.storage.CONFIGURATION.cli_testing.whois_db
else:
self.use_whois_db = self.STD_USE_WHOIS_DB
def guess_all_settings(
self,
) -> "AvailabilityCheckerBase": # pragma: no cover ## Method are more important
"""
Try to guess all settings.
"""
to_ignore = ["guess_all_settings"]
for method in dir(self):
if method in to_ignore or not method.startswith("guess_"):
continue
getattr(self, method)()
return self
    def query_syntax_checker(self) -> "AvailabilityCheckerBase":
        """
        Queries the syntax checker.
        Fills all the syntax flags of :code:`self.status` for the currently
        set (IDNA) subject.
        """
        PyFunceble.facility.Logger.info(
            "Started to check the syntax of %r", self.status.idna_subject
        )
        self.status.second_level_domain_syntax = (
            self.domain_syntax_checker.is_valid_second_level()
        )
        self.status.subdomain_syntax = self.domain_syntax_checker.is_valid_subdomain()
        # A "domain" is either a second-level domain or a subdomain.
        self.status.domain_syntax = bool(self.status.subdomain_syntax) or bool(
            self.status.second_level_domain_syntax
        )
        self.status.ipv4_syntax = self.ip_syntax_checker.is_valid_v4()
        self.status.ipv6_syntax = self.ip_syntax_checker.is_valid_v6()
        self.status.ipv4_range_syntax = self.ip_syntax_checker.is_valid_v4_range()
        self.status.ipv6_range_syntax = self.ip_syntax_checker.is_valid_v6_range()
        # An "IP" is either a valid IPv4 or a valid IPv6 address.
        self.status.ip_syntax = bool(self.status.ipv4_syntax or self.status.ipv6_syntax)
        self.status.url_syntax = self.url_syntax_checker.is_valid()
        PyFunceble.facility.Logger.info(
            "Finished to check the syntax of %r", self.status.idna_subject
        )
        return self
@CheckerBase.ensure_subject_is_given
def query_dns_record(self) -> Optional[Dict[str, Optional[List[str]]]]:
"""
Tries to query the DNS record(s) of the given subject.
"""
PyFunceble.facility.Logger.info(
"Started to try to query the DNS record of %r.",
self.status.idna_subject,
)
result = dict()
if self.status.subdomain_syntax:
lookup_order = ["NS", "A", "AAAA", "CNAME", "DNAME"]
elif self.status.domain_syntax:
lookup_order = ["NS", "CNAME", "A", "AAAA", "DNAME"]
elif self.status.ip_syntax:
lookup_order = ["PTR"]
else:
lookup_order = []
if lookup_order:
for record_type in lookup_order:
local_result = self.dns_query_tool.set_query_record_type(
record_type
).query()
if local_result:
result[record_type] = local_result
break
PyFunceble.facility.Logger.debug("DNS Record:\n%r", result)
PyFunceble.facility.Logger.info(
"Finished to try to query the DNS record of %r",
self.status.idna_subject,
)
return result
    def try_to_query_status_from_whois(
        self,
    ) -> "AvailabilityCheckerBase":
        """
        Tries to get the status from the WHOIS record.
        .. warning::
            If the configuration is loaded, this method tries to query from
            the local database first.
            If it's not found it will try to query it from the best WHOIS server
            then add it into the database (if the expiration date
            extraction is successful).
        .. note::
            The addition into the WHOIS database is only done if this method is
            running in a process with a name that does not start with
            :code:`PyFunceble` (case sensitive).
        """
        PyFunceble.facility.Logger.info(
            "Started to try to query the status of %r from: WHOIS Lookup",
            self.status.idna_subject,
        )
        if (
            PyFunceble.facility.ConfigLoader.is_already_loaded() and self.use_whois_db
        ): # pragma: no cover ## Not interesting enough to spend time on it.
            whois_object = PyFunceble.checker.utils.whois.get_whois_dataset_object(
                db_session=self.db_session
            )
            known_record = whois_object[self.subject]
            if known_record and not isinstance(known_record, dict):
                # Comes from DB engine.
                known_record = known_record.to_dict()
            if not known_record:
                # We assume that expired dataset are never saved into the
                # dataset.
                self.status.expiration_date = (
                    self.whois_query_tool.get_expiration_date()
                )
                self.status.whois_record = self.whois_query_tool.lookup_record.record
                # Cache the freshly fetched record — unless we run inside a
                # PyFunceble-named worker process (see note above).
                if (
                    self.status.expiration_date
                    and not multiprocessing.current_process().name.startswith(
                        PyFunceble.storage.PROJECT_NAME.lower()
                    )
                ):
                    whois_object.update(
                        {
                            "subject": self.subject,
                            "idna_subject": self.idna_subject,
                            "expiration_date": self.status.expiration_date,
                            "epoch": str(
                                datetime.strptime(
                                    self.status.expiration_date, "%d-%b-%Y"
                                ).timestamp()
                            ),
                        }
                    )
            else:
                # Cache hit: trust the stored expiration date.
                self.status.expiration_date = known_record["expiration_date"]
                self.status.whois_record = None
        else:
            # No config / no DB: always query the WHOIS server directly.
            self.status.expiration_date = self.whois_query_tool.get_expiration_date()
            self.status.whois_record = self.whois_query_tool.lookup_record.record
        # Any expiration date at all means the subject is registered => up.
        if self.status.expiration_date:
            self.status.status = PyFunceble.storage.STATUS.up
            self.status.status_source = "WHOIS"
            PyFunceble.facility.Logger.info(
                "Could define the status of %r from: WHOIS Lookup",
                self.status.idna_subject,
            )
        PyFunceble.facility.Logger.info(
            "Finished to try to query the status of %r from: WHOIS Lookup",
            self.status.idna_subject,
        )
        return self
def try_to_query_status_from_dns(self) -> "AvailabilityCheckerBase":
"""
Tries to query the status from the DNS lookup.
"""
PyFunceble.facility.Logger.info(
"Started to try to query the status of %r from: DNS Lookup",
self.status.idna_subject,
)
lookup_result = self.query_dns_record()
if lookup_result:
self.status.dns_lookup = lookup_result
self.status.status = PyFunceble.storage.STATUS.up
self.status.status_source = "DNSLOOKUP"
PyFunceble.facility.Logger.info(
"Could define the status of %r from: DNS Lookup",
self.status.idna_subject,
)
PyFunceble.facility.Logger.info(
"Finished to try to query the status of %r from: DNS Lookup",
self.status.idna_subject,
)
return self
def try_to_query_status_from_netinfo(self) -> "AvailabilityCheckerBase":
"""
Tries to query the status from the network information.
"""
PyFunceble.facility.Logger.info(
"Started to try to query the status of %r from: NETINFO Lookup",
self.status.idna_subject,
)
if self.status.domain_syntax:
lookup_result = self.addressinfo_query_tool.get_info()
elif self.status.ip_syntax:
lookup_result = self.hostbyaddr_query_tool.get_info()
elif self.status.idna_subject.isdigit():
lookup_result = None
else:
lookup_result = self.addressinfo_query_tool.get_info()
if lookup_result:
self.status.netinfo = lookup_result
self.status.status = PyFunceble.storage.STATUS.up
self.status.status_source = "NETINFO"
PyFunceble.facility.Logger.info(
"Could define the status of %r from: NETINFO Lookup",
self.status.idna_subject,
)
PyFunceble.facility.Logger.info(
"Finished to try to query the status of %r from: NETINFO Lookup",
self.status.idna_subject,
)
return self
    def try_to_query_status_from_http_status_code(self) -> "AvailabilityCheckerBase":
        """
        Tries to query the status from the HTTP status code.
        """
        PyFunceble.facility.Logger.info(
            "Started to try to query the status of %r from: HTTP Status code Lookup",
            self.status.idna_subject,
        )
        if not self.status.url_syntax and not RegexHelper("[^a-z0-9._]").match(
            self.idna_subject, return_match=False
        ):
            # The regex is there because while testing for domain, sometime we
            # may see something like mailto:xxx@yyy.de
            # Bare domain/IP subject: probe it over plain HTTP on port 80.
            self.http_status_code_query_tool.set_subject(
                f"http://{self.idna_subject}:80"
            )
        lookup_result = self.http_status_code_query_tool.get_status_code()
        if (
            lookup_result
            and lookup_result
            != self.http_status_code_query_tool.STD_UNKNOWN_STATUS_CODE
        ):
            self.status.http_status_code = lookup_result
            # Use the configured status-code classification when a
            # configuration is loaded, otherwise the standard one.
            if (
                PyFunceble.facility.ConfigLoader.is_already_loaded()
            ): # pragma: no cover ## Special behavior.
                dataset = PyFunceble.storage.HTTP_CODES
            else:
                dataset = PyFunceble.storage.STD_HTTP_CODES
            # Only upgrade the status when nothing is decided yet (or DOWN)
            # and the code is classified as up / potentially up.
            if (
                not self.status.status
                or self.status.status == PyFunceble.storage.STATUS.down
            ) and (
                self.status.http_status_code in dataset.list.up
                or self.status.http_status_code in dataset.list.potentially_up
            ):
                self.status.status = PyFunceble.storage.STATUS.up
                self.status.status_source = "HTTP CODE"
                PyFunceble.facility.Logger.info(
                    "Could define the status of %r from: HTTP Status code Lookup",
                    self.status.idna_subject,
                )
        else:
            # Unknown or unreachable: make sure no stale code is kept.
            self.status.http_status_code = None
        PyFunceble.facility.Logger.info(
            "Finished to try to query the status of %r from: HTTP Status code Lookup",
            self.status.idna_subject,
        )
        return self
def try_to_query_status_from_syntax_lookup(self) -> "AvailabilityCheckerBase":
"""
Tries to query the status from the syntax.
"""
PyFunceble.facility.Logger.info(
"Started to try to query the status of %r from: Syntax Lookup",
self.status.idna_subject,
)
if (
not self.status.domain_syntax
and not self.status.ip_syntax
and not self.status.url_syntax
):
self.status.status = PyFunceble.storage.STATUS.invalid
self.status.status_source = "SYNTAX"
PyFunceble.facility.Logger.info(
"Could define the status of %r from: Syntax Lookup",
self.status.idna_subject,
)
PyFunceble.facility.Logger.info(
"Finished to try to query the status of %r from: Syntax Lookup",
self.status.idna_subject,
)
return self
def try_to_query_status_from_reputation(self) -> "AvailabilityCheckerBase":
"""
Tries to query the status from the reputation lookup.
"""
raise NotImplementedError()
    @CheckerBase.ensure_subject_is_given
    @CheckerBase.update_status_date_after_query
    def query_status(self) -> "AvailabilityCheckerBase":
        """
        Queries the status and acts accordingly.
        Subclasses must provide the actual implementation.
        """
        raise NotImplementedError()
    # pylint: disable=useless-super-delegation
    def get_status(self) -> Optional[AvailabilityCheckerStatus]:
        """
        Provides the current state of the :code:`status` attribute.
        Kept only to narrow the return type for this checker family.
        """
        return super().get_status()
| StarcoderdataPython |
1622390 | from flask.ext.assets import Environment
import os
from . import pipeline
from shutil import copytree, rmtree, copy
assets = Environment()
# Register every bundle defined in the local ``pipeline`` module with the
# Flask-Assets environment so templates can reference them by name.
assets.register('js', pipeline.js)
assets.register('js_map', pipeline.js_map)
assets.register('js_polyfills_ie9', pipeline.js_polyfills_ie9)
assets.register('js_polyfills_ie8', pipeline.js_polyfills_ie8)
assets.register('js_promise', pipeline.js_promise)
assets.register('js_ga_payment', pipeline.js_ga_payment)
# GOV.UK and "elements" stylesheets, including IE-specific fallbacks.
assets.register('govuk', pipeline.govuk)
assets.register('govuk_print', pipeline.govuk_print)
assets.register('govuk_ie8', pipeline.govuk_ie8)
assets.register('govuk_ie7', pipeline.govuk_ie7)
assets.register('govuk_ie6', pipeline.govuk_ie6)
assets.register('elements', pipeline.elements)
assets.register('elements_ie8', pipeline.elements_ie8)
assets.register('elements_ie7', pipeline.elements_ie7)
assets.register('elements_ie6', pipeline.elements_ie6)
def register_assets(app):
    """Initialise Flask-Assets for *app* and copy static resources.

    The generated ``.dist`` folder is rebuilt from scratch, then the
    images and font stylesheet that Flask-Assets does not process are
    copied in manually.
    """
    base_dir = os.path.dirname(__file__)  # renamed: `dir` shadowed the builtin
    # Destroy the dist folder on startup to increase the predictability of
    # the startup process across successive builds.
    rmtree(os.path.join(base_dir, '.dist'), True)
    assets.init_app(app)
    # Copy various images from the original stylesheet directory into the
    # built output.  Flask-Assets compresses the css into a new folder but
    # the images don't automatically come along, so copy them manually.
    folders = [
        'images',
        'stylesheets/external-links',
        'stylesheets/fonts',
        'stylesheets/images',
    ]
    for folder in folders:
        src = os.path.join(base_dir, '.land-registry-elements/assets', folder)
        dest = os.path.join(base_dir, '.dist', folder)
        rmtree(dest, True)
        copytree(src, dest)
    # Copy the fonts stylesheet to the output directory.
    copy(
        os.path.join(base_dir, '.land-registry-elements/assets/stylesheets/fonts.css'),
        os.path.join(base_dir, '.dist/stylesheets'),
    )
    return assets
| StarcoderdataPython |
1734616 | <filename>sensors_communication/src/sonic_stream.py
#!/usr/bin/env python
import json
import rospy
from std_msgs.msg import String
from sonic_sensor import HCSR04
if __name__ == "__main__":
rospy.init_node('sonic_data_streamer')
pub = rospy.Publisher('/sonic_data', String, queue_size=10)
rate = rospy.Rate(20)
sensors = rospy.get_param('/sonic_sensors')
initialized_sensors = {}
for sensor, params in sensors.items():
initialized_sensors[sensor] = HCSR04(**params)
while not rospy.is_shutdown():
readings = {}
for sensor_name, initialized_sensor in initialized_sensors.items():
readings[sensor_name] = initialized_sensor.get_measurement()
msg = String()
msg.data = json.dumps(readings)
pub.publish(msg)
rate.sleep()
| StarcoderdataPython |
113961 | __version__ = "0.8.0-alpha.8"
__api_version__ = "v1"
| StarcoderdataPython |
3348463 | <filename>lib/spack/spack/container/writers/__init__.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Writers for different kind of recipes and related
convenience functions.
"""
import collections
import copy
import spack.environment
import spack.schema.env
import spack.tengine as tengine
import spack.util.spack_yaml as syaml
from spack.container.images import build_info, commands_for
from spack.container.images import os_package_manager_for
#: Caches all the writers that are currently supported,
#: keyed by recipe format name (populated by the :func:`writer` decorator).
_writer_factory = {}
def writer(name):
    """Decorator to register a factory for a recipe writer.

    Each factory should take a configuration dictionary and return a
    properly configured writer that, when called, prints the
    corresponding recipe.
    """
    def _register(factory_fn):
        # Record the factory under its format name so create() can find it.
        _writer_factory[name] = factory_fn
        return factory_fn
    return _register
def create(configuration):
    """Returns a writer that conforms to the configuration passed as input.

    Args:
        configuration: how to generate the current recipe
    """
    config = spack.environment.config_dict(configuration)
    writer_name = config['container']['format']
    factory = _writer_factory[writer_name]
    return factory(configuration)
def recipe(configuration):
    """Returns a recipe that conforms to the configuration passed as input.

    Args:
        configuration: how to generate the current recipe
    """
    recipe_writer = create(configuration)
    return recipe_writer()
class PathContext(tengine.Context):
    """Generic context used to instantiate templates of recipes that
    install software in a common location and make it available
    directly via PATH.
    """
    def __init__(self, config):
        # Normalized spack.yaml-style dictionary; the 'container' section
        # drives everything rendered by this context.
        self.config = spack.environment.config_dict(config)
        self.container_config = self.config['container']

    @tengine.context_property
    def run(self):
        """Information related to the run image."""
        images_config = self.container_config['images']
        # Check if we have custom images
        image = images_config.get('final', None)
        # If not use the base OS image
        if image is None:
            image = images_config['os']
        Run = collections.namedtuple('Run', ['image'])
        return Run(image=image)

    @tengine.context_property
    def build(self):
        """Information related to the build image."""
        images_config = self.container_config['images']
        # Check if we have custom images
        image = images_config.get('build', None)
        # If not select the correct build image based on OS and Spack version
        if image is None:
            operating_system = images_config['os']
            spack_version = images_config['spack']
            image_name, tag = build_info(operating_system, spack_version)
            image = ':'.join([image_name, tag])
        Build = collections.namedtuple('Build', ['image'])
        return Build(image=image)

    @tengine.context_property
    def strip(self):
        """Whether or not to strip binaries in the image"""
        return self.container_config.get('strip', True)

    @tengine.context_property
    def paths(self):
        """Important paths in the image"""
        Paths = collections.namedtuple('Paths', [
            'environment', 'store', 'view'
        ])
        return Paths(
            environment='/opt/spack-environment',
            store='/opt/software',
            view='/opt/view'
        )

    @tengine.context_property
    def monitor(self):
        """Enable using spack monitor during build."""
        Monitor = collections.namedtuple('Monitor', [
            'enabled', 'host', 'disable_auth', 'prefix', 'keep_going', 'tags'
        ])
        monitor = self.config.get("monitor")
        # If we don't have a monitor group, cut out early.
        if not monitor:
            return Monitor(False, None, None, None, None, None)
        return Monitor(
            enabled=True,
            host=monitor.get('host'),
            prefix=monitor.get('prefix'),
            disable_auth=monitor.get("disable_auth"),
            keep_going=monitor.get("keep_going"),
            tags=monitor.get('tags')
        )

    @tengine.context_property
    def manifest(self):
        """The spack.yaml file that should be used in the image"""
        import jsonschema
        # Copy in the part of spack.yaml prescribed in the configuration file
        manifest = copy.deepcopy(self.config)
        manifest.pop('container')
        if "monitor" in manifest:
            manifest.pop("monitor")
        # Ensure that a few paths are where they need to be
        manifest.setdefault('config', syaml.syaml_dict())
        manifest['config']['install_tree'] = self.paths.store
        manifest['view'] = self.paths.view
        manifest = {'spack': manifest}
        # Validate the manifest file
        jsonschema.validate(manifest, schema=spack.schema.env.schema)
        return syaml.dump(manifest, default_flow_style=False).strip()

    @tengine.context_property
    def os_packages_final(self):
        """Additional system packages that are needed at run-time."""
        return self._os_packages_for_stage('final')

    @tengine.context_property
    def os_packages_build(self):
        """Additional system packages that are needed at build-time."""
        return self._os_packages_for_stage('build')

    @tengine.context_property
    def os_package_update(self):
        """Whether or not to update the OS package manager cache."""
        os_packages = self.container_config.get('os_packages', {})
        return os_packages.get('update', True)

    def _os_packages_for_stage(self, stage):
        # Resolve the package list configured for the given stage
        # ('build' or 'final') and pack it with the pkg-manager commands.
        os_packages = self.container_config.get('os_packages', {})
        package_list = os_packages.get(stage, None)
        return self._package_info_from(package_list)

    def _package_info_from(self, package_list):
        """Helper method to pack a list of packages with the additional
        information required by the template.

        Args:
            package_list: list of packages

        Returns:
            Enough information to know how to update the cache, install
            a list of packages, and clean in the end.
        """
        if not package_list:
            return package_list
        image_config = self.container_config['images']
        image = image_config.get('build', None)
        if image is None:
            # No custom build image: infer the package manager from the OS.
            os_pkg_manager = os_package_manager_for(image_config['os'])
        else:
            # Custom build image: the package manager must be given explicitly.
            os_pkg_manager = self.container_config['os_packages']['command']
        update, install, clean = commands_for(os_pkg_manager)
        Packages = collections.namedtuple(
            'Packages', ['update', 'install', 'list', 'clean']
        )
        return Packages(update=update, install=install,
                        list=package_list, clean=clean)

    @tengine.context_property
    def extra_instructions(self):
        # Free-form instructions appended verbatim to each recipe stage.
        Extras = collections.namedtuple('Extra', ['build', 'final'])
        extras = self.container_config.get('extra_instructions', {})
        build, final = extras.get('build', None), extras.get('final', None)
        return Extras(build=build, final=final)

    @tengine.context_property
    def labels(self):
        # Arbitrary metadata labels to attach to the generated image.
        return self.container_config.get('labels', {})

    def __call__(self):
        """Returns the recipe as a string"""
        # `template_name` is expected to be provided by subclasses.
        env = tengine.make_environment()
        t = env.get_template(self.template_name)
        return t.render(**self.to_dict())
# Import after function definition all the modules in this package,
# so that registration of writers will happen automatically
import spack.container.writers.singularity # noqa
import spack.container.writers.docker # noqa
| StarcoderdataPython |
3297434 | <filename>FeatureVectorGeneration/extract_total_img.py
from xml.etree.ElementTree import ElementTree
import glob
import subprocess
import os
import codecs
import threading
import time
import hoggen
taskParam_noThread = 10
taskParam_apklistfilename = './apklist.txt'
taskParam_resultfilename = 'Hogs.txt'
taskParam_rawdatadirname = "/media/ktg/FA58AC6258AC1EFF/Research/DNN_AndMal/RawData/"
def recordImageFeats(mainFname, filename):
    """Compute the HOG descriptor of an image and persist it to a text file.

    Args:
        mainFname: path of the output text file the descriptor is written to.
        filename: path of the input image handed to hoggen.genhog().

    Returns:
        (strfd, err): the descriptor as a space-separated string (with a
        trailing space, matching the on-disk format) and the error flag
        from hoggen.genhog().  On failure returns (None, -1) and leaves
        the output file untouched.
    """
    # Run HOG extraction *before* touching the output file: the previous
    # version opened the file first, so a failing image truncated/created
    # an empty file and leaked the open handle on the early return.
    fd, err = hoggen.genhog(filename)
    if err == -1:
        return None, err
    strfd = ''.join(str(h) + ' ' for h in fd)
    with open(mainFname, 'w') as mf:
        mf.write(strfd)
        mf.write('\n')
    return strfd, err
class CollectIMGFeatures(threading.Thread):
    """Worker thread: extract HOG features for every image of its APK list.

    Each computed descriptor string is added to the shared ``imgs`` set and
    the name of every finished APK is appended to ``flog`` as progress log.
    NOTE(review): ``imgs`` is mutated from several threads without taking a
    lock; the ``locklist`` passed in is never used -- confirm intent.
    """
    def __init__(self, imgs, apklist, locklist, flog) :
        threading.Thread.__init__(self)
        # shared result set of space-joined HOG descriptor strings
        self.imgs = imgs
        # list of locks (currently unused, see class note)
        self.locklist = locklist
        # APK directory names this worker is responsible for
        self.apklist = apklist
        # per-thread progress log file
        self.flog = flog
    def run(self):
        """Process each assigned APK directory: HOG every png/jpg/gif."""
        for apk in self.apklist:
            print("stage 1: "+apk+' start')
            dirname = taskParam_rawdatadirname+apk
            pnglist = glob.glob(dirname+'/*.png')
            jpglist = glob.glob(dirname+'/*.jpg')
            giflist = glob.glob(dirname+'/*.gif')
            mainFname = ''
            for png in pnglist:
                mainFname=dirname+'/'+os.path.basename(png)
                mainFname+='_hog.txt'
                strfd, err = recordImageFeats(mainFname,png)
                # err == -1 means HOG extraction failed; skip the result
                if not err == -1:
                    self.imgs.add(strfd)
            for jpg in jpglist:
                mainFname=dirname+'/'+os.path.basename(jpg)
                mainFname+='_hog.txt'
                strfd, err = recordImageFeats(mainFname,jpg)
                if not err == -1:
                    self.imgs.add(strfd)
            for gif in giflist:
                # NOTE(review): gif output names embed the token after the
                # first '_' of the basename; an IndexError is raised when a
                # gif name has no underscore -- confirm the naming scheme.
                mainFname=dirname+'/'+os.path.basename(gif)
                mainFname+='_'
                mainFname+=os.path.basename(gif).split('_')[1]
                mainFname+='_hog.txt'
                strfd, err = recordImageFeats(mainFname,gif)
                if not err == -1:
                    self.imgs.add(strfd)
            # record the finished APK so interrupted runs can be resumed
            self.flog.write(apk)
            self.flog.write('\n')
            self.flog.flush()
############################## main ##############################
# Read the list of APK directory names to process (one per line).
fapklist = open(taskParam_apklistfilename,'r') # get apk list
lines = fapklist.readlines()
fapklist.close()
noofapks = len(lines)
# Separate the total APKs into groups, one group per worker thread.
# NOTE(review): every loop below runs to taskParam_noThread+1, so one extra
# worker/list/log is created, yet `order % taskParam_noThread` only fills
# indices 0..taskParam_noThread-1 -- the last list is always empty and its
# thread does no work.  Looks like an off-by-one; confirm before changing.
listOfapklist = list()
for m in range(0, taskParam_noThread+1):
    listOfapklist.append(list())
order = 0
for line in lines:
    order = order+1
    listOfapklist[(order%taskParam_noThread)].append(line[:-1])
tImgHogSet = set() # total img feature set
# One progress-log file per worker thread.
loglist = list()
for m in range(0, taskParam_noThread+1):
    fhowmany = open('howmany'+str(m)+'.txt','w')
    loglist.append(fhowmany)
# thread creation (# of thread = taskParam_noThread)
locklist = list()
for m in range(0, taskParam_noThread+1):
    locklist.append(threading.Lock())
t = list()
for m in range(0, taskParam_noThread+1):
    t.append(CollectIMGFeatures(tImgHogSet, listOfapklist[m], locklist, loglist[m]))
#thread start
for m in range(0, taskParam_noThread+1):
    t[m].start()
# wait for all workers to finish
for m in range(0, taskParam_noThread+1):
    t[m].join()
# Append every unique HOG descriptor string to the result file.
fhlog = open(taskParam_resultfilename,'a')
for hitem in tImgHogSet:
    fhlog.write(hitem)
    fhlog.write('\n')
fhlog.close()
############################## end ############################### | StarcoderdataPython |
3223285 | import topogenesis as tg
import numpy as np
np.random.seed(0)
# agent class
class agent():
    """A walking agent occupying one voxel of the environment lattice."""
    def __init__(self, origin, stencil, id):
        # origin voxel index of the agent, cast to int so it can index lattices
        self.origin = np.array(origin).astype(int)
        # previous origin; initially identical to the current origin
        self.old_origin = self.origin
        # neighbourhood stencil used to look up candidate cells
        self.stencil = stencil
        # unique (non-zero) agent id, also written into env.agent_origin
        self.id = id
    # definition of walking method for agents
    def walk(self, env):
        """Move one step to the free neighbour with the highest env.myvalue.

        Returns 1 when the agent is boxed in (no free neighbour) and leaves
        all state untouched; returns None after a successful move, having
        updated both the agent's origins and the environment lattices.
        """
        # find available spaces
        #######################
        # retrieve the list of neighbours of the agent based on the stencil
        neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin)
        # find availability of neighbours
        neighs_availibility = env.availibility.flatten()[neighs]
        # separate available neighbours
        free_neighs = neighs[neighs_availibility==1]
        # check to see if there is any available neighbour
        if len(free_neighs)==0:
            return 1
        # retrieve the myvalue of each neighbour
        free_neighs_myvalue = env.myvalue.flatten()[free_neighs]
        # greedy choice: the free neighbour with the maximum value field entry
        selected_neigh = free_neighs[np.argmax(free_neighs_myvalue)]
        # update information
        ####################
        # set the current origin as the old origin
        self.old_origin = self.origin
        # convert the selected flat index back to 3D lattice coordinates
        self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten()
        # update environment information
        ################################
        # making previous position available
        env.availibility[tuple(self.old_origin)] = env.availibility[tuple(self.old_origin)] * 0 + 1
        # removing agent from previous position
        env.agent_origin[tuple(self.old_origin)] *= 0
        # making the current position unavailable
        env.availibility[tuple(self.origin)] *= 0
        # adding agent to the new position
        env.agent_origin[tuple(self.origin)] = self.id
# initiate availibility lattice (1 == cell is free)
unit = 1
# NOTE(review): `lattice_size` and (below) `agent_count` are not defined
# anywhere in this module -- as written this raises NameError unless they
# are injected before execution (e.g. by an exec'ing host); confirm.
bounds = np.array([[0,0,0],[lattice_size,lattice_size,lattice_size]])
avail_lattice = tg.lattice(bounds, unit=unit, default_value=1, dtype=int)
# randomly scattering the agents (flat indices -> 3D lattice coordinates)
selected_cells = np.random.choice(avail_lattice.size, agent_count)
agent_ind = np.array(np.unravel_index(selected_cells, avail_lattice.shape))
# creating neighborhood definition: 3D von Neumann (6-connected) stencil
stencil = tg.stencil(np.array([[[0, 0, 0],
                                [0, 1, 0],
                                [0, 0, 0]],
                               [[0, 1, 0],
                                [1, 1, 1],
                                [0, 1, 0]],
                               [[0, 0, 0],
                                [0, 1, 0],
                                [0, 0, 0]]]),origin=np.array([1,1,1]))
agents= []
# creating agent objects; ids start at 1 so 0 can mean "no agent here"
for id, ind in enumerate(agent_ind.T.tolist()):
    myagent = agent(ind, stencil, id+1)
    agents.append(myagent)
# environment class
class environment():
    """Simulation environment: occupancy/value lattices plus the agents.

    ``availibility`` holds 1 for free cells and 0 for occupied ones;
    ``agent_origin`` holds the id of the agent sitting in each cell
    (0 when empty); ``myvalue`` is the scalar field the agents climb.
    """
    def __init__(self, lattices, agents):
        self.availibility = lattices["availibility"]
        self.myvalue = lattices["myvalue"]
        # same shape as availibility, initialized to all zeros (no agents)
        self.agent_origin = self.availibility * 0
        self.agents = agents
        self.update_agents()

    def update_agents(self):
        """Sync the lattices with every agent's current position."""
        for agn in self.agents:
            prev = tuple(agn.old_origin)
            curr = tuple(agn.origin)
            # free the previously occupied cell
            self.availibility[prev] = self.availibility[prev] * 0 + 1
            self.agent_origin[prev] *= 0
            # occupy the current cell
            self.availibility[curr] *= 0
            self.agent_origin[curr] = agn.id

    def walk_agents(self):
        """Let every agent take one step, then refresh the lattices."""
        for agn in self.agents:
            agn.walk(self)
        self.update_agents()
# construct a dummy value field
###############################
# create a series of sin values for 0 to pi
# NOTE(review): `lattice_size` (and `max_iteration` below) are not defined
# in this module -- confirm they are injected before execution.
sin_a = np.sin(np.arange(lattice_size+1) / float(lattice_size) * np.pi).astype(np.float16)
# compute the outer product of the series with itself to create a radial value field
myvalue_2d_field = np.outer(sin_a,sin_a)
# add an extra dimension to the array to make it compatible with lattices
myvalue_field = myvalue_2d_field[:, :, None] * sin_a[None, None, :]
# construct the lattice
myvalue_lattice = tg.to_lattice(myvalue_field, np.array([0,0,0]))
# initiate the environment
env_lattices = {"availibility": avail_lattice,
                "myvalue": myvalue_lattice}
env = environment(env_lattices, agents)
# main simulation: record every agent's position, then step all agents
agent_history = []
for i in range(max_iteration):
    # print(env.availibility)
    # print(env.agent_origin)
    agn_org = [a.origin for a in env.agents]
    agent_history.append(np.array(agn_org).tolist())
    env.walk_agents()
Agent_History = agent_history
print(type(Agent_History[0])) | StarcoderdataPython |
1749491 | <gh_stars>10-100
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf
import numpy as np
class Grad(optimizer.Optimizer):
    """Plain (vanilla) gradient descent: ``var <- var - lr * grad``."""

    def __init__(self, learning_rate=0.001, use_locking=False, name="myGrad"):
        super(Grad, self).__init__(use_locking, name)
        self._lr = learning_rate
        # Tensor version of the learning rate, created in _prepare().
        self._lr_t = None

    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")

    def _create_slots(self, var_list):
        # Vanilla SGD keeps no per-variable state.
        pass

    def _apply_dense(self, grad, var):
        step_size = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        updated_var = state_ops.assign_sub(var, step_size * grad)
        return control_flow_ops.group(*[updated_var])

    def _apply_sparse(self, grad, var):
        raise NotImplementedError("Sparse gradient updates are not supported.")
class Mom(optimizer.Optimizer):
    """Momentum optimizer with optional Adam-style bias correction.

    Keeps an exponential moving average m of the gradients (decay ``beta1``)
    and applies ``var -= lr * m / (1 - beta1**t)``; when ``bias_correction``
    is False the divisor is a constant 1.0.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, bias_correction=True, use_locking=False, name="myMom"):
        super(Mom, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self.bias_correction = bias_correction
        # Tensor versions of the hyper-parameters, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        # Running beta1**t accumulator shared across all variables.
        self._beta1_power = None
    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
    def _create_slots(self, var_list):
        # Colocate the power accumulator with the lexically first variable.
        first_var = min(var_list, key=lambda x: x.name)
        create_new = self._beta1_power is None
        if not create_new and tf.contrib.eager.in_graph_mode():
            # Re-create the accumulator when optimizing in a different graph.
            create_new = (self._beta1_power.graph is not first_var.graph)
        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = tf.Variable(self._beta1, name="beta1_power", trainable=False)
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        # Bias-correction denominator (1 - beta1**t); 1.0 disables it.
        beta1_fix = 1.0 - beta1_power if self.bias_correction else 1.0
        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + grad * (1 - beta1_t), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, lr_t * (m_t / beta1_fix), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t])
    def _apply_sparse(self, grad, var):
        raise NotImplementedError("Sparse gradient updates are not supported.")
    def _finish(self, update_ops, name_scope):
        # Advance beta1**t once per optimizer step, after all var updates.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign(self._beta1_power * self._beta1_t, use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1], name=name_scope)
class Adam(optimizer.Optimizer):
    """Re-implementation of the Adam optimizer (Kingma & Ba, 2015).

    Tracks bias-corrected first (m) and second (v) moment estimates and
    applies ``var -= lr * m_hat / (sqrt(v_hat) + epsilon)``.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-10, use_locking=False, name="myAdam"):
        super(Adam, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        # Tensor versions of the hyper-parameters, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None
        # Running beta**t accumulators used for bias correction.
        self._beta1_power = None
        self._beta2_power = None
    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
        self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
        self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
    def _create_slots(self, var_list):
        # Colocate the power accumulators with the lexically first variable.
        first_var = min(var_list, key=lambda x: x.name)
        create_new = self._beta1_power is None
        if not create_new and tf.contrib.eager.in_graph_mode():
            create_new = (self._beta1_power.graph is not first_var.graph)
        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = tf.Variable(self._beta1, name="beta1_power", trainable=False)
                self._beta2_power = tf.Variable(self._beta2, name="beta2_power", trainable=False)
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        # Bias-correction denominators (1 - beta**t).
        beta1_fix = 1 - beta1_power
        beta2_fix = 1 - beta2_power
        m = self.get_slot(var, "m")
        v = self.get_slot(var, "v")
        m_t = state_ops.assign(m, beta1_t * m + grad * (1 - beta1_t), use_locking=self._use_locking)
        v_t = state_ops.assign(v, beta2_t * v + (grad * grad) * (1 - beta2_t), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, lr_t * (m_t / beta1_fix) / (math_ops.sqrt(v_t / beta2_fix) + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _apply_sparse(self, grad, var):
        raise NotImplementedError("Sparse gradient updates are not supported.")
    def _finish(self, update_ops, name_scope):
        # Advance the beta power accumulators once per optimizer step.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign(self._beta1_power * self._beta1_t, use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(self._beta2_power * self._beta2_t, use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1, update_beta2], name=name_scope)
class AMSGrad(optimizer.Optimizer):
    """AMSGrad optimizer (Reddi et al., 2018).

    Identical to Adam except that the second-moment slot v is updated with
    ``max(beta2*v + (1-beta2)*g^2, v)``, i.e. it never decreases, which
    restores Adam's convergence guarantee.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-10, use_locking=False, name="myAMSGrad"):
        super(AMSGrad, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        # Tensor versions of the hyper-parameters, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None
        # Running beta**t accumulators used for bias correction.
        self._beta1_power = None
        self._beta2_power = None
    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
        self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
        self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
    def _create_slots(self, var_list):
        # Colocate the power accumulators with the lexically first variable.
        first_var = min(var_list, key=lambda x: x.name)
        create_new = self._beta1_power is None
        if not create_new and tf.contrib.eager.in_graph_mode():
            create_new = (self._beta1_power.graph is not first_var.graph)
        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = tf.Variable(self._beta1, name="beta1_power", trainable=False)
                self._beta2_power = tf.Variable(self._beta2, name="beta2_power", trainable=False)
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        # Bias-correction denominators (1 - beta**t).
        beta1_fix = 1 - beta1_power
        beta2_fix = 1 - beta2_power
        m = self.get_slot(var, "m")
        v = self.get_slot(var, "v")
        m_t = state_ops.assign(m, beta1_t * m + grad * (1 - beta1_t), use_locking=self._use_locking)
        # AMSGrad: keep the element-wise running maximum of the 2nd moment.
        v_t = state_ops.assign(v, tf.maximum(beta2_t * v + (grad * grad) * (1 - beta2_t), v), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, lr_t * (m_t / beta1_fix) / (math_ops.sqrt(v_t / beta2_fix) + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _apply_sparse(self, grad, var):
        raise NotImplementedError("Sparse gradient updates are not supported.")
    def _finish(self, update_ops, name_scope):
        # Advance the beta power accumulators once per optimizer step.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign(self._beta1_power * self._beta1_t, use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(self._beta2_power * self._beta2_t, use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1, update_beta2], name=name_scope)
class AdamMax(optimizer.Optimizer):
    """AdaMax-style optimizer.

    Like Adam, but the second-moment slot keeps a running maximum,
    ``v_t = max(beta2 * v, g^2)``, and only the first moment is
    bias-corrected.
    NOTE(review): canonical AdaMax uses |g| rather than g^2 here; this
    variant tracks squared gradients and takes sqrt(v_t) in the update --
    presumably intentional, confirm against the experiments it was used in.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-10, use_locking=False, name="AdamMax"):
        super(AdamMax, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        # Tensor versions of the hyper-parameters, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None
        self._epsilon_t = None
        # Running beta1**t accumulator (no beta2 power: v is not corrected).
        self._beta1_power = None
    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
        self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
        self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
    def _create_slots(self, var_list):
        # Colocate the power accumulator with the lexically first variable.
        first_var = min(var_list, key=lambda x: x.name)
        create_new = self._beta1_power is None
        if not create_new and tf.contrib.eager.in_graph_mode():
            create_new = (self._beta1_power.graph is not first_var.graph)
        if create_new:
            with ops.colocate_with(first_var):
                self._beta1_power = tf.Variable(self._beta1, name="beta1_power", trainable=False)
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)
    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        # Bias-correction denominator for the first moment only.
        beta1_fix = 1 - beta1_power
        m = self.get_slot(var, "m")
        v = self.get_slot(var, "v")
        m_t = state_ops.assign(m, beta1_t * m + grad * (1 - beta1_t), use_locking=self._use_locking)
        # Running maximum instead of an exponential moving average.
        v_t = state_ops.assign(v, tf.maximum(beta2_t * v, grad * grad), use_locking=self._use_locking)
        var_update = state_ops.assign_sub(var, lr_t * (m_t / beta1_fix) / (math_ops.sqrt(v_t) + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        # Sparse variant: decay the full slots, then scatter the gradient
        # contribution into the touched indices only.
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta1_fix = 1 - beta1_power
        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, grad * (1 - beta1_t))
        v = self.get_slot(var, "v")
        # NOTE(review): unlike _apply_dense this uses an EMA for v, not a
        # running max -- the dense and sparse paths disagree; confirm.
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, (grad * grad) * (1 - beta2_t))
        var_update = state_ops.assign_sub(var, lr_t * (m_t / beta1_fix) / (math_ops.sqrt(v_t) + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t])
    def _apply_sparse(self, grad, var):
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))
    def _finish(self, update_ops, name_scope):
        # Advance beta1**t once per optimizer step.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign(self._beta1_power * self._beta1_t, use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1], name=name_scope)
class AdaShift(optimizer.Optimizer):
    """AdaShift optimizer (Zhou et al., 2019).

    Keeps the last ``keep_num`` gradients per variable and decorrelates the
    first and second moments: the step direction m_t is a beta1-weighted
    average of the *delayed* gradients, while the second moment v is updated
    from the oldest kept gradient (optionally reduced with ``pred_g_op``:
    'none', 'max' or 'mean').
    """
    def __init__(self, learning_rate=0.001, keep_num=10, beta1=0.9, beta2=0.999, epsilon=1e-10, pred_g_op='max', use_locking=False, name="AdaShift"):
        # Bug fix: the original called super(AdamShiftMoving, self), a class
        # name that does not exist, so instantiation raised NameError.
        super(AdaShift, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._keep_num = keep_num
        self._beta2 = beta2
        self._beta1 = beta1
        self._epsilon = epsilon
        self._pred_g_op = pred_g_op
        # Normalized beta1-geometric weights over the kept gradient window,
        # oldest gradient gets the largest exponent.
        s = np.asarray([(self._beta1**(self._keep_num-i-1)) for i in range(self._keep_num)])
        self.s = s / np.sum(s)
        # Tensor versions of the hyper-parameters, created in _prepare().
        self._lr_t = None
        self._beta2_t = None
        self._epsilon_t = None

    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
        self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")

    def _create_slots(self, var_list):
        self.first_var = min(var_list, key=lambda x: x.name)
        for v in var_list:
            # Circular buffer of the last keep_num gradients plus one
            # staging slot (g[keep_num]) for the incoming gradient.
            for i in range(self._keep_num+1):
                self._zeros_slot(v, "g%d" % i, self._name)
            # v: second moment; z: "has seen a gradient" flag;
            # b2p: running beta2**t, gated by z for bias correction.
            self._zeros_slot(v, "v", self._name)
            self._zeros_slot(v, "z", self._name)
            self._zeros_slot(v, "b2p", self._name)

    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
        g = [self.get_slot(var, "g%d" % i) for i in range(self._keep_num+1)]
        v = self.get_slot(var, "v")
        z = self.get_slot(var, "z")
        b2p = self.get_slot(var, "b2p")
        # Update the second moment from the *oldest* kept gradient g[0],
        # optionally reduced over the whole tensor (max/mean).
        if self._pred_g_op == 'none':
            v_t = state_ops.assign(v, v * beta2_t + tf.square(g[0]) * (1 - beta2_t), use_locking=self._use_locking)
        elif self._pred_g_op == 'max':
            v_t = state_ops.assign(v, v * beta2_t + tf.reduce_max(tf.square(g[0])) * (1 - beta2_t), use_locking=self._use_locking)
        elif self._pred_g_op == 'mean':
            v_t = state_ops.assign(v, v * beta2_t + tf.reduce_mean(tf.square(g[0])) * (1 - beta2_t), use_locking=self._use_locking)
        else:
            assert False
        # Shift the gradient window: stage the new gradient, then move every
        # slot one step towards the front (g[i] <- g[i+1]).
        with ops.control_dependencies([v_t]):
            g_t = state_ops.assign(g[-1], grad, use_locking=self._use_locking)
            for i in range(self._keep_num):
                with ops.control_dependencies([g_t]):
                    g_t = state_ops.assign(g[i], g[i + 1], use_locking=self._use_locking)
        with ops.control_dependencies([g_t]):
            # Weighted average of the delayed gradients (first moment).
            m_t = tf.reduce_sum([g[i]*self.s[i] for i in range(self._keep_num)], axis=0)
            with ops.control_dependencies([v_t]):
                # z_t latches to 1 once any non-zero second moment was seen;
                # it gates both the bias correction and the step itself.
                z_t = state_ops.assign(z, tf.cast(tf.logical_or(v_t > 0.0, z > 0.0), tf.float32))
                b2p_t = state_ops.assign(b2p, b2p * beta2_t * tf.sign(z_t) + (1.0 - tf.sign(z_t)), use_locking=self._use_locking)
                b2_fix = tf.maximum(1e-8, 1.0 - b2p_t)
                step_t = z_t * m_t / (math_ops.sqrt(v_t / b2_fix) + epsilon_t)
        var_update = state_ops.assign_sub(var, lr_t * step_t, use_locking=self._use_locking)
        return control_flow_ops.group(*([var_update]))

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        # Consistent with the other optimizers in this module (was a bare
        # ``raise Exception()``).
        raise NotImplementedError("Sparse gradient updates are not supported.")

    def _apply_sparse(self, grad, var):
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))

    def _finish(self, update_ops, name_scope):
        with ops.control_dependencies(update_ops):
            return control_flow_ops.group(*update_ops, name=name_scope)
59620 | <reponame>Lapu-Lapu/mp-prediction
# -*- coding: utf-8 -*-
"""
This script is used with the data of the VR-Psychophysics Experiment to:
- clean the dataset from training trials
- clean the dataset from catch trials, as well as creating a csv-file
containing all catch trials for catch_analysis
- create a dataset that can be read by the analysis script and the vizualisation script
"""
from __future__ import division, print_function
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import os
from src.globs import model
import pickle
import re
# Movement-primitive hyper-parameter names that a model string may encode.
MP_PARAMS = ['numprim', 'npsi', 'dyn', 'lvm']
# Maps the abbreviated model tags used inside stimulus/model strings to the
# canonical model names used by the analysis (keys of src.globs.model).
remap_std_modelname = {
    "C": "vcgpdm",
    "nC": "vgpdm",
    "cMAP": "mapcgpdm",
    "MAP": "mapgpdm",
    "tmp": "tmp",
    "dmp": "dmp"
}
def load_data(dirname) -> pd.DataFrame:
    """Read every CSV file in *dirname* and concatenate them into one frame.

    Args:
        dirname: directory containing the per-participant CSV files
                 (a trailing path separator is no longer required).

    Returns:
        A single DataFrame with a fresh integer index.

    Raises:
        FileNotFoundError: if the directory contains no CSV files (the
            previous version raised an opaque IndexError here).
    """
    files = [os.path.join(dirname, f) for f in os.listdir(dirname)
             if f.endswith('csv')]
    if not files:
        raise FileNotFoundError('no CSV files found in %r' % dirname)
    # pd.concat replaces the per-file DataFrame.append loop: append copied
    # the full frame on every iteration (O(n^2)) and was removed in
    # pandas 2.0.
    return pd.concat([load_csv(f) for f in files], ignore_index=True)
def load_csv(fn):
    """Read one experiment CSV file and do per-file cleaning.

    Keeps only the columns listed below, drops rows without a trial number,
    renames the PsychoPy trial-counter columns, and derives the 'generated'
    and 'natural' stimulus-name columns.
    """
    cols = (
        'trialnumber,block,first_seq_from,second_seq_from,' +
        'participant,result,correct_sequence,first_seq,second_seq,expPart,' +
        'part_input,date,expName,' + 'trialstart_time,' +
        'trialend_time,trialtime,answer_time').split(',')
    # time_at_videoonset,time_at_videoend,
    # time_at_fixation_cross_onset,time_at_key_response,
    df = pd.read_csv(fn, usecols=cols)
    # drop rows that do not belong to a trial (NaN trial number)
    df = df[~np.isnan(df.trialnumber)]
    # df = df[~df.participant_key_response.map(pd.isnull)]
    # adapt to new experiment names (a no-op when the columns are absent)
    df = df.rename(
        {
            "trials.thisN": 'n_trial',
            "trials.thisTrialN": "n_inblock"
        }, axis=1)
    # derive the stimulus-source names; catch trials become 'catchtrial'
    df['generated'] = df.apply(get_artificial, axis=1)
    df['natural'] = df.apply(get_natural, axis=1)
    return df
def get_artificial(row: pd.Series):
    """Return the name of the model-generated stimulus of this trial.

    When the natural (correct) sequence was shown first ('1'), the
    generated one came second, and vice versa.  Rows whose
    ``correct_sequence`` is neither '1' nor '2', or that lack the source
    column, are catch trials.
    """
    source_by_answer = {'1': 'second_seq_from', '2': 'first_seq_from'}
    try:
        source_column = source_by_answer[row.correct_sequence]
        return row[source_column]  # .split('.')[0]
    except KeyError:
        return 'catchtrial'
def get_natural(row: pd.Series):
    """Return the name of the natural stimulus shown in this trial.

    Mirror of get_artificial(): ``correct_sequence`` ('1' or '2') says
    which interval contained the natural movement, so read the matching
    ``*_seq_from`` column.  Any other value, or a missing column, marks a
    catch trial.
    """
    # Bug fix: the previous version did ``row[row.correct_sequence]``,
    # i.e. it looked up a column literally named '1' or '2' which never
    # exists, so it returned 'catchtrial' for every row.
    natural_col = {'1': 'first_seq_from', '2': 'second_seq_from'}
    try:
        return row[natural_col[row.correct_sequence]]
    except KeyError:
        return 'catchtrial'
def add_model_param_columns(row: pd.Series):
    '''split up the "model" into model types and model parameters.

    It takes a row, and appends the information stored
    in the model-string to it. When used on a dataframe with apply,
    this produces a dataframe with new columns containing
    movement type, model type, and parameters.
    Parameters which are not existing for a given model type are
    filled with None.
    '''
    # NOTE(review): inferred format is "<movement>_<modeltag>..." with the
    # numeric parameters embedded somewhere in the string -- confirm
    # against the actual stimulus naming scheme.
    s = row.model
    lst = s.split('_')
    movement_type = lst[0]
    m = lst[1]
    # tmp/dmp tags may carry trailing characters; keep only the 3-letter tag
    if m[:3] in ['tmp', 'dmp']:
        m = m[:3]
    # all integers appearing anywhere in the model string, in order
    params = re.findall(r'\d+', s)
    modelname = remap_std_modelname[m]
    type_specifics = model[modelname]
    d = {'movement': movement_type, 'modeltype': modelname}
    for param in MP_PARAMS:
        if param in type_specifics['params']:
            # lvm is encoded as the second number, all others as the first
            if param == 'lvm':
                d[param] = int(params[1])
            else:
                d[param] = int(params[0])
        else:
            d[param] = None
    return pd.concat([row, pd.Series(d)])
def training_movement(m):
    """Return the stimulus id identifying the starting position of the
    model-generated movement for this trial.

    Whichever of the two sequence ids falls into the natural-stimulus
    range identifies the trial; the *other* sequence id is returned.

    NOTE(review): the two bounds are asymmetric (``< 25`` vs ``<= 25``)
    and the fall-through raises a bare ``Warning`` — both look
    accidental but are preserved; confirm against the stimulus
    numbering before changing either.
    """
    if m.first_seq < 25:
        return m.second_seq
    if m.second_seq <= 25:
        return m.first_seq
    raise Warning
def load_post_processed_data(rootdir='../VR_data/') -> pd.DataFrame:
    """Load all experiment csv files under *rootdir* and post-process them.

    Pipeline: drop training trials, split off catch trials (written as a
    side effect to data/processed/catchtrial_vr.csv), derive the
    stimulus id and the per-model parameter columns, and normalize
    column names for the downstream analysis scripts.
    """
    df = load_data(rootdir)
    # remove training trials
    df = df[~df.expPart.str.contains("trainingtrials")]
    # then remove the expPart column, for its purpose is fulfilled
    # NOTE(review): the positional axis argument to drop() is deprecated
    # in modern pandas; this is drop('expPart', axis=1).
    df = df.drop('expPart', 1)
    ''' catchtrials Extraction '''
    # catchtrials contain certain strings in the columns 'first_seq' and
    # "second_seq" they help to identify the catchtrials
    df_catch_1 = df[df['first_seq'].isin(['400ms', '700ms', '1000ms'])]
    df_catch_2 = df[df['second_seq'].isin(['400ms', '700ms', '1000ms'])]
    df_catch = pd.concat(pd.DataFrame(i) for i in (df_catch_1, df_catch_2))
    # side effect: the catch trials are persisted for separate analysis
    df_catch.to_csv('data/processed/catchtrial_vr.csv')
    # now remove catchtrials from df
    df = df[~df['first_seq'].isin(['400ms', '700ms', '1000ms'])]
    df = df[~df['second_seq'].isin(['400ms', '700ms', '1000ms'])]
    # then remove 'first_seq' and 'second_seq' column, for its purpose is
    # fulfilled
    # make values numeric (non-numeric leftovers coerce to NaN first)
    df.first_seq = pd.to_numeric(df.first_seq,
                                 errors='coerce').astype(np.int64)
    df.second_seq = pd.to_numeric(df.second_seq,
                                  errors='coerce').astype(np.int64)
    df['stimulus_id'] = df.apply(training_movement, axis=1)
    df = df.drop(['generated', 'natural'], axis=1)
    ''' create a column with the model type '''
    # 1 == wrong answer (generated movement fooled the participant); 0 ==
    # correct answer (natural stimulus selected)
    #df = df.drop(["answer_time", "trialend_time",
    # "correct_sequence", "part_input", "trials.thisRepN","trials.thisTrialN",
    # "trials.thisN", "trials.thisIndex", "date", "expName" ], axis = 1)
    # build the "model" column from "second_seq_from" and "first_seq_from":
    # strip the natural-stimulus prefixes so only the model string remains
    df['second_seq_from'] = df['second_seq_from'].str.replace('natpasslst', '')
    df['second_seq_from'] = df['second_seq_from'].str.replace('natholdlst', '')
    df['second_seq_from'] = df['second_seq_from'].str.replace('natretlst', '')
    df['first_seq_from'] = df['first_seq_from'].str.replace('natpasslst', '')
    df['first_seq_from'] = df['first_seq_from'].str.replace('natholdlst', '')
    df['first_seq_from'] = df['first_seq_from'].str.replace('natretlst', '')
    # exactly one of the two columns is non-empty per row, so joining
    # them yields the model string for that trial
    df['model'] = df[['first_seq_from',
                      'second_seq_from']].apply(lambda x: ''.join(x), axis=1)
    # # then remove 'first_seq_from' and 'second_seq_from' column, for its
    # # purpose is fulfilled
    # df = df.drop('first_seq_from', 1)
    # df = df.drop('second_seq_from', 1)
    df = df.apply(add_model_param_columns, axis=1)
    # drop 'model' column
    df = df.drop(['model'], axis=1)
    # rename a column to let old scripts read it
    df = df.rename(columns={'modeltype': 'mp_type'})
    # map the short movement codes to the verbose names used elsewhere
    PP = {
        "ret": "return-bottle",
        "hold": "pass-bottle-hold",
        "put": "pass-bottle"
    }
    df['movement'] = df['movement'].apply(lambda x: PP[x])
    return df
def get_mptype(d: dict) -> str:
    """Derive the model name used by the mp_perception naming convention.

    A one-part 'vcgpdm' collapses to 'vgpdm', and MAP-mode models get a
    'map' prefix replacing their first letter (e.g. 'vgpdm' ->
    'mapgpdm').
    """
    name = d['model']
    if name == 'vcgpdm' and d['parts'] == 1:
        name = "vgpdm"
    if d["mode"] == "MAP":
        name = "map" + name[1:]
    return name
def get_params(row: pd.Series) -> pd.Series:
    """Append one column per model parameter (ideal-observer params).

    Parameters that the row's model type does not use stay unset.
    """
    param_cols = pd.Series(index=['dyn', 'lvm', 'npsi', 'numprim'], dtype=int)
    settings = row['settings']
    for name in model[row.mp_type]['params']:
        param_cols[name] = settings[name]
    return pd.concat([row, param_cols])
def process_data_dict(D: dict) -> pd.DataFrame:
    """Build the training-score DataFrame from a raw score dictionary.

    Normalizes the score column names, derives model type, movement and
    hold index from the stored settings, expands the per-model
    parameters and attaches a unique ``training_id`` per row.
    """
    df = pd.DataFrame(D)
    column_map = {
        "WRAP_DYN": "dyn_warped_error",
        "WRAP_PATH": "path_warped_error",
        "dyn_ELBO": "dyn_elbo",
        "lvm_ELBO": "lvm_elbo",
    }
    df = df.rename(column_map, axis=1)
    df['mp_type'] = df['settings'].apply(get_mptype)
    df['movement'] = df.settings.apply(lambda s: s['dataset'])
    df['hold'] = df.settings.apply(lambda s: s['hold'])
    df = df.apply(get_params, axis=1)
    df['training_id'] = df.apply(trial_id, axis=1)
    return df
def trial_id(row: pd.Series) -> str:
    """Create a unique identifier for a trial.

    Arguments:
        row -- row of DataFrame containing all trial data

    Return string like: "tmp_numprim11_return-bottle_0"
    """
    pieces = (_parse_params(row), row["movement"], str(row["hold"]))
    return "_".join(pieces)
def _parse_params(row: pd.Series) -> str:
    """Encode a row's model type and numeric parameters as one string.

    Builds e.g. "tmp_numprim11" from the row's ``mp_type`` and the
    parameter columns that this model type uses; the non-numeric 'mode'
    setting is skipped.

    Fix: the return annotation used to be ``(str, str)`` — a tuple
    literal, not a valid type — although a single string is returned.
    """
    mp_type = row["mp_type"]
    param_str = mp_type + "_"
    for prm_name in model[mp_type]["params"]:
        if prm_name == 'mode':
            continue
        param_str += prm_name + str(int(row[prm_name]))
    return param_str
def movement_model_id(row):
    """Identify a movement/model combination (without the hold index).

    Return string like: "tmp_numprim11_return-bottle"
    """
    return "{}_{}".format(_parse_params(row), row["movement"])
def open_pkls(fn: str) -> list:
    """Load a pickled list of model-score dictionaries from *fn*.

    The pickle is read with a latin-1 fallback encoding so that
    Python-2 era files still load.
    """
    with open(fn, "rb") as handle:
        payload = pickle.load(handle, fix_imports=True, encoding="latin1")
    return payload
def remove_brackets(s):
    """Strip '(' and ')' from *s*; return an int when the remainder parses."""
    bare = s.translate(str.maketrans('', '', '()'))
    try:
        return int(bare)
    except ValueError:
        return bare
# Patterns used by parse_filename(): a "(token)" group, and the
# identifier immediately before an opening parenthesis.  Raw strings
# fix the invalid '\(' / '\-' escape sequences the old plain literals
# produced (DeprecationWarning / SyntaxWarning on modern Python); the
# matched text is unchanged.
inparenthesis = re.compile(r'\([a-zA-Z0-9\-]*\)')
beforeparenthesis = re.compile(r'[a-zA-Z0-9\-]*\(')
def parse_filename(s: str) -> pd.Series:
    """Parse a stimulus filename into key/value pairs.

    Values are the numbers (or tokens) found inside parentheses, keys
    are the identifiers directly in front of them with hyphens removed.
    Returns a Series like {'model': 'vcgpdm', ...}.
    """
    values = [remove_brackets(tok) for tok in re.findall(inparenthesis, s)]
    keys = [remove_brackets(tok) for tok in re.findall(beforeparenthesis, s)]
    pairs = {key.replace('-', ''): value for value, key in zip(values, keys)}
    return pd.Series(pairs)
| StarcoderdataPython |
3386381 | # type: ignore
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
import sys
from thrift.protocol.TProtocol import TProtocolException
from thrift.Thrift import (
TApplicationException,
TException,
TFrozenDict,
TMessageType,
TType,
)
from thrift.transport import TTransport
from thrift.TRecursive import fix_spec
all_structs = []  # filled by generated struct specs; consumed and deleted below
class fb_status(object):
    """
    Common status reporting mechanism across all services
    """
    # Thrift-generated enum: plain integer class attributes plus the two
    # lookup tables the generated (de)serializers use to translate
    # between wire values and symbolic names.
    DEAD = 0
    STARTING = 1
    ALIVE = 2
    STOPPING = 3
    STOPPED = 4
    WARNING = 5
    _VALUES_TO_NAMES = {
        0: "DEAD",
        1: "STARTING",
        2: "ALIVE",
        3: "STOPPING",
        4: "STOPPED",
        5: "WARNING",
    }
    _NAMES_TO_VALUES = {
        "DEAD": 0,
        "STARTING": 1,
        "ALIVE": 2,
        "STOPPING": 3,
        "STOPPED": 4,
        "WARNING": 5,
    }
# Standard thrift epilogue: resolve recursive struct specs, then drop
# the registry (this file defines no structs, so the list is empty).
fix_spec(all_structs)
del all_structs
| StarcoderdataPython |
36892 | <reponame>jasmine92122/NightClubBackend
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-08-13 12:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds one translated variant of the
    # PlaceType "plural name" field per supported language (en/fr/ru/uk),
    # building on migration 0085 which introduced the base field.
    dependencies = [
        ('places', '0085_placetype_name_plural'),
    ]
    operations = [
        migrations.AddField(
            model_name='placetype',
            name='name_plural_en',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Place type'),
        ),
        migrations.AddField(
            model_name='placetype',
            name='name_plural_fr',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Place type'),
        ),
        migrations.AddField(
            model_name='placetype',
            name='name_plural_ru',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Place type'),
        ),
        migrations.AddField(
            model_name='placetype',
            name='name_plural_uk',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Place type'),
        ),
    ]
| StarcoderdataPython |
1694010 | <reponame>ikhlo/LinkPrediction_Kaggle<gh_stars>0
import csv
import networkx as nx
import numpy as np
from random import randint
from sklearn.linear_model import LogisticRegression
# Create a graph
# Create a graph from the comma-separated edge list on disk.
G = nx.read_edgelist('edgelist.txt', delimiter=',', create_using=nx.Graph(), nodetype=int)
nodes = list(G.nodes())
n = G.number_of_nodes()
m = G.number_of_edges()
print('Number of nodes:', n)
print('Number of edges:', m)
# Create the training matrix. Each row corresponds to a pair of nodes and
# its class label is 1 if it corresponds to an edge and 0, otherwise.
# Use the following 2 features for each pair of nodes:
# (1) sum of degrees of two nodes
# (2) absolute value of difference of degrees of two nodes
X_train = np.zeros((2*m, 2))
y_train = np.zeros(2*m)
for i,edge in enumerate(G.edges()):
    # an edge (positive example)
    X_train[2*i,0] = G.degree(edge[0]) + G.degree(edge[1])
    X_train[2*i,1] = abs(G.degree(edge[0]) - G.degree(edge[1]))
    y_train[2*i] = 1
    # a randomly generated pair of nodes (negative example)
    # NOTE(review): the random pair may coincide with a real edge or be
    # a self-pair; accepted as label noise for this simple baseline.
    n1 = nodes[randint(0, n-1)]
    n2 = nodes[randint(0, n-1)]
    X_train[2*i+1,0] = G.degree(n1) + G.degree(n2)
    X_train[2*i+1,1] = abs(G.degree(n1) - G.degree(n2))
    y_train[2*i+1] = 0
print('Size of training matrix:', X_train.shape)
# Read test data. Each sample is a pair of nodes
node_pairs = list()
with open('test.txt', 'r') as f:
    for line in f:
        t = line.split(',')
        node_pairs.append((int(t[0]), int(t[1])))
# Create the test matrix. Use the same 2 features as above
X_test = np.zeros((len(node_pairs), 2))
for i,node_pair in enumerate(node_pairs):
    X_test[i,0] = G.degree(node_pair[0]) + G.degree(node_pair[1])
    X_test[i,1] = abs(G.degree(node_pair[0]) - G.degree(node_pair[1]))
print('Size of test matrix:', X_test.shape)
# Use logistic regression to predict if two nodes are linked by an edge;
# predict_proba column 1 is the probability of the positive (edge) class.
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_pred = clf.predict_proba(X_test)
y_pred = y_pred[:,1]
# Write predictions to a file
predictions = zip(range(len(y_pred)), y_pred)
with open("submission.csv","w") as pred:
    csv_out = csv.writer(pred)
    csv_out.writerow(['id','predicted'])
    for row in predictions:
csv_out.writerow(row) | StarcoderdataPython |
3358939 | <filename>parsi_io/modules/quranic_extractions.py
#In the name of Allah
import re
import pickle
import pandas as pd
import time
import zipfile
import os
from tqdm import tqdm
from tashaphyne.normalize import strip_tashkeel, strip_tatweel
from camel_tools.utils.normalize import normalize_alef_maksura_ar, normalize_teh_marbuta_ar, normalize_alef_ar
from camel_tools.utils.dediac import dediac_ar
class QuranicExtraction(object):
def __init__(self, model = 'excact', precompiled_patterns = 'prebuilt', num_of_output_in_apprx_model = 20):
'''
Model initialization
"precompiled_patterns" can be 'prebuilt', 'build_and_use' or 'off'
"model" can be 'exact' or 'apprx'
'''
self.model_type = model
if self.model_type == 'exact':
if precompiled_patterns != 'prebuilt':
if precompiled_patterns == 'build_and_use':
self.initialize_from_scratch(create_compiled_patterns=True, save_compiled_patterns=True)
self.use_precompiled_patterns = True
elif precompiled_patterns == 'off':
self.initialize_from_scratch(create_compiled_patterns=False, save_compiled_patterns=False)
self.use_precompiled_patterns = False
else:
"Load previously normalized quran"
with open("quranic_extractions/pickles/quran_df.pickle", 'rb') as f:
self.quran_df = pickle.load(f)
"Load previously normalized qbigram_bag"
with open("quranic_extractions/pickles/qbigram_bag.pickle", 'rb') as f:
self.qbigram_bag = pickle.load(f)
"Load previously compiled qbigram patterns"
with open("quranic_extractions/pickles/qbigram_compiled.pickle", 'rb') as f:
self.qbigram_compiled = pickle.load(f)
"Load previously compiled verses patterns"
print("Loading verses_rules_compiled.pickle. This can take a while...")
if not os.path.exists('quranic_extractions/pickles/verses_rules_compiled.pickle'):
with zipfile.ZipFile('quranic_extractions/pickles/verses_rules_compiled.zip', 'r') as zip_ref:
zip_ref.extractall('quranic_extractions/pickles/')
with open("quranic_extractions/pickles/verses_rules_compiled.pickle", 'rb') as f:
self.verses_rules_compiled = pickle.load(f)
self.use_precompiled_patterns = True
elif self.model_type == 'apprx':
"Load and normalize quran"
self.quran_df = pd.read_csv('quranic_extractions/data/Quran.txt', sep="##|\t", names=['sore', 'aye', 'text'], engine='python')
self.quran_df['text_norm'] = self.quran_df['text'].apply(lambda x: self.normalize(x))
"Hyper Parameters"
self.CHAR_DIFF_FACTOR = 1
self.TUPLE_SIMILARITY_FACTOR = 4
self.AYE_LEN_FACTOR = 0.1
self.SAME_AYE_THRESHOLD = num_of_output_in_apprx_model
self.SAME_AYE_RATIO = 1.3
self.MIN_ACCEPT_SIMILARITY = 6
self.ayes = self.quran_df.to_dict(orient='records')
self.wordsMap = {}
for i in range(len(self.ayes)):
ayeWords = set()
ayeWords.update(self.ayes[i]['text_norm'].split())
for word in ayeWords:
if word not in self.wordsMap:
self.wordsMap[word] = {'tuples': self.get_tuples(word), 'ayes': set()}
self.wordsMap[word]['ayes'].add(i)
stems = ['من', 'ان', 'ما', 'قول', 'فی', 'قال', 'لا', 'کان', 'الا', 'وما', 'ولا', 'یا', 'لم', 'عن', 'علیٰ',
'قد', 'اذا']
for stem in stems:
if stem in self.wordsMap:
self.wordsMap[stem]['ayes'] = set()
"Normalization functions"
def tokenizer(self, text, remove_extra_space = True, split = True):
if not text or not isinstance(text, str):
return ""
text = self.remove_extra_chars(text, remove_extra_space = remove_extra_space)
if split:
return text.split(' ')
else:
return text
def norm_chars(self, text):
text = self.substitute_alphabets(text)
text = self.camel_normal(text)
text = strip_tashkeel(text)
text = strip_tatweel(text)
return text
    def remove_extra_chars(self, text, remove_extra_space = True):
        """Strip emojis, Quranic layout marks, punctuation and digits
        from *text*; when ``remove_extra_space`` is set, runs of spaces
        are collapsed to one (that rule is the last item of ``items``
        and is deleted otherwise)."""
        #text = re.sub(r"http\S+", "", text)
        # One large character class covering the common emoji /
        # pictograph Unicode ranges; every run is replaced by a space.
        emoji_pattern = re.compile("["
                                   u"\U0001F600-\U0001F64F"  # emoticons
                                   u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                                   u"\U0001F680-\U0001F6FF"  # transport & map symbols
                                   u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                                   u"\U00002500-\U00002BEF"  # chinese char
                                   u"\U00002702-\U000027B0"
                                   u"\U00002702-\U000027B0"
                                   u"\U000024C2-\U0001F251"
                                   u"\U0001f926-\U0001f937"
                                   u"\U00010000-\U0010ffff"
                                   u"\u2640-\u2642"
                                   u"\u2600-\u2B55"
                                   u"\u200d"
                                   u"\u23cf"
                                   u"\u23e9"
                                   u"\u231a"
                                   u"\ufe0f"  # dingbats
                                   u"\u3030"  # flags (iOS)
                                   "]+", flags=re.UNICODE)
        text = emoji_pattern.sub(r' ', text)
        # NOTE(review): empty_char is a SPACE, so rules using it insert
        # a space rather than deleting (unlike the older commented-out
        # rules that substituted '').  Preserved as shipped.
        empty_char = ' '
        items = [
            # (r'¬|ۚ| ۖ| ۗ| ۚ| ۙ| ۘ| ۛ|۩|۞|↙|«|»', r''),
            # (r':|;|«|؛|!|؟|٪', r''),
            #(r'¬|ۚ| ۖ| ۗ| ۚ| ۙ| ۘ| ', r''),
            (r'¬|ۚ|ۖ|ۗ|ۚ|ۙ|ۘ', empty_char),
            (r':|;|«|؛|!|؟|٪|۩|۞|↙|«|»|_', r' '),
            # (r'ـ|ِ|ُ|َ|ٍ|ٌ|ً', r''),
            ('\r', r' '),
            ('\f', r' '),
            # (r'', r""),
            ('\u200c', r' '),
            # (r"ـ", r""),
            (r'ء', empty_char),
            (r'۩', empty_char),
            # (r'ٰ', r'ا'),
            # (r'\u200c', ''),
            (r'•|·|●|·|・|∙|。|ⴰ', r' '),
            (r',|٬|٫|‚|,|،|،', r' '),
            # (r'¬', r''),
            # (r'( )+', r' '),
            # (r'(\n)+', r'\n'),
            # (r'()+', r''),
            (u"\ufdf0", empty_char),
            (u"\ufdf1", empty_char),
            (u"\u2022", empty_char),
            (r'( )+', r' ')
        ]
        if not remove_extra_space:
            del items[-1]
        for item in items:
            text = re.sub(item[0], item[1], text)
        # Bulk single-character removal of ASCII/Persian punctuation and
        # digits: every listed character maps to a space in one pass.
        replacing_words = '[!%#$,\.`~!\^&\*()-+={}\[\]|\\//:;\"\'\<,\>?؛۱۲۳۴۵۶۷۸۹۰1234567890«:؛»@]'
        text = text.translate(str.maketrans(replacing_words, len(replacing_words) * ' '))
        return text
def camel_normal(self, text):
text = normalize_alef_maksura_ar(text)
text = normalize_teh_marbuta_ar(text)
text = normalize_alef_ar(text)
return dediac_ar(text)
    def substitute_alphabets(self, text):
        """Map the many presentation-form / regional variants of each
        Arabic-Persian letter onto one canonical codepoint, so matching
        is insensitive to glyph variants."""
        items = [
            (r"ﺐ|ﺏ|ﺑ", r"ب"),
            (r"ﭖ|ﭗ|ﭙ|ﺒ|ﭘ", r"پ"),
            (r"ﭡ|ٺ|ٹ|ﭞ|ٿ|ټ|ﺕ|ﺗ|ﺖ|ﺘ", r"ت"),
            (r"ﺙ|ﺛ", r"ث"),
            (r"ﺝ|ڃ|ﺠ|ﺟ", r"ج"),
            (r"ڃ|ﭽ|ﭼ", r"چ"),
            (r"ﺢ|ﺤ|څ|ځ|ﺣ", r"ح"),
            (r"ﺥ|ﺦ|ﺨ|ﺧ", r"خ"),
            (r"ڏ|ډ|ﺪ|ﺩ", r"د"),
            (r"ﺫ|ﺬ|ﻧ", r"ذ"),
            (r"ڙ|ڗ|ڒ|ڑ|ڕ|ﺭ|ﺮ", r"ر"),
            (r"ﺰ|ﺯ", r"ز"),
            (r"ﮊ", r"ژ"),
            (r"ݭ|ݜ|ﺱ|ﺲ|ښ|ﺴ|ﺳ", r"س"),
            (r"ﺵ|ﺶ|ﺸ|ﺷ", r"ش"),
            (r"ﺺ|ﺼ|ﺻ", r"ص"),
            (r"ﺽ|ﺾ|ﺿ|ﻀ", r"ض"),
            (r"ﻁ|ﻂ|ﻃ|ﻄ", r"ط"),
            (r"ﻆ|ﻇ|ﻈ", r"ظ"),
            (r"ڠ|ﻉ|ﻊ|ﻋ", r"ع"),
            (r"ﻎ|ۼ|ﻍ|ﻐ|ﻏ", r"غ"),
            (r"ﻒ|ﻑ|ﻔ|ﻓ", r"ف"),
            (r"ﻕ|ڤ|ﻖ|ﻗ", r"ق"),
            (r"ﮚ|ﮒ|ﮓ|ﮕ|ﮔ", r"گ"),
            (r"ﻝ|ﻞ|ﻠ|ڵ", r"ل"),
            (r"ﻡ|ﻤ|ﻢ|ﻣ", r"م"),
            (r"ڼ|ﻦ|ﻥ|ﻨ", r"ن"),
            (r"ވ|ﯙ|ۈ|ۋ|ﺆ|ۊ|ۇ|ۏ|ۅ|ۉ|ﻭ|ﻮ|ؤ", r"و"),
            (r"ﻬ|ھ|ﻩ|ﻫ|ﻪ|ۀ|ە|ہ", r"ه"),
            # NOTE(review): the last two rules canonicalize kaf and yeh
            # toward the ARABIC (not Persian) forms 'ك' / 'ي'.
            (r"ڭ|ﻚ|ﮎ|ﻜ|ﮏ|ګ|ﻛ|ﮑ|ﮐ|ڪ|ک", r"ك"),
            (r"ﭛ|ﻯ|ۍ|ﻰ|ﻱ|ﻲ|ں|ﻳ|ﻴ|ﯼ|ې|ﯽ|ﯾ|ﯿ|ێ|ے|ى|ی|یٰ", r"ي"),
            # # (r'¬|ۚ| ۖ| ۗ| ۚ| ۙ| ۘ| ۛ|۩|۞|↙|«|»', r''),
            # # (r':|;|«|؛|!|؟|٪', r''),
            # (r'¬|ۚ| ۖ| ۗ| ۚ| ۙ| ۘ| ', r''),
            # (r':|;|«|؛|!|؟|٪|۩|۞|↙|«|»|_', r' '),
            # #(r'ـ|ِ|ُ|َ|ٍ|ٌ|ً', r''),
            # ('\r', r' '),
            # ('\f', r' '),
            # #(r'', r""),
            # ('\u200c', r' '),
            # #(r"ـ", r""),
            # (r'ء', r''),
            # (r'۩', r''),
            # #(r'ٰ', r'ا'),
            # #(r'\u200c', ''),
            # (r'•|·|●|·|・|∙|。|ⴰ', r' '),
            # (r',|٬|٫|‚|,|،|،', r' '),
            # #(r'¬', r''),
            # #(r'( )+', r' '),
            # #(r'(\n)+', r'\n'),
            # #(r'()+', r''),
            # (u"\ufdf0", r""),
            # (u"\ufdf1", r""),
            # (u"\u2022", r""),
            # (r'( )+', r' '),
        ]
        for item in items:
            text = re.sub(item[0], item[1], text)
        return text
def normalize(self, text):
if not text or not isinstance(text, str):
return ""
#text = self.removeLink(text)
#text = re.sub(r'[^\w\s]', '', str(text).strip())
text = strip_tashkeel(text)
text = strip_tatweel(text)
text = self.camel_normal(text)
text = self.remove_extra_chars(text)
text = self.substitute_alphabets(text)
return text
    def align_and_get_span(self, input, input_normd, output_bag):
        """Map match spans found in the *normalized* input back onto the
        original *input* string, rewriting each result's 'input_span'
        and 'extracted' fields in place.

        Alignment works token-wise: the original input is re-tokenized
        (keeping char offsets) and character-normalized, then each
        normalized quranic piece is located as a token run.
        """
        input_rplcd = self.tokenizer(input, remove_extra_space=False, split=False)
        input_rplcd_chNormd = self.norm_chars(input_rplcd)
        input_rplcd_spltd = []
        input_rplcd_splt_index = []
        # Tokens of the cleaned original text together with their
        # (start, end) character offsets.
        for m in re.finditer(r'\S+', input_rplcd):
            input_rplcd_spltd.append(m.group())
            input_rplcd_splt_index.append((m.start(), m.end()))
        input_rplcd_chNormd_spltd = []
        for m in re.finditer(r'\S+', input_rplcd_chNormd):
            input_rplcd_chNormd_spltd.append(m.group())
        # Longest matches first, so wider spans win the alignment.
        output_bag = sorted(output_bag, key=lambda x: x['input_span'][1] - x['input_span'][0], reverse=True)
        for out_ind, output in enumerate(output_bag):
            input_normd_span = output['input_span']
            "A quranic piece of normalized input"
            input_normd_qp = input_normd[input_normd_span[0]:input_normd_span[1]]
            input_normd_qp_spltd = input_normd_qp.split()
            for start_ind, inp_rplcd_chNormd_token in enumerate(input_rplcd_chNormd_spltd):
                if inp_rplcd_chNormd_token == input_normd_qp_spltd[0]:
                    flag = True
                    # Verify the whole token run matches from this anchor.
                    # NOTE(review): start_ind + c_ind can run past the end
                    # of input_rplcd_chNormd_spltd (IndexError) — confirm
                    # inputs always keep the token counts aligned.
                    for c_ind, inp_nrmd_qp_tkn in enumerate(input_normd_qp_spltd):
                        if input_rplcd_chNormd_spltd[start_ind + c_ind] != inp_nrmd_qp_tkn:
                            flag = False
                    if flag:
                        intervals = input_rplcd_splt_index[start_ind:start_ind + c_ind+1]
                        output_bag[out_ind]['input_span'] = [intervals[0][0], intervals[-1][-1]]
                        output_bag[out_ind]['extracted'] = input[intervals[0][0]: intervals[-1][-1]]
                        break
            # NOTE(review): 'flag' is unbound when no token ever equals the
            # first quranic token, and exit() here kills the interpreter —
            # both look like debugging leftovers worth revisiting.
            if not flag:
                print('\nError\n')
                exit()
        return output_bag
        # tknz_text = self.tokenizer(text, remove_extra_space= False, split = False)
        # tknz_normd = normd.split(' ')
"'Exact method' functions"
    def rule_maker(self, verse, qbigram_text, index):
        """Build the regex that matches any contiguous piece of *verse*
        containing the bigram at word position *index*.

        The pattern is an alternation of every possible left extension of
        the bigram, followed by an alternation of every possible right
        extension, with special handling for the conjunction 'و' (va)
        which may attach to a neighboring word.
        """
        sentencelist = verse.split(" ")
        rule = ")(?:" + qbigram_text + ")"
        # Left extensions: progressively longer prefixes ending just
        # before the bigram.
        for j in range(0, index):
            without_last_space = True if qbigram_text[:8] == F'(?:^| ){va}' else False
            left_regexd, neet_to_handle_va = self.regexitize_verse(" ".join(sentencelist[j:index]),
                                                                   without_last_space=without_last_space)
            before_bigram = F'(^|{va}|{va} | )' if neet_to_handle_va else ''
            rule = ")(?:" + before_bigram + qbigram_text[2:] + ")" if neet_to_handle_va and j == 0 else rule
            rule = "|(?:" + left_regexd + ")" + rule
        rule = "(?:" + rule + "(?:"
        # Right extensions: progressively shorter suffixes starting just
        # after the bigram (longest first so the regex prefers them).
        for j in range(len(sentencelist), index + 2, -1):
            va_end_bigram = True if qbigram_text[-3:] == F'{va} ?' else False
            right_regexd, _ = self.regexitize_verse(" ".join(sentencelist[index + 2:j]), va_before=va_end_bigram,
                                                    without_last_space=True)
            after_bigram = '' if va_end_bigram else ' '
            rule = rule + '(?:' + after_bigram + right_regexd + ')|'
        rule = rule + ')'
        return rule
def regexitize_quran_df(self, quran_df):
"Add regex patterns to quran verses"
"Get regextized verbs that need 'alef?' pattern"
verbs_needs_alef_patt = self.get_verbs_needs_alef_patt()
oo_pattern = F"{verbs_needs_alef_patt}"
# oo_repl = "\\1"+"ا"
oo_repl = "\\1" + "ا" + "?"
# qdictionary_keys = list(qdictionary.keys())
for index in quran_df.index:
"وا"
new_verse = re.sub(oo_pattern, oo_repl, quran_df.loc[index]['text_norm'])
quran_df.loc.__setitem__((index, ('text_norm')), new_verse)
    def regexitize_qbigrambag(self, qbigram_bag):
        """Rewrite every bigram key of *qbigram_bag* in place into a
        word-boundary-delimited regex.  A leading or trailing standalone
        'و' (the conjunction va) is made optional, so a verse matches
        with or without it.

        NOTE(review): the bare ``new_key.replace(...)`` calls below
        discard their result (str.replace returns a new string), so the
        intended inner '\\b' insertions never happen.  This is preserved
        because the shipped pre-compiled pickles were generated with
        exactly this behavior.
        """
        va_pattern1 = "(^" + "و" + " )"
        va_repl1 = "(?:^| )\\1?"
        va_pattern2 = "( " + "و" + "$)"
        va_repl2 = "\\1 ?"
        qbigram_bag_keys = list(qbigram_bag.keys())
        for key in qbigram_bag_keys:
            new_key = key
            word1, word2 = new_key.split(" ")
            if 'و' == word1:
                new_key.replace(" ", " \\b")  # NOTE(review): result discarded (no-op)
                new_key = new_key + "\\b"
                new_key = re.sub(va_pattern1, va_repl1, new_key)
            elif 'و' == word2:
                new_key.replace(" ", "\\b ")  # NOTE(review): result discarded (no-op)
                new_key = "\\b" + new_key
                new_key = re.sub(va_pattern2, va_repl2, new_key)
            else:
                new_key.replace(" ", "\\b \\b")  # NOTE(review): result discarded (no-op)
                new_key = "\\b" + new_key + "\\b"
            qbigram_bag[new_key] = qbigram_bag.pop(key)
    def regexitize_verse(self, verse, va_before=False, without_last_space=False):
        """Turn a (partial) verse into a word-boundary regex fragment.

        The conjunction 'و' is emitted as optional ("و ?"); a trailing
        standalone 'و' is dropped entirely.  Returns the fragment plus a
        flag telling the caller whether the fragment ended in (or was
        entered with) a va, which changes how it must be joined.
        """
        qlist = verse.split(" ")
        regexd_verse = ""
        for ind in range(len(qlist)):
            if 'و' == qlist[ind] and ind == 0 and len(qlist) > 1:
                # leading va: keep it, but the following space is optional
                regexd_verse += "\\b" + "و" + " ?"
                va_before = True
            elif 'و' == qlist[ind] and ind == len(qlist) - 1:
                # trailing va: emit nothing, just signal it to the caller
                va_before = True
                pass
            elif 'و' == qlist[ind]:
                regexd_verse += "\\b" + "و" + " ?"
                va_before = True
            else:
                # Ordinary word: leading \b only if a va did not just
                # provide the boundary.
                regexd_verse += ('\\b' if not va_before else '') + qlist[ind] + "\\b "
                va_before = False
        if without_last_space and not va_before:
            regexd_verse = regexd_verse[:-1]
        return regexd_verse, va_before
def get_verbs_needs_alef_patt(self):
verbs_needs_alef = ['ملاقو', 'تتلو', 'یتلو', 'یدعو', 'یعفو', 'واولو', 'اولو', 'امرو', 'ویعفو', 'تبو', 'اندعو',
'باسطو', 'تبلو', 'اشکو', 'ادعو', 'لتتلو', 'یمحو', 'ندعو', 'ساتلو', 'یرجو', 'وادعو', 'اتلو',
'نتلو', 'لتنو', 'ترجو', 'مهلکو', 'لیربو', 'یربو', 'لتارکو', 'لذایقو', 'صالو', 'ویرجو',
'کاشفو', 'لیبلو', 'ونبلو', 'ونبلو', 'مرسلو', 'تدعو', 'لصالو']
verbs_needs_alef_patt = "("
for el in verbs_needs_alef:
verbs_needs_alef_patt += F"\\b{el}\\b|"
verbs_needs_alef_patt = verbs_needs_alef_patt[:-1] + ")"
return verbs_needs_alef_patt
def create_regexitize_bigram_bag(self, quran_df):
"Creating token bag"
qbigram_bag = {}
for ind in quran_df.index:
temp = quran_df.loc[ind]['text_norm'].split(" ")
for j in range(len(temp) - 1):
bigram = temp[j] + " " + temp[j + 1]
try:
qbigram_bag[bigram].append((ind, j))
except:
qbigram_bag[bigram] = [(ind, j)]
"Add regex patterns to qbigram_bag"
self.regexitize_qbigrambag(qbigram_bag)
return qbigram_bag
    def initialize_from_scratch(self, create_compiled_patterns=False, save_compiled_patterns=False):
        """Build all data for the 'exact' model from the raw Quran file.

        Reads and normalizes the Quran, regexitizes verses and bigrams,
        and (optionally) compiles and pickles the per-bigram and
        per-verse regex patterns so later runs can load them prebuilt.
        Compiling every verse rule is slow (see the tqdm loop below).
        """
        # The conjunction letter, used by the regex builders at module scope.
        global va
        va = "و"
        "Read quranic data"
        quran_df_index = pd.read_csv('quranic_extractions/data/Quran.txt', names=['id', 'text'], sep="\t")['id']
        self.quran_df = pd.read_csv('quranic_extractions/data/Quran.txt', sep="##|\t", names=['surah', 'verse', 'text'],
                                    engine='python')
        self.quran_df.index = quran_df_index
        "Normalize quran"
        self.quran_df['text_norm'] = self.quran_df['text'].apply(lambda x: self.normalize(x))
        "Add regex patterns to quran_df"
        self.regexitize_quran_df(self.quran_df)
        if save_compiled_patterns:
            with open("quranic_extractions/pickles/quran_df.pickle", 'wb') as f:
                pickle.dump(self.quran_df, f)
        self.qbigram_bag = self.create_regexitize_bigram_bag(self.quran_df)
        if save_compiled_patterns:
            with open("quranic_extractions/pickles/qbigram_bag.pickle", 'wb') as f:
                pickle.dump(self.qbigram_bag, f)
        if create_compiled_patterns:
            "Create qbigrams_compiled"
            qbigram_bag_keys = list(self.qbigram_bag.keys())
            self.qbigram_compiled = []
            for qbigram in qbigram_bag_keys:
                self.qbigram_compiled.append(re.compile(qbigram))
            if save_compiled_patterns:
                with open("quranic_extractions/pickles/qbigram_compiled.pickle", 'wb') as f:
                    pickle.dump(self.qbigram_compiled, f)
        "Create verses_rules_compiled"
        if create_compiled_patterns:
            qbigram_found_list = []
            for qc in self.qbigram_compiled:
                qbigram_found_list.append([qc.pattern, self.qbigram_bag[qc.pattern]])
            self.verses_rules_compiled = {}
            print("Compiling regex patterns for verses...")
            # One compiled rule per (verse id, bigram position) pair.
            for qbigram in tqdm(qbigram_found_list):
                for inner_tup in qbigram[1]:
                    id, index = inner_tup
                    qbigram_text = qbigram[0]
                    verse = self.quran_df.loc[id]['text_norm']
                    self.verses_rules_compiled[F'{id}-{index}'] = re.compile(self.rule_maker(verse, qbigram_text, index))
            if save_compiled_patterns:
                with open('quranic_extractions/pickles/verses_rules_compiled.pickle', 'wb') as f:
                    pickle.dump(self.verses_rules_compiled, f)
    def extract_verse_exact(self, input_normd, input, use_precompiled_patterns=True):
        """Find quranic passages in *input_normd* via the bigram/verse
        regexes, resolve overlapping candidate spans (longer spans win),
        and map the surviving spans back onto the original *input*.

        Returns a list of dicts with 'input_span', 'extracted',
        'quran_id' and 'verse'.
        """
        # Phase 1: which quranic bigrams occur in the input at all?
        qbigram_found_list = []
        if use_precompiled_patterns:
            for qc in self.qbigram_compiled:
                if len(qc.findall(input_normd)) != 0:
                    qbigram_found_list.append([qc.pattern, self.qbigram_bag[qc.pattern]])
        else:
            qbigram_bag_keys = list(self.qbigram_bag.keys())
            for qbigram in qbigram_bag_keys:
                if len(re.findall(qbigram, input_normd)) != 0:
                    qbigram_found_list.append([qbigram, self.qbigram_bag[qbigram]])
        # Phase 2: run the full verse rule of every occurrence and merge
        # the resulting spans, discarding spans nested in (or shorter
        # than) already-accepted ones.
        output_bag = []
        covered_input_index = []
        delete_new = False
        for qbigram in qbigram_found_list:
            for inner_tup in qbigram[1]:
                id, index = inner_tup
                if use_precompiled_patterns:
                    matches = list(self.verses_rules_compiled[F'{id}-{index}'].finditer(input_normd))
                else:
                    qbigram_text = qbigram[0]
                    verse = self.quran_df.loc[id]['text_norm']
                    rule = self.rule_maker(verse, qbigram_text, index)
                    matches = list(re.finditer(rule, input_normd))
                if len(matches) != 0:
                    for match in matches:
                        # Trim a single leading/trailing space off the
                        # matched range before comparing spans.
                        res_str = input_normd[match.regs[0][0]:match.regs[0][1]]
                        if (res_str[-1] == ' ') and (res_str[0] != ' '):
                            new_range = [match.regs[0][0], match.regs[0][1] - 1]
                        elif (res_str[-1] != ' ') and (res_str[0] == ' '):
                            new_range = [match.regs[0][0] + 1, match.regs[0][1]]
                        elif (res_str[-1] == ' ') and (res_str[0] == ' '):
                            new_range = [match.regs[0][0] + 1, match.regs[0][1] - 1]
                        else:
                            new_range = [match.regs[0][0], match.regs[0][1]]
                        # Compare the new span against every accepted one:
                        # strict containment keeps the larger span; partial
                        # overlap truncates the shorter side; spans shrunk
                        # below two words are dropped entirely.
                        case_ind = 0
                        while case_ind < len(covered_input_index) and len(covered_input_index) != 0:
                            case = covered_input_index[case_ind]
                            if (case[0] <= new_range[0] and new_range[1] < case[1]) or (
                                    case[0] < new_range[0] and new_range[1] <= case[1]):
                                # new span strictly inside an accepted one
                                delete_new = True
                                break
                            elif (new_range[0] <= case[0] and case[1] < new_range[1]) or (
                                    new_range[0] < case[0] and case[1] <= new_range[1]):
                                # accepted span strictly inside the new one
                                del covered_input_index[case_ind]
                                del output_bag[case_ind]
                            elif (new_range[0] < case[0] and new_range[1] <= case[1] and case[0] < new_range[1]):
                                # partial overlap, new span on the left
                                new_len = new_range[1] - new_range[0]
                                case_len = case[1] - case[0]
                                if new_len > case_len:
                                    covered_input_index[case_ind][0] = new_range[1]
                                    if len(input_normd[covered_input_index[case_ind][0]: covered_input_index[case_ind][
                                            1]].strip().split(" ")) < 2:
                                        del covered_input_index[case_ind]
                                        del output_bag[case_ind]
                                elif new_len < case_len:
                                    new_range[1] = case[0]
                                    if len(input_normd[new_range[0]: new_range[1]].strip().split(" ")) < 2:
                                        delete_new = True
                                        break
                                else:
                                    case_ind += 1
                            elif (case[0] <= new_range[0] and new_range[0] < case[1] and case[1] < new_range[1]):
                                # partial overlap, new span on the right
                                new_len = new_range[1] - new_range[0]
                                case_len = case[1] - case[0]
                                if new_len > case_len:
                                    covered_input_index[case_ind][1] = new_range[0]
                                    if len(input_normd[covered_input_index[case_ind][0]: covered_input_index[case_ind][
                                            1]].strip().split(" ")) < 2:
                                        del covered_input_index[case_ind]
                                        del output_bag[case_ind]
                                elif new_len < case_len:
                                    new_range[0] = case[1]
                                    if len(input_normd[new_range[0]: new_range[1]].strip().split(" ")) < 2:
                                        delete_new = True
                                        break
                                else:
                                    case_ind += 1
                            elif new_range[0] == case[0] and case[1] == new_range[1]:
                                # identical span: keep only one entry per verse id
                                if output_bag[case_ind]['quran_id'] == id:
                                    delete_new = True
                                    break
                                else:
                                    case_ind += 1
                            else:
                                case_ind += 1
                        if delete_new:
                            delete_new = False
                            continue
                        #res = (input_normd[new_range[0]:new_range[1]], id)
                        res = {'input_span': [new_range[0], new_range[1]],
                               'extracted': input_normd[new_range[0]:new_range[1]],
                               'quran_id': id,
                               'verse': self.quran_df.loc[id]['text']}
                        covered_input_index.append(new_range)
                        output_bag.append(res)
        # Phase 3: translate normalized-text spans back to the original input.
        output_bag = self.align_and_get_span(input, input_normd, output_bag)
        return output_bag
"'Apprx. method' functions"
def sort_words(self, w1, w2):
if len(w1) > len(w2):
temp = w1
w1 = w2
w2 = temp
return (w1, w2)
def char_count_diff(self, w1, w2):
diffChar = 0
chrs = set()
chrs.update(list(w1))
chrs.update(list(w2))
for chr in chrs:
diffChar += abs(w1.count(chr) - w2.count(chr))
if w2.count(chr) == 0 or w1.count(chr) == 0:
diffChar += 1
return diffChar
def get_tuples(self, word):
if word in self.wordsMap:
return self.wordsMap[word]['tuples']
tuples = {}
word_len = len(word)
for i in range(word_len):
for j in range(i + 1, word_len):
tuple = word[i] + word[j]
if tuple not in tuples:
tuples[tuple] = 0
tuples[tuple] += 1
if j == i + 1 and tuple == 'ال':
tuples[tuple] -= 0.5
self.wordsMap[word] = {'tuples': tuples, 'ayes': set()}
return tuples
def same_tuple_count(self, w1, w2):
len_w1 = len(w1)
len_w2 = len(w2)
sameTuple = 0
w1_tuples = self.get_tuples(w1)
w2_tuples = self.get_tuples(w2)
for tuple in w1_tuples:
if tuple in w2_tuples:
sameTuple += w1_tuples[tuple] + w2_tuples[tuple]
return sameTuple
def words_similarity(self, w1, w2):
(w1, w2) = self.sort_words(w1, w2)
sameTupleCount = self.same_tuple_count(w1, w2)
charDiff = self.char_count_diff(w1, w2)
res = (self.TUPLE_SIMILARITY_FACTOR * sameTupleCount - self.CHAR_DIFF_FACTOR * charDiff) / (len(w1) + len(w2))
if w1 == w2:
return max(res, 3, len(w1))
return res
    def check_aye_similarity(self, aye, senteces_words):
        """Score how well any run of the verse's words matches any run of
        the sentence's words.

        For every (verse position, sentence position) anchor pair the
        words are matched in lockstep for as long as each pair scores at
        least 0.1; a run of length k earns a k*1.5 bonus.  The best run
        score over all anchors is returned (O(n*m*k) brute force).
        """
        aye_words = aye['text_norm'].split()
        result = 0
        len_aye_words = len(aye_words)
        len_senteces_words = len(senteces_words)
        for i in range(len_aye_words):
            for j in range(len_senteces_words):
                s = 0
                k = 0
                while i + k < len_aye_words and j + k < len_senteces_words:
                    sim = self.words_similarity(aye_words[i + k], senteces_words[j + k])
                    if sim < 0.1:
                        break
                    k += 1
                    s += sim
                if k > 0:
                    # reward longer consecutive runs
                    s += k * 1.5
                if s > result:
                    result = s
        return result
    def extract_verse_apprx(self, sentence):
        """Approximate verse lookup for a normalized *sentence*.

        Candidate verses are gathered via the word->verses index, scored
        by run similarity (minus a length penalty), and the best ones —
        within SAME_AYE_RATIO of the top score and above the minimum
        similarity — are returned as a list of
        {'verse', 'quran_id', 'score'} dicts.
        """
        ayat = set()
        result = {}
        normalizedWords = sentence.split(' ')
        for word in normalizedWords:
            if word not in self.wordsMap:
                continue
            for inx in self.wordsMap[word]['ayes']:
                if inx in result:
                    continue
                # similarity minus a penalty proportional to verse length
                result[inx] = self.check_aye_similarity(self.ayes[inx], normalizedWords) - len(self.ayes[inx]['text_norm'].split()) * self.AYE_LEN_FACTOR
        sortedIndexes = sorted(result, key=result.get, reverse=True)[:self.SAME_AYE_THRESHOLD]
        if len(sortedIndexes) > 0:
            maxValue = result[sortedIndexes[0]]
            # NOTE(review): sortedIndexes was just sliced to at most
            # SAME_AYE_THRESHOLD items, so this "too many near-ties"
            # early return (which yields None) can never trigger.
            if len(sortedIndexes) > self.SAME_AYE_THRESHOLD and result[sortedIndexes[self.SAME_AYE_THRESHOLD]] * self.SAME_AYE_RATIO > maxValue:
                return
            for inx in sortedIndexes:
                if maxValue > result[inx] * self.SAME_AYE_RATIO or (len(normalizedWords) > 1 and result[inx] < self.MIN_ACCEPT_SIMILARITY):
                    break
                ayat.add((self.ayes[inx]['text'], str(self.ayes[inx]['sore']) + "##" + str(self.ayes[inx]['aye']), round(result[inx], 2)))
        ayat = [{'verse': res[0], 'quran_id': res[1], 'score': res[2]} for res in ayat]
        return ayat
def run(self, text):
"Run the model"
"Normalize input"
input_normd = self.normalize(text)
if self.model_type == 'exact':
return self.extract_verse_exact(input_normd, text, self.use_precompiled_patterns)
elif self.model_type == 'apprx':
return self.extract_verse_apprx(input_normd) | StarcoderdataPython |
168732 | import copy
from dlgo.gotypes import Player, Point
from dlgo.scoring import compute_game_result
from dlgo import zobrist
# Module-level caches mapping a board dimension (rows, cols) to its
# precomputed lookup table, filled by init_neighbor_table /
# init_corner_table.
neighbor_tables = {}
corner_tables = {}
def init_neighbor_table(dim):
    """Precompute, for every point of a rows x cols board, the list of
    on-board orthogonal neighbors, and cache it in neighbor_tables[dim].

    The nine cases (4 corners, 4 edges, interior) are enumerated
    explicitly so no bounds checking is needed at lookup time.
    """
    rows, cols = dim
    new_table = {}
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            p = Point(row=r, col=c)
            if r == 1:
                if c == 1:
                    # top-left corner
                    neighbor_list = [
                        Point(row=1, col=2),
                        Point(row=2, col=1),
                    ]
                elif c == cols:
                    # top-right corner
                    neighbor_list = [
                        Point(row=2, col=cols),
                        Point(row=1, col=cols-1),
                    ]
                else:
                    # top edge
                    neighbor_list = [
                        Point(row=1, col=c+1),
                        Point(row=2, col=c),
                        Point(row=1, col=c-1),
                    ]
            elif r == rows:
                if c == 1:
                    # bottom-left corner
                    neighbor_list = [
                        Point(row=rows-1, col=1),
                        Point(row=rows, col=2),
                    ]
                elif c == cols:
                    # bottom-right corner
                    neighbor_list = [
                        Point(row=rows, col=cols-1),
                        Point(row=rows-1, col=cols),
                    ]
                else:
                    # bottom edge
                    neighbor_list = [
                        Point(row=rows, col=c-1),
                        Point(row=rows-1, col=c),
                        Point(row=rows, col=c+1),
                    ]
            else:
                if c == 1:
                    # left edge
                    neighbor_list = [
                        Point(row=r-1, col=1),
                        Point(row=r, col=2),
                        Point(row=r+1, col=1),
                    ]
                elif c == cols:
                    # right edge
                    neighbor_list = [
                        Point(row=r+1, col=cols),
                        Point(row=r, col=cols-1),
                        Point(row=r-1, col=cols),
                    ]
                else:
                    # interior point: all four neighbors
                    neighbor_list = [
                        Point(row=r, col=c+1),
                        Point(row=r+1, col=c),
                        Point(row=r, col=c-1),
                        Point(row=r-1, col=c),
                    ]
            new_table[p] = neighbor_list
    neighbor_tables[dim] = new_table
def init_corner_table(dim):
    """Precompute the diagonal ("corner") neighbors of every point on a
    ``dim = (rows, cols)`` board and cache the result in the module-level
    ``corner_tables`` dict.

    As with init_neighbor_table, each list's ordering is relied on
    positionally by Board.divide_region; interior points use the order
    (r+1,c+1), (r+1,c-1), (r-1,c-1), (r-1,c+1).
    """
    rows, cols = dim
    new_table = {}
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            p = Point(row=r, col=c)
            if r == 1:
                # First row: a board corner has one diagonal, an edge two.
                if c == 1:
                    corner_list = [
                        Point(row=2, col=2),
                    ]
                elif c == cols:
                    corner_list = [
                        Point(row=2, col=cols-1),
                    ]
                else:
                    corner_list = [
                        Point(row=2, col=c+1),
                        Point(row=2, col=c-1),
                    ]
            elif r == rows:
                # Last row: one diagonal at corners, two along the edge.
                if c == 1:
                    corner_list = [
                        Point(row=rows-1, col=2),
                    ]
                elif c == cols:
                    corner_list = [
                        Point(row=rows-1, col=cols-1),
                    ]
                else:
                    corner_list = [
                        Point(row=rows-1, col=c-1),
                        Point(row=rows-1, col=c+1),
                    ]
            else:
                # Middle rows: side edges get two diagonals, interior four.
                if c == 1:
                    corner_list = [
                        Point(row=r-1, col=2),
                        Point(row=r+1, col=2),
                    ]
                elif c == cols:
                    corner_list = [
                        Point(row=r+1, col=cols-1),
                        Point(row=r-1, col=cols-1),
                    ]
                else:
                    corner_list = [
                        Point(row=r+1, col=c+1),
                        Point(row=r+1, col=c-1),
                        Point(row=r-1, col=c-1),
                        Point(row=r-1, col=c+1),
                    ]
            new_table[p] = corner_list
    corner_tables[dim] = new_table
class Move():
    """One turn action: play a stone at a point, pass, or resign.

    Exactly one of *point* / *is_pass* / *is_resign* must be supplied;
    the constructor asserts this.  Moves compare by value and are
    hashable, so they can be used in sets and as dict keys.
    """
    def __init__(self, point=None, is_pass=False, is_resign=False):
        # Exactly one of the three alternatives must hold (XOR chain).
        assert (point is not None) ^ is_pass ^ is_resign
        self.point = point
        self.is_play = (self.point is not None)
        self.is_pass = is_pass
        self.is_resign = is_resign
    @classmethod
    def play(cls, point):
        """Construct a stone-placing move at *point*."""
        return Move(point=point)
    @classmethod
    def pass_turn(cls):
        """Construct a pass move."""
        return Move(is_pass=True)
    @classmethod
    def resign(cls):
        """Construct a resignation move."""
        return Move(is_resign=True)
    def transpose(self):
        """Mirror a play across the main diagonal (row/col swapped);
        passes and resignations are returned unchanged."""
        if self.is_play:
            return Move(point=Point(row=self.point.col,col=self.point.row))
        else:
            return self
    def flip_row(self, board_size):
        """Mirror a play's row on a square board of side *board_size*."""
        if self.is_play:
            return Move(point=Point(row=board_size+1-self.point.row,col=self.point.col))
        else:
            return self
    def flip_col(self, board_size):
        """Mirror a play's column on a square board of side *board_size*."""
        if self.is_play:
            return Move(point=Point(row=self.point.row,col=board_size+1-self.point.col))
        else:
            return self
    def flip_row_col(self, board_size):
        """Mirror both row and column (a 180-degree rotation)."""
        if self.is_play:
            return Move(point=Point(row=board_size+1-self.point.row,col=board_size+1-self.point.col))
        else:
            return self
    def __str__(self):
        if self.is_pass:
            return 'pass'
        if self.is_resign:
            return 'resign'
        return 'play %s' % str(self.point)
    def __hash__(self):
        return hash((
            self.is_play,
            self.is_pass,
            self.is_resign,
            self.point))
    def __eq__(self, other):
        # BUG FIX: comparing a Move with a non-Move used to raise
        # AttributeError.  Returning NotImplemented defers to the other
        # operand and ultimately yields False, the conventional behavior.
        if not isinstance(other, Move):
            return NotImplemented
        return (
            self.is_play,
            self.is_pass,
            self.is_resign,
            self.point) == (
            other.is_play,
            other.is_pass,
            other.is_resign,
            other.point)
    __repr__ = __str__
class GoString():
    """An immutable set of stones of one color treated as a unit.

    Equality and hashing are value-based; the stone set is frozen so
    instances can live in sets and dict keys.
    """

    def __init__(self, color, stones):
        self.color = color
        self.stones = frozenset(stones)

    def __eq__(self, other):
        if not isinstance(other, GoString):
            return False
        return self.color == other.color and self.stones == other.stones

    def __hash__(self):
        return hash(self.stones)

    def __deepcopy__(self, memodict={}):
        # The contents are immutable, so a shallow rebuild is sufficient.
        return GoString(self.color, self.stones)

    def __str__(self):
        body = ''.join(str(stone) + ' ' for stone in self.stones)
        return '<String ' + body + str(self.color) + '>'

    __repr__ = __str__
class StringConnection():
    """A frozen group of same-colored strings considered connected.

    Instances are immutable; the mutating-style operations return new
    objects instead of modifying in place.
    """

    def __init__(self, color, strings):
        self.color = color
        self.strings = frozenset(strings)

    def without_strings(self, strings):
        """Return a copy of this group with *strings* removed."""
        return StringConnection(self.color, self.strings - set(strings))

    def merged_with(self, string_connection):
        """Return the union of this group with *string_connection*.

        Both groups must share the same color.
        """
        assert string_connection.color == self.color
        return StringConnection(self.color, self.strings | string_connection.strings)

    def __eq__(self, other):
        if not isinstance(other, StringConnection):
            return False
        return self.color == other.color and self.strings == other.strings

    def __hash__(self):
        return hash(self.strings)

    def __deepcopy__(self, memodict={}):
        # Immutable contents: shallow reconstruction is enough.
        return StringConnection(self.color, self.strings)

    def __str__(self):
        body = ''.join(str(string) + ' ' for string in self.strings)
        return '[Connection ' + body + ']'

    __repr__ = __str__
class Region():
    """An immutable set of board points grouped as one region for *color*."""

    def __init__(self, color, points):
        self.color = color
        self.points = frozenset(points)

    def __eq__(self, other):
        if not isinstance(other, Region):
            return False
        return self.color == other.color and self.points == other.points

    def __hash__(self):
        return hash(self.points)

    def __deepcopy__(self, memodict={}):
        # Immutable contents: shallow reconstruction is enough.
        return Region(self.color, self.points)

    def __str__(self):
        body = ''.join(str(point) + ' ' for point in self.points)
        return '[Region ' + body + str(self.color) + ']'

    __repr__ = __str__
class Board():
    """Go board that incrementally tracks strings, their corner/edge
    connections, the empty regions of each color, and a derived
    "safe strings / vital regions" analysis after every stone placement.

    Several module-level caches are shared across instances
    (``all_points``, ``neighbor_tables``, ``corner_tables``); see
    __init__ for a caveat about ``all_points``.
    """
    def __init__(self, num_rows, num_cols):
        self.num_rows = num_rows
        self.num_cols = num_cols
        # ``all_points`` is a module-level cache of every point on the board.
        global all_points
        try: all_points
        except NameError: all_points = None
        # NOTE(review): this cache is keyed on nothing -- it is built from
        # the dimensions of the FIRST Board ever created and reused verbatim.
        # Creating boards of two different sizes in one process would hand
        # the second board a wrong point set.  Confirm all boards in a
        # process share one size.
        if all_points is None:
            all_points = set()
            for r in range(1,self.num_rows+1):
                for c in range(1,self.num_cols+1):
                    all_points.add(Point(col=c,row=r))
        # Sentinel "edge" strings let the board edge take part in the
        # connection bookkeeping like an ordinary friendly string.
        self.edge_strings = {Player.black: GoString(Player.black, {}),Player.white: GoString(Player.white, {})}
        # Initially the whole board is one empty region for each color.
        self.start_regions = {Player.black: Region(Player.black,frozenset(all_points)),Player.white: Region(Player.white,frozenset(all_points))}
        self._grid = {}  # point -> GoString occupying it (empty points absent)
        self._liberties = {}  # GoString -> set of liberty points
        self._connected = {self.edge_strings[Player.black]: set(), self.edge_strings[Player.white]: set()}
        self._connections = {self.edge_strings[Player.black]: StringConnection(Player.black,[self.edge_strings[Player.black]]),
                             self.edge_strings[Player.white]: StringConnection(Player.white,[self.edge_strings[Player.white]])}
        self._region_by_point_black = {p: self.start_regions[Player.black] for p in all_points}
        self._region_by_point_white = {p: self.start_regions[Player.white] for p in all_points}
        self._regions_by_string = {}
        self._strings_by_region = {}
        # Caches filled by find_safe_strings_and_vital_regions() after each
        # stone placement.
        self._safe_strings_by_region = {Player.black: {},Player.white: {}}
        self._vital_regions_by_string = {Player.black: {},Player.white: {}}
        self._potentially_safe_strings_by_region = {Player.black: {},Player.white: {}}
        self._healthy_regions_by_string = {Player.black: {},Player.white: {}}
        self._hash = zobrist.EMPTY_BOARD  # incrementally maintained Zobrist hash
        # Neighbor/corner lookup tables are cached per board dimension.
        global neighbor_tables
        dim = (num_rows, num_cols)
        if dim not in neighbor_tables:
            init_neighbor_table(dim)
        self.neighbor_table = neighbor_tables[dim]
        global corner_tables
        dim = (num_rows, num_cols)
        if dim not in corner_tables:
            init_corner_table(dim)
        self.corner_table = corner_tables[dim]
    def assign_new_region_to_point(self, color, region, point):
        """Record that *point* belongs to *region* in *color*'s region map."""
        if color==Player.black:
            self._region_by_point_black[point] = region
        else:
            self._region_by_point_white[point] = region
    def read_region_by_point(self, color, point):
        """Return the region containing *point* in *color*'s region map."""
        if color==Player.black:
            return self._region_by_point_black[point]
        else:
            return self._region_by_point_white[point]
    def delete_point_from_region_by_point(self, color, point):
        """Drop *point* from *color*'s point-to-region map."""
        if color==Player.black:
            del(self._region_by_point_black[point])
        else:
            del(self._region_by_point_white[point])
    def neighbors(self, point):
        """Return the precomputed orthogonal neighbors of *point*."""
        return self.neighbor_table[point]
    def is_on_grid(self, point):
        """True if *point* lies within the 1-indexed board bounds."""
        return 1 <= point.row <= self.num_rows and \
            1 <= point.col <= self.num_cols
    def get(self, point):
        """Return the color of the stone at *point*, or None if empty."""
        string = self._grid.get(point)
        if string is None:
            return None
        return string.color
    def get_go_string(self, point):
        """Return the GoString occupying *point*, or None if empty."""
        string = self._grid.get(point)
        if string is None:
            return None
        return string
    def string_delete_liberty(self, string, point):
        self._liberties[string] = self._liberties[string]-{point}
    def string_add_liberty(self, string, point):
        self._liberties[string] = self._liberties[string]|{point}
    def string_delete_connected(self, string, connected):
        self._connected[string] = self._connected[string]-{connected}
    def string_add_connected(self, string, connected):
        self._connected[string] = self._connected[string]|{connected}
    def strings_merged(self, this_string, that_string):
        """Merge two same-colored strings, migrating liberty and
        connection bookkeeping from the old strings to the merged one."""
        assert this_string.color == that_string.color
        combined_stones = this_string.stones | that_string.stones
        combined_liberties = (self._liberties[this_string] | self._liberties[that_string]) - combined_stones
        combined_connected = (self._connected[this_string] | self._connected[that_string])
        new_string = GoString(this_string.color, combined_stones)
        del(self._liberties[this_string])
        del(self._liberties[that_string])
        del(self._connected[this_string])
        del(self._connected[that_string])
        self._liberties[new_string] = set(combined_liberties)
        self._connected[new_string] = set(combined_connected)
        return new_string
    def num_liberties(self, string):
        return len(self._liberties[string])
    def find_connections(self, start_string, found_connections = None):
        """Depth-first transitive closure over the ``_connected`` relation
        (corner-adjacent friendly strings plus the edge sentinels),
        starting from *start_string*."""
        if found_connections == None:
            found_connections = set([])
        if start_string in found_connections:
            return set()
        found_connections |= {start_string}
        for string in self._connected[start_string]:
            found_connections |= self.find_connections(string, found_connections)
        return found_connections
    def divide_region(self, player, point):
        """Determine how a *player* stone placed at *point* would split
        the surrounding empty region.

        Returns a list of neighboring points, one representative per
        distinct sub-region that would result (empty list if the stone
        fills the region completely).  Relies on the positional pairing
        of ``neighbor_table`` / ``corner_table`` entries, which alternate
        around the point.
        """
        # 2 neighbors => the point is a board corner.
        if(len(self.neighbor_table[point])==2):
            free_neighbors = []
            corner = self.corner_table[point][0]
            for neighbor in self.neighbor_table[point]:
                if self.get(neighbor)!=player:
                    free_neighbors.append(neighbor)
            if len(free_neighbors)==0:
                return []
            elif len(free_neighbors)==1:
                return free_neighbors
            else:
                if self._connections.get(self._grid.get(corner)) is self._connections[self.edge_strings[player]]:
                    return free_neighbors
                else:
                    return [free_neighbors[0]]
        # 3 neighbors => the point lies on a board edge.
        if(len(self.neighbor_table[point])==3):
            free_neighbors = []
            corners = self.corner_table[point]
            for neighbor in self.neighbor_table[point]:
                if self.get(neighbor)!=player:
                    free_neighbors.append(neighbor)
            if len(free_neighbors)==0:
                return []
            elif len(free_neighbors)==1:
                return free_neighbors
            elif len(free_neighbors)==2:
                if self.get(self.neighbor_table[point][0])==player:
                    if (self._connections.get(self._grid.get(corners[1])) is self._connections[self.edge_strings[player]]):
                        return free_neighbors
                    else:
                        return [free_neighbors[0]]
                elif self.get(self.neighbor_table[point][2])==player:
                    if self._connections.get(self._grid.get(corners[0])) is self._connections[self.edge_strings[player]]:
                        return free_neighbors
                    else:
                        return [free_neighbors[0]]
                else:
                    if (self._connections.get(self._grid.get(self.neighbor_table[point][1])) is self._connections[self.edge_strings[player]]):
                        return free_neighbors
                    else:
                        return [free_neighbors[0]]
            else:
                if (self._connections.get(self._grid.get(corners[0])) is self._connections[self.edge_strings[player]]) and \
                    (self._connections.get(self._grid.get(corners[1])) is self._connections[self.edge_strings[player]]):
                    return free_neighbors
                elif self._connections.get(self._grid.get(corners[0])) is self._connections[self.edge_strings[player]]:
                    return [free_neighbors[0],free_neighbors[1]]
                elif self._connections.get(self._grid.get(corners[1])) is self._connections[self.edge_strings[player]]:
                    return [free_neighbors[1],free_neighbors[2]]
                elif self._connections.get(self._grid.get(corners[0])) is not None and \
                    self._connections.get(self._grid.get(corners[0])) is self._connections.get(self._grid.get(corners[1])):
                    return [free_neighbors[0],free_neighbors[1]]
                else:
                    return [free_neighbors[0]]
        # Interior point: walk the 8 surrounding cells (neighbor, corner,
        # neighbor, corner, ...) and label maximal runs of non-player cells
        # with increasing region numbers.
        region_count = 1
        not_region_count = 1
        region = [0]*8
        not_region = [0]*8
        for i in range(4):
            if self.get(self.neighbor_table[point][i]) == player:
                region[2*i] = 0
                if region[2*i-1] != 0:
                    region_count += 1
                not_region[2*i] = not_region_count
            else:
                not_region[2*i] = 0
                if not_region[2*i-1] != 0:
                    not_region_count += 1
                region[2*i] = region_count
            if self.get(self.corner_table[point][i]) == player:
                region[2*i+1] = 0
                if region[2*i] != 0:
                    region_count += 1
                not_region[2*i+1] = not_region_count
            else:
                not_region[2*i+1] = 0
                if not_region[2*i] != 0:
                    not_region_count += 1
                region[2*i+1] = region_count
        if region[0] == 1 and region[7] == 1:
            return [self.neighbor_table[point][0]]
        elif not_region[0] == 1 and not_region[7] == 1:
            return []
        # Handle wrap-around at the start/end of the circular walk.
        if region[0] == 0 and region[7] == 0:
            i = -1
            while region[i] == 0:
                not_region[i] = 1
                i -= 1
        if not_region[0] == 0 and not_region[7] == 0:
            i = -1
            while not_region[i] == 0:
                region[i] = 1
                i -= 1
        # Collect the StringConnection bordering each of the 8 cells.
        conns = []
        for i in range(4):
            conns.append(self._connections.get(self._grid.get(self.neighbor_table[point][i])))
            conns.append(self._connections.get(self._grid.get(self.corner_table[point][i])))
        num_regions = max(region)
        conn_before = [None]*num_regions
        conn_after = [None]*num_regions
        for i in range(-1,7):
            if region[i]==0 and region[i+1]!=0:
                conn_before[region[i+1]-1] = conns[i]
            if region[i]!=0 and region[i+1]==0:
                conn_after[region[i]-1] = conns[i+1]
        # Runs separated by the same connection group belong to the same
        # resulting region; keep one representative per distinct region.
        distinct_regions = []
        for i in range(num_regions):
            if conn_after[i] in [conn_before[index] for index in range(i+1)]:
                distinct_regions.append(i+1)
        distinct_regions_points = []
        for dist_r in distinct_regions:
            for i in range(4):
                if region[2*i] == dist_r:
                    distinct_regions_points.append(self.neighbor_table[point][i])
                    break
        return distinct_regions_points
    def find_region(self, color, start_pos, visited=None):
        """Flood-fill from *start_pos* over points not occupied by *color*.

        Returns ``(points, border_strings)`` where *border_strings* are
        the *color* strings bounding the region.
        """
        if visited is None:
            visited = {}
        if start_pos in visited:
            return set(), set()
        all_points = {start_pos}
        all_border_strings = set()
        visited[start_pos] = True
        for neighbor in self.neighbor_table[start_pos]:
            if self.get(neighbor) != color:
                points, border_strings = self.find_region(color, neighbor, visited)
                all_points |= points
                all_border_strings |= border_strings
            else:
                all_border_strings.add(self._grid.get(neighbor))
        return all_points, all_border_strings
    def find_healthy_regions(self):
        """For each string, record its adjacent regions in which every
        empty point is a liberty of that string ("healthy" regions)."""
        for string, regions in self._regions_by_string.items():
            healthy_regions = set()
            for region in regions:
                healthy = True
                for region_point in region.points:
                    if self.get(region_point)==None and region_point not in self._liberties[string]:
                        healthy = False
                        break
                if healthy:
                    healthy_regions.add(region)
            self._vital_regions_by_string[string.color][string] = set().union(healthy_regions)
            self._healthy_regions_by_string[string.color][string] = set().union(healthy_regions)
    def find_potentially_safe_strings(self):
        """Invert the vital-region maps: for each region, record which
        strings currently count it as vital."""
        for string, regions in self._vital_regions_by_string[Player.black].items():
            for region in regions:
                if region not in self._safe_strings_by_region[Player.black]:
                    self._safe_strings_by_region[Player.black][region] = set()
                if region not in self._potentially_safe_strings_by_region[Player.black]:
                    self._potentially_safe_strings_by_region[Player.black][region] = set()
                self._safe_strings_by_region[Player.black][region].add(string)
                self._potentially_safe_strings_by_region[Player.black][region].add(string)
        for string, regions in self._vital_regions_by_string[Player.white].items():
            for region in regions:
                if region not in self._safe_strings_by_region[Player.white]:
                    self._safe_strings_by_region[Player.white][region] = set()
                if region not in self._potentially_safe_strings_by_region[Player.white]:
                    self._potentially_safe_strings_by_region[Player.white][region] = set()
                self._safe_strings_by_region[Player.white][region].add(string)
                self._potentially_safe_strings_by_region[Player.white][region].add(string)
    def reduce_healthy_regions(self):
        """Discard regions not ALL of whose bordering strings are still
        candidates, and remove them from each string's vital set."""
        discarded_regions = set()
        for region, strings in self._safe_strings_by_region[Player.black].items():
            if len(strings)<len(self._strings_by_region[region]):
                discarded_regions.add(region)
                for safe_string in self._vital_regions_by_string[Player.black]:
                    self._vital_regions_by_string[Player.black][safe_string] -= {region}
        for region in discarded_regions:
            del(self._safe_strings_by_region[Player.black][region])
        discarded_regions = set()
        for region, strings in self._safe_strings_by_region[Player.white].items():
            if len(strings)<len(self._strings_by_region[region]):
                discarded_regions.add(region)
                for safe_string in self._vital_regions_by_string[Player.white]:
                    self._vital_regions_by_string[Player.white][safe_string] -= {region}
        for region in discarded_regions:
            del(self._safe_strings_by_region[Player.white][region])
    def reduce_potentially_safe_strings(self):
        """Discard strings left with fewer than two vital regions and
        remove them from each region's candidate set."""
        discarded_strings = set()
        for string, regions in self._vital_regions_by_string[Player.black].items():
            if len(regions)<2:
                discarded_strings.add(string)
                for vital_region in self._safe_strings_by_region[Player.black]:
                    self._safe_strings_by_region[Player.black][vital_region] -= {string}
        for string in discarded_strings:
            del(self._vital_regions_by_string[Player.black][string])
        discarded_strings = set()
        for string, regions in self._vital_regions_by_string[Player.white].items():
            if len(regions)<2:
                discarded_strings.add(string)
                for vital_region in self._safe_strings_by_region[Player.white]:
                    self._safe_strings_by_region[Player.white][vital_region] -= {string}
        for string in discarded_strings:
            del(self._vital_regions_by_string[Player.white][string])
    def find_safe_strings_and_vital_regions(self):
        """Iterate the two pruning passes to a fixed point.

        The structure (strings with >= 2 vital regions, regions whose
        border strings are all safe) appears to follow Benson-style
        unconditional-life analysis -- TODO confirm against a reference.
        """
        old_num_strings = 0
        old_num_regions = 0
        self.find_healthy_regions()
        self.find_potentially_safe_strings()
        new_num_strings = len(self._vital_regions_by_string[Player.black])+len(self._vital_regions_by_string[Player.white])
        new_num_regions = len(self._safe_strings_by_region[Player.black])+len(self._safe_strings_by_region[Player.white])
        while new_num_regions != old_num_regions or new_num_strings != old_num_strings:
            self.reduce_potentially_safe_strings()
            self.reduce_healthy_regions()
            old_num_strings = new_num_strings
            old_num_regions = new_num_regions
            new_num_strings = len(self._vital_regions_by_string[Player.black])+len(self._vital_regions_by_string[Player.white])
            new_num_regions = len(self._safe_strings_by_region[Player.black])+len(self._safe_strings_by_region[Player.white])
    def _remove_string(self, string):
        """Remove a captured *string*: return its points as liberties to
        neighboring strings, update the hash, rebuild connection groups,
        and merge its stones plus surrounding regions into one region."""
        for stone_point in string.stones:
            for neighbor in self.neighbor_table[stone_point]:
                neighbor_string = self._grid.get(neighbor)
                if neighbor_string is None:
                    continue
                if neighbor_string is not string:
                    self.string_add_liberty(neighbor_string, stone_point)
            del(self._grid[stone_point])
            self._hash ^= zobrist.HASH_CODE[stone_point, string.color]
        for conn_string in self._connected[string]:
            self.string_delete_connected(conn_string, string)
        # Removing the string may split its connection group: recompute
        # the connected components among the former partners.
        all_visited_connections = set()
        for conn_string in self._connected[string]:
            if conn_string not in all_visited_connections:
                found_connections = self.find_connections(conn_string)
                all_visited_connections |= found_connections
                new_connection = StringConnection(conn_string.color,found_connections)
                for visited_string in found_connections:
                    self._connections[visited_string] = new_connection
        del(self._connections[string])
        # The freed stones and every adjacent region fuse into one region.
        old_regions_this = self._regions_by_string.get(string)
        new_region_this_stones = string.stones
        for region in old_regions_this:
            new_region_this_stones |= region.points
        new_region_this = Region(string.color, new_region_this_stones)
        for region_point in new_region_this.points:
            self.assign_new_region_to_point(string.color, new_region_this, region_point)
        new_region_this_strings = set()
        for region in old_regions_this:
            new_region_this_strings |= self._strings_by_region.get(region)
        new_region_this_strings -= {string}
        self._strings_by_region[new_region_this] = new_region_this_strings
        for region_string in new_region_this_strings:
            self._regions_by_string[region_string] -= old_regions_this
            self._regions_by_string[region_string] |= {new_region_this}
        del(self._regions_by_string[string])
        for region in old_regions_this:
            del(self._strings_by_region[region])
    def place_stone(self, player, point):
        """Place a *player* stone at *point*, updating every incremental
        structure (grid, liberties, connections, regions, Zobrist hash),
        removing captured opponent strings, and finally re-running the
        safety analysis."""
        assert self.is_on_grid(point)
        assert self._grid.get(point) is None
        adjacent_same_color = []
        same_color_connected = []
        same_color_connections = []
        adjacent_opposite_color = []
        liberties = []
        # How the surrounding empty region splits must be computed BEFORE
        # the stone is added to the grid.
        div_reg_result = self.divide_region(player, point)
        for neighbor in self.neighbor_table[point]:
            neighbor_string = self._grid.get(neighbor)
            neighbor_connection = self._connections.get(neighbor_string)
            if neighbor_string is None:
                liberties.append(neighbor)
            elif neighbor_string.color == player:
                if neighbor_string not in adjacent_same_color:
                    adjacent_same_color.append(neighbor_string)
                if neighbor_connection not in same_color_connections:
                    same_color_connections.append(neighbor_connection)
            else:
                if neighbor_string not in adjacent_opposite_color:
                    adjacent_opposite_color.append(neighbor_string)
        for corner in self.corner_table[point]:
            corner_connected = self._grid.get(corner)
            if ((corner_connected is not None) and (corner_connected.color == player)
                and (corner_connected not in same_color_connected)):
                same_color_connected.append(corner_connected)
            corner_connection = self._connections.get(self._grid.get(corner))
            if ((corner_connection is not None) and (corner_connection.color == player)
                and (corner_connection not in same_color_connections)):
                same_color_connections.append(corner_connection)
        # Border points count as connected to the edge sentinel string.
        if len(self.neighbor_table[point]) < 4:
            same_color_connected.append(self.edge_strings[player])
            if self._connections.get(self.edge_strings[player]) not in same_color_connections:
                same_color_connections.append(self._connections.get(self.edge_strings[player]))
        affected_regions = set()
        for string in adjacent_same_color:
            affected_regions |= self._regions_by_string[string]
        # Build the new (possibly merged) string and its connection group.
        new_string = GoString(player, [point])
        self._liberties[new_string] = set(liberties)
        self._connected[new_string] = set(same_color_connected)
        for same_color_string in adjacent_same_color:
            new_string = self.strings_merged(new_string, same_color_string)
        self._connected[new_string] -= set(adjacent_same_color)
        for conn_string in self._connected[new_string]:
            self._connected[conn_string] -= set(adjacent_same_color)
        for connected_string in self._connected[new_string]:
            self.string_add_connected(connected_string, new_string)
        new_string_connection = StringConnection(player,[new_string])
        for same_color_connection in same_color_connections:
            new_string_connection = new_string_connection.merged_with(same_color_connection)
        new_string_connection = new_string_connection.without_strings(adjacent_same_color)
        for same_color_string in adjacent_same_color:
            del(self._connections[same_color_string])
        for new_string_point in new_string.stones:
            self._grid[new_string_point] = new_string
        for new_member_string in new_string_connection.strings:
            self._connections[new_member_string] = new_string_connection
        self._hash ^= zobrist.HASH_CODE[point, player]
        # Region bookkeeping: the move either fills, shrinks, or splits
        # the empty region it was played into.
        old_region = self.read_region_by_point(player, point)
        old_region_strings = self._strings_by_region.get(old_region)
        for region in (affected_regions-{old_region}):
            self._strings_by_region[region] -= set(adjacent_same_color)
            self._strings_by_region[region] |= {new_string}
        if old_region_strings==None:
            old_region_strings=set()
        if len(div_reg_result)==0:
            # The stone filled the last empty point of the region.
            new_string_regions = set()
            for string in adjacent_same_color:
                new_string_regions |= self._regions_by_string[string]
            new_string_regions -= {old_region}
            self.delete_point_from_region_by_point(player, point)
            for string in adjacent_same_color:
                del(self._regions_by_string[string])
            if old_region in self._strings_by_region:
                del(self._strings_by_region[old_region])
            self._regions_by_string[new_string] = new_string_regions
        elif len(div_reg_result)==1:
            # The region shrinks but stays in one piece.
            new_region = Region(player,old_region.points - {point})
            new_string_regions = set()
            for string in adjacent_same_color:
                new_string_regions |= self._regions_by_string[string]
            new_string_regions -= {old_region}
            new_string_regions |= {new_region}
            new_region_strings = old_region_strings - set(adjacent_same_color)
            new_region_strings.add(new_string)
            self.delete_point_from_region_by_point(player, point)
            if old_region in self._strings_by_region:
                del(self._strings_by_region[old_region])
            for string in old_region_strings:
                self._regions_by_string[string] -= {old_region}
            for string in adjacent_same_color:
                del(self._regions_by_string[string])
            for region_point in new_region.points:
                self.assign_new_region_to_point(player, new_region, region_point)
            self._regions_by_string[new_string] = new_string_regions
            for string in new_region_strings:
                self._regions_by_string[string] |= {new_region}
            self._strings_by_region[new_region] = new_region_strings
        else:
            # The region splits into several pieces: flood-fill each one.
            new_regions = [self.find_region(player,start) for start in div_reg_result]
            self.delete_point_from_region_by_point(player, point)
            if old_region in self._strings_by_region:
                del(self._strings_by_region[old_region])
            for string in old_region_strings:
                self._regions_by_string[string] -= {old_region}
            new_string_regions = set()
            for string in adjacent_same_color:
                new_string_regions |= self._regions_by_string[string]
            new_string_regions -= {old_region}
            for string in adjacent_same_color:
                del(self._regions_by_string[string])
            self._regions_by_string[new_string] = new_string_regions
            for new_region_data in new_regions:
                new_region = Region(player,new_region_data[0])
                for region_point in new_region.points:
                    self.assign_new_region_to_point(player, new_region, region_point)
                for string in new_region_data[1]:
                    self._regions_by_string[string] |= {new_region}
                self._strings_by_region[new_region] = new_region_data[1]
        # Remove the played point from opposing liberties; a string left
        # with none is captured.
        for other_color_string in adjacent_opposite_color:
            self.string_delete_liberty(other_color_string, point)
            if not self.num_liberties(other_color_string):
                self._remove_string(other_color_string)
        self.find_safe_strings_and_vital_regions()
    def is_self_capture(self, player, point):
        """True if a *player* play at *point* would leave the merged
        string with no liberties.  An opposing neighbor at one liberty
        means the play captures instead, so it is not self-capture."""
        friendly_strings = []
        for neighbor in self.neighbor_table[point]:
            neighbor_string = self._grid.get(neighbor)
            if neighbor_string is None:
                return False
            elif neighbor_string.color == player:
                friendly_strings.append(neighbor_string)
            else:
                if self.num_liberties(neighbor_string) == 1:
                    return False
        if all(self.num_liberties(neighbor) == 1 for neighbor in friendly_strings):
            return True
        return False
    def zobrist_hash(self):
        """Return the incrementally maintained Zobrist hash of the grid."""
        return self._hash
    def __eq__(self, other):
        return isinstance(other, Board) and \
            self.num_rows == other.num_rows and \
            self.num_cols == other.num_cols and \
            self._grid == other._grid
    def __deepcopy__(self, memodict={}):
        # NOTE(review): the safety caches (_safe_strings_by_region,
        # _vital_regions_by_string, ...) are NOT copied; the copy keeps the
        # fresh ones from __init__ until the next place_stone recomputes
        # them.  Confirm no caller reads them from a copied board before a
        # move is played on it.
        copied = Board(self.num_rows, self.num_cols)
        copied._grid = copy.copy(self._grid)
        copied._liberties = copy.copy(self._liberties)
        copied._connected = copy.deepcopy(self._connected)
        copied._connections = copy.deepcopy(self._connections)
        copied._region_by_point_black = copy.copy(self._region_by_point_black)
        copied._region_by_point_white = copy.copy(self._region_by_point_white)
        copied._regions_by_string = copy.deepcopy(self._regions_by_string)
        copied._strings_by_region = copy.deepcopy(self._strings_by_region)
        copied._hash = self._hash
        return copied
class GameState():
    """Immutable snapshot of a game: board, player to move, and a link to
    the previous state (forming the full move history)."""
    def __init__(self, board, next_player, previous, move):
        self.board = board
        self.next_player = next_player
        self.previous_state = previous
        if previous is None:
            self.previous_states = frozenset()
        else:
            # Accumulated (player, zobrist-hash) pairs, used as a fast
            # prefilter for ko detection.
            self.previous_states = frozenset(
                previous.previous_states |
                {(previous.next_player, previous.board.zobrist_hash())})
        self.last_move = move
    def apply_move(self, move):
        """Return the state after *move*; the board is deep-copied only
        for actual plays (pass/resign share the current board)."""
        if move.is_play:
            next_board = copy.deepcopy(self.board)
            next_board.place_stone(self.next_player, move.point)
        else:
            next_board = self.board
        return GameState(next_board, self.next_player.other, self, move)
    @classmethod
    def new_game(cls, board_size):
        """Start a new game; *board_size* is an int (square board) or a
        (rows, cols) tuple.  Black moves first."""
        if isinstance(board_size, int):
            board_size = (board_size, board_size)
        board = Board(*board_size)
        return GameState(board, Player.black, None, None)
    @property
    def situation(self):
        return (self.next_player, self.board)
    def is_over(self):
        """The game ends on a resignation or two consecutive passes."""
        if self.last_move is None:
            return False
        if self.last_move.is_resign:
            return True
        second_last_move = self.previous_state.last_move
        if second_last_move is None:
            return False
        return self.last_move.is_pass and second_last_move.is_pass
    def is_move_self_capture(self, player, move):
        if not move.is_play:
            return False
        return self.board.is_self_capture(player, move.point)
    def does_move_violate_ko(self, player, move):
        """Ko check: hash-set prefilter, then an exact board comparison
        along the history chain to guard against hash collisions."""
        if not move.is_play:
            return False
        next_board = self.apply_move(move).board
        next_situation = (player.other, next_board.zobrist_hash())
        ko_found = False
        if next_situation in self.previous_states:
            next_situation_board = (player.other, next_board)
            past_state = self.previous_state
            while past_state is not None and not ko_found:
                if past_state.situation == next_situation_board:
                    ko_found = True
                past_state = past_state.previous_state
        return ko_found
    def is_valid_move(self, move):
        """Pass/resign are always legal while the game runs; a play needs
        an empty point, no self-capture, and no ko violation."""
        if self.is_over():
            return False
        if move.is_pass or move.is_resign:
            return True
        return (
            self.board.get(move.point) is None and
            not self.is_move_self_capture(self.next_player, move) and
            not self.does_move_violate_ko(self.next_player, move))
    def is_sensible_move(self, move):
        """Like is_valid_move, but additionally rejects plays inside a
        region either side's safety analysis has marked as safe."""
        if self.is_over():
            return False
        if move.is_pass or move.is_resign:
            return True
        if self.board.get(move.point) is None:
            own_region_played_in = self.board.read_region_by_point(self.next_player, move.point)
            other_region_played_in = self.board.read_region_by_point(self.next_player.other, move.point)
            # NOTE(review): if the point is empty but the condition below
            # fails, the method falls through and returns None (falsy), not
            # False; callers only use it in boolean context.
            if (not self.is_move_self_capture(self.next_player, move) and
                self.board._safe_strings_by_region[self.next_player].get(own_region_played_in) == None and
                self.board._safe_strings_by_region[self.next_player.other].get(other_region_played_in) == None):
                return (not self.does_move_violate_ko(self.next_player, move))
        else:
            return False
    def legal_moves(self):
        """All valid plays for the player to move, plus a pass move."""
        moves = []
        for row in range(1, self.board.num_rows + 1):
            for col in range(1, self.board.num_cols + 1):
                move = Move.play(Point(row, col))
                if self.is_valid_move(move):
                    moves.append(move)
        moves.append(Move.pass_turn())
        return moves
    def sensible_legal_moves(self):
        """All sensible plays (see is_sensible_move), plus a pass move."""
        moves = []
        for row in range(1, self.board.num_rows + 1):
            for col in range(1, self.board.num_cols + 1):
                move = Move.play(Point(row, col))
                if self.is_sensible_move(move):
                    moves.append(move)
        moves.append(Move.pass_turn())
        return moves
    def winner(self):
        """Winner of a finished game, or None while in progress.  After a
        resignation, the player to move (the non-resigner) wins."""
        if not self.is_over():
            return None
        if self.last_move.is_resign:
            return self.next_player
        game_result = compute_game_result(self)
        return game_result.winner
    def margin(self):
        """Winning margin of a finished game, or None while in progress."""
        if not self.is_over():
            return None
        game_result = compute_game_result(self)
        return game_result.winning_margin
| StarcoderdataPython |
3247535 | <gh_stars>1-10
import unittest
import numpy as np
from PEPit.pep import PEP
from PEPit.point import Point
from PEPit.expression import Expression
from PEPit.function import Function
from PEPit.functions.smooth_strongly_convex_function import SmoothStronglyConvexFunction
class TestPEP(unittest.TestCase):
def setUp(self):
# smooth-strongly convex gradient descent set up
self.L = 1.
self.mu = 0.1
self.gamma = 1 / self.L
# Instantiate PEP
self.problem = PEP()
# Declare a strongly convex smooth function
self.func = self.problem.declare_function(SmoothStronglyConvexFunction, mu=self.mu, L=self.L)
# Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
self.xs = self.func.stationary_point()
# Then define the starting point x0 of the algorithm
self.x0 = self.problem.set_initial_point()
# Set the initial constraint that is the distance between x0 and x^*
self.problem.set_initial_condition((self.x0 - self.xs) ** 2 <= 1)
# Run n steps of the GD method
self.x1 = self.x0 - self.gamma * self.func.gradient(self.x0)
# Set the performance metric to the function values accuracy
self.problem.set_performance_metric((self.x1 - self.xs) ** 2)
def test_is_instance(self):
self.assertIsInstance(self.problem, PEP)
self.assertEqual(len(self.problem.list_of_functions), 1)
self.assertEqual(len(self.problem.list_of_points), 1)
self.assertEqual(len(self.problem.list_of_constraints), 1)
self.assertEqual(len(self.problem.list_of_performance_metrics), 1)
self.assertEqual(len(self.func.list_of_constraints), 0)
pepit_tau = self.problem.solve(verbose=0)
self.assertEqual(len(self.func.list_of_constraints), 2)
self.assertEqual(Point.counter, 3)
self.assertEqual(Expression.counter, 2)
self.assertEqual(Function.counter, 1)
def test_eval_points_and_function_values(self):
self.problem.solve(verbose=0)
for triplet in self.func.list_of_points:
point, gradient, function_value = triplet
self.assertIsInstance(point.eval(), np.ndarray)
self.assertIsInstance(gradient.eval(), np.ndarray)
self.assertIsInstance(function_value.eval(), float)
def test_eval_constraint_dual_values(self):
pepit_tau = self.problem.solve(verbose=0)
theoretical_tau = max((1 - self.mu * self.gamma) ** 2, (1 - self.L * self.gamma) ** 2)
self.assertAlmostEqual(pepit_tau, theoretical_tau, delta=theoretical_tau * 10 ** -3)
for condition in self.problem.list_of_constraints:
self.assertIsInstance(condition._dual_variable_value, float)
self.assertAlmostEqual(condition._dual_variable_value, pepit_tau, delta=pepit_tau * 10 ** -3)
for constraint in self.func.list_of_constraints:
self.assertIsInstance(constraint._dual_variable_value, float)
self.assertAlmostEqual(constraint._dual_variable_value,
2 * self.gamma * max(abs(1 - self.mu * self.gamma), abs(1 - self.L * self.gamma)),
delta=2 * self.gamma * 10 ** 3)
    def test_trace_trick(self):
        """Sanity checks for the dimension-reduction ("trace") heuristic."""
        # Compute pepit_tau very basically
        pepit_tau = self.problem.solve(verbose=0)
        # Return the full problem and verify the problem value is still pepit_tau
        prob = self.problem.solve(verbose=0, return_full_cvxpy_problem=True, dimension_reduction_heuristic=None)
        self.assertAlmostEqual(prob.value, pepit_tau, delta=10 ** -2)
        # Return the full tracetrick problem and verify that its value is not pepit_tau anymore but the trace value
        prob2 = self.problem.solve(verbose=0, return_full_cvxpy_problem=True, dimension_reduction_heuristic="trace")
        self.assertAlmostEqual(prob2.value, 1 / 2, delta=10 ** -2)
        # Verify that, even with tracetrick, the solve method returns the worst-case performance, not the trace value.
        pepit_tau2 = self.problem.solve(verbose=0, dimension_reduction_heuristic="trace")
        self.assertAlmostEqual(pepit_tau, pepit_tau2, delta=10 ** -2)
def tearDown(self):
Point.counter = 0
Expression.counter = 0
Function.counter = 0
| StarcoderdataPython |
class Monster:
    """A generic monster with a name, a colour and a basic attack."""

    def __init__(self, name, color):
        """Remember the monster's display name and colour."""
        self.name, self.color = name, color

    def attack(self):
        """Announce a plain attack on stdout."""
        print('I am attacking...')
class Fogthing(Monster):
    """Monster subclass with a deadlier attack message and a growl."""

    def attack(self):
        """Override the base attack with a more menacing message."""
        print('I am killing...')

    def make_sound(self):
        """Growl, followed by a blank line (the trailing ``\\n``)."""
        print('Grrrrrrrrrr\n')
# Demo: instantiate the subclass and exercise its overridden attack.
fogthing = Fogthing("Fogthing", "Yellow")
fogthing.attack()
fogthing.make_sound() | StarcoderdataPython |
3394307 | <filename>burndown.py
import sys, gitlab, collections, datetime, dateutil.parser, pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy.interpolate as interpolate
import scipy.signal as signal
def dd_int():
    """Return a fresh ``collections.defaultdict(int)``.

    A named module-level factory rather than a lambda -- presumably so the
    nested defaultdicts built with it stay picklable for the issue cache
    (pickle cannot serialize lambda factories); confirm before inlining.
    """
    return collections.defaultdict(int)
# hotpatch python-gitlab until a release includes c08c913
# (backports pagination-following in _raw_list for version 0.15; any other
# version uses the stock class unchanged).
if gitlab.__version__ == '0.15':
    class Gitlab (gitlab.Gitlab):
        def _raw_list(self, path, cls, extra_attrs={}, **kwargs):
            # The mutable default `extra_attrs={}` is copied before use, so
            # the usual shared-default pitfall does not apply here.
            params = extra_attrs.copy()
            params.update(kwargs.copy())
            get_all_results = kwargs.get('all', False)
            r = self._raw_get(path, **params)
            gitlab.raise_error_from_response(r, gitlab.GitlabListError)
            # Strip pagination/meta keys so they are not forwarded to the
            # object constructors below.
            for key in ['all', 'page', 'per_page', 'sudo', 'next_url']:
                if key in params:
                    del params[key]
            params['_from_api'] = True
            results = [cls(self, item, **params) for item in r.json() if item is not None]
            # When 'all' was requested, follow the HTTP 'next' link header
            # recursively to collect the remaining pages.
            if ('next' in r.links and 'url' in r.links['next'] and get_all_results is True):
                args = kwargs.copy()
                args['next_url'] = r.links['next']['url']
                results.extend(self.list(cls, **args))
            return results
else:
    Gitlab = gitlab.Gitlab
def main(gitlab_url = None, gitlab_secret = None, project = None, since = None, output = None):
    """Build a stacked burndown chart of open issues per milestone.

    Issues are fetched incrementally from GitLab on top of a local pickle
    cache ('issue_cache.pickle'), converted into +1/-1 open/close deltas per
    milestone, accumulated into open-issue counts over time, smoothed, and
    rendered as a stack plot.

    :param gitlab_url: base URL of the GitLab instance.
    :param gitlab_secret: private token used to authenticate.
    :param project: project id or path whose issues are charted.
    :param since: optional ISO date string; clip the plot to t >= since.
    :param output: optional file path; save the figure instead of showing it.
    :return: process exit status (0 on success, 1 on usage error).
    """
    if any([x is None for x in [gitlab_url, gitlab_secret, project]]):
        sys.stderr.write("usage: python3 %s <gitlab_url> <gitlab_secret> <project> [since-date-iso-fmt] [output-file]\n" % sys.argv[0])
        return 1
    # Every timestamp at which some issue was opened or closed.
    all_points = set()
    # milestone id -> {timestamp: +1 (issue opened) / -1 (issue closed)}
    milestone_issues = collections.defaultdict(dd_int)
    milestone_names = {}
    milestone_start = {}
    most_recent = None
    cache = None
    # Warm-start from the cache when possible; an unreadable cache simply
    # means a full refetch.
    try:
        with open('issue_cache.pickle', 'rb') as f:
            cache = pickle.load(f)
        all_points = cache['all_points']
        milestone_issues = cache['milestone_issues']
        milestone_names = cache['milestone_names']
        milestone_start = cache['milestone_start']
        most_recent = max(all_points)
    # BUGFIX: also catch UnpicklingError so a corrupt cache file triggers a
    # clean refetch instead of crashing.
    except (IOError, EOFError, pickle.UnpicklingError):
        pass
    gl = Gitlab(gitlab_url, gitlab_secret)
    proj = gl.projects.get(project)
    done = False
    page = 1
    # Page through issues newest-first until we reach ones already cached.
    while not done:
        issues = proj.issues.list(order_by='created_at', sort='desc', page=page, per_page=20)
        if len(issues) == 0:
            break
        page += 1
        for i in issues:
            # open time
            open_time = i.created_at
            # close time, taken from the system note that closed the issue
            close_time = None
            for note in i.notes.list(order_by='created_at', sort='asc', all=True):
                if note.system and note.body.startswith('Status changed to closed'):
                    close_time = note.created_at
            # convert times to datetime obj
            open_time = dateutil.parser.parse(open_time)
            if close_time is not None:
                close_time = dateutil.parser.parse(close_time)
            # determine if we have caught up with the cache
            if most_recent is not None and open_time <= most_recent:
                done = True
                break
            # resolve milestone (issues without one share the 'None' bucket)
            milestone = None, 'None'
            if i.milestone is not None:
                milestone = i.milestone.iid, i.milestone.title
            if not milestone[0] in milestone_names:
                milestone_names[milestone[0]] = milestone[1]
            if milestone[0] not in milestone_start or open_time < milestone_start[milestone[0]]:
                milestone_start[milestone[0]] = open_time
            # update deltas
            milestone_issues[milestone[0]][open_time] += 1
            milestone_issues[milestone[0]][close_time] -= 1
            all_points |= set([open_time, close_time])
    # Remove 'None' point, it will break everything (still-open issues
    # contributed close_time = None above).
    all_points -= set([None])
    # Persist the cache for the next run. BUGFIX: pickle.dump returns None,
    # so its result is no longer assigned back to `cache`.
    with open('issue_cache.pickle', 'wb') as f:
        pickle.dump({
            'milestone_issues': milestone_issues,
            'all_points': all_points,
            'milestone_names': milestone_names,
            'milestone_start': milestone_start
        }, f)
    # Build x (sorted event times) and y (cumulative open-issue counts per
    # milestone, ordered by each milestone's first-seen time).
    x = sorted(all_points)
    y = [
        np.cumsum([
            float(v[t]) for t in x
        ])
        for k, v in sorted(milestone_issues.items(), key=lambda item: milestone_start[item[0]])
    ]
    # Restrict domain
    if since is not None:
        since = dateutil.parser.parse(since)
        x = [t for t in x if t >= since]
        y = [
            yy[-len(x):]
            for yy in y
        ]
    # Filter series that are zero everywhere; both lists are iterated in the
    # same milestone_start order, so labels stay aligned with y.
    labels = [
        v
        for i, (k, v) in enumerate(sorted(
            milestone_names.items(), key=lambda p: milestone_start[p[0]]
        ))
        if any(cnt != 0 for cnt in y[i])
    ]
    y = [
        yy
        for yy in y
        if any(cnt != 0 for cnt in yy)
    ]
    # Smooth: resample on a uniform 250-point grid, then Savitzky-Golay.
    x_rel = [(t - x[0]).total_seconds() for t in x]
    xs_rel = np.linspace(x_rel[0], x_rel[-1], 250)
    ys = [
        signal.savgol_filter(
            interpolate.interp1d(x_rel, yy, kind='slinear')(xs_rel),
            51,
            4
        )
        for yy in y
    ]
    xs = [x[0] + datetime.timedelta(seconds=t) for t in xs_rel]
    # Generate color map (one distinct colour per series)
    cmap = cm.get_cmap('viridis')
    c = [cmap(int(cmap.N*i/len(ys))) for i in range(len(ys))]
    # Truncate long names so the legend stays readable
    milestone_names = {
        i: (n if len(n) < 16 else n[:13]+"...")
        for i,n in milestone_names.items()
    }
    # Generate plot
    plt.figure(figsize=(10,4))
    plt.stackplot(xs, *ys, labels=labels, colors=c, baseline='zero', edgecolor='none')
    plt.legend(loc='upper center', shadow=True, ncol=3, fontsize='12')
    plt.ylim(0, plt.ylim()[1]*1.25)
    if output is None:
        plt.show()
    else:
        plt.savefig(output)
    return 0
if __name__ == "__main__":
    # CLI entry point: forward positional args and use main()'s return value
    # as the process exit status.
    sys.exit(main(*sys.argv[1:]))
| StarcoderdataPython |
31434 | <filename>vscode/app.py
from flask import Flask, render_template, request, make_response, g
import os
import socket
import random
import json
import collections
# Hostname of the serving machine, rendered into the page template.
hostname = socket.gethostname()
# In-memory tally: option label -> vote count. Lost on restart.
votes = collections.defaultdict(int)
app = Flask(__name__)
def getOptions():
    """Return the two vote option labels as a ``(option_a, option_b)`` pair."""
    return 'Cats', 'Dogs'
@app.route("/", methods=['POST','GET'])
def hello():
    """Render the voting page; on POST, record one vote first.

    The form posts ``vote`` as "a" or "b", which is mapped to the matching
    option label before being tallied in the module-level ``votes`` dict.
    """
    vote = None
    option_a, option_b = getOptions()
    if request.method == 'POST':
        vote = request.form['vote']
        # Map the short form value to the option label used as tally key.
        vote = option_a if vote == "a" else option_b
        votes[vote] = votes[vote] + 1
    resp = make_response(render_template(
        'index.html',
        option_a=option_a,
        option_b=option_b,
        hostname=hostname,
        votes_a=votes[option_a],
        votes_b=votes[option_b],
    ))
    return resp
if __name__ == "__main__":
    extra_files = []
    if "development" == os.getenv("FLASK_ENV"):
        # In development, reload templates on change and also watch the
        # stylesheet so editing it restarts the dev server.
        app.jinja_env.auto_reload = True
        app.config['TEMPLATES_AUTO_RELOAD'] = True
        extra_files=[
            "./static/stylesheets/style.css"
        ]
    app.run(
        host='0.0.0.0',
        port=8080,
        extra_files=extra_files
    )
| StarcoderdataPython |
3331565 | <gh_stars>0
import re
# Neighbour moves on a hex grid stored as offset coordinates (column a,
# row b); the column shift of diagonal moves depends on row parity.
# NOTE(review): looks like an odd-row offset layout -- confirm before reuse.
DIRECTIONS = {
    'e': lambda a, b: (a+1, b),
    'se': lambda a, b: (a+1 if b % 2 else a, b+1),
    'ne': lambda a, b: (a+1 if b % 2 else a, b-1),
    'w': lambda a, b: (a-1, b),
    'sw': lambda a, b: (a-1 if (b+1) % 2 else a, b+1),
    'nw': lambda a, b: (a-1 if (b+1) % 2 else a, b-1)
}
lines = [n.strip() for n in open('d24in.txt').read().splitlines()]
# Tokenise each line into moves. The two-letter tokens all start with
# 'n'/'s', so they cannot be mis-split by the single-letter 'e'/'w'.
pattern = re.compile(r'(e|se|ne|w|sw|nw)')
tiles = [pattern.findall(line) for line in lines]
black_tiles = set()
# Walk each path from the origin; flipping a tile toggles set membership.
for tile in tiles:
    x, y = (0, 0)
    for direction in tile:
        x, y = DIRECTIONS[direction](x, y)
    if (x, y) in black_tiles:
        black_tiles.remove((x, y))
    else:
        black_tiles.add((x, y))
print(len(black_tiles))
| StarcoderdataPython |
3305213 | <reponame>hefen1/chromium<filename>tools/perf/profile_creators/profile_safe_url_generator.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import HTMLParser
import json
import logging
import urllib2
import urlparse
class _HRefParser(HTMLParser.HTMLParser):
  """Collects the href attribute of every <a> tag fed to it.

  Python 2 module layout (``HTMLParser``); on Python 3 this would be
  ``html.parser.HTMLParser``.
  """
  def __init__(self):
    HTMLParser.HTMLParser.__init__(self)
    # Accumulated (possibly relative) URLs, in document order.
    self.hrefs = []
  def handle_starttag(self, tag, attrs):
    # Record every anchor's href; all other tags/attributes are ignored.
    if tag == "a":
      for name, value in attrs:
        if name == "href":
          self.hrefs.append(value)
def GenerateSafeUrls():
  """Prints a list of safe urls.
  Generates a safe list of urls from a seed list. Each href in the HTML
  fetched from the url from the seed list is placed into the safe list. The
  safe list contains unsanitized urls.
  NOTE: this function is Python 2 (urllib2 and the print statement below).
  """
  # A list of websites whose hrefs are unlikely to link to sites that contain
  # malware.
  seed_urls = [
    "https://www.cnn.com",
    "https://www.youtube.com",
    "https://www.facebook.com",
    "https://www.twitter.com",
    "https://www.yahoo.com",
    "https://www.amazon.com",
    "https://www.wikipedia.com",
    "https://www.bing.com",
    "https://www.dailymotion.com",
    "https://www.stackoverflow.com",
    "https://www.google.com/#q=dumpling",
    "http://www.baidu.com/s?wd=rice",
    "http://www.baidu.com/s?wd=cow",
    "https://www.google.com/#q=fox",
    "http://www.yahoo.co.jp/",
    "http://www.yandex.ru/",
    "https://www.imdb.com/",
    "http://www.huffingtonpost.com/",
    "https://www.deviantart.com/",
    "http://www.wsj.com/",
  ]
  safe_urls = set()
  for url in seed_urls:
    try:
      # Fetch and parse the HTML.
      response = urllib2.urlopen(url)
      encoding = response.headers.getparam('charset')
      html = response.read()
      if encoding:
        html = html.decode(encoding)
      parser = _HRefParser()
      parser.feed(html)
    except:
      # Log with traceback, then re-raise: a failing seed aborts the run.
      logging.exception("Error fetching or parsing url: %s", url)
      raise
    # Looks for all hrefs.
    for relative_url in parser.hrefs:
      if not relative_url:
        continue
      # Resolve relative links against the page they were found on.
      absolute_url = urlparse.urljoin(url, relative_url)
      safe_urls.add(absolute_url)
  # Sort the urls, to make them easier to view in bulk.
  safe_urls_list = list(safe_urls)
  safe_urls_list.sort()
  # Python 2 print statement: emits the list as indented JSON on stdout.
  print json.dumps(safe_urls_list, indent=2, separators=(",", ":"))
if __name__ == "__main__":
  # Script entry point: print the generated URL list as JSON to stdout.
  GenerateSafeUrls()
| StarcoderdataPython |
3300903 | <filename>swagger_server/test/operational_controllers/test_resource_permissions_for_roles.py
# coding: utf-8
from __future__ import absolute_import
from ge_core_shared import db_actions, decorators
from flask import json
from project.settings import API_KEY_HEADER
from swagger_server.models import Permission, Resource, Role, RoleResourcePermission
from swagger_server.test import BaseTestCase
class TestOperationalController(BaseTestCase):
    """Integration tests for the resource_permissions_for_roles endpoint.

    setUp seeds NUM_TESTS (permission, resource, role) triples, each linked
    by one RoleResourcePermission row.
    """
    # Number of permission/resource/role triples to seed.
    NUM_TESTS = 10
    @decorators.db_exception
    def setUp(self):
        super().setUp()
        self.headers = {API_KEY_HEADER: "test-api-key"}
        # Role ids created below, in creation order; used by the GET test.
        self.role_ids = []
        for i in range(0, self.NUM_TESTS):
            # Create permission
            data = {
                "name": f"permission_{i}",
                "description": f"Permission {i}",
            }
            permission = db_actions.crud(
                model="Permission",
                api_model=Permission,
                data=data,
                action="create"
            )
            # Create resource
            data = {
                "urn": f"resource_{i}",
                "description": f"Resource {i}",
            }
            resource = db_actions.crud(
                model="Resource",
                api_model=Resource,
                data=data,
                action="create"
            )
            # Create role
            data = {
                "label": f"role_{i}",
                "description": f"Role {i}",
            }
            role = db_actions.crud(
                model="Role",
                api_model=Role,
                data=data,
                action="create"
            )
            self.role_ids.append(role.id)
            # Create role resource permission linking the three rows above.
            data = {
                "role_id": role.id,
                "resource_id": resource.id,
                "permission_id": permission.id
            }
            role_resource_permission = db_actions.crud(
                model="RoleResourcePermission",
                api_model=RoleResourcePermission,
                data=data,
                action="create"
            )
    def test_get_resource_permissions_for_roles(self):
        """Test case for get_user_site_role_labels_aggregated

        Requesting i role ids must return exactly i permission mappings;
        an empty role_ids parameter must yield HTTP 400.
        """
        for i in range(1, 10):
            response = self.client.open(
                '/api/v1/ops/resource_permissions_for_roles?role_ids={}'.format(
                    ",".join(str(e) for e in self.role_ids[0:i])),
                method='GET', headers=self.headers)
            r_data = json.loads(response.data)
            self.assertEqual(len(r_data), i)
        # Specifying no role_ids returns an HTTP 400
        response = self.client.open(
            '/api/v1/ops/resource_permissions_for_roles?role_ids=',
            method='GET', headers=self.headers)
        self.assertEqual(response.status_code, 400)
| StarcoderdataPython |
3246282 | #!/usr/bin/env python
import sys
from comms import *
import serial
import time
# Usage: <script> <serial-port> <board-address>
if len(sys.argv) != 3:
    print("give me a serial port and address")
    exit()
port = sys.argv[1]
# Open the controller's serial link at the protocol's default baud rate.
s = serial.Serial(port=port, baudrate=COMM_DEFAULT_BAUD_RATE, timeout=0.1)
address = int(sys.argv[2])
client = BLDCControllerClient(s, True)
# Leave the bootloader so the application firmware can answer queries.
client.leaveBootloader(address)
time.sleep(0.2)  # give the board time to reboot into the application
s.reset_input_buffer()  # drop any bootloader chatter before reading
print(client.readCalibration(address))
| StarcoderdataPython |
118848 | <filename>src/chaos_service/main.py
import click
from flask import Flask
from chaos_service.api import api, config
from chaos_service.config.config_storage import ConfigStorage
# Root click group; the failure-mode commands below attach to it. The
# docstring doubles as the CLI help text, so it is left unchanged.
@click.group()
@click.version_option("0.0.1")
def cli():
    """
    First version of chaos service.
    """
# NOTE(review): the option help mentions "500 error" although this command
# simulates bad-request failures; the misspelt "sucess" option names are
# part of the public CLI and are intentionally left untouched.
@cli.command()
@click.option('--start-at-request', help='Start returning 500 error at request number N. (This is the default option starting at 10)', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning 200.', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def bad_request(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api for to test bad request failures."""
    # Persist the failure profile, then serve the blueprint on port 8080.
    config.create_config("bad_request", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
@cli.command()
@click.option('--start-at-request', help='Start returning 500 error at request number N. (This is the default option starting at 10)', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning 200.', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def internal_server_error(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api to test internal server error failures."""
    # Persist the failure profile, then serve the blueprint on port 8080.
    config.create_config("internal_server_error", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
@cli.command()
@click.option('--start-at-request', help='Start returning 502 error at request number N. (This is the default option starting at 10)', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning 200.', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def bad_gateway(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api for to test bad gateway failures."""
    # Persist the failure profile, then serve the blueprint on port 8080.
    config.create_config("bad_gateway", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
@cli.command()
@click.option('--start-at-request', help='Start returning 503 error at request number N. (This is the default option starting at 10)', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning 200.', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def service_unavailable(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api for to test service unavailable failures."""
    # Persist the failure profile, then serve the blueprint on port 8080.
    config.create_config("service_unavailable", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
@cli.command()
@click.option('--start-at-request', help='Start returning 504 error at request number N. (This is the default option starting at 10)', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning 200.', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def gateway_timeout(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api for to test gateway timeout failures."""
    # Persist the failure profile, then serve the blueprint on port 8080.
    config.create_config("gateway_timeout", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
# NOTE(review): the first option's help still says "500 error" although this
# command simulates connection refusal (server shutdown); confirm wording.
@cli.command()
@click.option('--start-at-request', help='Start returning 500 error at request number N. (This is the default option starting at 10)', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning 200.', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def connection_refused(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api to test connection refused failures. It will shut down the server when error criteria appers."""
    # Persist the failure profile, then serve the blueprint on port 8080.
    config.create_config("connection_refused", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
@cli.command()
@click.option('--start-at-request', help='Start returning 500 error at request number N.', required=False)
@click.option('--chance-of-sucess', help='Probability of returning error(0-100). If 100 informed it has 100 percent of chance of returning status 200 OK. (For chaos this is the default option with 90 percent of success rate)', required=False)
@click.option('--chance-of-sucess-until-hit', help='Probability of starting to return error(0-100). If 100 informed it has 100 percent of chance of returning 200. When the first error hits, the next calls will be errors', required=False)
def chaos(start_at_request, chance_of_sucess, chance_of_sucess_until_hit):
    """Start api to test all types of failures randomically. It will shut down the server when error criteria appers."""
    # Default profile when no option was given: 90% success rate.
    if(start_at_request is None and chance_of_sucess is None and chance_of_sucess_until_hit is None):
        chance_of_sucess = 90
    config.create_config("chaos", start_at_request, chance_of_sucess, chance_of_sucess_until_hit)
    app = Flask(__name__)
    app.register_blueprint(api)
    app.run(port=8080)
36331 | <gh_stars>1-10
import os
from setuptools import setup, find_packages
# Absolute directory of this setup.py, so data files resolve regardless of
# the current working directory.
DIR_PATH = os.path.dirname(os.path.abspath(__file__))

# Long description for PyPI comes straight from the README.
with open(os.path.join(DIR_PATH, 'README.md')) as file:
    long_description = file.read()

# BUGFIX: read the requirements inside a context manager; the original
# opened the file inline and never closed the handle.
with open(os.path.join(DIR_PATH, 'requirements.txt')) as req_file:
    install_requires = [line.rstrip('\n') for line in req_file]

setup(
    name='dmt',
    version='0.1.10',
    packages=find_packages(),
    author='kedod',
    author_email='<EMAIL>',
    description='Log time from toggl entries to Jira smoothly.',
    url='https://github.com/kedod/dmt',
    download_url='https://github.com/kedod/dmt/archive/v0.1.10.tar.gz',
    keywords=['toggl', 'jira', 'logger', 'logging'],
    long_description=long_description,
    include_package_data=True,
    install_requires=install_requires,
    package_data={
        '': ['*.yaml']
    }
)
| StarcoderdataPython |
188371 | from random import choice
class Game:
    """Tic-tac-toe game logic driven by a 3x3 grid of GUI buttons.

    Cells are button-like objects supporting ``cell["text"]`` etc.; the
    first player is picked at random between "X" and "0".
    """

    def __init__(self, gui: object):
        self.gui: object = gui
        self.field = gui.grid.field
        # Randomly choose which symbol moves first.
        self.current_sym: str = choice(["X", "0"])
        # Wire every button to click(); defaults bind row/col per lambda.
        for row in range(3):
            for col in range(3):
                cell = self.gui.grid.field[row][col]
                cell["command"] = lambda row=row, col=col: self.click(row, col)

    def click(self, row: int, col: int) -> None:
        """Handle a button press: place the current symbol if the cell is free."""
        cell = self.field[row][col]
        if cell["text"] != "":
            return  # occupied cell: ignore the click
        cell["text"] = self.current_sym
        winning_cells = self.check_win(self.current_sym)
        if winning_cells is None:
            self.change_current_sym()
        else:
            self.show_winner(winning_cells)
            self.end_game()

    def change_current_sym(self) -> None:
        """Toggle the active player symbol between "X" and "0"."""
        self.current_sym = "0" if self.current_sym == "X" else "X"

    def check_win(self, sym: str):
        """Return the three winning cells for *sym*, or None.

        Lines are checked in the original priority order: row i before
        column i for each i, then the main and anti diagonals.
        """
        coord_lines = []
        for i in range(3):
            coord_lines.append(((i, 0), (i, 1), (i, 2)))  # row i
            coord_lines.append(((0, i), (1, i), (2, i)))  # column i
        coord_lines.append(((0, 0), (1, 1), (2, 2)))      # main diagonal
        coord_lines.append(((2, 0), (1, 1), (0, 2)))      # anti-diagonal
        for coords in coord_lines:
            cells = tuple(self.field[r][c] for r, c in coords)
            if self.check_line(cells[0], cells[1], cells[2], sym):
                return cells
        return None

    def check_line(
            self,
            grid_cell1: object,
            grid_cell2: object,
            grid_cell3: object,
            sym: str
    ) -> bool:
        """True when all three cells carry *sym*."""
        return all(cell["text"] == sym
                   for cell in (grid_cell1, grid_cell2, grid_cell3))

    def show_winner(self, *grid_cells: tuple) -> None:
        """Highlight the winning cells with their active background colour."""
        for cell in grid_cells[0]:
            cell["bg"] = cell["activebackground"]

    def end_game(self) -> None:
        """Disable further input on the grid, announcing the winning symbol."""
        self.gui.grid.disable_field(self.current_sym)

    def run(self) -> None:
        """Start the GUI main loop."""
        self.gui.window.mainloop()
| StarcoderdataPython |
1625516 | #!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: <NAME>
# @author: <NAME>
__copyright__ = "Copyright 2012, Locaweb IDC"
from netl2api.l2api.exceptions import *
from netl2api.l2api.autocache import L2APIAutoCache
from netl2api.l2api.transport import SysSSHTransport #, TransportManager
__all__ = ["L2API"]
class L2API(L2APIAutoCache):
    """
    Base class for L2 operations.
    Vendor-specific classes should extend this, declare 'self.__VENDOR__' (vendor str),
    'self.__HWTYPE__' (hardware type str), 'self.prompt_mark', 'self.error_mark' and
    'self.config_term_cmd' (see transport classes for understand these three last parameters).
    Ex.:
       class ExampleVendorAPI(L2API):
           def __init__(self, *args, **kwargs):
               self.__VENDOR__ = "ExampleVendor"
               self.__HWTYPE__ = "stackable_switch"
               self.prompt_mark = "#"
               self.error_mark = "% Error:"
               self.config_term_cmd = "terminal length 0"
               super(ExampleVendorAPI, self).__init__(*args, **kwargs)
           ...
           def show_version(self):
               ...
           def show_interfaces(self):
               ....
    """
    def __init__(self, host=None, port=None, username=None, passwd=None, transport=None):
        super(L2API, self).__init__()
        # Subclasses must declare vendor/hardware identifiers before calling us.
        if not hasattr(self, "__VENDOR__"):
            raise InvalidParameter("'self.__VENDOR__' is not defined (class '%s')" % self.__class__.__name__)
        if not hasattr(self, "__HWTYPE__"):
            raise InvalidParameter("'self.__HWTYPE__' is not defined (class '%s')" % self.__class__.__name__)
        if not host or type(host) not in (str, unicode):
            raise InvalidParameter("'host' parameter is not defined or invalid")
        if not username or type(username) not in (str, unicode):
            raise InvalidParameter("'username' parameter is not defined or invalid")
        if not passwd or type(passwd) not in (str, unicode):
            raise InvalidParameter("'passwd' parameter is not defined or invalid")
        # Defaults for the CLI markers when the subclass did not set them.
        if not hasattr(self, "prompt_mark"):
            self.prompt_mark = "#"
        if not hasattr(self, "error_mark"):
            self.error_mark = None
        if not hasattr(self, "config_term_cmd"):
            self.config_term_cmd = None
        if not transport:
            transport = SysSSHTransport.SysSSH
        # Per-method result cache: TTL in seconds plus the mutating calls that
        # invalidate each cached view (consumed by L2APIAutoCache).
        self.use_cache = True
        self.cache_config = {
            "show_system": { "ttl": 600,
                             "clear_on": [] },
            "show_hostname": { "ttl": 600,
                               "clear_on": [] },
            "show_version": { "ttl": 600,
                              "clear_on": [] },
            "show_interfaces": { "ttl": 120,
                                 "clear_on": ["enable_interface", "disable_interface",
                                              "change_interface_description"] },
            "show_lldp": { "ttl": 180,
                           "clear_on": [] },
            "show_arp": { "ttl": 180,
                          "clear_on": [] },
            "show_uplinks": { "ttl": 180,
                              "clear_on": [] },
            "show_vlans": { "ttl": 180,
                            "clear_on": ["create_vlan", "destroy_vlan",
                                         "enable_vlan", "disable_vlan",
                                         "change_vlan_description",
                                         "interface_attach_vlan", "interface_detach_vlan",
                                         "lag_attach_vlan", "lag_detach_vlan"] },
            "show_lags": { "ttl": 180,
                           "clear_on": ["create_lag", "destroy_lag",
                                        "enable_lag", "disable_lag",
                                        "change_lag_description",
                                        "lag_attach_interface", "lag_detach_interface"] },
        }
        #self.transport = TransportManager.TransportPool(transport=transport, max_connections=2, host=host, port=port,
        #                                                username=username, passwd=passwd, prompt_mark=self.prompt_mark,
        #                                                error_mark=self.error_mark, config_term_cmd=self.config_term_cmd)
        # BUGFIX: forward the password to the transport; the keyword value had
        # been mangled into an invalid '<PASSWORD>' token (a syntax error).
        self.transport = transport(host=host, port=port, username=username, passwd=passwd, prompt_mark=self.prompt_mark,
                                   error_mark=self.error_mark, config_term_cmd=self.config_term_cmd)
    # --- Abstract operations: vendor subclasses must override these. ---
    def dump_config(self):
        raise NotImplementedError("Not implemented")
    def save_config(self):
        raise NotImplementedError("Not implemented")
    def show_system(self):
        raise NotImplementedError("Not implemented")
    def show_hostname(self):
        raise NotImplementedError("Not implemented")
    def show_version(self):
        raise NotImplementedError("Not implemented")
    def show_interfaces(self, interface_id=None):
        raise NotImplementedError("Not implemented")
    def show_lldp(self, interface_id=None):
        raise NotImplementedError("Not implemented")
    def show_arp(self, interface_id=None):
        raise NotImplementedError("Not implemented")
    def show_uplinks(self):
        raise NotImplementedError("Not implemented")
    def show_vlans(self, vlan_id=None):
        raise NotImplementedError("Not implemented")
    def show_lags(self, lag_id=None):
        raise NotImplementedError("Not implemented")
    def create_vlan(self, vlan_id=None, vlan_description=None):
        raise NotImplementedError("Not implemented")
    def create_lag(self, lag_id=None, lag_description=None):
        raise NotImplementedError("Not implemented")
    def enable_interface(self, interface_id=None):
        raise NotImplementedError("Not implemented")
    def enable_vlan(self, vlan_id=None):
        raise NotImplementedError("Not implemented")
    def enable_lag(self, lag_id=None):
        raise NotImplementedError("Not implemented")
    def disable_interface(self, interface_id=None):
        raise NotImplementedError("Not implemented")
    def disable_vlan(self, vlan_id=None):
        raise NotImplementedError("Not implemented")
    def disable_lag(self, lag_id=None):
        raise NotImplementedError("Not implemented")
    def change_interface_description(self, interface_id=None, interface_description=None):
        raise NotImplementedError("Not implemented")
    def change_vlan_description(self, vlan_id=None, vlan_description=None):
        raise NotImplementedError("Not implemented")
    def change_lag_description(self, lag_id=None, lag_description=None):
        raise NotImplementedError("Not implemented")
    def destroy_vlan(self, vlan_id=None):
        raise NotImplementedError("Not implemented")
    def destroy_lag(self, lag_id=None):
        raise NotImplementedError("Not implemented")
    def interface_attach_vlan(self, interface_id=None, vlan_id=None, tagged=True):
        raise NotImplementedError("Not implemented")
    def interface_detach_vlan(self, interface_id=None, vlan_id=None, tagged=True):
        raise NotImplementedError("Not implemented")
    def lag_attach_vlan(self, lag_id=None, vlan_id=None, tagged=True):
        raise NotImplementedError("Not implemented")
    def lag_detach_vlan(self, lag_id=None, vlan_id=None, tagged=True):
        raise NotImplementedError("Not implemented")
    def lag_attach_interface(self, lag_id=None, interface_id=None):
        raise NotImplementedError("Not implemented")
    def lag_detach_interface(self, lag_id=None, interface_id=None):
        raise NotImplementedError("Not implemented")
#    def __del__(self):
#        if self.transport is not None:
#            try:
#                self.transport.close()
#            except Exception:
#                pass
| StarcoderdataPython |
3342390 | from tkinter import filedialog, Tk
from typing import List
import matplotlib.pyplot as plt
import numpy as np
from lmfit import Model
from lmfit.models import GaussianModel, LinearModel
from pandas import read_csv, read_hdf, DataFrame, set_option
from scipy import fftpack, interpolate
from scipy.optimize import curve_fit
from seaborn import set_style
from range_selector import RangeTool
# Widen pandas column display for the parameter tables printed below.
set_option('column_space', 80)
set_style("whitegrid")
# set_palette(["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"])
e = 2.7182818
def find_nearest(array, value):
    """Return the element of *array* closest to *value* in absolute distance."""
    return array[np.abs(array - value).argmin()]
def pick_dat(cols, initdir='RDAT', title="Select file"):
    """
    Data reader that is called within many other functions.
    :param initdir: This is the directory that the function will open by default to look for the data (.csv or .h5).
    :param title: The message to display at the top of the dialogue box.
    :param cols: Headers to give to the data.
    :return: Tuple ``(DataFrame, filename)`` for .csv/.h5 selections;
             implicitly None for any other file type.
    """
    root = Tk()
    # NOTE(review): hard-coded per-user Windows path -- breaks on any other
    # machine; consider deriving the base directory from configuration.
    root.filename = filedialog.askopenfilename(initialdir="C:\\Users\Josh\IdeaProjects\PulsedNMR\{}".format(initdir),
                                               title=title)
    # Keep only the file name (path components are '/'-separated by Tk).
    filename_parts = root.filename.split('/')[-1]
    if 'csv' in root.filename:
        data = read_csv(root.filename, names=cols, engine='c')
        return data, filename_parts
    elif 'h5' in root.filename:
        data = read_hdf(root.filename, 'table', names=cols, engine='c')
        return data, filename_parts
    else:
        # Unsupported extension: warn and fall through (returns None).
        print('Unexpected file type. Choose either a .csv or .h5 file.')
'''
Free induction decay (FID). This is the signal of M_x or M_y decaying after a pi/2 pulse.
'''
def T2_from_echo(M_xy, M0, tau):
    """Spin-spin relaxation time T2 extracted from a spin-echo amplitude.

    Inverts ``M_xy = M0 * exp(-2*tau/T2)`` for T2.

    :param M_xy: Magnetization in the xy-plane after the echo.
    :param M0: Initial magnetization in the z direction.
    :param tau: Time between the pi/2 and the pi pulse.
    :return: Spin-spin relaxation time.
    """
    decay_ratio = M_xy / M0
    return -2 * tau / np.log(decay_ratio)
def echo_as_T2(t, M0, T2, c, ph):
    """Exponential echo-decay model ``M0 * exp(-(t - ph)/T2) + c``.

    :param t: Time (scalar or array).
    :param M0: Initial magnetization in z direction.
    :param T2: Spin-spin relaxation time.
    :param c: Intercept to compensate for DC-offset.
    :param ph: Phase difference (time shift).
    :return: Magnetization in the xy-plane.
    """
    shifted_t = t - ph
    return M0 * np.exp(-shifted_t / T2) + c
def FID_Exponential_fit():
    """
    Fit an exponential decay to the tail of a free-induction-decay signal.

    A mixture of smoothing and differentiating is used to determine the point
    at which the FID shape is dominantly exponential decay, then echo_as_T2 is
    fitted to the data from that point onward. Results are printed as a
    DataFrame and shown in a maximised matplotlib window.
    """
    dat, filename = pick_dat(['t', 'm'])
    # Shift the time axis so it starts at zero.
    dat.loc[:, 't'] += abs(np.min(dat['t']))
    maxi = np.max(dat['m'])
    try:
        # Smooth, differentiate twice, and take the first inflection point
        # after the signal maximum as the start of the exponential region.
        smoothdat = interpolate.UnivariateSpline(dat['t'], dat['m'], k=5, s=200)
        grad1 = np.gradient(smoothdat(dat['t']))
        grad1_2 = np.gradient(grad1)
        grad2 = interpolate.UnivariateSpline(dat['t'], grad1_2, k=3, s=0)
        max_pos = dat['t'][int(np.median(np.where(dat['m'] == find_nearest(dat['m'], maxi))[0]))]
        roots_range = range(0, len(grad2.roots()))
        f = [find_nearest(dat['t'], grad2.roots()[p]) for p in roots_range]
        s = [f[i] for i in roots_range if f[i] > max_pos]
        b = np.where(dat['t'] == s[0])[0][0]
    except ValueError:
        # Fall back to starting the fit at the raw signal maximum.
        b = int(np.median(np.where(dat['m'] == maxi)[0]))
    # Estimate amplitude, offset and 1/e decay time to seed the fit.
    mini = np.min(dat['m'][b:])
    mx = np.max(dat['m'][b:]) - mini
    max_loc = int(np.median(np.where(dat['m'] == find_nearest(dat['m'], mx + mini))))
    max_loc_time = dat['t'][max_loc]
    decay_con_amp = mx / e
    decay_con_amp_pos = int(
        np.median(np.where(dat['m'] == find_nearest(dat['m'], decay_con_amp + mini))))
    decay_con_amp_time = dat['t'][decay_con_amp_pos]
    decay_time = decay_con_amp_time - max_loc_time
    # Parameter order matches echo_as_T2: (M0, T2, c, ph).
    initial = np.array([mx, decay_time, mini, max_loc_time])
    boundss = (
        [mx * 0.85, decay_time * 0.7, mini * 0.9, max_loc_time * 0.9], [mx * 1.15, decay_time * 1.3, (mini + 0.5) * 1.2,
                                                                        max_loc_time * 1.1])
    popt, pcov = curve_fit(echo_as_T2, xdata=dat['t'][b:], ydata=dat['m'][b:], p0=initial, maxfev=30000,
                           method='trf', bounds=boundss)
    errs = np.diag(pcov)  # NOTE(review): these are variances, not std errors — confirm intended.
    datas1 = np.array([popt, errs, initial])
    datas2 = np.transpose(datas1)
    vals = DataFrame(datas2, columns=['Parameter', 'Uncertainty', 'Initial'], index=['M0', 'T2', 'Intercept', 'Phase'])
    print('\n', vals)
    # Plot the raw data, the fitted curve, and guide lines at the amplitude
    # and 1/e levels / times used for the initial guess.
    plt.title('{}'.format(filename))
    plt.plot(dat['t'], dat['m'], '+', ms=1.4, color='r')
    plt.plot(dat['t'][b:], echo_as_T2(dat['t'][b:], *popt), ls='--', lw=2, color='k')
    plt.xlabel("Time (s)")
    plt.ylabel("Magnetization (A/m)")
    plt.axhline(mx + mini)
    plt.axhline(decay_con_amp + mini)
    plt.axvline(max_loc_time)
    plt.axvline(decay_con_amp_time)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
def range_to_list():
    """
    Load a dataset plus a companion ranges file and slice the data into ranges.

    The ranges file (saved earlier by pick_ranges) shares the dataset's file
    name and lists lower/upper bounds and indices for each selected region.
    THIS IS CONFIGURED FOR MY COMPUTER, CHANGE THE DIRECTORY TO USE.
    :return: Tuple ``(xranges, yranges, xrange, yrange, filename1, dat1)`` —
        the same slices both as index-keyed dicts and as lists, plus the file
        name and the full DataFrame.
    """
    dat1, filename1 = pick_dat(['t', 'm'], "RDAT_Test", "Select dataset to draw from")
    dat2 = read_csv("C:\\Users\\Josh\\IdeaProjects\\PulsedNMR\\Ranges\\{}".format(filename1),
                    names=['Lower Bound', 'LowerIndex', 'Upper Bound', 'UpperIndex'])
    # NOTE: the local names below shadow the Python 2 builtin `xrange`;
    # harmless here but potentially confusing.
    xrange = []
    yrange = []
    xranges = {}
    yranges = {}
    # Bound-method aliases to avoid repeated attribute lookups in the loop.
    x_append = xrange.append
    y_append = yrange.append
    for o in range(0, len(dat2)):
        # Upper index is inclusive, hence the +1 in the slice.
        x_append((dat1['t'][dat2['LowerIndex'][o]:dat2['UpperIndex'][o] + 1]).values)
        y_append((dat1['m'][dat2['LowerIndex'][o]:dat2['UpperIndex'][o] + 1]).values)
    for o in range(0, len(xrange)):
        xranges[o] = xrange[o]
        yranges[o] = yrange[o]
    return xranges, yranges, xrange, yrange, filename1, dat1
def echo_fits():
    """
    Fit each spin echo with a Gaussian-plus-linear model, then fit the decay
    of the echo maxima with echo_as_T2 to extract T2.

    For every user-selected range (one per echo, from range_to_list) a
    Gaussian peak on a linear background is fitted; the echo height is the
    Gaussian amplitude on top of the background evaluated at the centroid.
    The (centroid, height) points are then fitted with echo_as_T2; the result
    is printed and plotted in a maximised matplotlib window.
    """
    xrs, yrs, xr, yr, filename, dat1 = range_to_list()
    cents: List[float] = []
    cents_uncert: List[float] = []
    heights: List[float] = []
    heights_uncert: List[float] = []
    for i in range(0, len(xrs)):
        mdl = GaussianModel(prefix='G_')
        lne = LinearModel(prefix='L_')
        params = mdl.guess(yrs[i], x=xrs[i])
        params += lne.guess(yrs[i], x=xrs[i])
        max_y = np.max(yrs[i])
        min_y = np.min(yrs[i])
        # BUG FIX: the x extrema were previously taken from yrs[i], which made
        # predicted_slope identically equal to 1 instead of rise/run.
        max_x = np.max(xrs[i])
        min_x = np.min(xrs[i])
        predicted_slope = (max_y - min_y) / (max_x - min_x)
        # BUG FIX: keep the bound ordering valid for either sign of the slope
        # (with a positive slope the original min=1.1*s, max=0.9*s had min > max).
        slope_lo, slope_hi = sorted((predicted_slope * 1.1, predicted_slope * 0.9))
        params.add('L_slope', value=predicted_slope, min=slope_lo, max=slope_hi)
        # NOTE(review): these intercept bounds are inverted if min_y < 0 —
        # assumed fine for all-positive magnetization data; confirm.
        params.add('L_intercept', value=min_y, min=min_y * 0.9, max=min_y * 1.1)
        params.add('G_height', value=max_y - min_y, min=(max_y - min_y) * 0.99, max=(max_y - min_y) * 1.05)
        model = mdl + lne
        result = model.fit(yrs[i], params, x=xrs[i], method='leastsq')
        cent: float = result.params['G_center'].value
        amp: float = result.params['G_height'].value
        inter: float = result.params['L_intercept'].value
        grad: float = result.params['L_slope'].value
        # Echo height = Gaussian amplitude plus the background at the centroid.
        height: float = amp + ((cent * grad) + inter)
        heights.append(height)
        cents.append(cent)
        cents_uncert.append(result.params['G_center'].stderr)
        # Quadrature propagation of the fit uncertainties through
        # height = amp + cent * grad + inter (the x resolution stands in
        # for the centroid uncertainty).
        partial_amp = 1
        partial_grad = cent
        partial_x = grad
        partial_inter = 1
        amp_term = partial_amp * result.params['G_height'].stderr
        grad_term = partial_grad * result.params['L_slope'].stderr
        x_term = partial_x * np.mean(np.diff(xrs[i]))
        inter_term = partial_inter * result.params['L_intercept'].stderr
        height_uncert = np.sqrt(amp_term ** 2 + grad_term ** 2 + x_term ** 2 + inter_term ** 2)
        heights_uncert.append(height_uncert)
    heights = np.array(heights)
    cents = np.array(cents)
    maxy = np.max(heights)
    miny = np.min(heights)
    # Echo closest to max/e gives a first guess for T2.
    decay_pos = np.where(heights == find_nearest(heights, maxy / e))[0][0]
    decay_pos_time = cents[decay_pos]
    avg_y_sep = abs(np.mean(np.diff(heights)))
    efit = Model(echo_as_T2)
    param = efit.make_params()
    param.add('M0', value=maxy, min=maxy * 0.8, max=maxy + (avg_y_sep * 2))
    param.add('T2', value=decay_pos_time, min=decay_pos_time * 0.1, max=decay_pos_time * 1.5)
    param.add('c', value=miny * 0.3, min=miny * 0.1, max=miny * 1)
    param.add('ph', value=cents[0] * 0.1, min=0, max=cents[0] * 1)
    # Weight each point by its combined (data-step + propagated) uncertainty
    # relative to its height.
    fit_weights = np.sqrt(np.mean(np.diff(dat1['m'])) ** 2 +
                          np.array(heights_uncert) ** 2) / heights
    result_2 = efit.fit(heights, param, t=cents, method='leastsq', weights=fit_weights)
    print(result_2.fit_report())
    print('\n', result_2.params.pretty_print(fmt='e', precision=2))
    ax = plt.gca()
    ax.set_xlabel('Time (s)', fontsize=14)
    ax.set_ylabel('Magnetization (A/m)', fontsize=14)
    xes = np.linspace(np.min(cents), np.max(cents), 100)
    y = efit.eval(t=xes, params=result_2.params)
    plt.plot(xes, y, antialiased=True)
    plt.plot(cents, heights, 'x', ms=8, color='k')
    plt.plot(dat1['t'], dat1['m'], lw=2, antialiased=True,
             color='#4a4a4a', zorder=1)
    plt.title(filename)
    plt.xlim(left=0, right=np.max(cents) * 1.1)
    plt.ylim(bottom=0, top=result_2.params['M0'].value * 1.3)
    # Guide lines at the fitted M0 and its 1/e level.
    plt.axhline(result_2.params['M0'].value, color='k', ls='--', alpha=0.7, lw=1, zorder=2)
    plt.axhline(result_2.params['M0'].value / e, color='k', ls='--', alpha=0.7, lw=1, zorder=2)
    plt.text(0.9, 0.9, "T_1: {:.4f} s".format(result_2.params['T2'].value), horizontalalignment='center',
             verticalalignment="center",
             transform=ax.transAxes,
             bbox={'pad': 8, 'fc': 'w'}, fontsize=14)
    plt.tight_layout()
    plt.tick_params(axis='both', which='major', labelsize=13)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
def simple_echo_fits():
    """
    Fit the echo-height decay using only the raw maximum of each echo.

    Simpler alternative to echo_fits: no per-echo Gaussian fit — the highest
    sample of each selected range is taken as the echo height, and echo_as_T2
    is fitted to the (peak time, peak height) points.
    """
    xrs, yrs, xr, yr, filename, dat1 = range_to_list()
    length = len(yrs)
    # Peak height, its index, and its time for every selected echo range.
    max_y = [np.max(yrs[i]) for i in range(length)]
    max_y_loc = [np.where(yrs[i] == max_y[i])[0][0] for i in range(length)]
    cents = [xrs[i][max_y_loc[i]] for i in range(length)]
    heights = max_y
    # TODO: Find a better value for the uncertainty on y-values.
    heights = np.array(heights)
    cents = np.array(cents)
    maxy = np.max(heights)
    miny = np.min(heights)
    # Echo closest to max/e gives a first guess for T2.
    decay_pos = np.where(heights == find_nearest(heights, maxy / e))[0][0]
    decay_pos_time = cents[decay_pos]
    avg_y_sep = abs(np.mean(np.diff(heights)))
    efit = Model(echo_as_T2)
    param = efit.make_params()
    param.add('M0', value=maxy, min=maxy * 0.8, max=maxy + (avg_y_sep * 3))
    param.add('T2', value=decay_pos_time, min=decay_pos_time * 0.1, max=decay_pos_time * 1.5)
    param.add('c', value=miny * 0.3, min=miny * 0.1, max=miny * 1.2)
    param.add('ph', value=cents[0] * 0.5, min=0, max=cents[0] * 1)
    result_2 = efit.fit(heights, param, t=cents, method='leastsq', weights=np.mean(np.diff(dat1['m'])) / heights)
    print(result_2.fit_report())
    print('\n', result_2.params.pretty_print())
    ax = plt.gca()
    ax.set_xlabel('Time (s)', fontsize=14)
    ax.set_ylabel('Magnetization (A/m)', fontsize=14)
    xes = np.linspace(np.min(cents), np.max(cents), 100)
    y = efit.eval(t=xes, params=result_2.params)
    plt.plot(xes, y, antialiased=True)
    plt.plot(cents, heights, 'x', ms=8, color='k')
    plt.plot(dat1['t'], dat1['m'], lw=2, antialiased=True,
             color='#4a4a4a', zorder=1)
    plt.title(filename)
    plt.xlim(left=0, right=np.max(cents) * 1.1)
    # plt.ylim(bottom=0, top=result_2.params['M0'].value * 1.1)
    # Guide lines at the fitted M0 and its 1/e level.
    plt.axhline(result_2.params['M0'].value, color='k', ls='--', alpha=0.7, lw=1, zorder=2)
    plt.axhline(result_2.params['M0'].value / e, color='k', ls='--', alpha=0.7, lw=1, zorder=2)
    # NOTE(review): the label says "T_1" but the value plotted is the fitted
    # T2 parameter — confirm which relaxation time is meant.
    plt.text(0.9, 0.9, "T_1: {:.4f} s".format(result_2.params['T2'].value), horizontalalignment='center',
             verticalalignment="center",
             transform=ax.transAxes,
             bbox={'pad': 8, 'fc': 'w'}, fontsize=14)
    plt.tight_layout()
    plt.tick_params(axis='both', which='major', labelsize=13)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
def interped_fourier_transformer():
    """
    Fourier transforms the combined FID signals of different chemical sites to give a frequency (NMR) spectrum.
    This is done after having used radial basis function interpolation to remove noise and smooth out high frequency
    signals that are not resolvable.
    """
    dat, filename = pick_dat(['t', 'm'], 'RDAT')
    # Resample on a grid twice as dense as the original data.
    len2 = 2 * len(dat['m'])
    xs = np.linspace(np.min(dat['t']), np.max(dat['t']), len2)
    f = interpolate.Rbf(dat['t'], dat['m'], smooth=3, function='gaussian', epsilon=np.mean(np.diff(xs)) * 3)
    ys = f(xs)
    # Show the (sign-flipped) smoothed signal first.
    plt.plot(xs, -ys)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
    # NOTE(review): sample_rate is derived from the ORIGINAL spacing, but the
    # FFT is taken over the 2x-resampled signal — the frequency axis may be
    # off by a factor of two; confirm.
    sample_rate = round(1 / np.mean(np.diff(dat['t'])), 11)
    length = len(xs)
    fo = fftpack.fft(-ys)
    freq2 = fftpack.fftfreq(length, 1 / sample_rate)
    halfln = int(length / 2)  # keep only the positive-frequency half
    plt.title('{}'.format(filename))
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.plot(dat['t'], dat['m'])
    plt.show()
    # Magnitude spectrum; index 0 (the DC component) is skipped.
    plt.title('{} Fourier Transformed'.format(filename))
    plt.plot(freq2[1:halfln], abs(fo[1:halfln]))
    plt.xlim(0, 2000)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
def fourier_transformer():
    """
    Fourier transforms the combined FID signals of different chemical sites to give a frequency (NMR) spectrum.
    """
    dat, filename = pick_dat(['t', 'm'], 'RDAT', 'Select data to be Fourier Transformed')
    sample_rate = round(1 / np.mean(np.diff(dat['t'])), 11)
    length = len(dat['t'])
    fo = fftpack.fft(dat['m'])
    # Frequency axis per FFT bin. NOTE(review): the 1e6 factor presumably
    # converts a microsecond-based time axis into Hz — confirm the units of
    # the 't' column.
    freq4 = [1e6 * x * sample_rate / length for x in np.array(range(0, length))]
    halfln = int(length / 2)  # keep only the positive-frequency half
    # Show the raw time-domain signal first.
    plt.title('{}'.format(filename))
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.plot(dat['t'], dat['m'])
    plt.show()
    # Magnitude spectrum; index 0 (the DC component) is skipped.
    fig, ax = plt.subplots()
    plt.title('{} Fourier Transformed'.format(filename))
    figure, = ax.plot(freq4[1:halfln], abs(fo[1:halfln]))
    # Sel = RangeTool(freq4[1:halfln], abs(fo[1:halfln]), figure, ax, 'thing')
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
def pick_ranges():
    """
    Tool to read data and present it graphically ready for data ranges, to be used in fitting, to be made. Press tab
    to mark the lower bound, shift to mark the upper bound, delete to remove the last range selected, enter to open a
    dialog box to save the ranges as a .csv file. Exit closes the plot without saving ranges.
    """
    dat, filename = pick_dat(['t', 'm'], 'RDAT', 'Select file to pick ranges in')
    fig, ax = plt.subplots()
    # NOTE(review): the title says "Fourier Transformed" but the raw
    # time-domain data is plotted here — likely copied from
    # fourier_transformer; confirm.
    plt.title('{} Fourier Transformed'.format(filename))
    figure, = ax.plot(dat['t'], dat['m'])
    # RangeTool installs the key bindings described in the docstring.
    Sel = RangeTool(dat['t'], dat['m'], figure, ax, filename)
    fig_manager = plt.get_current_fig_manager()
    fig_manager.window.showMaximized()
    plt.show()
# Script entry point: launch the simple echo-peak fitting workflow.
simple_echo_fits()
| StarcoderdataPython |
1608545 | <filename>mayan/apps/storage/tests/test_management_commands.py<gh_stars>100-1000
from django.core import management
from django.utils.encoding import force_text
from mayan.apps.documents.tests.base import GenericDocumentTestCase
from mayan.apps.documents.storages import storage_document_files
from mayan.apps.mimetype.api import get_mimetype
from .mixins import StorageProcessorTestMixin
class StorageProcessManagementCommandTestCase(
    StorageProcessorTestMixin, GenericDocumentTestCase
):
    """Tests for the `storage_process` management command on document files."""

    def _call_command(self, reverse=None):
        # Run the storage_process command over DocumentFile instances,
        # logging to the mixin-provided test file path.
        options = {
            'app_label': 'documents',
            'defined_storage_name': storage_document_files.name,
            'log_file': force_text(s=self.path_test_file),
            'model_name': 'DocumentFile',
            'reverse': reverse
        }
        management.call_command(command_name='storage_process', **options)
    def _upload_and_call(self):
        # Upload a test document under a plain filesystem storage, then
        # switch the defined storage to the zip-compressing passthrough
        # backend and process the existing files into the new format.
        self.defined_storage.dotted_path = 'django.core.files.storage.FileSystemStorage'
        self.defined_storage.kwargs = {
            'location': self.document_storage_kwargs['location']
        }
        self._upload_test_document()
        self.defined_storage.dotted_path = 'mayan.apps.storage.backends.compressedstorage.ZipCompressedPassthroughStorage'
        self.defined_storage.kwargs = {
            'next_storage_backend': 'django.core.files.storage.FileSystemStorage',
            'next_storage_backend_arguments': {
                'location': self.document_storage_kwargs['location']
            }
        }
        self._call_command()
    def test_storage_processor_command_forwards(self):
        # After forward processing the on-disk file should be a zip archive,
        # while the stored checksum still matches the (logical) content.
        self._upload_and_call()
        with open(file=self.test_document.file_latest.file.path, mode='rb') as file_object:
            self.assertEqual(
                get_mimetype(file_object=file_object),
                ('application/zip', 'binary')
            )
        self.assertEqual(
            self.test_document.file_latest.checksum,
            self.test_document.file_latest.checksum_update(save=False)
        )
    def test_processor_forwards_and_reverse(self):
        # Forward then reverse processing should restore the original,
        # uncompressed on-disk representation without altering the checksum.
        self._upload_and_call()
        self._call_command(reverse=True)
        self.defined_storage.dotted_path = 'django.core.files.storage.FileSystemStorage'
        self.defined_storage.kwargs = {
            'location': self.document_storage_kwargs['location']
        }
        with open(file=self.test_document.file_latest.file.path, mode='rb') as file_object:
            self.assertNotEqual(
                get_mimetype(file_object=file_object),
                ('application/zip', 'binary')
            )
        self.assertEqual(
            self.test_document.file_latest.checksum,
            self.test_document.file_latest.checksum_update(save=False)
        )
| StarcoderdataPython |
1693212 | <gh_stars>1-10
import random
import time
class Nonogramm:
    """
    Randomized nonogram (picross) solver.

    The grid is held in three parallel sizeY x sizeX matrices:
    ``field`` (current candidate solution), ``fieldFound`` (cells deduced
    logically) and ``fieldLock`` (1 where a cell's value is fixed).
    ``inputInfoX``/``inputInfoY`` are the per-column / per-row run-length
    clues.
    """

    def __init__(self, size, inputInfo):
        # size = [columns, rows]; inputInfo = [column clues, row clues].
        self.sizeX = size[0]
        self.sizeY = size[1]
        self.inputInfoX = inputInfo[0]
        self.inputInfoY = inputInfo[1]
        self.field = [[0 for x in range(self.sizeX)] for y in range(self.sizeY)]
        self.fieldFound = [[0 for x in range(self.sizeX)] for y in range(self.sizeY)]
        self.fieldLock = [[0 for x in range(self.sizeX)] for y in range(self.sizeY)]
        # Count of locked cells found by logical deduction.
        self.certainty = 0
        # 0: unknown
        # 1: full
        # 2: empty
    def clear(self):
        """Reset the candidate and deduced grids (locks are kept)."""
        self.field = [[0 for x in range(self.sizeX)] for y in range(self.sizeY)]
        self.fieldFound = [[0 for x in range(self.sizeX)] for y in range(self.sizeY)]
    def printNG(self):
        """Debug placeholder."""
        print("Testprint.")
    def printInfo(self):
        """Print the column and row clues."""
        print(self.inputInfoX)
        print(self.inputInfoY)
    def printField(self):
        """Print the current candidate grid, one row per line."""
        for y in range(self.sizeY):
            print(self.field[y])
    def printFieldLock(self):
        """Print the lock grid, one row per line."""
        for y in range(self.sizeY):
            print(self.fieldLock[y])
    def printinputInfo(self):
        """Print the clues with labels."""
        print("inputInfoX")
        print(self.inputInfoX)
        print("inputInfoY")
        print(self.inputInfoY)
    def findBeginning(self):
        """
        Lock every row/column whose clue admits exactly one layout, i.e.
        where the runs plus the mandatory single gaps fill the line exactly.
        """
        for y in range(self.sizeY):
            if sum(self.inputInfoY[y]) + len(self.inputInfoY[y]) - 1 == self.sizeX:
                # Expand the clue into the literal cell pattern:
                # runs separated by single zeros.
                tempInputInfoY = []
                tempInputInfoY2 = []
                for item in range(len(self.inputInfoY[y])):
                    tempInputInfoY.append(self.inputInfoY[y][item])
                    if item != (len(self.inputInfoY[y]) - 1):
                        tempInputInfoY.append(0)
                for item in tempInputInfoY:
                    if item != 0:
                        for amounthOfOnes in range(item):
                            tempInputInfoY2.append(1)
                    else:
                        tempInputInfoY2.append(0)
                self.fieldFound[y] = tempInputInfoY2
                self.fieldLock[y] = [1] * self.sizeX
        for x in range(self.sizeX):
            if sum(self.inputInfoX[x]) + len(self.inputInfoX[x]) - 1 == self.sizeY:
                # Same expansion for a fully-determined column.
                tempInputInfoX = []
                tempInputInfoX2 = []
                for item in range(len(self.inputInfoX[x])):
                    tempInputInfoX.append(self.inputInfoX[x][item])
                    if item != (len(self.inputInfoX[x]) - 1):
                        tempInputInfoX.append(0)
                for item in tempInputInfoX:
                    if item != 0:
                        for amounthOfOnes in range(item):
                            tempInputInfoX2.append(1)
                    else:
                        tempInputInfoX2.append(0)
                for y in range(self.sizeY):
                    if tempInputInfoX2[y] == 1:
                        self.fieldFound[y][x] = tempInputInfoX2[y]
                        self.fieldLock[y][x] = 1
        # NOTE: this aliases field to fieldFound (same list objects), so
        # later writes to one are visible in the other.
        self.field = self.fieldFound
        self.certainty = sum(sum(row) for row in self.fieldLock)
    def findEmpty(self):
        """Lock the remaining cells of rows whose filled count matches the clue."""
        for y in range(self.sizeY):
            if sum(self.fieldFound[y]) == sum(self.inputInfoY[y]):
                for x in range(self.sizeX):
                    if self.fieldFound[y][x] == 0:
                        self.fieldLock[y][x] = 1
        # add y-axis
    def findFull(self):
        """Fill and lock rows where every unlocked cell must be full."""
        for y in range(self.sizeY):
            if self.sizeY - sum(self.fieldLock[y]) == sum(self.inputInfoY[y]) - sum(self.fieldFound[y]):
                # NOTE(review): the left-hand side uses sizeY where sizeX
                # (the row length) would be expected — possibly related to
                # the "doesn't work" remark below; confirm.
                for x in range(self.sizeX):
                    if self.fieldLock[y][x] == 0:
                        self.fieldFound[y][x] = 1
                        self.field[y][x] = 1
                        self.fieldLock[y][x] = 1
        # doesn't work somehow
        # add y-axis
    def fillRandom(self):
        """Assign random 0/1 values to every cell that is not locked."""
        # before adding findBeginning
        # 0.535ms for 2x2 in average
        # 5.26ms for 3x3 in average
        # 3.86s for 4x4 in average
        # 547s for 6x4 measured once
        # after adding findBeginning
        # 100ms for 4x4 in nAverage
        for y in range(self.sizeY):
            for x in range(self.sizeX):
                if self.fieldLock[y][x] == 0:
                    self.field[y][x] = random.randint(0,1)
    def solveRandom(self):
        """Deduce what is certain, then guess the rest until check() passes."""
        self.findBeginning()
        print("\nFound with [findBeginning]:")
        print(self.certainty)
        while(self.check() == 0):
            self.clear()
            self.findBeginning()
            self.findEmpty()
            #self.printFieldLock()
            #self.findFull()
            #self.printFieldLock()
            self.printField()
            self.fillRandom()
    def solve(self):
        """Pure random search with no logical deduction (brute force)."""
        while(self.check() == 0):
            self.fillRandom()
        self.printField()
    def check(self):
        """
        Return 1 if the current field satisfies all row and column clues,
        else 0. Run lengths are rebuilt from the grid and compared with
        inputInfoY / inputInfoX.
        """
        horizontalSpacing = 0
        for y in range(self.sizeY):
            # Rebuild this row's run-length encoding.
            foundinputInfoY = []
            foundinputInfoY.append(0)
            for x in range(self.sizeX):
                if self.field[y][x] == 1:
                    if x == 0:
                        foundinputInfoY[0] += 1
                    else:
                        if self.field[y][x - 1] == 0:
                            if foundinputInfoY[0] != 0:
                                foundinputInfoY.append(0)
                        foundinputInfoY[len(foundinputInfoY) - 1] += 1
            if foundinputInfoY == self.inputInfoY[y]:
                horizontalSpacing += 1
        if horizontalSpacing == self.sizeY:
            horizontalSpacing = 1
        else:
            horizontalSpacing = 0
        verticalSpacing = 0
        for x in range(self.sizeX):
            # Rebuild this column's run-length encoding.
            foundinputInfoX = []
            foundinputInfoX.append(0)
            for y in range(self.sizeY):
                if self.field[y][x] == 1:
                    if y == 0:
                        foundinputInfoX[0] += 1
                    else:
                        if self.field[y - 1][x] == 0:
                            if foundinputInfoX[0] != 0:
                                foundinputInfoX.append(0)
                        foundinputInfoX[len(foundinputInfoX) - 1] += 1
            if foundinputInfoX == self.inputInfoX[x]:
                verticalSpacing += 1
        if verticalSpacing == self.sizeX:
            verticalSpacing = 1
        else:
            verticalSpacing = 0
        if horizontalSpacing == 1 and verticalSpacing == 1:
            return 1
        else:
            return 0
# Driver script: pick a puzzle, solve it (gameNumber times) and report timing.
print("Starting session.")
gameNumber = 1
startTime = time.time()
# Puzzle definitions: [column clues, row clues].
input4x4 = [[[2],[2],[3],[1, 2]],
            [[4],[3],[2],[1]]]
input8x8test = [[[8],[6,1],[5,2],[2,2,2],[2],[2],[8],[8]],
                [[2],[2],[2],[8],[8],[2,2],[2,2],[8]]]
input6x6 = [[[4,1],[1,4],[2,3],[1,1],[1],[1,3]],
            [[3],[1,4],[2],[3,1],[3,1],[3,1]]]
input6x6_2 = [[[2,3],[2,3],[2,3],[2],[2,2],[2,2]],
              [[3],[6],[3],[3],[3,2],[3,2]]]
input8x8 = [[[5],[5],[2],[2],[2],[2],[8],[8]],
            [[2],[2],[2],[8],[8],[2,2],[2,2],[2,2]]]
input8x8prettyfull = [[[4],[8],[6,1],[4,3],[4,3],[4,1,1],[5,2],[1,2,3]],
                      [[8],[7],[8],[8],[2,1],[5,1],[1,2,2],[7]]]
inputInfo = input6x6
# Grid dimensions follow from the number of column/row clues.
size = [len(inputInfo[0]), len(inputInfo[1])]
print("\nInput information:")
print(inputInfo)
print("\nSize:")
print(size)
for x in range(gameNumber):
    NG = Nonogramm(size, inputInfo)
    NG.solveRandom()
    print("\n\nSolution:")
    NG.printField()
if gameNumber == 1:
    print("\nUsed time:", (time.time() - startTime) / gameNumber, "seconds")
else:
    print("\nAverage time:", (time.time() - startTime) / gameNumber, "seconds")
input("\nPress ENTER to exit.")
| StarcoderdataPython |
57180 | <gh_stars>0
import vk
import os
from urllib.request import urlopen
from time import sleep
session = vk.Session()
api = vk.API(session, v='5.53', lang='ru', timeout=10)
def get_photos_urls(user_id):
    """
    Return the URLs of a VK user's "saved" album photos at 604px resolution.

    Photos without a 604px rendition are skipped; their 130px URL (or an
    error marker) is reported to stdout instead.
    :param user_id: Numeric VK user/owner id.
    :return: List of photo URL strings.
    """
    photos_json = api.photos.get(owner_id=user_id, album_id='saved')
    photos_amount = photos_json['count']
    photos_list = photos_json['items']
    result = []
    not_saved = []
    for photo in photos_list:
        if 'photo_604' in photo:
            result.append(photo['photo_604'])
        else:
            try:
                not_saved.append(photo['photo_130'])
            except KeyError:  # narrowed from a bare `except:`
                not_saved.append('ERROR: photo is too small.')
    if len(result) != photos_amount:
        # BUG FIX: the original printed the literal '%i' because the format
        # string was never given an argument via the % operator.
        print('Sorry, %i photos are not saved.' % (photos_amount - len(result)))
        print('Here are some of them:')
        for photo_url in not_saved:
            print(photo_url)
    return result
def save_photos(photos_urls_list, foldername):
    """
    Download each URL in *photos_urls_list* into *foldername* as <index>.jpg.

    Failed downloads are skipped (best-effort); the folder is created if it
    does not exist.
    :param photos_urls_list: Iterable of photo URL strings.
    :param foldername: Destination directory for the downloaded files.
    """
    if not os.path.exists(foldername):
        os.mkdir(foldername)
    for i, url in enumerate(photos_urls_list):
        try:
            print('Downloading %s' % url)
            filename = os.path.join(foldername, str(i) + '.jpg')
            print(filename)
            # Context managers close both the HTTP response and the output
            # file even on failure (the original leaked the file handle).
            with urlopen(url) as response, open(filename, 'wb') as out_file:
                out_file.write(response.read())
            sleep(1)  # crude rate limiting between downloads
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; stay best-effort and continue.
            continue
    print('Saved!')
# Script entry point: download this user's saved photos into ./saved_pics.
save_photos(get_photos_urls(291823738), 'saved_pics')
| StarcoderdataPython |
1738281 | # Elastic search mapping definition for the Molecule entity
from glados.es.ws2es.es_util import DefaultMappings
# Shards size - can be overridden from the default calculated value here
# shards = 7
# Number of index replicas; 0 keeps the index footprint minimal.
replicas = 0
# Reuse the analyzers shared by all the ChEMBL entity mappings.
analysis = DefaultMappings.COMMON_ANALYSIS
mappings = \
{
'properties':
{
'activity_comment': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Not significant' , 'Not Active' , 'Not Determined' , 'Not Determined' , 'Not Determined' , 'Not Deter
# mined' , 'No Activity' , 'Active' , 'Not Active' , 'Not Determined'
'activity_id': DefaultMappings.ID,
'activity_properties':
{
'properties':
{
'relation': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None'
'result_flag': DefaultMappings.SHORT,
# EXAMPLES:
# '0' , '0' , '0' , '0' , '0' , '0' , '0' , '0' , '0' , '0'
'standard_relation': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None'
'standard_text_value': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'LYMLE (Lymphocytes/Leukocytes)' , 'MCH (Ery. Mean Corpuscular Hemoglobin)' , 'PT (Prothrombin
# Time)' , 'MCH (Ery. Mean Corpuscular Hemoglobin)' , 'PLAT (Platelets)' , 'HCT (Hematocrit)' ,
# 'HGB (Hemoglobin)' , 'WBC (Leukocytes)' , 'MCV (Ery. Mean Corpuscular Volume)' , 'RBC (Erythr
# ocytes)'
'standard_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACT
# IVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST'
'standard_units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None'
'standard_value': DefaultMappings.DOUBLE,
# EXAMPLES:
# 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None'
'text_value': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'LYMLE (Lymphocytes/Leukocytes)' , 'MCH (Ery. Mean Corpuscular Hemoglobin)' , 'PT (Prothrombin
# Time)' , 'MCH (Ery. Mean Corpuscular Hemoglobin)' , 'PLAT (Platelets)' , 'HCT (Hematocrit)' ,
# 'HGB (Hemoglobin)' , 'WBC (Leukocytes)' , 'MCV (Ery. Mean Corpuscular Volume)' , 'RBC (Erythr
# ocytes)'
'type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACT
# IVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST' , 'ACTIVITY_TEST'
'units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None'
'value': DefaultMappings.DOUBLE,
# EXAMPLES:
# 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None' , 'None'
}
},
# EXAMPLES:
# '1257115' , '1255687' , '1208208' , '1206737' , '1250105' , '1248197' , '1269811' , '1202567' , '12494
# 39' , '1202658'
'assay_chembl_id': DefaultMappings.CHEMBL_ID_REF,
# EXAMPLES:
# 'CHEMBL660317' , 'CHEMBL658042' , 'CHEMBL715010' , 'CHEMBL813448' , 'CHEMBL841714' , 'CHEMBL696911' ,
# 'CHEMBL845947' , 'CHEMBL719082' , 'CHEMBL746729' , 'CHEMBL645253'
'assay_description': DefaultMappings.TEXT_STD,
# EXAMPLES:
# 'Effect of BSA on Chymotrypsinogen inhibition by the compound, expressed as fold increase in IC50' , '
# Inhibitory activity against Human carbonic anhydrase II' , 'Inhibitory activity against Matrix metallo
# protease-2' , 'Effect of dose dependent inhibition of TOPO II-catalyzed kDNA decatenation in vitro' ,
# 'Therapeutic ratio in mixed-breed or Beagle dogs after intravenous administration of the compound.' ,
# 'Cytotoxicity against the cancer cell lines CNS SF-539' , 'Ratio for antagonistic activity for Pgp/MRP
# 1 was determined' , 'Agonist activity in rat at mGlu1a receptor expressed in HEK293 cells' , 'Inhibiti
# on of [3H]- DCKA binding to NMDA receptor of rat brain membranes' , 'Potency to antagonize the ability
# of angiotensin II to contract rabbit aorta'
'assay_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'B' , 'B' , 'B' , 'F' , 'A' , 'F' , 'F' , 'F' , 'B' , 'B'
'assay_variant_accession': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'P41145' , 'P41145' , 'P41145' , 'P0DMS8' , 'P29274' , 'Q9WKE8' , 'P41145' , 'P41145' , 'P56221' , 'P0DMS8
# '
'assay_variant_mutation': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'E297K' , 'E297A' , 'E297K' , 'H95A' , 'S277E' , 'K103N' , 'E297K' , 'E297A' , 'N131A' , 'H95A'
'bao_endpoint': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'BAO_0000179' , 'BAO_0000192' , 'BAO_0000192' , 'BAO_0002144' , 'BAO_0000179' , 'BAO_0000189' , 'BAO_0
# 000179' , 'BAO_0000188' , 'BAO_0000190' , 'BAO_0000034'
'bao_format': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'BAO_0000357' , 'BAO_0000357' , 'BAO_0000357' , 'BAO_0000019' , 'BAO_0000218' , 'BAO_0000218' , 'BAO_0
# 000019' , 'BAO_0000219' , 'BAO_0000249' , 'BAO_0000224'
'bao_label': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'cell membrane format' , 'protein complex format' , 'single protein format' , 'assay format' , 'organi
# sm-based format' , 'assay format' , 'single protein format' , 'single protein format' , 'organism-base
# d format' , 'organism-based format'
'canonical_smiles': DefaultMappings.ID_REF,
# EXAMPLES:
# 'Oc1ccc2ccccc2c1N=Nc3c(O)cc(c4ccccc34)S(=O)(=O)O' , 'NS(=O)(=O)c1cc(c(NC(=O)CN(CCOCCOCCN(CC(=O)O)CC(=O
# )Nc2c(Cl)c(Cl)c(cc2S(=O)(=O)N)S(=O)(=O)N)CC(=O)O)c(Cl)c1Cl)S(=O)(=O)N' , 'CS(=O)(=O)NO' , 'COc1cc(NS(=
# O)(=O)C)ccc1Nc2c3ccccc3nc4ccccc24' , 'CC(C)N(CCC(C(=O)N)(c1ccccc1)c2ccccn2)C(C)C' , 'COc1cc(\C=C\c2cc3
# [C@H]4CC[C@]5(C)[C@@H](O)CC[C@H]5[C@@H]4CCc3cc2O)cc(OC)c1OC' , 'CC(=O)Nc1ccccc1c2nc3ccccc3nc2O' , 'N[C
# @@H](C[C@@H](O)C(=O)O)C(=O)O' , 'Oc1nc2ccc(Cl)c(Cl)c2c(O)c1N=O' , 'CCN(Cc1ccc(cc1)c2ccccc2c3nn[nH]n3)c
# 4ncccc4C(=O)O'
'data_validity_comment': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Outside typical range' , 'Outside typical range' , 'Outside typical range' , 'Outside typical range'
# , 'Outside typical range' , 'Outside typical range' , 'Outside typical range' , 'Potential missing dat
# a' , 'Outside typical range' , 'Outside typical range'
'data_validity_description': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Values for this activity type are unusually large/small, so may not be accurate' , 'No data provided
# for value, units or activity_comment, needs further investigation' , 'Units for this activity type are
# unusual and may be incorrect (or the standard_type may be incorrect)' , 'Values for this activity typ
# e are unusually large/small, so may not be accurate' , 'Units for this activity type are unusual and m
# ay be incorrect (or the standard_type may be incorrect)' , 'Units for this activity type are unusual a
# nd may be incorrect (or the standard_type may be incorrect)' , 'Values for this activity type are unus
# ually large/small, so may not be accurate' , 'Values for this activity type are unusually large/small,
# so may not be accurate' , 'Units for this activity type are unusual and may be incorrect (or the stan
# dard_type may be incorrect)' , 'Units for this activity type are unusual and may be incorrect (or the
# standard_type may be incorrect)'
'document_chembl_id': DefaultMappings.CHEMBL_ID_REF,
# EXAMPLES:
# 'CHEMBL1135922' , 'CHEMBL1135903' , 'CHEMBL1133677' , 'CHEMBL1127394' , 'CHEMBL1121621' , 'CHEMBL11356
# 29' , 'CHEMBL1134347' , 'CHEMBL1134758' , 'CHEMBL1129792' , 'CHEMBL1126796'
'document_journal': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'J. Med. Chem.' , 'J. Med. Chem.' , 'J. Med. Chem.' , 'J. Med. Chem.' , 'J. Med. Chem.' , 'J. Med. Che
# m.' , 'J. Med. Chem.' , 'Bioorg. Med. Chem. Lett.' , 'J. Med. Chem.' , 'J. Med. Chem.'
'document_year': DefaultMappings.SHORT,
# EXAMPLES:
# '2002' , '2002' , '2000' , '1994' , '1980' , '2002' , '2001' , '2001' , '1996' , '1993'
'ligand_efficiency':
{
'properties':
{
'bei': DefaultMappings.DOUBLE,
# EXAMPLES:
# '16.02' , '18.43' , '19.83' , '21.43' , '20.51' , '23.47' , '29.8' , '12.3' , '18.7' , '8.73'
'le': DefaultMappings.DOUBLE,
# EXAMPLES:
# '0.3' , '0.34' , '0.36' , '0.42' , '0.38' , '0.43' , '0.54' , '0.24' , '0.38' , '0.16'
'lle': DefaultMappings.DOUBLE,
# EXAMPLES:
# '3.31' , '7.42' , '1.45' , '3.37' , '2.55' , '2.76' , '4.52' , '0.72' , '3.27' , '0.74'
'sei': DefaultMappings.DOUBLE,
# EXAMPLES:
# '12.27' , '8.42' , '7.97' , '14.66' , '29.24' , '52.57' , '24.27' , '6.16' , '7.25' , '3.59'
}
},
'molecule_chembl_id': DefaultMappings.CHEMBL_ID_REF,
# EXAMPLES:
# 'CHEMBL124855' , 'CHEMBL34899' , 'CHEMBL98328' , 'CHEMBL43' , 'CHEMBL517' , 'CHEMBL1628072' , 'CHEMBL1
# 54885' , 'CHEMBL371946' , 'CHEMBL40708' , 'CHEMBL440521'
'molecule_pref_name': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# '4'-HYDROXYCHALCONE' , 'BERBERINE' , 'CHLORANIL' , 'CORTICOSTERONE' , 'DESACETOXYMATRICARIN' , 'TOLBUT
# AMIDE' , 'NIFEDIPINE' , 'DYCLONINE HYDROCHLORIDE' , 'HARMAN' , 'EPINEPHRINE BITARTRATE'
'parent_molecule_chembl_id': DefaultMappings.CHEMBL_ID_REF,
# EXAMPLES:
# 'CHEMBL310082' , 'CHEMBL423241' , 'CHEMBL326713' , 'CHEMBL300429' , 'CHEMBL283728' , 'CHEMBL950' , 'CH
# EMBL173709' , 'CHEMBL173709' , 'CHEMBL1192700' , 'CHEMBL53463'
'pchembl_value': DefaultMappings.DOUBLE,
# EXAMPLES:
# '8.00' , '6.77' , '8.77' , '4.83' , '7.60' , '6.19' , '4.46' , '8.89' , '9.52' , '7.07'
'potential_duplicate': DefaultMappings.BOOLEAN,
# EXAMPLES:
# 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False' , 'False'
'qudt_units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'http://www.openphacts.org/units/Nanomolar' , 'http://www.openphacts.org/units/Nanomolar' , 'http://ww
# w.openphacts.org/units/Nanomolar' , 'http://www.openphacts.org/units/Nanomolar' , 'http://www.openphac
# ts.org/units/Nanomolar' , 'http://www.openphacts.org/units/Nanomolar' , 'http://www.openphacts.org/uni
# ts/Nanomolar' , 'http://www.openphacts.org/units/Nanomolar' , 'http://www.openphacts.org/units/Nanomol
# ar' , 'http://www.openphacts.org/units/Nanomolar'
'record_id': DefaultMappings.ID_REF,
# EXAMPLES:
# '78932' , '54374' , '231729' , '239131' , '11901' , '278312' , '304982' , '64208' , '208002' , '164930
# '
'relation': DefaultMappings.KEYWORD,
# EXAMPLES:
# '=' , '=' , '=' , '=' , '=' , '=' , '=' , '=' , '=' , '='
'src_id': DefaultMappings.ID_REF,
# EXAMPLES:
# '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1' , '1'
'standard_flag': DefaultMappings.BOOLEAN,
# EXAMPLES:
# 'True' , 'True' , 'True' , 'True' , 'True' , 'True' , 'True' , 'True' , 'True' , 'True'
'standard_relation': DefaultMappings.KEYWORD,
# EXAMPLES:
# '>' , '=' , '>' , '=' , '=' , '=' , '=' , '=' , '=' , '='
'standard_text_value': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Active' , 'Not Active' , 'Not Active' , 'Not Active' , 'Not Active' , 'Active' , 'Not Active' , 'Acti
# ve' , 'Active' , 'Active'
'standard_type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Increase in IC50' , 'Ki' , 'Ki' , 'IC90' , 'TR' , 'GI50' , 'Ratio' , 'EC50' , 'IC50' , 'Kd'
'standard_units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'nM' , 'nM' , 'nM' , 'nM' , 'nM' , 'nM' , 'nM' , 'nM' , 'mg.kg-1' , 'nM'
'standard_value': DefaultMappings.DOUBLE,
# EXAMPLES:
# '50' , '10' , '100' , '33000' , '6.6' , '2000' , '1' , '373000' , '170' , '1.7'
'target_chembl_id': DefaultMappings.CHEMBL_ID_REF,
# EXAMPLES:
# 'CHEMBL3314' , 'CHEMBL205' , 'CHEMBL333' , 'CHEMBL2094255' , 'CHEMBL373' , 'CHEMBL372' , 'CHEMBL612545
# ' , 'CHEMBL4477' , 'CHEMBL330' , 'CHEMBL2094256'
'target_organism': DefaultMappings.LOWER_CASE_KEYWORD,
# EXAMPLES:
# 'Bos taurus' , 'Homo sapiens' , 'Homo sapiens' , 'Homo sapiens' , 'Canis lupus familiaris' , 'Homo sap
# iens' , 'Rattus norvegicus' , 'Rattus norvegicus' , 'Homo sapiens' , 'Homo sapiens'
'target_pref_name': DefaultMappings.PREF_NAME,
# EXAMPLES:
# 'Alpha-chymotrypsin' , 'Carbonic anhydrase II' , 'Matrix metalloproteinase-2' , 'DNA topoisomerase II'
# , 'Canis familiaris' , 'Homo sapiens' , 'Unchecked' , 'Metabotropic glutamate receptor 1' , 'Glutamat
# e (NMDA) receptor subunit zeta 1' , 'Angiotensin II receptor'
'target_tax_id': DefaultMappings.ID_REF,
# EXAMPLES:
# '10090' , '9606' , '1280' , '9606' , '9606' , '7787' , '10116' , '9606' , '10090' , '5666'
'text_value': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Active' , 'Not Active' , 'Not Active' , 'Not Active' , 'Not Active' , 'Active' , 'Not Active' , 'Acti
# ve' , 'Active' , 'Active'
'toid': DefaultMappings.KEYWORD,
# EXAMPLES:
# '4981' , '5141' , '4981' , '4975' , '5141' , '4807' , '4982' , '4807' , '4969' , '4801'
'type': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'Potency' , 'Potency' , 'Potency' , 'Potency' , 'Potency' , 'Potency' , 'Potency' , 'Potency' , 'Poten
# cy' , 'Potency'
'units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'um' , 'um' , 'um' , 'um' , 'um' , 'um' , 'um' , 'um' , 'um' , 'um'
'uo_units': DefaultMappings.KEYWORD,
# EXAMPLES:
# 'UO_0000065' , 'UO_0000065' , 'UO_0000065' , 'UO_0000065' , 'UO_0000065' , 'UO_0000065' , 'UO_0000065'
# , 'UO_0000065' , 'UO_0000308' , 'UO_0000065'
'upper_value': DefaultMappings.DOUBLE,
# EXAMPLES:
# '100' , '100' , '83' , '83' , '3' , '15' , '6' , '1' , '3' , '63'
'value': DefaultMappings.DOUBLE,
# EXAMPLES:
# '2.8184' , '35.4813' , '31.6228' , '3.1623' , '2.8184' , '7.9433' , '31.6228' , '28.1838' , '3.9811' ,
# '3.9811'
}
}
| StarcoderdataPython |
3223724 | <filename>scripts/cubic_traj_planner.py
#!/usr/bin/env python
import rospy
from AR_week4_test.msg import cubic_traj_params, cubic_traj_coeffs
from AR_week4_test.srv import compute_cubic_traj, compute_cubic_trajRequest
def callback(data_value):
    """Handle one cubic_traj_params message.

    Blocks until the 'polynomial_trajectory' service is advertised, asks it
    for the cubic coefficients matching the received boundary conditions,
    then republishes the coefficients plus time bounds on 'client_chatter'.
    """
    rospy.wait_for_service('polynomial_trajectory')
    try:
        poly_trajectory = rospy.ServiceProxy('polynomial_trajectory', compute_cubic_traj)
        # Pack the boundary conditions (positions, velocities, times) into
        # the service request type.
        initialize = compute_cubic_trajRequest(data_value.p0, data_value.pf, data_value.v0, data_value.vf, data_value.t0, data_value.tf)
        soln = poly_trajectory(initialize)
        # NOTE(review): a new Publisher is constructed on every callback; ROS
        # publishers need time to register with subscribers, so early messages
        # can be dropped. Consider creating this once at node setup.
        publish = rospy.Publisher('client_chatter', cubic_traj_coeffs, queue_size=10)
        publish.publish(soln.a0, soln.a1, soln.a2, soln.a3, data_value.t0, data_value.tf)
    except rospy.ServiceException as e:
        print("Service call failed: %s"%e)
def listener():
    """Start the node and subscribe to 'chatter' for trajectory parameters."""
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber("chatter", cubic_traj_params, callback)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()


if __name__ == '__main__':
    listener()
| StarcoderdataPython |
1738840 | <filename>p01-feature-splits.py
# Decision Trees: Feature Splits
#%%
# Python typing introduced in 3.5: https://docs.python.org/3/library/typing.html
from typing import List
# As of Python 3.7, this exists! https://www.python.org/dev/peps/pep-0557/
from dataclasses import dataclass
# My python file (very limited for now, but we will build up shared functions)
from shared import TODO
#%%
# Let's define a really simple class with two fields:
@dataclass
class DataPoint:
    """A single labeled observation: a temperature and whether water froze."""

    temperature: float  # degrees Fahrenheit
    frozen: bool  # label: True when the water was frozen

    def secret_answer(self) -> bool:
        """Ground-truth rule the decision tree should rediscover (<= 32 F)."""
        return self.temperature <= 32

    def clone(self) -> "DataPoint":
        """Return an independent copy of this observation."""
        return DataPoint(self.temperature, self.frozen)
# Fahrenheit, sorry.
# Hand-labeled training set: seven frozen points, six non-frozen.
data = [
    # vermont temperatures; frozen=True
    DataPoint(0, True),
    DataPoint(-2, True),
    DataPoint(10, True),
    DataPoint(11, True),
    DataPoint(6, True),
    DataPoint(28, True),
    DataPoint(31, True),
    # warm temperatures; frozen=False
    DataPoint(33, False),
    DataPoint(45, False),
    DataPoint(76, False),
    DataPoint(60, False),
    DataPoint(34, False),
    DataPoint(98.6, False),
]
def is_water_frozen(temperature: float) -> bool:
    """Reference rule: water is frozen at or below the freezing point (32 F)."""
    freezing_point_f = 32
    return temperature <= freezing_point_f
def get_temp(pt: DataPoint) -> float:
    """Key function: a point's temperature (used for sorting)."""
    return pt.temperature
# Make sure the data I invented is actually correct...
# (every label must agree with the reference rule above)
for d in data:
    assert d.frozen == is_water_frozen(d.temperature)
def find_candidate_splits(data: List[DataPoint]) -> List[float]:
midpoints = []
# sort by temperature
data.sort(key=get_temp)
# loop looking at two at a time
for i in range(len(data) - 1):
left = data[i]
right = data[i + 1]
mid = (left.temperature + right.temperature) / 2.0
midpoints.append(mid)
return midpoints
def gini_impurity(points: List[DataPoint]) -> float:
"""
The standard version of gini impurity sums over the classes:
"""
p_ice = sum(1 for x in points if x.frozen) / len(points)
p_water = 1.0 - p_ice
return p_ice * (1 - p_ice) + p_water * (1 - p_water)
# for binary gini-impurity (just two classes) we can simplify, because 1 - p_ice == p_water, etc.
# p_ice * p_water + p_water * p_ice
# 2 * p_ice * p_water
# not really a huge difference.
def impurity_of_split(points: List[DataPoint], split: float) -> float:
    """Partition `points` at `split` and return the summed Gini impurity of
    the two sides.

    NOTE(review): this is the *unweighted* sum of the two impurities; the
    textbook criterion weights each side by its relative size. The
    `score == 0.0` check in __main__ works either way, since both sides are
    pure exactly when both impurities are zero.
    """
    smaller = []
    bigger = []
    for p in points:
        if p.temperature < split:
            smaller.append(p)
        else:
            bigger.append(p)
    return gini_impurity(smaller) + gini_impurity(bigger)
if __name__ == "__main__":
    # Impurity of the whole dataset, then of a known-pure subset.
    print("Initial Impurity: ", gini_impurity(data))
    print("Impurity of first-six (all True): ", gini_impurity(data[:6]))
    # Try every candidate threshold; stop at the first perfect split.
    for split in find_candidate_splits(data):
        score = impurity_of_split(data, split)
        print("splitting at {} gives us impurity {}".format(split, score))
        if score == 0.0:
            break
| StarcoderdataPython |
1755221 | <reponame>Yuri-Lima/Fake_No_More_Blog
from django.db import models
from django.conf import settings
from django.urls import reverse
from django.contrib.auth import get_user_model
from users.models import User
# Resolve the project's active user model (honours AUTH_USER_MODEL).
UserModel = get_user_model()
# Create your models here.
class SendContactEmail(models.Model):
    """A contact-form submission to be delivered as an email."""

    subject = models.CharField(max_length=150)
    message = models.TextField()
    from_email = models.EmailField(verbose_name='Email', max_length=60)
    to_email = models.EmailField()

    def __str__(self):
        # Identify a message by its subject (admin list display, shell, etc.).
        return self.subject

    def get_absolute_url(self):
        # Static thank-you page shown after a successful submission.
        # NOTE(review): the f-prefix is redundant — no placeholders.
        return f"/contact/thanks/"
3252466 | # Generated by Django 2.2.7 on 2019-12-02 19:31
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration: reconciles the two divergent '0018' branches of
    the 'paper' app's migration history (no schema operations)."""

    dependencies = [
        ('paper', '0018_auto_20191120_1647'),
        ('paper', '0018_auto_20191125_1956'),
    ]

    operations = [
    ]
| StarcoderdataPython |
111336 | from bocadillo import App
# Bocadillo ASGI application with permissive CORS (any origin, any method).
app = App(
    enable_cors=True,
    cors_config={"allow_origins": ["*"], "allow_methods": ["*"]},
)
# In-memory stand-in for a course database.
_COURSES = [
    {
        "id": 1,
        "code": "adv-maths",
        "name": "Advanced Mathematics",
        "created": "2018-08-14T12:09:45",
    },
    {
        "id": 2,
        "code": "cs1",
        "name": "Computer Science I",
        "created": "2018-06-12T18:34:16",
    },
]
@app.route("/courses")
async def courses_list(req, res):
    """GET /courses: return the full course list as JSON."""
    res.media = _COURSES


if __name__ == "__main__":
    app.run()
| StarcoderdataPython |
3261673 | # Copyright (c) 2021, NVIDIA CORPORATION
from geopandas.geoseries import is_geometry_type as gp_is_geometry_type
from cuspatial.geometry.geoseries import GeoSeries, GeoColumn
def is_geometry_type(obj):
    """Return True when `obj` is a cuspatial GeoSeries/GeoColumn, or when
    GeoPandas reports it as geometry-typed."""
    return isinstance(obj, (GeoSeries, GeoColumn)) or bool(gp_is_geometry_type(obj))
| StarcoderdataPython |
1707734 | # idel_utils.py
import os
import json
# Constants
STR_CFN = 'cfn'
STR_AWS = 'aws'
STR_DEPLOY = 'deploy'
STR_DELETE = 'delete'
CHANGE_MODE_CHANGE = 'change'
CHANGE_MODE_PROVISION = 'provision'
CHANGE_MODE_DESTROY = 'destroy'
CHANGE_MODE_ON = 'on'
CHANGE_MODE_OFF = 'off'
# Sample continuationToken
"""
{
"StackName": "<STACK_NAME_HERE>",
"StackId": "<STACK_ID_HERE>",
"StackDesire": "<CREATE_COMPLETE|UPDATE_COMPLETE|DELETE_COMPLETE>",
"Block": "<Number>",
"Status": "<DONE|WAITING>",
"Occurrence": "<None|Number>",
"Sequence": "<Number>"
}
"""
sample_continuation_token = {
"StackName": None,
"StackId": None,
"StackDesire": None,
"Block": None,
"Status": None,
"Occurrence": None,
"Sequence": None
}
def get_user_params(job_data):
    """Decode the pipeline job's UserParameters JSON string.

    Args:
        job_data: CodePipeline job data containing
            actionConfiguration.configuration.UserParameters.

    Returns:
        The decoded parameters as a dictionary.

    Raises:
        Exception: when the value is missing or cannot be decoded as JSON.
    """
    try:
        raw = job_data['actionConfiguration']['configuration']['UserParameters']
        # VMQ workaround: strip embedded newlines before parsing.
        return json.loads(raw.replace('\n', ' '))
    except Exception as e:
        # Fail the job with a helpful message if the parameters are malformed.
        raise Exception('UserParameters could not be decoded as JSON: ' + str(e))
def get_sample_continuation_token():
    """Return a fresh shallow copy of the continuation-token template."""
    return dict(sample_continuation_token)
def build_continuation_token(stack_name=None, stack_id=None, stack_desire=None, block=None, status=None, occurrence=None, sequence=None):
    """Assemble a continuation token from the template and the given fields.

    Any field left as None stays None, matching the template's defaults.
    """
    token = sample_continuation_token.copy()
    token.update(
        StackName=stack_name,
        StackId=stack_id,
        StackDesire=stack_desire,
        Block=block,
        Status=status,
        Occurrence=occurrence,
        Sequence=sequence,
    )
    return token
def stack_action_corresponding_statuses(action, stack_status):
    """Classify a CloudFormation stack status relative to the given action.

    Returns 'COMPLETE', 'IN_PROGRESS', or 'UNKNOWN'.
    """
    complete = {
        'deploy': ('UPDATE_COMPLETE', 'CREATE_COMPLETE'),
        'delete': ('DELETE_COMPLETE',),
    }
    in_progress = {
        'deploy': ('UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', 'CREATE_IN_PROGRESS'),
        'delete': ('DELETE_IN_PROGRESS',),
    }
    if stack_status in complete.get(action, ()):
        return 'COMPLETE'
    if stack_status in in_progress.get(action, ()):
        return 'IN_PROGRESS'
    return 'UNKNOWN'
def stack_desire_corresponding_statuses(desire, stack_status):
    """Compare a stack status against the desired terminal status.

    Returns True when the desire is reached (ready for the next block),
    False when the stack is still progressing toward the desire, and None
    for any unhandled status combination.
    """
    if stack_status == desire:
        return True
    pending = {
        'CREATE_COMPLETE': ('CREATE_IN_PROGRESS',),
        'UPDATE_COMPLETE': ('UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS'),
        'DELETE_COMPLETE': ('DELETE_IN_PROGRESS',),
    }
    if stack_status in pending.get(desire, ()):
        return False
    # Unhandled status: the caller treats this as an exceptional state.
    return None
def get_environment_variables():
    """Return the service's required environment variables as a dict.

    Raises KeyError if any variable is unset (same as the direct lookups).
    """
    required = (
        'LOGGING_LEVEL',
        'SECRET_NAME',
        'ARTIFACT_DIR',
        'CHANGES_FILE',
        'WAITING_OCCURRENCE',
        'CFN_WAITER_CONFIG',
    )
    return {name: os.environ[name] for name in required}
def validate_changes(data_changes):
    """Validate a parsed changes file; return True or raise Exception.

    Requires a supported 'Mode', and — when the mode is 'change' — a
    non-empty 'Changes' list.
    """
    if 'Mode' not in data_changes:
        raise Exception('Broken changes file: Missing \'Mode\' item.')
    mode = data_changes['Mode']
    supported = (CHANGE_MODE_CHANGE, CHANGE_MODE_PROVISION, CHANGE_MODE_DESTROY, CHANGE_MODE_ON, CHANGE_MODE_OFF)
    if mode not in supported:
        raise Exception('Broken changes file: \'Mode: {}\' not supported.'.format(mode))
    if mode == CHANGE_MODE_CHANGE:
        if 'Changes' not in data_changes:
            raise Exception('Broken changes file: Missing \'Changes\' item.')
        if 0 == len(data_changes['Changes']):
            raise Exception('Broken changes file: \'Changes\' item is empty.')
    return True
def validate_inventory(data_inventory):
    """Validate a parsed inventory file; return True or raise Exception.

    Requires a non-empty 'Inventory' item.
    """
    if 'Inventory' not in data_inventory:
        raise Exception('Broken inventory file: Missing \'Inventory\' item.')
    if 0 == len(data_inventory['Inventory']):
        raise Exception('Broken inventory file: \'Inventory\' item is empty.')
    return True
def skip_object(change_mode, change):
    """Decide whether a change object should be skipped for this mode.

    Rules:
    - Mode 'change': nothing is skipped ('Conditions' is ignored).
    - Otherwise, objects without 'Conditions' are involved only when they are
      cfn objects and the mode is provision/destroy (backward compatibility);
      objects with 'Conditions' are involved only when the mode matches.

    Returns True to skip the object, False to involve it.
    """
    if change_mode != CHANGE_MODE_CHANGE:
        if 'Conditions' not in change:
            # Backward compatibility: bare cfn objects ride along with the
            # provision/destroy lifecycle; everything else is skipped.
            is_cfn_lifecycle = (change['Object'] == STR_CFN
                                and change_mode in (CHANGE_MODE_PROVISION, CHANGE_MODE_DESTROY))
            return not is_cfn_lifecycle
        if change_mode not in change['Conditions']:
            return True
    return False
def override_cfn_action(change_mode, original_action):
    """Translate a lifecycle mode into the cfn action it implies.

    provision/on imply deploy; destroy/off imply delete; any other mode keeps
    the action declared on the object.
    """
    mappings = {
        CHANGE_MODE_PROVISION: STR_DEPLOY,
        CHANGE_MODE_DESTROY: STR_DELETE,
        CHANGE_MODE_ON: STR_DEPLOY,
        CHANGE_MODE_OFF: STR_DELETE,
    }
    return mappings.get(change_mode, original_action)
| StarcoderdataPython |
1697291 | <filename>omtk/widget_list_influences.py
import re
import pymel.core as pymel
from PySide import QtCore
from PySide import QtGui
from ui import widget_list_influences
import libSerialization
from omtk.libs import libSkinning
from omtk.libs import libQt
from omtk.libs import libPython
from omtk.libs import libPymel
import ui_shared
class WidgetListInfluences(QtGui.QWidget):
    """Qt widget listing a rig's potential influences (joints and NURBS
    surfaces) in a filterable tree.

    Each tree item's checkbox reflects whether the influence is already
    connected to a Module network; items are monkey-patched with `.obj`
    (the PyNode) and `.networks` (connected networks) for later queries.
    """

    # Emitted on a context-menu request over the tree (right click).
    onRightClick = QtCore.Signal()

    def __init__(self, parent=None):
        super(WidgetListInfluences, self).__init__(parent=parent)
        self._rig = None  # current rig; assigned via set_rig()
        self.ui = widget_list_influences.Ui_Form()
        self.ui.setupUi(self)

        # Tweak gui
        self.ui.treeWidget.setStyleSheet(ui_shared._STYLE_SHEET)

        # Connect signals
        self.ui.treeWidget.customContextMenuRequested.connect(self.onRightClick)

        # Connect events
        self.ui.treeWidget.itemSelectionChanged.connect(self.on_influence_selection_changed)
        self.ui.lineEdit_search.textChanged.connect(self.on_query_changed)
        self.ui.checkBox_hideAssigned.stateChanged.connect(self.on_query_changed)
        self.ui.btn_update.pressed.connect(self.update)

    def set_rig(self, rig, update=True):
        """Point the widget at a rig; optionally rebuild the tree now."""
        self._rig = rig
        if update:
            self.update()

    @libPython.log_execution_time('update_ui_jnts')
    def update(self, *args, **kwargs):
        """Rebuild the influence tree from the rig's potential influences.

        NOTE(review): this overrides QWidget.update(), so Qt repaint
        requests trigger a full rebuild as well.
        """
        self.ui.treeWidget.clear()
        if self._rig is None:
            return

        all_potential_influences = self._rig.get_potential_influences()

        if all_potential_influences:
            # Build a hierarchy from the flat influence list, then mirror it
            # into the tree widget.
            data = libPymel.get_tree_from_objs(all_potential_influences, sort=True)

            self._fill_widget_influences(self.ui.treeWidget.invisibleRootItem(), data)

        self.ui.treeWidget.sortItems(0, QtCore.Qt.AscendingOrder)
        self.update_list_visibility()

    def _fill_widget_influences(self, qt_parent, data):
        """Recursively add one tree item per influence node in `data`.

        Non-influence nodes contribute no item but their children are still
        visited (attached to the current parent).
        """
        obj = pymel.PyNode(data.val) if data.val else None
        #obj, children_data = data
        if obj:
            obj_name = obj.stripNamespace()

            fnFilter = lambda x: libSerialization.is_network_from_class(x, 'Module')
            networks = libSerialization.get_connected_networks(obj, key=fnFilter, recursive=False)

            textBrush = QtGui.QBrush(QtCore.Qt.white)

            if self._is_influence(obj):  # todo: listen to the Rig class
                qItem = QtGui.QTreeWidgetItem(0)
                # Monkey-patch lookup data used by visibility/selection code.
                qItem.obj = obj
                qItem.networks = networks
                qItem.setText(0, obj_name)
                qItem.setForeground(0, textBrush)
                ui_shared._set_icon_from_type(obj, qItem)
                # Checked = influence is already used by a module network.
                qItem.setCheckState(0, QtCore.Qt.Checked if networks else QtCore.Qt.Unchecked)
                if qItem.flags() & QtCore.Qt.ItemIsUserCheckable:
                    # The checkbox is informational only: strip checkability.
                    qItem.setFlags(qItem.flags() ^ QtCore.Qt.ItemIsUserCheckable)
                qt_parent.addChild(qItem)
                qt_parent = qItem

        for child_data in data.children:
            self._fill_widget_influences(qt_parent, child_data)

        #for child_data in children_data:
            #child = child_data[0]
            #if isinstance(child, pymel.nodetypes.Transform):
                #self._fill_widget_influences(qt_parent, child_data)

    def _is_influence(self, obj):
        """
        Supported influences are joints and nurbsSurface.
        :return: True when obj is a Joint transform or a NurbsSurface shape.
        """
        return libPymel.isinstance_of_transform(obj, pymel.nodetypes.Joint) or \
               libPymel.isinstance_of_shape(obj, pymel.nodetypes.NurbsSurface)

    def update_list_visibility(self, query_regex=None):
        """Show/hide items per the search box and 'hide assigned' checkbox.

        Hidden-by-filter items are greyed out and made unselectable; visible
        items force their ancestors visible so the tree path stays intact.
        """
        if query_regex is None:
            query_raw = self.ui.lineEdit_search.text()
            query_regex = ".*{0}.*".format(query_raw) if query_raw else ".*"

        unselectableBrush = QtGui.QBrush(QtCore.Qt.darkGray)
        selectableBrush = QtGui.QBrush(QtCore.Qt.white)
        for qt_item in libQt.get_all_QTreeWidgetItem(self.ui.treeWidget):
            can_show = self._can_show_QTreeWidgetItem(qt_item, query_regex)
            qt_item.setHidden(not can_show)
            if can_show:
                qt_item.setForeground(0, selectableBrush)
                flags = qt_item.flags()
                if not flags & QtCore.Qt.ItemIsSelectable:  # Make selectable
                    flags ^= QtCore.Qt.ItemIsSelectable
                    qt_item.setFlags(flags)
                self._show_parent_recursive(qt_item.parent())
            else:
                qt_item.setForeground(0, unselectableBrush)
                flags = qt_item.flags()
                if flags & QtCore.Qt.ItemIsSelectable:  # Make selectable
                    flags ^= QtCore.Qt.ItemIsSelectable
                    qt_item.setFlags(flags)

        self.ui.treeWidget.expandAll()

    def _can_show_QTreeWidgetItem(self, qItem, query_regex):
        """An item is visible when its name matches the query and, if 'hide
        assigned' is checked, it has no connected module networks."""
        obj = qItem.obj  # Retrieve monkey-patched data
        obj_name = obj.stripNamespace()
        # print obj_name

        if not re.match(query_regex, obj_name, re.IGNORECASE):
            return False

        if self.ui.checkBox_hideAssigned.isChecked():
            if qItem.networks:
                return False

        return True

    def _show_parent_recursive(self, qt_parent_item):
        """Walk up the tree un-hiding every ancestor of a visible item."""
        if qt_parent_item is not None:
            # NOTE(review): isHidden is a bound method, so this condition is
            # always truthy — likely meant isHidden(). Harmless here because
            # setHidden(False) is idempotent.
            if qt_parent_item.isHidden:
                qt_parent_item.setHidden(False)
            self._show_parent_recursive(qt_parent_item.parent())

    def get_selection(self):
        """Return the PyNodes of the currently selected, still-existing items."""
        result = []
        for item in self.ui.treeWidget.selectedItems():
            if item.obj.exists():
                result.append(item.obj)
        return result

    #
    # Events
    #

    def on_influence_selection_changed(self):
        # Mirror the widget selection into the Maya scene selection.
        pymel.select(self.get_selection())

    def on_query_changed(self):
        # Re-filter the tree whenever the search text or checkbox changes.
        self.update_list_visibility()
175613 | import matplotlib.pyplot as pl
import os
import numpy as np
from ticle.data.dataHandler import normalizeData,load_file
from ticle.analysis.analysis import get_significant_periods
# Global matplotlib styling for all figures produced below.
pl.rc('xtick', labelsize='x-small')
pl.rc('ytick', labelsize='x-small')
pl.rc('font', family='serif')
pl.rcParams.update({'font.size': 20})
# NOTE(review): tight_layout() is called before any figure exists, so it acts
# on an implicit empty figure and has no effect on the plots created below.
pl.tight_layout()

# Output tree: results/sigma_pdm/<star id>/
path = os.getcwd()
sigma_pdm_dir = f"{path}/results/sigma_pdm"
try:
    os.makedirs(sigma_pdm_dir)
except FileExistsError:
    pass

data_dir = f"{path}/data/"
data_list_file = f"{data_dir}/dataList.txt"
# Each row: star id (column 0) and a guessed period in days (column 1).
data_list = np.loadtxt(data_list_file)

for data in data_list:
    # Star directories/files are named with a single leading '0'.
    star = f"0{int(data[0])}"
    file_name = f"{data_dir}/{star}/{star}_LC_destepped.txt"
    res_dir = f"{sigma_pdm_dir}/{star}"
    try:
        os.mkdir(res_dir)
    except FileExistsError:
        pass

    # Load and normalize the light curve: t_series[0] = time, t_series[1] = flux.
    t_series = load_file(file_name)
    t_series = normalizeData(t_series)

    # Phase-dispersion-minimization scan for significant periods.
    pdm = get_significant_periods(t_series, 20, True)
    periods = pdm['periods']
    # Keep only periods shorter than half the time baseline (at least two
    # full cycles observed); assumes numpy arrays for the boolean mask.
    sigma_vals = pdm['lspvals'][periods * 2 < max(t_series[0])]
    periods = periods[periods * 2 < max(t_series[0])]

    # Light-curve plot.
    lightcurve_fig = pl.figure(figsize=(10, 7))
    pl.plot(t_series[0], t_series[1], color='k')
    pl.xlabel("Time(days)")
    pl.ylabel("Flux")
    pl.axhline(y=0, linestyle='dashed', color='k', alpha=0.6)
    pl.title(f"{star}")

    # PDM statistic vs period, with the catalogued period guess marked.
    pdm_fig = pl.figure(figsize=(10, 7))
    pl.plot(periods, sigma_vals, color='k', markersize=3, alpha=0.5)
    pl.plot(periods, sigma_vals, 'x', color='k', markersize=3)
    p_guess = "%.2f" % float(data[1])
    pl.axvline(x=data[1], color='blue', alpha=0.6, linestyle='dashed', label=rf"$P_{{guess}}={p_guess}$ days")
    pl.xlabel(r"Period(days)")
    pl.ylabel(r"$\Theta_{pdm}$")
    pl.legend()
    pl.title(f"PDM {star}")

    lightcurve_fig.savefig(f"{res_dir}/{star}_lightcurve.pdf")
    pdm_fig.savefig(f"{res_dir}/{star}_pdm.pdf")
81814 | import json
import re
import requests
import threading
from flask import current_app as app
# Module-level re-entrant locks for the critical sections around the external
# API calls below (the acquire/release calls are currently commented out).
ingredLock = threading.RLock()
posLock = threading.RLock()
recipeLock = threading.RLock()
imgDownloadLock = threading.RLock()
processLocker = threading.RLock()
def recipesByIngredients(ingredientsList, maxRecpts='20', ranking='2', ignoreParty='true'):
    """Query the recipe API for recipes using the given ingredients.

    Args:
        ingredientsList: list of ingredient names.
        maxRecpts: maximum number of recipes to return.
        ranking: '1' maximize used ingredients, '2' minimize missing ones.
        ignoreParty: 'true' to ignore pantry basics such as water.

    Returns:
        The raw `requests` response from the API.

    NOTE(review): the API credentials are hard-coded; consider moving them
    to configuration.
    """
    url = "https://webknox-recipes.p.rapidapi.com/recipes/findByIngredients"
    headers = {
        'x-rapidapi-host': "webknox-recipes.p.rapidapi.com",
        'x-rapidapi-key': "<KEY>"
    }
    querystring = {
        "number": str(maxRecpts),
        "ranking": str(ranking),
        "ignorePantry": str(ignoreParty),
        "ingredients": '%2C'.join(ingredientsList),
    }
    return requests.request("GET", url, headers=headers, params=querystring)
def lemmatize(text):
    """Return the list of lemmas (one per word) for the given text.

    Calls the twinword lemma-extraction API and returns the keys of its
    'lemma' mapping.
    """
    url = "https://twinword-twinword-bundle-v1.p.rapidapi.com/lemma_extract/"
    headers = {
        'x-rapidapi-host': "twinword-twinword-bundle-v1.p.rapidapi.com",
        'x-rapidapi-key': "f550b025c9mshf7e779815980f33p1a185djsn190432425332"
    }
    payload = requests.request("GET", url, headers=headers, params={"text": text}).json()
    return list(payload['lemma'].keys())
def getNutriotion(ID):
    """Fetch calories/carbs/fat/protein for a recipe id.

    NOTE(review): the function name keeps the original 'Nutriotion' typo for
    backward compatibility with existing callers.
    """
    url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/%d/nutritionWidget.json" % ID
    headers = {
        'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
        'x-rapidapi-key': "<KEY>"
    }
    payload = requests.request("GET", url, headers=headers).json()
    return {field: payload[field] for field in ('calories', 'carbs', 'fat', 'protein')}
def getVideoByRecipe(recipeTitle):
    """Return a YouTube URL for the first food video matching the recipe
    title, or the string 'none' when no video is found."""
    url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/food/videos/search"
    headers = {
        'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
        'x-rapidapi-key': "<KEY>"
    }
    querystring = {"query": recipeTitle, "minLength": "00", "maxLength": "999", "offset": "0", "number": "1"}
    videos = requests.request("GET", url, headers=headers, params=querystring).json()['videos']
    if not videos:
        return 'none'
    return 'youtube.com/watch?v=' + videos[0]['youTubeId']
def postProcess(data, ingredientsList):
    '''
    Input:
        data : output received from API as JSON file
        ingredientsList: original list with Ingredients
    Output:
        list of dictionaries (one enriched record per kept recipe)
    '''
    # posLock.acquire()  ## maybe delete
    missedFlag = False
    output = []
    for raw in data:
        ingrdtsList = ingredientsList.copy()
        ingredientRecord = list()
        # Try to account for every "missed" ingredient: when any word of its
        # name is still in the user's ingredient list, consume that word and
        # keep the ingredient; otherwise abandon the recipe.
        for missIngrdt in raw['missedIngredients']:
            misses = missIngrdt['name'].split(" ")
            missedFlag = True
            for miss in misses:
                if miss in ingrdtsList:
                    ingrdtsList.pop(ingrdtsList.index(miss))
                    missedFlag = False
                    break
            if missedFlag == True:
                # Unmatched ingredient: skip the rest of this recipe.
                break
            missed = missIngrdt['name']
            ingredientRecord.append(missed)
        for usedIngred in raw['usedIngredients']:
            ingred = usedIngred['name']
            ingredientRecord.append(ingred)
        # NOTE(review): missedFlag is only reset inside the loop above, so a
        # recipe whose missedIngredients list is empty inherits the previous
        # recipe's flag and may be skipped incorrectly.
        if missedFlag == False:
            record = {'id': raw['id']}
            record['title'] = raw['title']
            # Lemmatize single-word ingredient names only; multi-word names
            # are appended unchanged.
            lemmaInp = [ingredient for ingredient in ingredientRecord if len(ingredient.split(" ")) == 1]
            nonLemmaInp = [ingredient for ingredient in ingredientRecord if len(ingredient.split(" ")) > 1]
            lemmaInp = ' '.join(lemmaInp)
            ingredientRecord = lemmatize(lemmaInp)
            ingredientRecord.extend(nonLemmaInp)
            record['ingredients'] = ingredientRecord
            record['video_url'] = getVideoByRecipe(raw['title'])
            record['nutritions'] = getNutriotion(raw['id'])
            output.append(record)
    # posLock.release()
    return output
def downloadImage(ID):
    """Return the image URL for the given recipe id."""
    # imgDownloadLock.acquire()
    url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/%d/information" % (ID)
    headers = {
        'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
        'x-rapidapi-key': "<KEY>"
    }
    payload = requests.request("GET", url, headers=headers).json()
    # imgDownloadLock.release()
    return payload['image']
def getStepwiseRecipy(data, stepBreakdown='true'):
    """Annotate each recipe dict (in place) with 'steps' and 'image_url'.

    Args:
        data: previously processed list of recipe dictionaries.
        stepBreakdown: 'true' for very detailed step instructions.

    Returns:
        The same records, each extended with {'steps': [...]} and
        {'image_url': url}.
    """
    headers = {
        'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
        'x-rapidapi-key': "<KEY>"
    }
    output = []
    for recipe in data:
        url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/%d/analyzedInstructions" % (
            recipe['id'])
        instructions = requests.request(
            "GET", url, headers=headers, params={"stepBreakdown": stepBreakdown}
        ).json()
        if instructions:
            recipe['steps'] = [entry['step'] for entry in instructions[0]['steps']]
        else:
            recipe['steps'] = []
        recipe['image_url'] = downloadImage(recipe['id'])
        output.append(recipe)
    return output
def processInput(ingredientsList):
    '''
    Input:
        ingredientsList: list of ingredients received from front-end
    Ouput:
        JSON file of format as list of dictionaries
        [ {'id':id,
           'title': titleRecipe,
           'ingredeints': list of ingredients,
           'image_url':url,
           'steps': list of sentences,
          }]
    '''
    # processLocker.acquire()
    # Input-side lemmatization is currently disabled:
    # lemmaInp = [ingredient for ingredient in ingredientsList if len(ingredient.split(" ")) == 1]
    # nonLemmaInp = [ingredient for ingredient in ingredientsList if len(ingredient.split(" ")) > 1]
    # lemmaInp = ' '.join(lemmaInp)
    # ingredientsList = lemmatize(lemmaInp)
    # ingredientsList.extend(nonLemmaInp)
    # Pipeline: search recipes -> filter/enrich -> add steps and images.
    recipes = recipesByIngredients(ingredientsList).json()
    processedRecipes = postProcess(recipes, ingredientsList)
    stepWiseRecipy = getStepwiseRecipy(processedRecipes)
    app.logger.info('[DEBUG] stepWiseRecipy: ')
    app.logger.info(stepWiseRecipy)
    #finalJson = json.dumps(stepWiseRecipy, separators=('\n', ":"), )
    # processLocker.release()
    return stepWiseRecipy
| StarcoderdataPython |
1768484 | #coding=utf-8
"""
__create_time__ = '13-10-13'
__author__ = 'Madre'
"""
from django.contrib import admin
from translation.models import Translation
class TranslationAdmin(admin.ModelAdmin):
    """Admin configuration for Translation entries."""

    list_display = ('m_type', 'title', 'tran_title', 'user', 'index', 'show')
    list_display_links = ['title']
    # 'show' and 'index' are editable directly from the change list.
    list_editable = ['show', 'index']
    date_hierarchy = 'createtime'
    list_filter = ['show', 'title']
    search_fields = ['title', 'origin_content', 'tran_content', 'o_t_content']


admin.site.register(Translation, TranslationAdmin)
| StarcoderdataPython |
3267826 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import core.utils
class Migration(migrations.Migration):
    """Add an optional custom thumbnail image field to the Lesson model."""

    dependencies = [
        ('core', '0029_auto_20170209_1656'),
    ]

    operations = [
        migrations.AddField(
            model_name='lesson',
            name='custom_thumbnail',
            # Uploads are stored under a hashed name in 'lesson_thumbnails'.
            field=models.ImageField(upload_to=core.utils.HashName(b'lesson_thumbnails', b'name'), null=True, verbose_name='Thumbnail', blank=True),
        ),
    ]
| StarcoderdataPython |
1714004 | <reponame>Jeetandra/cortx-s3server
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email <EMAIL> or <EMAIL>.
#
"""Implementation of RabbitMQ for object recovery"""
import traceback
import time
import json
import pika
from s3backgrounddelete.cortx_s3_kv_api import CORTXS3KVApi
from s3backgrounddelete.cortx_s3_object_api import CORTXS3ObjectApi
from s3backgrounddelete.cortx_s3_index_api import CORTXS3IndexApi
from s3backgrounddelete.object_recovery_validator import ObjectRecoveryValidator
from s3backgrounddelete.IEMutil import IEMutil
class ObjectRecoveryRabbitMq(object):
    """This class is implementation of RabbitMq for object recovery."""

    # Class-level defaults; replaced per instance by connect().
    _connection = None
    _channel = None

    def __init__(self, config, user, password, host,
                 exchange, queue, mode, durable, logger):
        """Initialise rabbitmq"""
        self.config = config
        self._user = user
        self._password = password
        self._host = host
        self._exchange = exchange
        self._mode = mode  # delivery mode passed to BasicProperties (2 = persistent)
        self._queue = queue
        # Config values arrive as strings; only the literal "True" enables
        # durable queues.
        self._durable = True if durable == "True" else False
        self.logger = logger
        self.connect()

    def connect(self):
        """Connect to message queue"""
        try:
            credentials = pika.PlainCredentials(self._user, self._password)
            self._connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=self._host, credentials=credentials))
            self._channel = self._connection.channel()
            self._channel.queue_declare(
                queue=self._queue, durable=self._durable)
        except Exception as exception:
            err_msg = "error:%s, %s" % (exception, traceback.format_exc())
            self.logger.warn("msg_queue connect failed." + str(err_msg))
            # NOTE(review): retry-by-recursion with no depth limit — a broker
            # that stays down long enough will exhaust the recursion limit.
            time.sleep(5)
            self.connect()

    def purge_queue(self, queue_name):
        """Purge all entries from queue.

        Returns True on success, False on any failure.
        """
        try:
            self.logger.info(("Purging queue: %s") % queue_name)
            self._channel.queue_purge(queue=queue_name)
        except Exception as exception:
            # NOTE(review): msg is built but never logged or returned.
            msg = ("Purge queue exception: %s, %s") % (
                exception, traceback.format_exc())
            return False
        return True

    def send_data(self, data, mq_routing):
        """Send message data.

        Returns (True, None) on success, (False, error_message) on failure.
        """
        try:
            self._channel.basic_publish(exchange=self._exchange,
                                        routing_key=mq_routing,
                                        body=json.dumps(data),
                                        properties=pika.BasicProperties(
                                            delivery_mode=self._mode,  # make message persistent
                                        ))
        except Exception as exception:
            msg = ("msg_queue send data except:%s, %s") % (
                exception, traceback.format_exc())
            return False, msg
        return True, None

    def worker(self, queue_msg_count=None):
        """Create worker which will process results."""
        # Callback function to consume the queue messages and parameters are,
        # channel : rabbitmq Channel used to send/receive/ack messages
        # method : contain details to identify which consumer the message should
        #          go e.g delivery_tag
        # properties : BasicProperties contains message properties (metadata)
        # body : message body
        # example:
        # method: <Basic.GetOk(['delivery_tag=1', 'exchange=',
        #          'message_count=0', 'redelivered=False',
        #          'routing_key=s3_delete_obj_job_queue'])>
        # properties: <BasicProperties(['delivery_mode=2'])>
        # body: b'{"Key": "<KEY>=",
        #          "Value": "{\\"index_id\\":\\"egZPBQAAAHg=-YwIAAAAAJKc=\\",
        #          \\"object_layout_id\\":1,
        #          \\"object_metadata_path\\":\\"object1\\"}\\n"}'
        def callback(channel, method, properties, body):
            """Process the result and send acknowledge."""
            try:
                self.logger.info(
                    "Received " +
                    body.decode("utf-8") +
                    "at consumer end")
                probable_delete_records = json.loads(body.decode("utf-8"))
                if (probable_delete_records is not None):
                    self.logger.info(
                        "Processing following records in consumer " +
                        str(probable_delete_records))
                    validator = ObjectRecoveryValidator(
                        self.config, probable_delete_records, self.logger)
                    validator.process_results()
                # Ack only after validation; failures leave the message unacked.
                channel.basic_ack(delivery_tag=method.delivery_tag)
            except BaseException:
                self.logger.error(
                    "msg_queue callback failed." + traceback.format_exc())

        # Deliver at most one unacked message at a time to this consumer.
        self._channel.basic_qos(prefetch_count=1)

        # If service is running in non-daemon mode,
        # then consume messages till the queue is empty and then stop
        # else start consuming the message till service stops.
        if (queue_msg_count is not None):
            self.logger.info("Queue contains " + str(queue_msg_count) + " messages")
            for msg in range(queue_msg_count, 0, -1):
                method, properties, body = self._channel.basic_get(self._queue, no_ack=False)
                callback(self._channel, method, properties, body)
            self.logger.info("Consumed all messages and queue is empty")
            return
        else:
            # NOTE(review): this positional argument order matches pika < 1.0;
            # pika 1.x changed basic_consume's signature — verify the pinned
            # pika version.
            self._channel.basic_consume(callback, self._queue, no_ack=False)
            self._channel.start_consuming()

    def receive_data(self):
        """Receive data and create msg queue."""
        try:
            # Check if service is running in non-daemon mode
            # then consumer should stop once queue is empty.
            if not self.config.get_daemon_mode():
                queue_state = self._channel.queue_declare(
                    queue=self._queue, durable=self._durable)
                queue_msg_count = queue_state.method.message_count
                self.worker(queue_msg_count)
                return
            else:
                self._channel.queue_declare(
                    queue=self._queue, durable=self._durable)
                self.worker()
        except Exception as exception:
            err_msg = "error:%s, %s" % (exception, traceback.format_exc())
            # Raise an IEM alert for the connection failure before retrying.
            IEMutil("ERROR", IEMutil.RABBIT_MQ_CONN_FAILURE, IEMutil.RABBIT_MQ_CONN_FAILURE_STR)
            self.logger.error("msg_queue receive data failed." + str(err_msg))
            # NOTE(review): reconnect-and-retry by recursion, unbounded.
            self.connect()
            self.receive_data()

    def close(self):
        """Stop consumer and close rabbitmq connection."""
        try:
            self._channel.stop_consuming()
        finally:
            self._connection.close()
| StarcoderdataPython |
1623357 | import pygame as pg
class CoinDebris(object):
    """
    Coin that appears when you hit the question block.

    Rises about 50 px from its spawn point, falls back, then removes itself
    from the map's debris list; spins through a 4-frame animation meanwhile.
    """

    def __init__(self, x_pos, y_pos):
        self.rect = pg.Rect(x_pos, y_pos, 16, 28)
        self.y_vel = -2  # start moving upward (negative y is up on screen)
        self.y_offset = 0  # vertical displacement from the spawn point
        self.moving_up = True
        self.current_image = 0
        self.image_tick = 0
        # Four-frame spin animation.
        self.images = [
            pg.image.load('images/coin_an0.png').convert_alpha(),
            pg.image.load('images/coin_an1.png').convert_alpha(),
            pg.image.load('images/coin_an2.png').convert_alpha(),
            pg.image.load('images/coin_an3.png').convert_alpha()
        ]

    def update(self, core):
        """Advance the animation and the rise-then-fall movement; remove the
        coin from core.get_map().debris once back at the spawn height."""
        self.image_tick += 1
        # Advance the spin frame every 15 ticks, wrapping after frame 3.
        if self.image_tick % 15 == 0:
            self.current_image += 1
            if self.current_image == 4:
                self.current_image = 0
                self.image_tick = 0
        if self.moving_up:
            self.y_offset += self.y_vel
            self.rect.y += self.y_vel
            # Reverse direction once the coin has risen past 50 px.
            if self.y_offset < -50:
                self.moving_up = False
                self.y_vel = -self.y_vel
        else:
            self.y_offset += self.y_vel
            self.rect.y += self.y_vel
            # Back at the spawn height: the coin disappears.
            if self.y_offset == 0:
                core.get_map().debris.remove(self)

    def render(self, core):
        # Draw the current frame at the camera-adjusted position.
        core.screen.blit(self.images[self.current_image], core.get_map().get_camera().apply(self))
| StarcoderdataPython |
3259584 | """Datatypes."""
# pylint: disable=invalid-name,too-many-instance-attributes,missing-class-docstring
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class TrackingProperties:
    """Account/analytics attributes nested under ``DuolingoStats.trackingProperties``."""
    streak: int
    username: str
    creation_age: int
    is_age_restricted: bool
    creation_date: str
    num_followers: int
    gems: int
    user_id: int
    goal: int
    direction: str
    learning_reason: str
    num_sections_unlocked: int
    num_classrooms: int
    num_skills_unlocked: int
    num_observees: int
    achievements: List[str]
    lingots: int
    trial_account: bool
    prior_proficiency_onboarding: int
    level: int
    learning_language: str
    num_sessions_completed: int
    num_following: int
@dataclass
class XPGain:
    """A single XP-gain event (see ``DuolingoStats.xpGains``)."""
    eventType: Optional[str]
    xp: int
    time: int
@dataclass
class progressQuizHistory:
    """One progress-quiz attempt.

    Class name is kept lowercase to match the ``CurrentCourse.progressQuizHistory``
    field it populates; renaming would break callers.
    """
    endTime: int
    startTime: int
    score: float
    maxRowReachedDuringMigration: int
@dataclass
class Skill:
    """Progress counters for one skill of a course."""
    lessons: int
    name: str
    finishedLessons: int
    finishedLevels: int
    levels: int
    shortName: str
    # Defaulted fields — presumably optional in the payload; TODO confirm.
    accessible: bool = False
    strength: float = 0.0
@dataclass
class Course:
    """Summary of one enrolled course (see ``DuolingoStats.courses``)."""
    authorId: str
    title: str
    learningLanguage: str
    xp: int
    healthEnabled: bool
    fromLanguage: str
    crowns: int
    id: str
@dataclass
class CurrentCourse:
    """Detailed data for the course currently being studied."""
    status: str
    learningLanguage: str
    crowns: int
    xp: int
    wordsLearned: int
    id: str
    title: str
    numberOfWords: Optional[int]
    # Skills are grouped: a list of skill rows, each a list of Skill entries.
    skills: List[List[Skill]]
    progressQuizHistory: List[progressQuizHistory]
@dataclass
class Friend:
    """One friend, with their XP totals over several time windows."""
    username: str
    picture: str
    name: str
    monthlyXp: int
    weeklyXp: int
    totalXp: int
    id: int
    hasPlus: bool
@dataclass
class DuolingoStats:
    """Top-level statistics payload aggregating the datatypes above."""
    bio: str
    trackingProperties: TrackingProperties
    totalXp: int
    timezoneOffset: str
    inviteURL: str
    xpGains: List[XPGain]
    courses: List[Course]
    weeklyXp: int
    monthlyXp: int
    lingots: int
    streak: int
    name: str
    xpGoal: int
    email: str
    username: str
    currentCourse: CurrentCourse
    friends: List[Friend]
| StarcoderdataPython |
3301382 | # Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestUntitled():
    """Selenium IDE-exported UI test against a locally hosted addressbook app."""
    def setup_method(self, method):
        # Fresh Chrome session for every test method.
        self.driver = webdriver.Chrome()
        self.vars = {}
    def teardown_method(self, method):
        self.driver.quit()
    def test_untitled(self):
        """Navigate to the addressbook, log out, then log back in as admin."""
        self.driver.get("http://localhost/")
        self.driver.set_window_size(1330, 1010)
        self.driver.find_element(By.LINK_TEXT, "localhost").click()
        self.driver.find_element(By.LINK_TEXT, "addressbook").click()
        # "התנתקות" is the (Hebrew) logout link.
        self.driver.find_element(By.LINK_TEXT, "התנתקות").click()
        self.driver.find_element(By.ID, "LoginForm").click()
        # NOTE(review): "<PASSWORD>" looks like a redacted placeholder, not a
        # working credential — confirm before running this test.
        self.driver.find_element(By.NAME, "pass").send_keys("<PASSWORD>")
        self.driver.find_element(By.NAME, "user").send_keys("admin")
        self.driver.find_element(By.CSS_SELECTOR, "input:nth-child(7)").click()
| StarcoderdataPython |
3276777 | lista = []
pos = 0
x = 0
for i in range(6):
lista.append(float(input()))
for i in lista:
if i >= 0:
pos += 1
x = i + x
media = x / pos
media = round(media)
print("{} valores positivos".format(pos))
print(media)
| StarcoderdataPython |
1740464 | <gh_stars>1-10
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
    """Round-trip an empty-per-row int32 array through to_buffers/from_buffers."""
    source = ak.from_numpy(np.zeros((3, 0), dtype=np.int32))
    buffs = ak.to_buffers(source)
    rebuilt = ak.from_buffers(*buffs)
    assert ak.to_list(rebuilt) == [[], [], []]
| StarcoderdataPython |
3272978 | from io import BytesIO
from pprint import pformat
from unittest import mock
from urllib.response import addinfourl
from urllib.error import HTTPError
import pytest
from simplecep import CEPAddress
from simplecep.providers import ALL_PROVIDERS
from .providers_tests_data import providers_tests_data
from .captured_responses import captured_responses
def patched_urlopen(req, timeout):
    """Serve urlopen requests from the captured_responses.py fixtures.

    The incoming request is matched against the stored request dicts and the
    recorded success/error response is replayed, so tests never trigger real
    HTTP requests against the providers.
    """
    req_dict = {
        "full_url": req.full_url,
        "method": req.method,
        "headers": req.headers,
        "data": req.data,
    }
    for exchange in captured_responses:
        if exchange["request"] != req_dict:
            continue
        response = exchange["response"]
        if response["type"] == "success":
            # Replay the captured payload as a fake successful response.
            return addinfourl(BytesIO(response["data"]), {}, req_dict["full_url"])
        if response["type"] == "error":
            # Replay the captured payload as a fake HTTP error.
            raise HTTPError(
                req_dict["full_url"],
                response["status"],
                "Fake Error",
                {},
                BytesIO(response["data"]),
            )
    raise ValueError(
        f"No stored response found for:\n {pformat(req_dict)} request.\n\n"
        "Please run the script to capture real providers responses with:\n"
        "$ python -m tests.providers.capture_real_responses\n\nAnd try again."
    )
@pytest.mark.parametrize("input_output", providers_tests_data)
@pytest.mark.parametrize("cep_provider", ALL_PROVIDERS)
def test_expected_providers_responses(input_output, cep_provider):
    """Each provider must resolve every test CEP to its expected address."""
    # Replace the real urlopen with the patched version that skips real
    # requests and replays data stored in the captured_responses.py file.
    with mock.patch("simplecep.providers.commons.urlopen", wraps=patched_urlopen):
        cep = input_output["input"]
        expected_result = input_output["expected_result"]
        if expected_result is not None:
            expected_cep_address = CEPAddress(**expected_result)
        else:
            expected_cep_address = None
        cep_address = cep_provider(cep, timeout=1)
        if cep_address is not None:
            # use dict comparison to spot differences easily on pytest error output
            assert cep_address.to_dict() == expected_cep_address.to_dict()
        assert cep_address == expected_cep_address
| StarcoderdataPython |
3301232 | <reponame>IMrIDarkWolf/wagtail-opengraph-image-generator<filename>wagtail_opengraph_image_generator/conf.py
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Every setting is read from Django settings under this prefix, e.g.
# WAGTAIL_OG_IMAGE_GENERATOR_IMAGE_WIDTH.
PREFIX = 'WAGTAIL_OG_IMAGE_GENERATOR_'
# Fallback values used when the project does not override a setting.
DEFAULT_SETTINGS = {
    'IMAGE_WIDTH': 1200,
    'IMAGE_HEIGHT': 630,
    'IMAGE_PADDING': 32,
    'COLLECTION_NAME': 'OpenGraph',
    'TAB_NAME': 'OpenGraph Image',
    'PAGE_MODEL': 'wagtailcore.Page',
    'CREATE_AUTOMATICALLY': True,
    'FIELD_TITLE': '',
    'FIELD_SUBTITLE': '',
    'FIELD_BACKGROUND_IMAGE': '',
    'FIELD_LOGO': '',
}
def setting(name):
    """Return the project's override for *name*, falling back to the default."""
    return getattr(settings, PREFIX + name, DEFAULT_SETTINGS[name])
def get_page_model():
    """Resolve the configured PAGE_MODEL dotted path to a model class.

    Raises ImproperlyConfigured when the path is malformed or the model is
    not installed.
    """
    dotted_path = setting('PAGE_MODEL')
    try:
        return apps.get_model(dotted_path, require_ready=False)
    except ValueError:
        # apps.get_model raises ValueError for a malformed dotted path.
        raise ImproperlyConfigured(
            "WAGTAIL_OG_IMAGE_GENERATOR_PAGE_MODEL must be of the form 'app_label.model_name'"
        )
    except LookupError:
        raise ImproperlyConfigured(
            "WAGTAIL_OG_IMAGE_GENERATOR_PAGE_MODEL refers to model '{}' that has not been installed".format(
                dotted_path
            )
        )
| StarcoderdataPython |
3274072 | <filename>src/web/modules/dashboard/controllers/edit.py
# edit dashboard
from flask import request, render_template
import web.util.tools as tools
def get(p):
    """Render the dashboard editor; on POST, persist the submitted template."""
    if request.method == "POST":
        # Save the edited dashboard markup for this host/navigation entry.
        tools.set_conf(p['host'], p['navigation']['id'], "dashboard",
                       request.form["dashboard"])
    # Load the configured dashboard template, falling back to the default
    # (empty) template when nothing has been configured yet.
    stored = tools.get_conf(p["host"], p["navigation"]["id"], "dashboard")
    p['dashboard'] = stored or tools.read_file(
        "web/templates/dashboard/empty.html", p['base_dir'])
    return render_template("dashboard/edit.html", p=p)
| StarcoderdataPython |
3258160 | from Escritor import Escritor
from PostgreSQL.ConexionSQL import ConexionSQL
import json
import datetime
class EscritorTweets(Escritor):
    """Writer that persists tweets and their authors to PostgreSQL.

    Python 2 code (``except Exception, e`` / ``print`` statements). Keeps an
    in-memory cache mapping Twitter API user ids to DB ids, although the
    cache lookup is currently disabled (see getUserByAPIUserIDHash).
    """
    def __init__(self, searchID):
        super(EscritorTweets, self).__init__(searchID)
        conSql = ConexionSQL()
        self.conn = conSql.getConexion()
        self.cur = conSql.getCursor()
        # Maps Twitter API user id -> DB id to avoid repeated lookups.
        self.hashCache = {}
    def escribe(self, data):
        # Persist each tweet, making sure its author exists in the DB first.
        for tweet in data:
            # write the tweet to the DB
            id_api_twitter = tweet["user"]["id"]
            # this simple cache avoids many db accesses (currently always misses)
            hash_id = self.getUserByAPIUserIDHash(id_api_twitter)
            if hash_id != -1:
                self.escribeTweet(tweet, hash_id)
            else:
                db_id = self.getUserByAPIUserID(id_api_twitter)
                if db_id != -1:
                    #self.actualizaUsuario(tweet["user"])
                    self.escribeTweet(tweet, db_id)
                else:
                    # Unknown author: insert the user, then write the tweet.
                    db_id = self.insertaUsuario(tweet["user"])
                    self.escribeTweet(tweet, db_id)
                self.putUserByAPIUserIDHash(id_api_twitter, db_id)
    def escribeTweet(self, tweet, userid):
        # Insert the tweet only if its API id is not already stored.
        id_api_twitter = tweet["id"]
        db_id = self.getTweetSiExisteAPIID(id_api_twitter)
        if db_id != -1:
            #self.actualizaTweet(tweet)
            pass
        else:
            db_id = self.insertaTweet(tweet)
            #self.insertaJoinTable(self.searchID, db_id)
    '''
    def insertaJoinTable(self, id_search, id_tweet):
        query = """INSERT INTO join_search_tweet (id_search, id_tweet) SELECT %s, %s
        WHERE NOT EXISTS (SELECT * FROM join_search_tweet WHERE id_search=%s AND id_tweet=%s);"""
        try:
            self.cur.execute(query, [id_search, id_tweet, id_search, id_tweet])
            self.conn.commit()
            return True
        except Exception, e:
            print "error en insertaJoinTable Twitter"
            print str(e)
            return False
    '''
    def getTweetSiExisteAPIID(self, apiID):
        # Return the stored tweet id for this API id, or -1 if absent/on error.
        query = "SELECT id_twitter FROM tweets WHERE id_twitter = %s;"
        try:
            self.cur.execute(query, [apiID, ])
            row = self.cur.fetchone()
            if row is None:
                return -1
            return row[0]
        except Exception, e:
            print str(e)
            return -1
    def insertaTweet(self, data):
        # Insert one tweet row; returns its id_twitter, or -1 on failure.
        created_at = datetime.datetime.strptime(data["created_at"], '%a %b %d %H:%M:%S +0000 %Y')
        identificador = data["id"]
        text = data["text"]
        lang = data["lang"]
        user_id = data["user"]["id"]
        # Retweet count, when present in the payload.
        retweet_count = 0
        if "retweet_count" in data:
            retweet_count = data["retweet_count"]
        # Favourite count, when present in the payload.
        favorite_count = 0
        if "favorite_count" in data:
            favorite_count = data["favorite_count"]
        # If this is a retweet, keep a reference to the original tweet.
        is_rt = False
        rt_id = 0
        if "retweeted_status" in data:
            is_rt = True
            rt_id = data["retweeted_status"]["id"]
        media = ""
        query = """INSERT INTO tweets (id_twitter, status, tuser, created_at, lang, is_retweet, orig_tweet, favorite_count, retweet_count, media_url)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id_twitter;"""
        #query = "INSERT INTO tweets_entrenamiento (id_tweet,clase) VALUES (%s,%s);"
        try:
            self.cur.execute(query, [identificador, text[:160], user_id, created_at,lang[:3], is_rt, rt_id,favorite_count,retweet_count,media])
            Id = self.cur.fetchone()[0]
            self.conn.commit()
            return Id
        except Exception, e:
            print "error en insertaTweet Twitter"
            print str(e)
            return -1
    def actualizaTweet(self, tweet):
        # Refresh the mutable counters of an existing tweet row.
        query = """UPDATE tweets SET favorite_count=%s, retweet_count=%s WHERE id_twitter=%s;"""
        try:
            self.cur.execute(query, [tweet["favorite_count"],tweet["retweet_count"],tweet["id"]])
            self.conn.commit()
            return True
        except Exception, e:
            print "error en actualizaTweet Twitter"
            print str(e)
            return False
    def getUserByAPIUserIDHash(self, apiUserID):
        # Cache lookup is disabled: always report a miss. The original
        # implementation survives below as an inert string literal.
        return -1
        """if apiUserID in self.hashCache:
            return self.hashCache[apiUserID]
        return -1
        """
    def putUserByAPIUserIDHash(self, apiUserID, identificador):
        # Remember the DB id for this API user id.
        self.hashCache[apiUserID] = identificador
    def getUserByAPIUserID(self, apiUserID):
        # Return the stored user id for this API id, or -1 if absent/on error.
        #select returning id
        query = "SELECT id_twitter FROM users WHERE id_twitter = %s;"
        try:
            self.cur.execute(query, [apiUserID, ])
            row = self.cur.fetchone()
            if row is None:
                return -1
            return row[0]
        except Exception, e:
            print str(e)
            return -1
    def insertaUsuario(self, data):
        # Insert one user row; returns its id_twitter, or -1 on failure.
        identificador = data["id"]
        #name
        name = data["name"]
        #screen name
        screen_name = data["screen_name"]
        #location
        location = data["location"]
        #followers_count
        followers_count = data["followers_count"]
        #created_at
        created_at = datetime.datetime.strptime(data["created_at"], '%a %b %d %H:%M:%S +0000 %Y')
        query = """INSERT INTO users (id_twitter, name, screen_name, followers, location, created_at)
        VALUES (%s, %s, %s, %s, %s, %s) RETURNING id_twitter;"""
        #query = "INSERT INTO tweets_entrenamiento (id_tweet,clase) VALUES (%s,%s);"
        try:
            self.cur.execute(query, [identificador, name[:20], screen_name[:15], followers_count, location[:50], created_at])
            Id = self.cur.fetchone()[0]
            self.conn.commit()
            return Id
        except Exception, e:
            print "error en insertaUsuario Twitter"
            print str(e)
            return -1
    def actualizaUsuario(self, usuario):
        # Refresh the mutable columns of an existing user row.
        #update
        query = """UPDATE users SET name=%s, followers=%s, location=%s WHERE id_twitter=%s;"""
        try:
            self.cur.execute(query, [usuario["name"][:20], usuario["followers_count"], usuario["location"][:50], usuario["id"]])
            self.conn.commit()
            return True
        except Exception, e:
            print "error en actualizaUsuario Twitter"
            print str(e)
            return False
| StarcoderdataPython |
124044 | <filename>tools/benchmarks.py
# -*- coding: utf-8 -*-
"""
@date: 2020/11/4 下午2:06
@file: benchmarks.py
@author: zj
@description:
"""
import time
import numpy as np
import torch
from zcls.util.distributed import get_device, get_local_rank
from zcls.util.metrics import compute_num_flops
from zcls.config import cfg
from zcls.model.recognizers.build import build_recognizer
def compute_model_time(data_shape, model, device):
    """Print average per-iteration wall time for 100 forward passes.

    Reports both the total loop time (including input generation/transfer)
    and the pure model compute time, averaged over the iterations.
    """
    model = model.to(device)
    compute_time = 0.0
    iterations = 100
    loop_start = time.time()
    for _ in range(iterations):
        batch = torch.randn(data_shape)
        tic = time.time()
        model(batch.to(device=device, non_blocking=True))
        compute_time += time.time() - tic
    total_time = time.time() - loop_start
    print(f'one process need {total_time / iterations:.3f}s, model compute need: {compute_time / iterations:.3f}s')
def main(data_shape, config_file, mobile_name):
    """Build the recognizer from *config_file*, report FLOPs/params, then
    time CPU and GPU inference for input of *data_shape*."""
    # Seed NumPy/PyTorch for reproducible weight init and inputs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    cfg.merge_from_file(config_file)
    gpu_device = torch.device('cuda:0')
    cpu_device = torch.device('cpu')
    model = build_recognizer(cfg, cpu_device)
    model.eval()
    data = torch.randn(data_shape).to(device=cpu_device, non_blocking=True)
    GFlops, params_size = compute_num_flops(model, data)
    print(f'{mobile_name} ' + '*' * 10)
    print(f'device: {cpu_device}')
    print(f'GFlops: {GFlops:.3f}G')
    print(f'Params Size: {params_size:.3f}MB')
    # Rebuild a fresh model for the timing runs.
    model = build_recognizer(cfg, cpu_device)
    model.eval()
    print(f'compute cpu infer time')
    compute_model_time(data_shape, model, cpu_device)
    print(f'compute gpu infer time')
    compute_model_time(data_shape, model, gpu_device)
    # Free the model and cached GPU memory before the next benchmark.
    del model
    torch.cuda.empty_cache()
def mobilenet():
    """Benchmark the MobileNet / MNasNet configurations at 224x224."""
    data_shape = (1, 3, 224, 224)
    # (config file, display name) pairs, benchmarked in this order.
    benchmarks = [
        ('configs/benchmarks/lightweight/mbv1_cifar100_224_e100.yaml', 'MobileNetV1_1.0x'),
        ('configs/benchmarks/lightweight/mbv2_cifar100_224_e100.yaml', 'MobileNetV2_1.0x'),
        ('configs/benchmarks/lightweight/mbv2_torchvision_cifar100_224_e100.yaml', 'Torchvision_MobileNetV2_1.0x'),
        ('configs/benchmarks/lightweight/mnasnet_a1_1_3_cifar100_224_e100.yaml', 'MNasNet_a1_1.3x'),
        ('configs/benchmarks/lightweight/mnasnet_a1_1_3_se_cifar100_224_e100.yaml', 'MNasNet_SE_a1_1.3x'),
        ('configs/benchmarks/lightweight/mnasnet_b1_1_3_cifar100_224_e100_sgd.yaml', 'MNasNet_b1_1.3x'),
        ('configs/benchmarks/lightweight/mnasnet_b1_1_3_torchvision_cifar100_224_e100_sgd.yaml', 'Torchvision_MNasNet_b1_1.3x'),
        ('configs/benchmarks/lightweight/mbv3_large_cifar100_224_e100_sgd.yaml', 'MobileNetV3_Large_1.0x'),
        ('configs/benchmarks/lightweight/mbv3_large_se_cifar100_224_e100_sgd.yaml', 'MobileNetV3_SE_Large_1.0x'),
        ('configs/benchmarks/lightweight/mbv3_large_se_hsigmoid_cifar100_224_e100.yaml', 'MobileNetV3_SE_HSigmoid_Large_1.0x'),
        ('configs/benchmarks/lightweight/mbv3_small_cifar100_224_e100_sgd.yaml', 'MobileNetV3_Small_1.0x'),
        ('configs/benchmarks/lightweight/mbv3_small_se_cifar100_224_e100.yaml', 'MobileNetV3_SE_Small_1.0x'),
        ('configs/benchmarks/lightweight/mbv3_small_se_hsigmoid_cifar100_224_e100.yaml', 'MobileNetV3_SE_HSigmoid_Small_1.0x'),
    ]
    for cfg_file, name in benchmarks:
        main(data_shape, cfg_file, name)
def shufflenet():
    """Benchmark the ShuffleNet configurations at 224x224."""
    data_shape = (1, 3, 224, 224)
    # (config file, display name) pairs, benchmarked in this order.
    benchmarks = [
        ('configs/benchmarks/lightweight/sfv1_3g1x_cifar100_224_e100.yaml', 'ShuffleNetV1_1.0x'),
        ('configs/benchmarks/lightweight/sfv2_x2_0_cifar100_224_e100.yaml', 'ShuffleNetV2_1.0x'),
        ('configs/benchmarks/lightweight/sfv2_torchvision_cifar100_224_e100.yaml', 'Torchvision_ShuffleNetV2_1.0x'),
    ]
    for cfg_file, name in benchmarks:
        main(data_shape, cfg_file, name)
def resnet():
    """Benchmark the ResNet-family configurations at 224x224."""
    data_shape = (1, 3, 224, 224)
    # (config file, display name) pairs, benchmarked in this order.
    # 'Torchvisoin_ResNeXt_32x4d' reproduces the original label verbatim.
    benchmarks = [
        ('configs/benchmarks/resnet/r50_cifar100_224_e100_rmsprop.yaml', 'ResNet50'),
        ('configs/benchmarks/resnet/r50_torchvision_cifar100_224_e100_rmsprop.yaml', 'Torchvision_ResNet50'),
        ('configs/benchmarks/resnet/rd50_cifar100_224_e100_rmsprop.yaml', 'ResNetD50'),
        ('configs/benchmarks/resnet/rd50_cifar100_224_e100_sgd.yaml', 'ResNetD50'),
        ('configs/benchmarks/resnet/rxtd50_32x4d_avg_cifar100_224_e100_rmsprop.yaml', 'ResNeXtD50_32x4d_avg'),
        ('configs/benchmarks/resnet/rxtd50_32x4d_fast_avg_cifar100_224_e100_rmsprop.yaml', 'ResNeXtD50_32x4d_fast_avg'),
        ('configs/benchmarks/resnet/rxt50_32x4d_cifar100_224_e100_rmsprop.yaml', 'ResNeXt50_32x4d'),
        ('configs/benchmarks/resnet/rxt50_32x4d_cifar100_224_e100_sgd.yaml', 'ResNeXt50_32x4d'),
        ('configs/benchmarks/resnet/rxt50_32x4d_torchvision_cifar100_224_e100_rmsprop.yaml', 'Torchvisoin_ResNeXt_32x4d'),
        ('configs/benchmarks/resnet/rxt50_32x4d_torchvision_cifar100_224_e100_sgd.yaml', 'Torchvision_ResNeXt50_32x4d'),
        ('configs/benchmarks/resnet/rxtd50_32x4d_cifar100_224_e100_rmsprop.yaml', 'ResNeXtD50_32x4d'),
        ('configs/benchmarks/resnet/rxtd50_32x4d_cifar100_224_e100_sgd.yaml', 'ResNeXtD50_32x4d'),
        ('configs/benchmarks/resnet/sknet50_cifar100_224_e100_rmsprop.yaml', 'SKNet50'),
        ('configs/benchmarks/resnet/rstd50_2s2x40d_cifar100_224_e100_rmsprop.yaml', 'ResNeSt50_2s2x40d'),
        ('configs/benchmarks/resnet/rstd50_2s2x40d_fast_cifar100_224_e100_rmsprop.yaml', 'ResNeSt50_fast_2s2x40d'),
        ('configs/benchmarks/resnet/rstd50_2s2x40d_official_cifar100_224_e100_rmsprop.yaml', 'Torchvision_ResNeSt50_2s2x40d'),
        ('configs/benchmarks/resnet/rstd50_2s2x40d_fast_official_cifar100_224_e100_rmsprop.yaml', 'Torchvision_ResNeSt50_fast_2s2x40d'),
    ]
    for cfg_file, name in benchmarks:
        main(data_shape, cfg_file, name)
if __name__ == '__main__':
    # Only the ResNet suite is currently enabled; the lightweight suites
    # are kept here commented out for convenience.
    # print('#' * 30)
    # mobilenet()
    # print('#' * 30)
    # shufflenet()
    print('#' * 30)
    resnet()
| StarcoderdataPython |
1734433 | <reponame>Dokeey/Buy-Sell
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.db.models import Sum, Count, FloatField, IntegerField
from django.db.models.functions import Cast
from django.http import Http404, HttpResponse, JsonResponse ,HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, redirect, render_to_response
from django.template.loader import render_to_string
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DeleteView, UpdateView, CreateView, RedirectView, TemplateView
from django.views.generic.list import MultipleObjectMixin
from hitcount.models import HitCount
from hitcount.views import HitCountMixin
from rank import DenseRank,UpperRank,Rank
from trade.models import Item
from accounts.supporter import send_mail
from django.conf import settings
from mypage.models import Follow
from trade.models import Item, Order
from .models import StoreProfile, QuestionComment, StoreGrade
from .forms import StoreProfileForm, StoreQuestionForm, StoreGradeForm
from django.db.models.functions import Coalesce
# @login_required
# def my_store_profile(request):
# stores = get_object_or_404(StoreProfile, user=request.user)
# return render(request, 'store/layout.html',{'stores': stores})
class StoreError(TemplateView):
    """Static fallback page; the ranking views redirect here when no data exists."""
    template_name = 'store/store_error.html'
#============= 인기가게 =============
class StarStoreSearchList(ListView):
    """Search store profiles by name, paginated 6 per page."""
    model = StoreProfile
    template_name = 'store/star_store_search.html'
    context_object_name = 'star_search'
    paginate_by = 6
    def get(self, request, *args, **kwargs):
        # Normalise the query; an empty/whitespace-only query redirects back
        # (to 'next' or the hit ranking) with an informational message.
        self.query = self.request.GET.get('query', '').strip()
        if self.query.replace(' ', '') == '':
            self.query = ''
        if self.query == '':
            messages.info(self.request, '검색어를 입력해주세요')
            if self.request.GET.get('next'):
                url = self.request.GET.get('next')
            else:
                url = "store:star_store_hit"
            return redirect(url)
        return super().get(request, *args, **kwargs)
    def get_queryset(self):
        # Case-insensitive substring match on the store name.
        self.qs = super().get_queryset()
        if self.query:
            qs = self.qs.filter(name__icontains=self.query)
        # NOTE(review): `qs` is unbound when self.query is '' — unreachable in
        # practice because get() redirects before reaching the queryset.
        return qs
    def get_context_data(self, *, object_list=None, **kwargs):
        context = super(StarStoreSearchList, self).get_context_data()
        # pagination: expose a sliding window of page numbers
        paginator = context['paginator']
        page_numbers_range = 5  # Display only 5 page numbers
        max_index = len(paginator.page_range)
        page = self.request.GET.get('page')
        current_page = int(page) if page else 1
        start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
        end_index = start_index + page_numbers_range
        if end_index >= max_index:
            end_index = max_index
        page_range = paginator.page_range[start_index:end_index]
        context['page_range'] = page_range
        # Total number of matches for display.
        context['ctn'] = self.get_queryset().count()
        if self.query:
            context['query'] = self.query
        return context
class StarStoreHitListView(ListView):
    """Rank store profiles by hit (view) count."""
    template_name = 'store/star_store_hit.html'
    model = StoreProfile
    def get(self, request, *args, **kwargs):
        # Without any recorded hits there is nothing to rank.
        ctype = ContentType.objects.get_for_model(StoreProfile)
        search_hit = HitCount.objects.filter(content_type=ctype).values('object_pk')
        if not search_hit:
            url = 'store:store_error'
            return redirect(url)
        return super().get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        ctype = ContentType.objects.get_for_model(StoreProfile)
        # search_hit = HitCount.objects.filter(content_type=ctype).values('object_pk','hits').annotate(rank=DenseRank('hits'))
        search_hit = StoreProfile.objects.prefetch_related('hit_count_generic')
        if self.request.user.is_authenticated:
            # Current user's rank = number of stores with strictly more hits + 1.
            my_hits = HitCount.objects.get_for_object(self.request.user.storeprofile).hits
            context['my_hits'] = my_hits
            context['my_rank'] = search_hit.filter(hit_count_generic__hits__gt=my_hits).count() + 1
        else:
            context['my_rank'] = '-'
        search_hit = search_hit.filter(hit_count_generic__content_type=ctype)
        search_hit = search_hit.annotate(rank=DenseRank('hit_count_generic__hits'))
        search_hit = search_hit.order_by('rank')
        context['first'] = search_hit.first()
        context['first_rank'] = context['first'].hit_count_generic.first().hits
        # context['hit_count'] = HitCount.objects.get_for_object().hits
        # for i in search_hit:
        #     if i['object_pk']:
        #         i['store'] = StoreProfile.objects.get(pk=i['object_pk'])
        #     if self.request.user.is_active:
        #         if i['object_pk'] == self.request.user.storeprofile.pk:
        #             context['my_hit'] = i['rank']
        # if context['my_rank'] == '':
        #     context['my_rank'] = '-'
        context['stores'] = search_hit
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
class StarStoreGradeListView(ListView):
    """Rank store profiles by average customer rating (top 5 shown)."""
    template_name = 'store/star_store_grade.html'
    model = StoreProfile
    def get(self, request, *args, **kwargs):
        # Without any recorded grades there is nothing to rank.
        search_grade = StoreGrade.objects.values('store_profile')
        if not search_grade:
            url = 'store:store_error'
            return redirect(url)
        return super().get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # search_grade = StoreGrade.objects.values('pk','store_profile').annotate(
        #     count=Count('rating'),
        #     rating_sum=Cast(Sum('rating'), FloatField())/Cast(Count('rating'), FloatField()),
        #     rank=Cast(DenseRank('rating_sum'), IntegerField())
        # ).order_by('-rating_sum', '-count')
        # Annotate every store with its rating count, mean rating
        # (0 when unrated, via Coalesce) and dense rank on that mean.
        search_grade = StoreProfile.objects.all()
        search_grade = search_grade.annotate(
            count=Count('storegrade__rating'),
            rating_sum=Coalesce(Cast(Sum('storegrade__rating'), FloatField()) / Cast(Count('storegrade__rating'), FloatField()),0),
            rank=Cast(DenseRank('rating_sum'), IntegerField())
        )
        if self.request.user.is_authenticated:
            # Linear scan for the current user's own store stats.
            for my in search_grade :
                if my == self.request.user.storeprofile:
                    context['my_rank'] = my.rank
                    context['my_grade'] = my.rating_sum
                    context['my_grade_count'] = my.count
        else:
            context['my_rank'] = '-'
        search_grade = search_grade.order_by('-rating_sum', '-count')[:5]
        context['first'] = search_grade.first()
        # for i in search_grade:
        #     if i['store_profile']:
        #         i['store'] = StoreProfile.objects.get(pk=i['store_profile'])
        #     if self.request.user.is_authenticated:
        #         if i['store_profile'] == self.request.user.storeprofile.pk:
        #             context['my_grade'] = i['rank']
        context['stores'] = search_grade
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
class StarStoreSellListView(ListView):
    """Rank store profiles by number of successfully completed sales (top 5)."""
    template_name = 'store/star_store_sell.html'
    model = StoreProfile
    def get(self, request, *args, **kwargs):
        # Without any successful orders there is nothing to rank.
        search_sell = Order.objects.filter(status='success').values('item__user')
        if not search_sell:
            url = 'store:store_error'
            return redirect(url)
        return super().get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # search_sell = Order.objects.filter(status='success').values('item__user').annotate(count=Count('status'),
        #                                                                                    rank=DenseRank(
        #                                                                                        'count')).order_by('rank')
        # Stores with at least one successful order, annotated with the
        # number of successful sales and a dense rank on that count.
        search_sell = StoreProfile.objects.filter(user__item__order__status='success')
        # search_sell=search_sell.values('pk','user__item')
        search_sell=search_sell.annotate(count=Count('user__item__order__status'),rank=DenseRank('count'))
        context['my_rank'] = ''
        if self.request.user.is_authenticated:
            # Linear scan for the current user's own store; '-' when the
            # store has no successful sales (and so is absent here).
            for sell in search_sell :
                if sell == self.request.user.storeprofile:
                    context['my_rank'] = sell.rank
                    context['my_sell'] = sell.count
            if context['my_rank'] == '' :
                context['my_rank'] = '-'
        else:
            context['my_rank'] = '-'
        search_sell=search_sell.order_by('rank')[:5]
        context['first'] = search_sell.first()
        # for i in search_sell:
        #     if i['rank']:
        #         i['store'] = StoreProfile.objects.get(user_id=i['item__user'])
        #     if self.request.user.is_authenticated:
        #         if i['item__user'] == self.request.user.pk:
        #             context['my_sell'] = i['rank']
        context['stores'] = search_sell
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
    # def render_to_response(self, context, **response_kwargs):
    #     if context['stores'] == '<QuerySet []>':
    #
    #         return redirect('store:store_error')
    #     return super().render_to_response(
    #         context, **response_kwargs
    #     )
class StarStoreFollowListView(ListView):
    """Rank store profiles by follower count (top 5 shown)."""
    template_name = 'store/star_store_follow.html'
    model = StoreProfile
    def get(self, request, *args, **kwargs):
        # Without any follow records there is nothing to rank.
        search_follow = Follow.objects.values('store')
        if not search_follow:
            url = 'store:store_error'
            return redirect(url)
        return super().get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Annotate every store with its follower count and dense rank.
        search_follow = StoreProfile.objects.select_related('user')
        search_follow = search_follow.annotate(
            count=Count('follow__store'),
            rank=DenseRank('count')
        )
        # follow_dict = { follow.id:follow.rank for follow in search_follow }
        if self.request.user.is_authenticated:
            # Linear scan for the current user's own store stats.
            for foll in search_follow:
                if foll.pk == self.request.user.storeprofile.pk :
                    context['my_rank'] = foll.rank
                    context['my_foll'] = foll.count
                    break
            # context['my_rank'] = follow_dict[self.request.user.id]
        else:
            context['my_rank'] = '-'
        search_follow = search_follow.order_by('-count')[:5]
        # for i in search_follow:
        #     if i['store']:
        #         i['store'] = StoreProfile.objects.get(pk=i['store'])
        #
        context['first'] = search_follow.first()
        context['stores'] = search_follow
        # follow = Follow.objects.values_list('store',flat=True).annotate(foll_count=Count('store')).order_by('-foll_count')
        # follow_list = []
        # for i in range(0, follow.count()):
        #     follow_list.append(StoreProfile.objects.get(pk=(follow[i])))
        # context['follows'] = follow_list
        #
        # if self.request.user.is_active:
        #     if self.request.user.storeprofile.follow_set.all().count() != 0:
        #         context['my_follow'] = follow_list.index(self.request.user.storeprofile) + 1
        #     else:
        #         context['my_follow'] = '-'
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
#============= 스토어 프로필 =============
class StoreSellListView(ListView):
    """List one store's items, 24 per page, with user-selectable sorting."""
    model = Item
    template_name = 'store/store_sell_list.html'
    paginate_by = 24
    context_object_name = 'items'
    ordering = '-created_at'
    def get_ordering(self):
        # Map the 'sort' query parameter to an ORM ordering expression;
        # unknown values are passed through unchanged.
        ordering = self.request.GET.get('sort','-created_at')
        if ordering == 'looks':
            # NOTE(review): ascending hit count — most-viewed-last; confirm
            # whether '-hit_count_generic' was intended.
            ordering = 'hit_count_generic'
        elif ordering == 'hprice':
            ordering = '-amount'
        elif ordering == 'lprice':
            ordering = 'amount'
        return ordering
    # context_object_name = 'stores'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Sliding window of page numbers for the pagination widget.
        paginator = context['paginator']
        page_numbers_range = 5  # Display only 5 page numbers
        max_index = len(paginator.page_range)
        page = self.request.GET.get('page')
        current_page = int(page) if page else 1
        start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
        end_index = start_index + page_numbers_range
        if end_index >= max_index:
            end_index = max_index
        page_range = paginator.page_range[start_index:end_index]
        context['prev'] = start_index - 4
        context['next'] = end_index + 1
        context['last_page'] = max_index
        context['page_range'] = page_range
        context['stores'] = self.store
        # Record a hit (page view) for this store profile.
        hit_count = HitCount.objects.get_for_object(context['stores'])
        context['hit_count_response'] = HitCountMixin.hit_count(self.request, hit_count)
        context['sort'] = self.request.GET.get('sort','-created_at')
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
    def get_queryset(self):
        # Resolve the store from the URL and restrict items to its owner.
        self.store = StoreProfile.objects.get(pk=self.kwargs['pk'])
        self.queryset = self.store.user.item_set.all()
        return super().get_queryset()
@method_decorator(login_required, name='dispatch')
class StoreProfileEditView(UpdateView):
    """Edit the logged-in user's own store profile (AJAX-style response)."""
    form_class = StoreProfileForm
    model = StoreProfile
    template_name = 'store/store_profile_edit.html'
    def get_object(self, queryset=None):
        # Always edit the requesting user's profile, ignoring any URL pk.
        return self.request.user.storeprofile
    def form_valid(self, form):
        # TODO: add further validation checks
        self.object = form.save()
        # JSON response instead of a redirect: the form is submitted via AJAX.
        return JsonResponse({'is_valid' : True}, status=200)
    def get_success_url(self, **kwargs):
        return reverse_lazy("store:store_sell_list", kwargs={'pk': self.request.user.storeprofile.pk})
#============= Store Question ==============
@method_decorator(login_required, name='dispatch')
class StoreQuestionLCView(CreateView):
    """List and create questions (and replies) on a store's Q&A page."""
    model = QuestionComment
    form_class = StoreQuestionForm
    template_name = 'store/store_question.html'
    ordering = '-created_at'
    def form_valid(self, form):
        parent_obj = None
        try:
            # parent id comes from a hidden form field (present for replies)
            parent_id = int(self.request.POST.get('parent_id'))
        except:
            # NOTE(review): bare except also hides unexpected errors; the
            # intent appears to be "no/invalid parent id -> top-level comment".
            parent_id=None
        if parent_id:
            parent_obj = QuestionComment.objects.get(id=parent_id)
        if parent_obj:
            recomm = form.save(commit=False)
            recomm.parent = parent_obj
        comment = form.save(commit=False)
        comment.store_profile_id= self.kwargs['pk']
        comment.author=self.request.user
        comment.save()
        # Send a store-question notification mail (on every 5th comment,
        # i.e. when count % 5 == 1 after saving).
        self.object = form.save()
        if self.model.objects.filter(store_profile=self.object.store_profile).count() % 5 == 1:
            send_mail(
                '[Buy & Sell] {}님의 가게에 문의가 등록되었습니다.'.format(self.object.store_profile.user.username),
                [self.object.store_profile.user.email],
                html=render_to_string('store/store_comment_alert.html', {
                    'user': self.object.store_profile.user,
                    'domain': self.request.META['HTTP_HOST'],
                    'store': self.object.store_profile,
                }),
            )
        return redirect(self.get_success_url())
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # 'sort' selects between all top-level comments and only mine.
        self.sort = self.request.GET.get('sort', '')
        context['sort'] = self.sort
        context['form'] = self.form_class
        context['comms'] = self.model.objects.filter(store_profile_id=self.kwargs['pk'], parent__isnull=True)
        if self.sort == 'all':
            context['comms'] = self.model.objects.filter(store_profile_id=self.kwargs['pk'], parent__isnull=True)
        elif self.sort == 'my':
            context['comms'] = self.model.objects.filter(author=self.request.user,store_profile_id=self.kwargs['pk'], parent__isnull=True)
        context['stores'] = get_object_or_404(StoreProfile, pk=self.kwargs['pk'])
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
    def get_success_url(self):
        return reverse_lazy('store:store_question', kwargs={'pk': self.kwargs.get('pk')})
@method_decorator(login_required, name='dispatch')
class StoreQuestionEditView(UpdateView):
    """Edit a question comment in place (AJAX endpoint).

    NOTE(review): unlike the delete view there is no author check here --
    confirm that edit permission is enforced elsewhere.
    """
    form_class = StoreQuestionForm
    model = QuestionComment
    pk_url_kwarg = 'cid'
    template_name = 'store/store_question_edit.html'
    def get_object(self, queryset=None):
        return self.model.objects.get(pk=self.kwargs.get(self.pk_url_kwarg))
    def form_valid(self, form):
        # form.save() returns the edited comment; the original re-fetched
        # the same row with a redundant extra query.
        self.object = form.save()
        data = {'id': self.object.id, 'msg': form.cleaned_data['comment']}
        return JsonResponse(data)
@method_decorator(login_required, name='dispatch')
class StoreQuestionDelView(DeleteView):
    """Delete a question comment after verifying the requester owns it."""
    model = QuestionComment
    template_name = 'store/store_question_delete.html'
    pk_url_kwarg = 'cid'
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Only the comment's author may reach the confirmation page.
        if self.request.user == self.object.author:
            return super().get(request, *args, **kwargs)
        messages.error(self.request, '잘못된 접근 입니다.')
        return redirect('store:store_question', self.kwargs.get('pk'))
    def get_success_url(self):
        return reverse_lazy("store:store_question", kwargs={'pk': self.kwargs['pk']})
#=============Store Grade===============
class StoreGradeListView(ListView):
    """Paginated list of ratings/reviews for one store."""
    model = StoreGrade
    template_name = 'store/store_grade.html'
    ordering = '-created_at'
    context_object_name = 'grades'
    paginate_by = 10
    # 'sort' query parameter -> ORDER BY field.
    _SORTS = {
        'recent': '-created_at',
        'past': 'created_at',
        'hgrade': '-rating',
        'rgrade': 'rating',
    }
    # 'gsort' query parameter -> star-rating filter.
    _RATINGS = {'five': 5, 'four': 4, 'three': 3, 'two': 2, 'one': 1}
    def get_ordering(self):
        """Map the ``sort`` parameter to an ordering field.

        BUG FIX: the original raised UnboundLocalError for any
        unrecognised sort value; fall back to newest-first instead.
        """
        self.sort = self.request.GET.get('sort', 'recent')
        return self._SORTS.get(self.sort, '-created_at')
    def get_queryset(self):
        """Return this store's grades, optionally filtered to one rating."""
        self.gsort = self.request.GET.get('gsort', '')
        qs = self.model.objects.filter(store_profile=self.kwargs['pk'])
        rating = self._RATINGS.get(self.gsort)
        if rating is not None:
            qs = qs.filter(rating=rating)
        self.queryset = qs
        return super().get_queryset()
    def get_context_data(self, **kwargs):
        """Add a 5-wide sliding pagination window plus store context."""
        context = super().get_context_data(**kwargs)
        paginator = context['paginator']
        page_numbers_range = 5  # Display only 5 page numbers
        max_index = len(paginator.page_range)
        page = self.request.GET.get('page')
        current_page = int(page) if page else 1
        # First page number of the window containing the current page.
        start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
        end_index = min(start_index + page_numbers_range, max_index)
        context['page_range'] = paginator.page_range[start_index:end_index]
        # 404 for an unknown store pk (the original raised DoesNotExist -> 500).
        context['stores'] = get_object_or_404(StoreProfile, pk=self.kwargs['pk'])
        context['sort'] = self.request.GET.get('sort', 'recent')
        context['kakao_key'] = settings.KAKAO_KEY_JS
        return context
@method_decorator(login_required, name='dispatch')
class StoreGradeCreateView(CreateView):
    """Create a rating/review for a purchased item (one review per item)."""
    model = StoreGrade
    form_class = StoreGradeForm
    template_name = 'store/store_grade_new.html'
    def get(self, request, *args, **kwargs):
        # BUG FIX: the original wrapped objects.get() in a bare ``except``,
        # which also swallowed MultipleObjectsReturned and unrelated errors.
        # An existence check is both safer and cheaper.
        # NOTE(review): the check is per item, not per (item, author) --
        # confirm that only one review per item is really intended.
        if self.model.objects.filter(store_item_id=kwargs['item_id']).exists():
            messages.error(request, '이미 리뷰를 작성하셨습니다.')
            return redirect("trade:order_history")
        items = get_object_or_404(Item, pk=self.kwargs['item_id'])
        return render(request, self.template_name, {'form': self.form_class, 'items': items})
    def form_valid(self, form):
        gradeform = form.save(commit=False)
        gradeform.author = self.request.user
        gradeform.store_profile_id = self.kwargs['pk']
        gradeform.store_item_id = self.kwargs['item_id']
        # NOTE(review): rating comes straight from POST without validation.
        gradeform.rating = self.request.POST.get('rating')
        gradeform.save()
        return redirect('store:store_grade', self.kwargs['pk'])
@method_decorator(login_required, name='dispatch')
class StoreGradeEditView(UpdateView):
    """Edit an existing rating/review."""
    form_class = StoreGradeForm
    model = StoreGrade
    template_name = 'store/store_grade_new.html'
    pk_url_kwarg = 'gid'
    def get_context_data(self, **kwargs):
        """Add the reviewed item to the template context.

        FIX: pass **kwargs through to super() (the original dropped them)
        and use the modern zero-argument super().
        """
        context = super().get_context_data(**kwargs)
        grade = self.model.objects.get(pk=self.kwargs['gid'])
        context['items'] = get_object_or_404(Item, pk=grade.store_item_id)
        return context
    def form_valid(self, form):
        gradeform = form.save(commit=False)
        # NOTE(review): rating comes straight from POST without validation.
        gradeform.rating = self.request.POST.get('rating')
        gradeform.save()
        return redirect('store:store_grade', self.kwargs['pk'])
    def get_success_url(self, **kwargs):
        # Effectively unused: form_valid() redirects directly.
        return reverse_lazy("store:store_grade", kwargs={'pk': self.kwargs['pk']})
@method_decorator(login_required, name='dispatch')
class StoreGradeDelView(DeleteView):
    """Delete a store rating after verifying the requester owns it."""
    model = StoreGrade
    template_name = 'store/store_question_delete.html'
    pk_url_kwarg = 'gid'
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Only the review's author may open the confirmation page.
        if self.request.user == self.object.author:
            return super().get(request, *args, **kwargs)
        messages.error(self.request, '잘못된 접근 입니다.')
        return redirect('store:store_grade', self.kwargs.get('pk'))
    def get_success_url(self):
        return reverse_lazy("store:store_grade", kwargs={'pk': self.kwargs['pk']})
| StarcoderdataPython |
1744237 | import pytest
from django import forms
from getin.forms import InvitationCodeField
class RegistrationForm(forms.Form):
    """Minimal form exposing only the field under test."""
    invitation_code = InvitationCodeField()
@pytest.mark.parametrize(
    "invitation, result",
    [
        ("unsent_invitation", False),
        ("sent_invitation", True),
        ("consumed_invitation", False),
        ("expired_invitation", False),
    ],
)
def test_invitation_code_field(invitation, result, request):
    """The field validates only codes of invitations in the *sent* state."""
    # ``invitation`` names a pytest fixture; resolve it dynamically.
    form = RegistrationForm(
        {"invitation_code": request.getfixturevalue(invitation).code}
    )
    assert form.is_valid() is result
| StarcoderdataPython |
#-*-coding:utf-8-*-
from flask import Flask
app = Flask(__name__)
from celery import Celery
from celery import platforms #These two lines are unnecessary unless you run as the Linux root user.
platforms.C_FORCE_ROOT=True #Allow celery to run under the root account.
def make_celery(app):
    """Build a Celery instance whose tasks run inside *app*'s Flask context."""
    celery = Celery('flask_celery', #The official docs use app.import_name; a literal name is used since all code lives in this single file (flask_celery.py).
        broker=app.config['CELERY_BROKER_URL'],
        backend=app.config['CELERY_RESULT_BACKEND']
    )
    # Mirror the Flask configuration into the Celery configuration.
    celery.conf.update(app.config)
    TaskBase = celery.Task
    class ContextTask(TaskBase):
        # Wrap every task invocation in the Flask application context so
        # tasks can use app-bound extensions (database, config, ...).
        abstract = True
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
    return celery
# Use Redis as both the Celery message broker and the result backend.
app.config.update(
    CELERY_BROKER_URL='redis://localhost:6379/0',
    CELERY_RESULT_BACKEND='redis://localhost:6379/1'
)
celery = make_celery(app)
@celery.task()
def long_time_def():
    """Demo task: spin through ~10^8 loop iterations, then return 'hello'."""
    for _ in range(10000):
        for j in range(10000):
            i = 1  # busy work only; the value is never used
    return 'hello'
3228506 | <filename>mynewsite/board/admin.py<gh_stars>0
from django.contrib import admin
from board import models
# Register your models here.
class PostAdmin(admin.ModelAdmin):
    """Admin changelist options for Post: key columns, newest first."""
    list_display = ("nickname", "message", "enabled", "pub_time")
    ordering = ("-pub_time", )
# Expose the board models in the admin; Post uses the customised options above.
admin.site.register(models.Mood)
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Contact)
3242554 | <reponame>ULNE/MicroPython<gh_stars>0
import pyb
import time
# Blink LED 3 forever: full brightness for 1 s, then dimmed for 0.5 s.
# NOTE: ``pyb`` and ``time.sleep_ms`` are MicroPython (pyboard) APIs.
while True:
    pyb.LED(3).on() #LED3 = orange
    time.sleep(1)
    # Drop the LED to a low intensity level (scale is board-defined).
    pyb.LED(3).intensity(20)
    time.sleep_ms(500)
1767837 | <reponame>brianbruggeman/rl
import random
import numpy as np
def perlin(samples=None, seed=None, size=None):
    """Return a (samples, samples) array of 2-D Perlin noise.

    samples: grid resolution per axis (default 100).
    seed:    NumPy RNG seed (default: a random value in [0, size]).
    size:    length of the permutation table (default 256).

    Note: seeds NumPy's *global* RNG as a side effect.
    """
    if size is None:
        size = 256
    if samples is None:
        samples = 100
    if seed is None:
        seed = random.randint(0, size)
    np.random.seed(seed)

    # Sample points in [0, 5) on both axes (y axis flipped).
    lin = np.linspace(0, 5, samples, endpoint=False)
    x, y = np.meshgrid(lin, lin[::-1])

    # Shuffled permutation table, doubled so nested lookups never overflow.
    p = np.arange(size, dtype=int)
    np.random.shuffle(p)
    p = np.stack([p, p]).flatten()

    # Integer lattice cell of each sample, and position inside the cell.
    xi, yi = x.astype(int), y.astype(int)
    xf, yf = x - xi, y - yi

    # Smoothed interpolation weights.
    u, v = fade(xf), fade(yf)

    # Gradient contribution from each of the four cell corners.
    n00 = gradient(p[p[xi] + yi], xf, yf)
    n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1)
    n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1)
    n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf)

    # Bilinear interpolation of the corner contributions.
    return lerp(lerp(n00, n10, u), lerp(n01, n11, u), v)
def lerp(a, b, x):
    """Linear interpolation: returns a at x=0 and b at x=1."""
    delta = b - a
    return a + x * delta
def fade(t):
    """Perlin fade curve 6t^5 - 15t^4 + 10t^3.

    A quintic smoothstep: its first and second derivatives vanish at
    t = 0 and t = 1, which removes grid artefacts in the noise.
    """
    return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3
def gradient(h, x, y):
    """Dot product of (x, y) with the unit gradient selected by hash ``h``.

    ``h``, ``x`` and ``y`` are 2-D arrays of matching shape; ``h % 4``
    picks one of the four axis-aligned gradient vectors per lattice point.
    """
    directions = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])
    g = directions[h % 4]
    return g[:, :, 0] * x + g[:, :, 1] * y
| StarcoderdataPython |
1722248 | <reponame>ShivamPytho/parsifal
from django import forms
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.db.models.functions import Lower
from django.template.loader import render_to_string
from django.utils.translation import gettext as _
from parsifal.apps.invites.constants import InviteStatus
from parsifal.apps.invites.models import Invite
class SendInviteForm(forms.ModelForm):
    """Invite someone to co-author a review, by contact or by raw email.

    Exactly one of ``invitee`` (a followed user) or ``invitee_email``
    must be provided; a notification email is sent when the invite is
    saved with ``commit=True``.
    """
    class Meta:
        model = Invite
        fields = ("invitee", "invitee_email")
    def __init__(self, *args, request, review, **kwargs):
        # The current request (user/site) and the target review are
        # required keyword-only dependencies.
        self.request = request
        self.review = review
        super().__init__(*args, **kwargs)
        # Contact choices: people the requester follows who are not
        # already co-authors, sorted case-insensitively by username.
        user_ids = {user.pk for user in self.request.user.profile.get_following()}
        self.fields["invitee"].queryset = (
            User.objects.filter(pk__in=user_ids)
            .exclude(pk__in=self.review.co_authors.all())
            .annotate(lower_username=Lower("username"))
            .order_by("lower_username")
        )
        self.fields["invitee"].label = _("Contacts")
        self.fields["invitee"].help_text = _("List of people that you are currently following on Parsifal.")
        self.fields["invitee_email"].label = _("Email address of the person you want to invite")
        self.fields["invitee_email"].help_text = _(
            "If the person you want to invite is not on Parsifal, you can inform their email address and we will send "
            "an invitation link to their inbox."
        )
        self.fields["invitee_email"].required = False
    def clean(self):
        # Cross-field rule: exactly one of the two fields must be filled.
        cleaned_data = super().clean()
        if cleaned_data.get("invitee") and cleaned_data.get("invitee_email"):
            self.add_error(
                None, _("You must inform either a contact or an email address, but not both at the same time.")
            )
        if not cleaned_data.get("invitee") and not cleaned_data.get("invitee_email"):
            self.add_error(None, _("You must inform either a contact or an email address."))
        return cleaned_data
    def clean_invitee(self):
        # Reject contacts who are already co-authors or already invited.
        invitee = self.cleaned_data.get("invitee")
        if invitee:
            if self.review.is_author_or_coauthor(invitee):
                self.add_error("invitee", _("This person is already a co-author of this review."))
            if Invite.objects.filter(
                review=self.review, invitee_email__iexact=invitee.email, status=InviteStatus.PENDING
            ).exists():
                self.add_error("invitee", _("This person already has a pending invite."))
        return invitee
    def clean_invitee_email(self):
        # Normalise the address, then apply the same self-invite /
        # co-author / pending-invite checks as for contacts.
        invitee_email = self.cleaned_data.get("invitee_email")
        if invitee_email:
            invitee_email = User.objects.normalize_email(invitee_email)
            if invitee_email.lower() == self.request.user.email.lower():
                self.add_error("invitee_email", _("You cannot invite yourself."))
            try:
                user = User.objects.get(email__iexact=invitee_email)
                if self.review.is_author_or_coauthor(user):
                    self.add_error("invitee_email", _("This person is already a co-author of this review."))
            except User.DoesNotExist:
                # Unknown address is fine: the invite goes out by email.
                pass
            if Invite.objects.filter(
                review=self.review, invitee_email__iexact=invitee_email, status=InviteStatus.PENDING
            ).exists():
                self.add_error("invitee_email", _("This person already has a pending invite."))
        return invitee_email
    def send_mail(self):
        """
        Send a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        subject = render_to_string("invites/invite_subject.txt", {"invite": self.instance})
        # Email subject *must not* contain newlines
        subject = "".join(subject.splitlines())
        current_site = get_current_site(self.request)
        site_name = current_site.name
        domain = current_site.domain
        invited_by_name = self.instance.invited_by.profile.get_screen_name()
        from_email = f"{invited_by_name} via Parsifal <<EMAIL>>"
        to_email = self.instance.get_invitee_email()
        body = render_to_string(
            "invites/invite_email.html",
            {
                "invite": self.instance,
                "site_name": site_name,
                "domain": domain,
                "protocol": "https" if self.request.is_secure() else "http",
            },
        )
        email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
        email_message.send()
    def save(self, commit=True):
        # Keep invitee and invitee_email in sync: fill the email from the
        # chosen contact, or resolve an existing account for a raw email.
        self.instance = super().save(commit=False)
        if self.instance.invitee:
            self.instance.invitee_email = self.instance.invitee.email
        else:
            self.instance.invitee = User.objects.filter(email__iexact=self.instance.invitee_email).first()
        self.instance.review = self.review
        self.instance.invited_by = self.request.user
        if commit:
            self.instance.save()
            self.send_mail()
        return self.instance
| StarcoderdataPython |
3254868 | <reponame>altcnews/bitmex_grid<gh_stars>1-10
import logging
from market_maker.settings import settings
from datetime import datetime
import os
def setup_custom_logger(name, log_level=settings.LOG_LEVEL):
    """Return a logger that writes to stderr and to an hourly-named file.

    NOTE(review): setting os.environ['TZ'] alone does not change the
    process timezone on most platforms without time.tzset() (which does
    not exist on Windows) -- confirm the Moscow-time intent.
    """
    os.environ['TZ'] = 'Europe/Moscow'
    formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    # BUG FIX: logging.getLogger caches loggers by name, so repeated calls
    # used to attach duplicate handlers and emit every record several times.
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        # One log file per hour, e.g. 20240131_14.log.
        date = datetime.today().strftime("%Y%m%d_%H")
        file_handler = logging.FileHandler('{}.log'.format(date))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
| StarcoderdataPython |
44390 | <reponame>mfeindt0705/pynetmf<gh_stars>0
#!/usr/bin/env python
"""Open a network device with NAPALM and pretty-print its facts."""
from getpass import getpass
from pprint import pprint
from napalm import get_network_driver
# Supress SSL Certificate Warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Connection parameters; each getpass() call prompts interactively.
cisco3 = dict(
    hostname="cisco3.lasthop.io",
    device_type="ios",
    username="pyclass",
    password=getpass(),
    optional_args={},
)
nxos1 = dict(
    hostname="nxos1.lasthop.io",
    device_type="nxos",
    username="pyclass",
    password=getpass(),
    optional_args={"port": 8443},
)
# Select the driver by type, then remove the key so the remaining dict
# can be splatted straight into the driver constructor.
# device_type = cisco3.pop("device_type")
device_type = nxos1.pop("device_type")
driver = get_network_driver(device_type)
# device = driver(**cisco3)
device = driver(**nxos1)
print()
print("\n\n>>>Test device open")
device.open()
print()
output = device.get_facts()
pprint(output)
print()
3279264 | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfDimensionalVectorFunctionPressureValue(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Polymorphic vector-function model; the ``type`` field discriminates
    between component-wise and table-defined variants.

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'type': 'str',
        'x': 'OneOfComponentVectorFunctionX',
        'y': 'OneOfComponentVectorFunctionY',
        'z': 'OneOfComponentVectorFunctionZ',
        'label': 'str',
        'table_id': 'str',
        'result_index': 'list[int]',
        'independent_variables': 'list[TableFunctionParameter]',
        'separator': 'str',
        'out_of_bounds': 'str'
    }
    attribute_map = {
        'type': 'type',
        'x': 'x',
        'y': 'y',
        'z': 'z',
        'label': 'label',
        'table_id': 'tableId',
        'result_index': 'resultIndex',
        'independent_variables': 'independentVariables',
        'separator': 'separator',
        'out_of_bounds': 'outOfBounds'
    }
    # Maps each 'type' discriminator value to its concrete model class name.
    discriminator_value_class_map = {
        'COMPONENT': 'ComponentVectorFunction',
        'TABLE_DEFINED': 'TableDefinedVectorFunction'
    }
    def __init__(self, type='TABLE_DEFINED', x=None, y=None, z=None, label=None, table_id=None, result_index=None, independent_variables=None, separator=None, out_of_bounds=None, local_vars_configuration=None):  # noqa: E501
        """OneOfDimensionalVectorFunctionPressureValue - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._type = None
        self._x = None
        self._y = None
        self._z = None
        self._label = None
        self._table_id = None
        self._result_index = None
        self._independent_variables = None
        self._separator = None
        self._out_of_bounds = None
        self.discriminator = 'type'
        # All optional fields are only set when explicitly supplied.
        self.type = type
        if x is not None:
            self.x = x
        if y is not None:
            self.y = y
        if z is not None:
            self.z = z
        if label is not None:
            self.label = label
        if table_id is not None:
            self.table_id = table_id
        if result_index is not None:
            self.result_index = result_index
        if independent_variables is not None:
            self.independent_variables = independent_variables
        if separator is not None:
            self.separator = separator
        if out_of_bounds is not None:
            self.out_of_bounds = out_of_bounds
    @property
    def type(self):
        """Gets the type of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        Schema name: TableDefinedVectorFunction  # noqa: E501

        :return: The type of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this OneOfDimensionalVectorFunctionPressureValue.

        Schema name: TableDefinedVectorFunction  # noqa: E501

        :param type: The type of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type
    @property
    def x(self):
        """Gets the x of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        :return: The x of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: OneOfComponentVectorFunctionX
        """
        return self._x
    @x.setter
    def x(self, x):
        """Sets the x of this OneOfDimensionalVectorFunctionPressureValue.

        :param x: The x of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: OneOfComponentVectorFunctionX
        """
        self._x = x
    @property
    def y(self):
        """Gets the y of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        :return: The y of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: OneOfComponentVectorFunctionY
        """
        return self._y
    @y.setter
    def y(self, y):
        """Sets the y of this OneOfDimensionalVectorFunctionPressureValue.

        :param y: The y of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: OneOfComponentVectorFunctionY
        """
        self._y = y
    @property
    def z(self):
        """Gets the z of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        :return: The z of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: OneOfComponentVectorFunctionZ
        """
        return self._z
    @z.setter
    def z(self, z):
        """Sets the z of this OneOfDimensionalVectorFunctionPressureValue.

        :param z: The z of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: OneOfComponentVectorFunctionZ
        """
        self._z = z
    @property
    def label(self):
        """Gets the label of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        :return: The label of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: str
        """
        return self._label
    @label.setter
    def label(self, label):
        """Sets the label of this OneOfDimensionalVectorFunctionPressureValue.

        :param label: The label of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: str
        """
        self._label = label
    @property
    def table_id(self):
        """Gets the table_id of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        The ID of the imported table.  # noqa: E501

        :return: The table_id of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: str
        """
        return self._table_id
    @table_id.setter
    def table_id(self, table_id):
        """Sets the table_id of this OneOfDimensionalVectorFunctionPressureValue.

        The ID of the imported table.  # noqa: E501

        :param table_id: The table_id of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: str
        """
        self._table_id = table_id
    @property
    def result_index(self):
        """Gets the result_index of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        Indicates which column(s) of the table contains the result values. One-based indexing must be used. For example, set this field to '[2]' if the second column of the table contains the dependent variable values.  # noqa: E501

        :return: The result_index of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: list[int]
        """
        return self._result_index
    @result_index.setter
    def result_index(self, result_index):
        """Sets the result_index of this OneOfDimensionalVectorFunctionPressureValue.

        Indicates which column(s) of the table contains the result values. One-based indexing must be used. For example, set this field to '[2]' if the second column of the table contains the dependent variable values.  # noqa: E501

        :param result_index: The result_index of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: list[int]
        """
        self._result_index = result_index
    @property
    def independent_variables(self):
        """Gets the independent_variables of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        :return: The independent_variables of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: list[TableFunctionParameter]
        """
        return self._independent_variables
    @independent_variables.setter
    def independent_variables(self, independent_variables):
        """Sets the independent_variables of this OneOfDimensionalVectorFunctionPressureValue.

        :param independent_variables: The independent_variables of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: list[TableFunctionParameter]
        """
        self._independent_variables = independent_variables
    @property
    def separator(self):
        """Gets the separator of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        Values in each row are separated by this character. Also known as a delimiter.  # noqa: E501

        :return: The separator of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: str
        """
        return self._separator
    @separator.setter
    def separator(self, separator):
        """Sets the separator of this OneOfDimensionalVectorFunctionPressureValue.

        Values in each row are separated by this character. Also known as a delimiter.  # noqa: E501

        :param separator: The separator of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: str
        """
        self._separator = separator
    @property
    def out_of_bounds(self):
        """Gets the out_of_bounds of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501

        :return: The out_of_bounds of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :rtype: str
        """
        return self._out_of_bounds
    @out_of_bounds.setter
    def out_of_bounds(self, out_of_bounds):
        """Sets the out_of_bounds of this OneOfDimensionalVectorFunctionPressureValue.

        :param out_of_bounds: The out_of_bounds of this OneOfDimensionalVectorFunctionPressureValue.  # noqa: E501
        :type: str
        """
        # Only 'CLAMP' is accepted by the API schema.
        allowed_values = ["CLAMP"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and out_of_bounds not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `out_of_bounds` ({0}), must be one of {1}"  # noqa: E501
                .format(out_of_bounds, allowed_values)
            )
        self._out_of_bounds = out_of_bounds
    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator"""
        discriminator_key = self.attribute_map[self.discriminator]
        discriminator_value = data[discriminator_key]
        return self.discriminator_value_class_map.get(discriminator_value)
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OneOfDimensionalVectorFunctionPressureValue):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, OneOfDimensionalVectorFunctionPressureValue):
            return True
        return self.to_dict() != other.to_dict()
| StarcoderdataPython |
1600294 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.display import display, set_matplotlib_formats
set_matplotlib_formats('png','pdf')
# %%
# %load_ext watermark
# %watermark
# %% tags=["parameters"]
PRINT=False
# %%
import pandas as pd
if PRINT:
pd.options.plotting.backend = "matplotlib"
else:
pd.options.plotting.backend = "plotly"
# %%
df = pd.read_excel("https://github.com/dionresearch/demo_data/blob/master/excel/iris.xlsx?raw=true")
# %%
df.describe(include='all').T
# %%
df.plot(kind='scatter',x='sepal_length',y='sepal_width')
# %%
| StarcoderdataPython |
import logzero
# Switch logzero's default logger to JSON-formatted records.
logzero.json()
log = logzero.logger
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.