blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7d5da1367a1597e4d9465a4fb0380070cefb1584 | 2c740f4c06380da4a0f0ca2df65790f9788a4ce4 | /anubis-cron-job/src/anubis_cron_job/config.py | 32af4a6600785c945faa03f13b741a7e6206fb4b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gavinmbell/benchmark-ai-1 | 326fa3b6b6cdff36ffbfe8dbab18cabb46fb5792 | a697e67d68b843fe9350e55871dad867bab5d51d | refs/heads/master | 2023-05-26T09:36:14.267799 | 2021-06-01T20:19:28 | 2021-06-01T20:19:28 | 300,193,422 | 0 | 0 | null | 2020-10-01T07:49:22 | 2020-10-01T07:49:21 | null | UTF-8 | Python | false | false | 2,107 | py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import configargparse
import json
from dataclasses import dataclass
from typing import Any, Dict, List
@dataclass
class AnubisCronJobConfig:
    """Configuration values for the Anubis cron job, built by get_config()."""

    # Kafka brokers to connect to, e.g. ["host1:9092", "host2:9092"].
    kafka_bootstrap_servers: List[str]
    # Topic the executor listens to, used to spawn single-run jobs.
    producer_topic: str
    # Topic where status messages are published.
    status_topic: str
    # Deserialized original event containing the periodic benchmark.
    benchmark_event: Dict[str, Any]
def get_config(argv, env=None) -> AnubisCronJobConfig:
    """Parse command-line arguments / environment variables into a config.

    Args:
        argv: list of command-line argument strings.
        env: optional mapping of environment variables handed to
            configargparse (None lets configargparse use os.environ).

    Returns:
        AnubisCronJobConfig populated from the parsed values.
    """
    arg_parser = configargparse.ArgParser(
        description="Tool for generating single-benchmark runs from a periodic benchmark template"
    )
    # Comma-separated string -> list of broker addresses.
    arg_parser.add(
        "--kafka-bootstrap-servers",
        type=lambda servers: servers.split(","),
        env_var="KAFKA_BOOTSTRAP_SERVERS",
        help="Comma separated list of kafka bootstrap servers",
        required=True,
    )
    arg_parser.add(
        "--producer-topic",
        env_var="PRODUCER_TOPIC",
        help="The topic the executor listens to to spawn single-run jobs",
        required=True,
    )
    arg_parser.add("--status-topic", env_var="STATUS_TOPIC", help="The status topic", required=True)
    # The event arrives as a JSON string; deserialize it into a dict.
    arg_parser.add(
        "--benchmark-event",
        type=json.loads,
        env_var="BENCHMARK_EVENT",
        help="A string containing the original event containing the periodic benchmark",
        required=True,
    )
    known_args, _ = arg_parser.parse_known_args(argv, env_vars=env)
    return AnubisCronJobConfig(
        kafka_bootstrap_servers=known_args.kafka_bootstrap_servers,
        producer_topic=known_args.producer_topic,
        status_topic=known_args.status_topic,
        benchmark_event=known_args.benchmark_event,
    )
| [
"noreply@github.com"
] | gavinmbell.noreply@github.com |
f7704d11de6500356f5a0264aa2a05b0534f42a0 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /spaceopt/gp_utils.py | dabf02d461833af563929d3c7310be1c9a08d714 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 8,808 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preparing the GP utility functions for evaluting the search space scores."""
from typing import Any, Callable, Dict
import jax
import jax.numpy as jnp
import numpy as np
import sklearn
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import kernels
from tensorflow_probability.substrates import jax as tfp
# Default matmul precision used for jax contractions in this module (sqdist).
PRECISION = jax.lax.Precision.DEFAULT
def sqdist(x1, x2=None, precision=PRECISION):
  """Pairwise squared Euclidean distances between two batches of points.

  Treats x1 as n vectors and x2 as m vectors (all trailing axes are the
  feature axes) and expands ||a-b||^2 = ||a||^2 - 2 a.b + ||b||^2.

  Args:
    x1: (n, ...) shaped tensor.
    x2: (m, ...) shaped tensor whose trailing shape is compatible with x1;
      defaults to x1 itself (self-distance matrix).
    precision: jax precision argument for the tensordot contraction,
      trading accuracy against speed.

  Returns:
    (n, m) shaped array of squared distances between x1 and x2.
  """
  if x2 is None:
    x2 = x1
  feature_axes = list(range(1, x1.ndim))
  cross = jnp.tensordot(x1, x2, (feature_axes, feature_axes),
                        precision=precision)
  d2 = jnp.float32(-2) * cross
  d2 = d2 + jnp.sum(x1**2, axis=feature_axes)[:, jnp.newaxis]
  d2 = d2 + jnp.sum(x2**2, axis=feature_axes)[jnp.newaxis]
  return d2
def matern_5_2(x, y, length_scale):
  """Matern-5/2 kernel matrix for inputs rescaled by length_scale."""
  scaled_dists = jnp.sqrt(sqdist(x / length_scale, y / length_scale))
  r = scaled_dists * jnp.sqrt(5.)
  return (1. + r + r ** 2 / 3.0) * jnp.exp(-r)
# (lower, upper) box constraints used when the sklearn GP optimizes its
# hyperparameters (see cov_function_sklearn).
PARAMS_BOUNDS = {
    'amplitude': (0.05, 2.),
    'noise': (0.0005, .1),
    'lengthscale': (0.005, 20.)
}
# Number of restarts for the sklearn marginal-likelihood optimizer.
N_RESTARTS_OPTIMIZER = 10
def cov_function_sklearn(params, nu = 5/2):
  """Builds the default sklearn covariance function.

  Amplitude * ARD-Matern(nu) plus a white-noise term for numerical
  stability; optimization bounds for every hyperparameter come from
  PARAMS_BOUNDS.

  Args:
    params: dictionary with 'amplitude', 'noise' and 'lengthscale' entries.
    nu: degree of the matern kernel.

  Returns:
    A sklearn kernel object.
  """
  constant_part = kernels.ConstantKernel(
      params['amplitude'],
      constant_value_bounds=PARAMS_BOUNDS['amplitude'])
  matern_part = kernels.Matern(
      params['lengthscale'].flatten(),
      nu=nu,
      length_scale_bounds=PARAMS_BOUNDS['lengthscale'])
  noise_part = kernels.WhiteKernel(
      params['noise'],
      noise_level_bounds=PARAMS_BOUNDS['noise'])
  return constant_part * matern_part + noise_part
def cov_function_jax(params, x, y=None, add_noise=False):
  """Evaluates the amplitude-scaled Matern-5/2 covariance matrix in jax.

  When y is omitted the self-covariance of x is computed and diagonal
  observation noise (noise**2) is always added, mirroring the sklearn
  kernel built by cov_function_sklearn.
  """
  if y is None:
    y = x
    add_noise = True
  kernel = params['amplitude'] * matern_5_2(x, y, params['lengthscale'].flatten())
  if not add_noise:
    return kernel
  return kernel + np.eye(kernel.shape[0]) * params['noise'] ** 2
def extract_params_from_sklearn_gp(gaussian_process):
  """Pulls hyperparameters and fit statistics out of a fitted sklearn GP.

  The kernel is expected to have the (Constant * Matern) + WhiteKernel
  structure produced by cov_function_sklearn, i.e. an ARD Matern 5/2
  kernel with observation noise following
  https://arxiv.org/pdf/1206.2944.pdf.

  Args:
    gaussian_process: a fitted sklearn GaussianProcessRegressor.

  Returns:
    Dictionary with the learned hyperparameters plus the Cholesky factor,
    training-target normalization statistics and dual coefficients.
  """
  fitted_kernel = gaussian_process.kernel_
  assert isinstance(fitted_kernel, sklearn.gaussian_process.kernels.Sum)
  product_part = fitted_kernel.k1   # ConstantKernel * Matern
  white_part = fitted_kernel.k2     # WhiteKernel
  assert isinstance(product_part, sklearn.gaussian_process.kernels.Product)
  assert isinstance(white_part, sklearn.gaussian_process.kernels.WhiteKernel)
  # pylint: disable=protected-access
  extracted = {
      'noise': white_part.noise_level,
      'lengthscale': product_part.k2.length_scale,
      'amplitude': product_part.k1.constant_value,
      'l_': gaussian_process.L_,
      'y_train_std_': gaussian_process._y_train_std,
      'y_train_mean_': gaussian_process._y_train_mean,
      'alpha_': gaussian_process.alpha_
  }
  # pylint: enable=protected-access
  return extracted
class GPUtils:
  """Class for GP utilities.

  Fitting is delegated to a sklearn GaussianProcessRegressor; posterior
  evaluation and sampling are then done in jax from the parameters
  extracted by extract_params_from_sklearn_gp.
  """
  def __init__(self,
               cov_fun = None,
               gp_noise_eps = 1e-5):
    """Initialize the GP class.

    Args:
      cov_fun: optional sklearn kernel; when None, fit_gp builds the
        default one via cov_function_sklearn.
      gp_noise_eps: diagonal jitter added for numerical stability.
    """
    self.cov_fun = cov_fun
    self.gp_noise_eps = gp_noise_eps
  def fit_gp(self,
             x_obs,
             y_obs,
             params,
             steps = 1000):
    """Fit a GP to the observed data and return the optimized params.

    Args:
      x_obs: (n, d) shaped array of n observed x-locations in dimension d.
      y_obs: (n, 1) shaped array of objective values at x_obs.
      params: A dictionary of model hyperparameters.
      steps: Number of optimization steps.
        Note that this argument is ignored for sklearn GP, however might be
        included for other GP backends.

    Returns:
      Dictionary of learned parameters from the sklearn GP implementation.
    """
    del steps
    # Build the default kernel lazily from the initial hyperparameters.
    if self.cov_fun is None:
      self.cov_fun = cov_function_sklearn(params)
    gaussian_process = GaussianProcessRegressor(
        kernel=self.cov_fun,
        alpha=self.gp_noise_eps,
        n_restarts_optimizer=N_RESTARTS_OPTIMIZER,
        optimizer='fmin_l_bfgs_b')
    gaussian_process.fit(np.array(x_obs), np.array(y_obs))
    # Keep the fitted regressor around for inspection/debugging.
    self.gaussian_process = gaussian_process
    params = extract_params_from_sklearn_gp(gaussian_process)
    return params
  def posterior_mean_cov(self, params, x_obs,
                         y_obs, x_test):
    """Evaluate the posterior mean and cov of the test x-locations.

    Args:
      params: Dictionary of learned parameters from the sklearn GP
        implementation.
      x_obs: (n, d) shaped array of n observed x-locations in dimension d.
      y_obs: (n, 1) shaped array of objective values at x_obs.
        Note that this argument is ignored for sklearn GP since we alternatively
        use the already calculated statistics from sklearn GP object, however
        might be included for other GP backends.
      x_test: (m, d) shaped array of m test x-locations in dimension d.

    Returns:
      mu: (m, 1) shaped array of mean at x_test.
      cov: (m, m) shaped array of covariance at x_test.
    """
    del y_obs
    l_ = params['l_']
    y_train_std_ = params['y_train_std_']
    y_train_mean_ = params['y_train_mean_']
    alpha_ = params['alpha_']
    # Posterior mean: K(x_test, x_obs) @ alpha, then undo sklearn's
    # target normalization.
    cross_cov = cov_function_jax(params, x_test, x_obs)
    mu = cross_cov @ alpha_
    mu = y_train_std_ * mu + y_train_mean_
    # Posterior covariance: K(x_test, x_test) - v^T v with v = L^-1 K^T.
    v = jax.scipy.linalg.solve_triangular(l_, cross_cov.T, lower=True)
    other_cov = cov_function_jax(params, x_test)
    other_cov += jnp.eye(other_cov.shape[0]) * self.gp_noise_eps
    cov = (other_cov - jnp.dot(v.T, v))
    # Rescale by the per-output training std; the trailing axis is squeezed
    # away again in the (common) single-output case.
    cov = jnp.outer(cov, y_train_std_ ** 2).reshape(*cov.shape, -1)
    if cov.shape[2] == 1:
      cov = jnp.squeeze(cov, axis=2)
    return mu, cov
  def draw_gp_samples(self,
                      key,
                      mu,
                      cov,
                      num_samples = 1,
                      method = 'cholesky',
                      tol = 1e-4):
    """Draw multivariate-normal samples given mu and cov.

    Args:
      key: a jax random.PRNGKey.
      mu: (m, 1) shaped array of mean values.
      cov: (m, m) shaped array of covariance values.
      num_samples: number of samples.
      method: method of sampling from 'own', 'cholesky', 'svd' and 'tfp'.
      tol: additional tolerance for numerical stability issue.

    Returns:
      samples: (num_samples, m) shaped array of drawn samples.
    """
    if (method == 'cholesky') or (method == 'svd'):
      samples = jax.random.multivariate_normal(
          key, mu.T, cov, shape=(num_samples,), method=method)
    elif method == 'own':
      # Hand-rolled sampler: z @ chol(cov + tol*I) + mu.
      y_rand = jax.random.normal(key, (num_samples, cov.shape[0]))
      chol = jax.scipy.linalg.cholesky(
          cov + jnp.eye(cov.shape[0]) * tol, lower=True)
      samples = jnp.dot(y_rand, chol) + mu.T
    elif method == 'tfp':
      tfd = tfp.distributions
      mvn = tfd.MultivariateNormalFullCovariance(
          loc=mu.flatten(), covariance_matrix=cov)
      samples = mvn.sample(num_samples, key)
    else:
      raise ValueError('Accepted methods include own, cholesky, svd and tfp.')
    return samples
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
528a33fdfd2337c933fcf62029911b79c0b56a58 | 6a76710c92ab6dd701644b59046b0682ff2b5b2f | /src/classes/modeles/geo_routines/place.py | 17bbd9a8ea9bbbb06109c904f7483b503f841aa0 | [] | no_license | Abdallah-herri/Tweetos | b70121d1bc77c832042a55c92deb328a8624eba4 | e6faf9755464d722b6055086a23e758e389ae75a | refs/heads/master | 2020-08-15T11:40:05.069173 | 2019-10-15T15:42:43 | 2019-10-15T15:42:43 | 215,335,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # -*- coding: utf-8 -*-
from classes.modeles.geo import Geolocalisation
# this class represent a place (like a city, country, place...)
class Place(Geolocalisation):
    # Named geographic area (city, country, place...); the keys read below
    # look like a social-media API "place" object -- confirm against caller.
    def __init__(self, obj):
        """Store the relevant fields of the raw place mapping *obj*."""
        super(Place, self).__init__(Geolocalisation.PLACE)
        # all info of the place
        self.name = obj["name"]
        self.country = obj["country"]
        self.country_code = obj["country_code"]
        self.place_type = obj["place_type"]
| [
"abdallah.herri@etu.umontpellier.fr"
] | abdallah.herri@etu.umontpellier.fr |
bc4d11e0bbcfc7a85602b9109b7298e8ebdb1e07 | 50dbecc7ad3a778fb9cf5ea63de683a48a3d73ff | /app.py | 30d4852f9f5ee82c0ba74d5dc68e3ba5ef591d13 | [] | no_license | mahsan2/prac | d6eae09c944c1d97a6a34cecb5a249035ec267f8 | 5eec08cd1d1ff5b2810fa64c251eaa671ab55e7d | refs/heads/master | 2023-03-22T07:41:23.274963 | 2020-04-25T01:44:04 | 2020-04-25T01:44:04 | 258,663,404 | 0 | 0 | null | 2021-03-20T03:43:44 | 2020-04-25T01:39:33 | HTML | UTF-8 | Python | false | false | 871 | py | import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
# Flask application object used by the route decorators below.
app = Flask(__name__)
# Load the trained model once at import time; a context manager guarantees
# the file handle is closed after unpickling (the original open() leaked it).
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
@app.route('/')
def home():
    # Serve the landing page containing the input form.
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    '''
    Render the prediction result on the HTML GUI.

    Reads the submitted form fields as integers, runs them through the
    pickled model, and renders the page again with a result message.
    '''
    # Collect the posted form values (in submission order) as integers.
    features = [int(value) for value in request.form.values()]
    model_input = [np.array(features)]
    prediction = model.predict(model_input)
    output = round(prediction[0], 2)
    # NOTE(review): this maps model output 0 to "high chance" -- confirm
    # that class 0 really encodes disease in the training labels.
    if output == 0:
        message = 'You have high chance of heart attack'
    else:
        message = 'You do not have heart disease!'
    return render_template('index.html', prediction_text=message)
if __name__ == "__main__":
app.run(debug=True) | [
"noreply@github.com"
] | mahsan2.noreply@github.com |
ec3f9cbef7317e66b3d04f93239ed3262260f5e8 | 3a0ddda68459a1bb9f93269990a0d152e43b9cb7 | /graphUtils.py | 964e00129dc10ca74af15a7aca7ecc38d6fa0b31 | [] | no_license | lbignell/oneton | a126a8d3e1e8cefb11d0061585754d73caac4992 | d4ab9fd4f46cbf35925f5f9203a9284415b504f1 | refs/heads/master | 2020-12-25T11:15:32.375744 | 2016-08-26T11:53:13 | 2016-08-26T11:53:13 | 50,036,274 | 0 | 0 | null | 2016-01-20T14:43:46 | 2016-01-20T14:43:46 | null | UTF-8 | Python | false | false | 17,670 | py | #!/usr/bin/env python
'''
basic modules to make, print tgraphs
20150921
'''
import time
import datetime
import sys
import os
import math
import ROOT
from ROOT import TH1D, TFile, gROOT, TCanvas, TLegend, TGraph, TDatime, TMultiGraph, gStyle, TGraphErrors, TLine
from array import array
import re # regular expression
import pipath
class graphUtils():
    '''
    Helper methods (Python 2) for creating, styling and printing ROOT
    histograms, graphs and multigraphs, plus struct_time/datetime helpers.
    '''
    def __init__(self):
        # use in color()
        self.goodColors = [1,2,3,4, 6,7,8,9] # no yellow(5) or white (0,10)
        self.goodColors.extend( [11, 12, 18] )
        self.goodColors.extend( [x for x in range(28,50)] )
        self.goodMarkers = [x for x in range(20,31) ]
        # path fixer used to build output file names
        self.pip = pipath.pipath()
        return
    def t2dt(self,t):
        '''
        convert struct_time to datetime object
        '''
        return datetime.datetime.fromtimestamp(time.mktime(t))
    def dt2t(self,dt):
        '''
        convert datetime object to struct_time (time object)
        '''
        # NOTE(review): fmt mixes %M (minute) and %m (month) in unusual
        # positions, but the same fmt is used for strftime and strptime so
        # the round trip is self-consistent.
        fmt = "%Y %M %d %H %m %S"
        return time.strptime(dt.strftime(fmt),fmt)
    def addSeconds(self,t,seconds=0):
        '''
        add seconds to struct_time t by converting to datetime object,
        using timedelta and converting back
        '''
        dt = self.t2dt(t)
        dt += datetime.timedelta(seconds=seconds)
        return self.dt2t(dt)
    def convertTime(self,day,fmt,text):
        # Parse 'HH:MM' or 'HH:MM:SS' appended to day; abort on anything else.
        c = text.count(":")
        if c==1: return time.strptime(day+text,fmt+"%H:%M")
        if c==2: return time.strptime(day+text,fmt+"%H:%M:%S")
        sys.exit("graphUtils.convertTime ERROR Unknown input " + str(text))
        # unreachable: sys.exit raises SystemExit
        return
    def fixTimeDisplay(self,g,showDate=False,maybeShowDate=True):
        '''
        set time axis to display nicely
        '''
        if g:
            g.GetXaxis().SetTimeDisplay(1)
            g.GetXaxis().SetTimeFormat("%H:%M")
            if showDate:
                g.GetXaxis().SetTimeFormat("#splitline{%H:%M}{%y/%m/%d}")
            else:
                if maybeShowDate:
                    x1 = g.GetXaxis().GetXmin()
                    x2 = g.GetXaxis().GetXmax()
                    # add the date when the axis spans more than one day
                    if x2-x1>24.*60.*60.:
                        g.GetXaxis().SetTimeFormat("#splitline{%H:%M}{%y/%m/%d}")
                        #print 'graphUtils.fixTimeDisplay: >1 day, so use splitline in SetTimeFormat'
            g.GetXaxis().SetNdivisions(-409)
            g.GetXaxis().SetLabelSize(0.025) #0.5*lx)
            g.GetXaxis().SetTimeOffset(0,"local") # what does this do?
#            g.GetXaxis().SetTimeOffset(0,"gmt") # using gmt option gives times that are only off by 1 hour on tgraph
        else:
            print 'graphUtils.fixTimeDisplay: WARNING Null pointer passed to fixTimeDisplay?????'
        return
    def makeTH1D(self,v,title,name,nx=100,xmi=1,xma=-1):
        # Fill a TH1D from values v; when xmi>xma (default) auto-range the
        # axis with a half-bin margin on each side.
        if xmi>xma:
            xmi = min(v)
            xma = max(v)
            dx = (xma-xmi)/float(nx)
            xmi -= dx/2.
            xma += dx/2.
        h = TH1D(name,title,nx,xmi,xma)
        for y in v: h.Fill(y)
        return h
    def makeTH1Dwtd(self,x,y,title,Name='',NX=None,XMI=None,XMA=None):
        '''
        fill 1d hist with weights y
        given equal size, monotonically increasing bin centers x
        '''
        name = Name
        if Name=='': name = title.replace(' ','_').replace('.','_')
        nx = len(x)
        dx = x[1]-x[0]
        xmi = min(x)-dx/2.
        xma = max(x)+dx/2.
        if NX is not None: nx = NX
        if XMI is not None:xmi =XMI
        if XMA is not None:xma =XMA
        h = TH1D(name,title,nx,xmi,xma)
        for a,b in zip(x,y): h.Fill(a,b)
        # pad the displayed y-range by half a 5% step
        ymi,yma = min(y),max(y)
        dy = (yma-ymi)/20.
        ymi,yma = ymi-dy/2.,yma+dy/2.
        h.SetMaximum(yma)
        h.SetMinimum(ymi)
        return h
    def printHistStats(self,h):
        '''
        print some stats for input hist
        '''
        N,mean,stddev,underflow,overflow = self.getHistStats(h)
        print h.GetTitle(),'mean',mean,'stddev',stddev,'Nentries',N,'uflow',underflow,'oflow',overflow
        return
    def getHistStats(self,h):
        '''
        return histogram stats
        '''
        axis = 1 # 1d hist only
        mean = h.GetMean(axis)
        stddev = h.GetStdDev(axis)
        N = h.GetEntries()
        underflow = h.GetBinContent(0)
        if axis==1: nbins = h.GetNbinsX()
        if axis==2: nbins = h.GetNbinsY()
        if axis==3: nbins = h.GetNbinsZ()
        overflow = h.GetBinContent(nbins+1)
        return N,mean,stddev,underflow,overflow
    def drawGraph(self,g,figDir="",SetLogx=False,SetLogy=False,option='APL', verbose=False):
        '''
        output graph to file
        '''
        title = g.GetTitle()
        name = g.GetName()
        if SetLogx: name += '_logx'
        if SetLogy: name += '_logy'
        if len(figDir) > 0 and figDir[-1] != os.path.sep:
            pdf = self.pip.fix(figDir + '/' + name + '.pdf')
        else:
            pdf = figDir + name + '.pdf'
        if verbose: print 'drawing Graph:',pdf
        xsize,ysize = 1100,850 # landscape style
        noPopUp = True
        if noPopUp : gROOT.ProcessLine("gROOT->SetBatch()")
        canvas = TCanvas(pdf,title,xsize,ysize)
        g.Draw(option)
        if SetLogy: canvas.SetLogy(1)
        if SetLogx: canvas.SetLogx(1)
        canvas.Draw()
        canvas.SetGrid(1)
        canvas.SetTicks(1)
        canvas.cd()
        canvas.Modified()
        canvas.Update()
        canvas.Print(pdf,'pdf')
        return
    def drawFit(self,h,figdir='',SetLogy=False,SetLogx=False,extraName=None):
        '''
        draw histogram with fit parameters
        '''
        name = h.GetName()
        if extraName is not None: name += '_' + extraName
        title = h.GetTitle()
        if SetLogx: name += '_logx'
        if SetLogy: name += '_logy'
        if len(figdir)>0 and figdir[-1]!= os.path.sep:
            pdf = self.pip.fix( figdir + '/' + name + '.pdf')
            ps = self.pip.fix( figdir + '/' + name + '.ps')
        else:
            pdf = figdir + name + '.pdf'
            ps = figdir + name + '.ps'
        xsize,ysize = 1100,850 # landscape style
        noPopUp = True
        if noPopUp : gROOT.ProcessLine("gROOT->SetBatch()")
        canvas = TCanvas(pdf,title,xsize,ysize)
        gStyle.SetOptFit(1111)
        h.Draw()
        if SetLogy: canvas.SetLogy(1)
        if SetLogx: canvas.SetLogx(1)
        canvas.Draw()
        canvas.SetGrid(1)
        canvas.SetTicks(1)
        canvas.cd()
        canvas.Modified()
        canvas.Update()
        # print to .ps then convert, since TCanvas .pdf output is poor here
        canvas.Print(ps,'Landscape')
        os.system('ps2pdf ' + ps + ' ' + pdf)
        if os.path.exists(pdf): os.remove(ps)
        return
    def finishDraw(self,canvas,ps,pdf,setGrid=True,setTicks=True,ctitle=None):
        '''
        standard nonsense to finish drawing
        ctitle can be considered 'global' title
        '''
        canvas.Draw()
        canvas.SetGrid(setGrid)
        canvas.SetTicks(setTicks)
        canvas.cd()
        canvas.Modified()
        canvas.Update()
        ct = None
        if ctitle is not None:
            ct = ROOT.TText(0.5,0.975,ctitle)
            ct.SetTextAlign(20) # horizontally centered
            s = ct.GetTextSize()
            ct.SetTextSize(s/2.)
            ct.Draw()
        canvas.Print(ps,'Landscape')
        os.system('ps2pdf ' + ps + ' ' + pdf)
        if os.path.exists(pdf): os.remove(ps)
        return
    def drawMultiHists(self,histlist,fname='',figdir='',statOpt=1111111,setLogy=False,setLogx=False,dopt='',abscissaIsTime=False):
        '''
        draw multiple histograms on single pdf output file
        '''
        nHist = len(histlist)
        if nHist<=0:
            print 'graphUtils.drawMultiHists: ERROR zero length histogram list'
            return
        # choose pad layout: 1 column below 4 hists, otherwise 2 columns
        if nHist==1:
            nX = nY = 1
        else:
            nX = 2
            nY = int(float(nHist)/float(nX) + 0.5)
            if nHist<4: nX,nY = 1,nHist
        #print 'nHist,nX,nY=',nHist,nX,nY
        # create output directory if it does not exist
        if len(figdir)>0:
            if os.path.isdir(figdir):
                pass
            else:
                try:
                    os.mkdir(figdir)
                # NOTE(review): os.mkdir raises OSError; IOError may not
                # catch the failure here -- confirm on the target python2.
                except IOError,e:
                    print 'graphUtils.drawMultiHists',e
                else:
                    print 'graphUtils.drawMultiHists created',figdir
        # set output file name and canvas title
        base = figdir
        # NOTE(review): ctitle starts as None, so 'ctitle += name' in the
        # else branch raises TypeError when fname=='' -- likely meant ''.
        ctitle = None
        if fname!='':
            ctitle = fname
        else:
            for h in histlist:
                name = h.GetName()
                ctitle += name
                if h!=histlist[-1]:
                    ctitle += '_'
        if setLogx:
            ctitle += '_logx'
        if setLogy:
            ctitle += '_logy'
        if base[-1] != os.path.sep:
            pdf = self.pip.fix(base + '/' + ctitle + '.pdf')
            ps = self.pip.fix(base + '/' + ctitle + '.ps')
        else:
            pdf = base + ctitle + '.pdf'
            ps = base + ctitle + '.ps'
        # open canvas, draw on it
        title = ''
        xsize,ysize = 1100,850 # landscape style
        noPopUp = True
        if noPopUp : gROOT.ProcessLine("gROOT->SetBatch()")
        canvas = TCanvas(pdf,title,xsize,ysize)
        gStyle.SetOptStat(statOpt)
        spaceBtwnPads = 0.01 / 1000.
        canvas.Divide(nX,nY,spaceBtwnPads,spaceBtwnPads)
        for i,h in enumerate(histlist):
            canvas.cd(i+1).SetLogy(setLogy)
            canvas.cd(i+1).SetLogx(setLogx)
            if abscissaIsTime : self.fixTimeDisplay(h)
            h.Draw(dopt)
            self.biggerLabels(h)
            if abscissaIsTime : self.fixTimeDisplay(h)
            #print i+1,h.GetName()
        self.finishDraw(canvas,ps,pdf,ctitle=ctitle)
        return
    def biggerLabels(self,h):
        '''
        increase axis label size
        '''
        factor = 2.0 # empirically determined
        sx = h.GetXaxis().GetLabelSize()
        h.GetXaxis().SetLabelSize(factor*sx)
        sy = h.GetYaxis().GetLabelSize()
        h.GetYaxis().SetLabelSize(factor*sy)
        return
    def drawMultiGraph(self,TMG,figdir='',SetLogy=False, SetLogx=False, abscissaIsTime = True, drawLines=True, xAxisLabel=None,yAxisLabel=None):
        '''
        draw TMultiGraph with legend and output as pdf
        Default is that abscissa is calendar time.
        Returns canvas
        '''
        debugMG = False
        if not TMG.GetListOfGraphs(): return # empty
        title = TMG.GetTitle()
        name = TMG.GetName()
        if SetLogx: name += '_logx'
        if SetLogy: name += '_logy'
        if debugMG: print 'graphUtils.drawMultiGraph',title,name,'TMG.GetListOfGraphs()',TMG.GetListOfGraphs(),'TMG.GetListOfGraphs().GetSize()',TMG.GetListOfGraphs().GetSize()
        nGraphs = TMG.GetListOfGraphs().GetSize()
        if figdir[-1] != os.path.sep:
            pdf = self.pip.fix(figdir + '/' + name + '.pdf')
            ps = self.pip.fix(figdir + '/' + name + '.ps')
        else:
            pdf = figdir + name + '.pdf'
            ps = figdir + name + '.ps'
        xsize,ysize = 1100,850 # landscape style
        noPopUp = True
        if noPopUp : gROOT.ProcessLine("gROOT->SetBatch()")
        canvas = TCanvas(pdf,title,xsize,ysize)
        canvas.SetLogy(SetLogy)
        canvas.SetLogx(SetLogx)
        # move title to left in order to put legend above plot
        gStyle.SetTitleX(0.3)
        x1 = 0.5
        x2 = x1 + .5
        y1 = 0.9
        y2 = y1 + .1
        lg = TLegend(x1,y1,x2,y2)
        NGraph = 0
        for g in TMG.GetListOfGraphs():
            NGraph += 1
            t = g.GetTitle()
            lg.AddEntry(g, t, "LP" )
            if abscissaIsTime : self.fixTimeDisplay(g)
        if NGraph>6: lg.SetNColumns(2)
        dOption = "AP"
        if drawLines: dOption += "L"
        # complicated monkey business because of idiotic way that logY is set
        if SetLogy:
            # determine the global y-range of all member graphs by hand,
            # then draw each graph individually ("A" only for the first)
            ymi,yma = 1.e20,1.e-20
            for g in TMG.GetListOfGraphs():
                x,y = self.getPoints(g)
                ymi = min(ymi,min(y))
                yma = max(yma,max(y))
            if ymi<=0: ymi = 0.1
            ymi = ymi/2.
            yma = 2.*yma
            TMG.SetMinimum(ymi)
            TMG.SetMaximum(yma)
            for g in TMG.GetListOfGraphs():
                g.SetMinimum(ymi)
                g.SetMaximum(yma)
                if "A" in dOption:
                    g.SetTitle( TMG.GetTitle() )
                    if xAxisLabel is not None: g.GetXaxis().SetTitle(xAxisLabel)
                    if yAxisLabel is not None: g.GetYaxis().SetTitle(yAxisLabel)
                g.Draw(dOption)
                dOption = dOption.replace("A","")
        else:
            TMG.Draw(dOption)
            if xAxisLabel is not None: TMG.GetXaxis().SetTitle(xAxisLabel)
            if yAxisLabel is not None: TMG.GetYaxis().SetTitle(yAxisLabel)
            if abscissaIsTime : self.fixTimeDisplay(TMG)
        self.labelTMultiGraph(TMG,debug=debugMG)
        lg.Draw()
        canvas.Draw()
        canvas.SetGrid(1)
        canvas.SetTicks(1)
        canvas.cd()
        canvas.Modified()
        canvas.Update()
        if 0:
            canvas.Print(pdf,'pdf')
        else:
            canvas.Print(ps,'Landscape')
            os.system('ps2pdf ' + ps + ' ' + pdf)
            if os.path.exists(pdf): os.remove(ps)
        if debugMG: print 'graphUtils.drawMultiGraph',title,'complete'
        return canvas
    def makeTMultiGraph(self,name,tit=None):
        # default title: name with underscores turned into spaces
        title = tit
        if tit is None:title = name.replace('_',' ')
        tmg = TMultiGraph()
        tmg.SetName(name)
        tmg.SetTitle(title)
        return tmg
    def labelTMultiGraph(self,tmg,debug=False):
        # derive axis titles from a name of the form 'Y_vs_X[_...]';
        # 'by'/'BY' in a token is rendered as '/'
        name = tmg.GetName()
        if 'vs' in name:
            s = name.split('_')
            xt = s[2]
            yt = s[0]
            xt = xt.replace('by','/')
            xt = xt.replace('BY','/')
            yt = yt.replace('by','/')
            yt = yt.replace('BY','/')
            if debug:
                print 'graphUtils.labelTMultiGraph: xt,yt',xt,yt,'tmg',tmg
                print 'tmg.GetXaxis()',tmg.GetXaxis(),'tmg.GetYaxis()',tmg.GetYaxis()
            if tmg.GetXaxis(): tmg.GetXaxis().SetTitle(xt)
            if tmg.GetYaxis(): tmg.GetYaxis().SetTitle(yt)
        return
    def makeTGraph(self,u,v,title,name,ex=None,ey=None):
        # TGraph from (u,v); TGraphErrors when x-errors given
        # (missing y-errors default to zero)
        if ex is None:
            g = TGraph(len(u),array('d',u), array('d',v))
        else:
            dy = ey
            if ey is None: dy = [0. for x in range(len(ex))]
            g = TGraphErrors(len(u),array('d',u),array('d',v),array('d',ex),array('d',dy))
        g.SetTitle(title)
        g.SetName(name)
        return g
    def color(self,obj,n,M,setMarkerColor=False,setMarkerType=True):
        '''
        set line color and marker type for obj based on indices n and M
        if M=n then use M to set marker type, otherwise determine marker type from n
        unless setMarkerType is False
        '''
        debug = False
        LC = len(self.goodColors)
        LM = len(self.goodMarkers)
        c = n%LC
        obj.SetLineColor( self.goodColors[c] )
        if debug: print 'color: obj',obj,'n',n,'obj.IsA().GetName()',obj.IsA().GetName()
        if setMarkerType:
            oName = obj.IsA().GetName()
            if oName=='TGraph' or oName=='TGraphErrors':
                if M==n:
                    m = M%LM
                else:
                    m = int(float(n)/float(LC))%LM
                obj.SetMarkerStyle( self.goodMarkers[m] )
                if setMarkerColor: obj.SetMarkerColor( self.goodColors[c] )
                if debug: print 'color:',obj.GetName(),'m',m,'self.goodMarkers[m]',self.goodMarkers[m]
        return
    def getPoints(self,g,getErrors=False):
        '''
        return abscissa,ordinate values of input graph g
        also return errors if getErrors is True
        '''
        x,y = [],[]
        if getErrors: dx,dy = [],[]
        for i in range(g.GetN()):
            a,b = ROOT.Double(0),ROOT.Double(0)
            OK = g.GetPoint(i,a,b)
            if OK!=-1:
                x.append(a)
                y.append(b)
                if getErrors:
                    dx.append(g.GetErrorX(i))
                    dy.append(g.GetErrorY(i))
        if getErrors: return x,y,dx,dy
        return x,y
    def getTDatime(self,dt,fmt='%Y/%m/%d %H:%M:%S'):
        '''
        convert date/time text to TDatime object
        '''
        datetimeObj = self.getdatetime(dt,fmt=fmt)
        return TDatime( datetimeObj.strftime('%Y-%m-%d %H:%M:%S') ).Convert()
    def getdatetime(self,dt,fmt='%Y/%m/%d %H:%M:%S'):
        ''' convert timestamp dt to text '''
        return datetime.datetime.strptime(dt,fmt)
    def reportHist(self,h):
        '''
        write out some properties of hist h
        '''
        name = h.GetName()
        title = h.GetTitle()
        xa = h.GetXaxis()
        nx = xa.GetNbins()
        xmi= xa.GetXmin()
        xma= xa.GetXmax()
        xex= xa.CanExtend()
        nd = h.GetDimension()
        # NOTE(review): words is a tuple here, so 'words += <str>' below
        # raises TypeError when xex is true -- string concat was likely meant.
        words = 'graphUtils.reportHist',name,title,'nx,xmi,xma',nx,xmi,xma
        if xex: words += 'can extend x-axis.'
        if nd>1:
            ya = h.GetYaxis()
            ny = ya.GetNbins()
            ymi= ya.GetXmin()
            yma= ya.GetXmax()
            yex= ya.CanExtend()
            words += 'ny,ymi,yma=',ny,ymi,yma
            if yex: words += 'can extend y-axis.'
        print words
        return
| [
"djaffe@bnl.gov"
] | djaffe@bnl.gov |
c274d8ae0994be62f340674dbe871429b3fb4592 | 6a3c78454957aa3a5693da0e11bb5859e68d89ad | /Plot_scripts/Err_MT_misfit_vs_wd_12_iters_GOOD.py | 69eea69543c2b8cfd387ac6830097bb0f9083a23 | [] | no_license | Andrei1412/Emod3d_FWI | f698ad5f35f5377249c1d9ebca66c7debcc565bc | ec552b39bd3af192ff1ef403ec8f22a6eddca5d4 | refs/heads/main | 2023-02-20T22:56:57.653860 | 2021-01-20T01:36:10 | 2021-01-20T01:36:10 | 317,999,402 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,846 | py | #!/usr/bin/env python2i
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 7 10:50:28 2019
@author: user
"""
# Plots the normalized waveform-misfit history (left axis, red) and the
# number of measurement windows (right axis, blue) versus inversion
# iteration; a dashed vertical line at iteration 9 separates the two
# inversion stages. All [misfit, window-count] pairs are hard-coded.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
##iN=16
iN=10
# Stage-1 iterations: [misfit, number of windows] per iteration.
Err=np.array([[8.10509518,304],[6.24488777,295],[4.48133162,292],[4.71763736,293],[4.08310807,295],[3.8712934,286],[3.99048825,286],[4.02149162,289],[4.00253364,287],[3.90183374,288],[3.27640018, 276],[3.20957784, 270],[3.21473381, 267],[3.18271712, 271],[3.214283, 268],[3.2315786, 272]])
#xint=range(0,iN-1)
Err_0 = Err[0,0]
#iN=5
# Stage-2 iterations: [misfit, number of windows] per iteration.
Err2=np.array([[2.60653187,553],[2.55095285,548],[2.49932458,541],[2.47281439,533],[2.48493813, 532]])
Err2_0 = Err2[0,0];Err2_1 = Err2_0;
#Err=np.array([843.13996323,624.72674779,524.31773419,455.72303331,459.29979017,462.16926876,462.16840173,452.55299466,450.55013137,450.55020534,450.55011971])
#Err2=np.array([483.54925189,491.95850618,506.33704642,520.97264321])
#Err2=np.array([450.55011971,436.28985938,444.77741887,484.27325394,485.714463,486.00849363])
#Err2_0 = Err2[0];Err2_1 = Err2_0;
#xint=range(0,iN+3)
xint=range(0,iN+3)
# create figure and axis objects with subplots()
#fig,ax = plt.subplots(figsize=(7.5,5))
fig,ax = plt.subplots(figsize=(5,5))
# rescale stage-2 misfits so the curve joins stage 1 at iteration 9
#Err2_new = Err2[0:6,0]/(Err2_1)*(Err2_1/Err2_0)*(Err[len(xint)-7,0]/Err_0)
Err2_new = Err2[0:6,0]/Err_0*(Err[9,0]/Err2_0)
#Err2_new[0] = (Err[len(xint)-7,0]/Err_0)
#line2 = ax.plot(xint[len(xint)-4:len(xint)], Err2_new[0:4], color="k", marker="o")
#line1 = ax.plot(xint[0:len(xint)-3], Err[0:len(xint)-3]/Err_0, color="k", marker="o")
line2 = ax.plot(xint[len(xint)-4:len(xint)], Err2_new[0:4], color="r", marker="o")
line1 = ax.plot(xint[0:len(xint)-3], Err[0:len(xint)-3,0]/Err_0, color="r", marker="o")
#Err10 = np.array([])
#Err10_new = Err10/Err2_0*(Err[len(xint)-5,0]/Err_0)
#line10 = ax.plot(xint[len(xint)-5:len(xint)-3], Err10_new[0:2], color="r", marker="o",linestyle='dashed')
# dashed vertical line marking the stage boundary at iteration 9
yy=np.array(np.linspace(0, 1))
#ax.plot(9*np.ones(yy.shape),yy,c='k',linestyle='dashed')
ax.plot(9*np.ones(yy.shape),yy,c='k',linestyle='dashed')
#line2 = ax.plot(xint[len(xint)-5:len(xint)],[Err[len(xint)-5,0]/Err_0,Err2[0:4,0]/(Err2_1)*(Err2_1/Err2_0)*(Err[len(xint)-5,0]/Err_0)], color="k", marker="o", label='misfit with revised CMT')
#ax.legend(loc='upper right',bbox_to_anchor=(0.95, 0.95))
# set x-axis label
ax.set_xlabel("Iteration, $\mathbf{m}$",fontsize=14)
# set y-axis label
#ax.set_ylabel("Misfit",color="red",fontsize=14)
ax.set_ylabel("Normalized misfit, $\chi_{p}(\mathbf{m})/\chi_{p}(\mathbf{m_{00}})$",color="red",fontsize=14)
#ax.title("Normalized RWM according to 14 validation events")
#plt.ylim([0.4, 1.01])
# twin object for two different y-axis on the sample plot
ax2=ax.twinx()
## make a plot with different y-axis using second axis object
line3 = ax2.plot(xint[0:len(xint)-3], Err[0:len(xint)-3,1],color="blue",marker="o")
line4 = ax2.plot(xint[len(xint)-4:len(xint)], Err2[0:4,1],color="blue",marker="o")
##ax2.scatter(0, 396, color="blue", marker="*")
#line3 = ax2.plot(xint, Err[0:len(xint),1],color="blue",marker="o",label='windows with revised CMT')
#line4 = ax2.scatter(0, 396, color="blue", marker="*",label='windows without revised CMT')
#ax2.legend(loc='upper right',bbox_to_anchor=(0.95, 0.75))
ax2.set_ylabel("Windows",color="blue",fontsize=14)
##plt.ylim([270, 305])
#plt.ylim([370, 400])
plt.xticks(xint[0:16])
#ax.text(5,0.8, '13 events', fontsize=14)
#ax.text(5,0.75, 'inversion', fontsize=14)
##ax.text(5,0.7, 'cc>0.7', fontsize=14)
#ax.text(9.2,0.8, '27 events ', fontsize=14)
#ax.text(9.2,0.75, 'inversion', fontsize=14)
#ax.text(9.4,0.7, 'cc>0.8', fontsize=14)
plt.xlim([-0.2, 12.2])
ax.set_ylim([0.4, 1.01])
#ax2.xlim([-0.1, 12.1])
ax2.set_ylim([100, 700])
plt.show()
# save the plot as a file
fig.savefig('Misfit_vs_window.jpg',
format='jpeg',
dpi=300,
bbox_inches='tight') | [
"noreply@github.com"
] | Andrei1412.noreply@github.com |
952f772de00105e7f6f3aaa25350df408ae7fcaa | 303d972198704f45de9f0ccfd0db39fff304692f | /gestion_de_solicitud/migrations/0006_asignacion_motivo.py | dddc20b59f47be3717b9a9cfdf16308a5d8f11bf | [] | no_license | jbust97/proyecto_is2 | 6e5551739f2aecffe689dcb395ab8adcaecbefe9 | 423e79d437b8666f9508b4b0eeb2be67533b8b2d | refs/heads/master | 2023-05-28T18:09:08.826645 | 2021-04-16T13:27:57 | 2021-04-16T13:27:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # Generated by Django 3.0.5 on 2020-08-28 15:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gestion_de_solicitud', '0005_auto_20200823_2217'),
]
operations = [
migrations.AddField(
model_name='asignacion',
name='motivo',
field=models.CharField(default=' ', max_length=500),
preserve_default=False,
),
]
| [
"hugofleitasbus@gmail.com"
] | hugofleitasbus@gmail.com |
d662e3cccc6393bf07124edfdf202bfc54925ebe | 7cf29923d278c5b934a40de216ac606c25c8a5eb | /wheelcms_axle/translate.py | 4f0f0c74ba26a7823c2018014ad16b58ddbffc3f | [
"BSD-2-Clause"
] | permissive | wheelcms/wheelcms_axle | 1df024f75d17544a575953359e3cc9a4ab56d93c | b5916b555f37b7baafdf08fd56b5b985688df9d0 | refs/heads/master | 2020-04-05T22:43:04.176353 | 2015-04-05T10:53:42 | 2015-04-05T10:53:42 | 7,800,085 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | from django.conf import settings
any_lang = ('any', 'Any')
def languages():
languages = tuple(settings.CONTENT_LANGUAGES)
if any_lang not in languages:
languages = languages + (any_lang, )
return languages
def fallback_languages(language):
""" given a language, provide a list of alternatives, prioritized """
langs = [language]
if language != any_lang[0]:
langs.append(any_lang[0])
return langs
def language_slug(slugs, slug, language):
"""
slugs is a mapping of lang->slug,
slug is a default slug,
Try to get the appropriate slug from the mapping first,
else use the provided slug. If neither are present, return
*any* slug from the mapping
(XXX we might try settings.LANGUAGE first)
"""
lslug = slugs.get(language, slug)
if lslug is None and language == any_lang[0]:
## Use fallback? XXX
return slugs.values()[0] # any
if lslug is None:
return slugs.values()[0] # any
## may still be None, let caller fail, for now
return lslug
| [
"github@in.m3r.nl"
] | github@in.m3r.nl |
0afe597851624a8f6aaecdcb02d4d61c69e8a8a0 | 3a57c3d562a527e6ff1150cd5c1d39f488e6c579 | /ДЗ/movies/migrations/0001_initial.py | 36f795c90e341149334185d23651f36692bfd1f9 | [] | no_license | kirsan007/RIP | 88ab25f30f476cac9f4bb5f459a2a6492f79dc47 | 8a61e24531873ee354fcf0f14319e9c11638f3d3 | refs/heads/master | 2023-02-12T23:33:20.198847 | 2021-01-14T19:11:26 | 2021-01-14T19:11:26 | 297,072,148 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | # Generated by Django 3.1.5 on 2021-01-10 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Actor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Имя')),
('age', models.PositiveSmallIntegerField(default=0, verbose_name='Возраст')),
('description', models.TextField(verbose_name='Описание')),
],
options={
'verbose_name': 'Актеры и режиссеры',
'verbose_name_plural': 'Актеры и режиссеры',
},
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Название')),
('tagline', models.CharField(default='', max_length=100, verbose_name='Слоган')),
('description', models.TextField(verbose_name='Описание')),
('draft', models.BooleanField(default=False, verbose_name='Черновик')),
('poster', models.ImageField(upload_to='movie_list/', verbose_name='Постер')),
('actors', models.ManyToManyField(related_name='film_actor', to='movies.Actor', verbose_name='актеры')),
('directors', models.ManyToManyField(related_name='film_director', to='movies.Actor', verbose_name='режиссер')),
],
options={
'verbose_name': 'Фильм',
'verbose_name_plural': 'Фильмы',
},
),
]
| [
"kirsan.pahomkin2013@yandex.ru"
] | kirsan.pahomkin2013@yandex.ru |
80b78c597397a6b92f8d5b4ecf36b743360a63db | c976078bf8dde5baf96416d60dd3bb06c72111ad | /test/dds/communication/test_build.py | 979f677d3d929188d2e4adfce66dd6a535cb68ab | [
"Apache-2.0"
] | permissive | eProsima/Fast-DDS | 21f3fecacca5a285ad9950b7683456c6f9930a4d | 107ea8d64942102696840cd7d3e4cf93fa7a143e | refs/heads/master | 2023-08-31T14:56:45.942016 | 2023-08-11T11:40:25 | 2023-08-11T11:40:25 | 20,296,703 | 1,225 | 463 | Apache-2.0 | 2023-09-14T11:33:09 | 2014-05-29T14:36:15 | C++ | UTF-8 | Python | false | false | 7,659 | py | """Execute a DDS communictaion test following a json definition file."""
# Copyright 2021 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import subprocess
import sys
import time
script_dir = os.path.dirname(os.path.realpath(__file__))
seed = str(os.getpid())
def read_json(file_name):
"""Read json file with test definition."""
structure_dic = {}
with open(file_name) as json_file:
structure_dic = json.load(json_file)
return structure_dic
def test_definition(file_name):
"""Return a list with each test process defined in dictionary."""
return read_json(file_name)['participants']
def define_args(tests_definition):
"""Use list dictionary to get commands args for each test process."""
sub_args = []
pub_args = []
pubsub_args = []
for test in tests_definition:
if 'kind' not in test.keys():
print('ARGUMENT ERROR : '
'Test definition requites <kind> field for each participant')
continue
# All processes has seed argument
test_arguments = ['--seed', seed]
possible_arguments = ['samples',
'wait',
'magic',
'publishers',
'sleep_before_exec']
for argument in possible_arguments:
if argument in test.keys():
test_arguments.append('--' + argument)
test_arguments.append(test[argument])
# Add arguments without value
possible_flags = ['exit_on_lost_liveliness',
'zero_copy',
'fixed_type',
'notexit']
for flag in possible_flags:
if flag in test.keys():
test_arguments.append('--' + flag)
xmlfile_arg = 'xmlfile'
if xmlfile_arg in test.keys():
xml_path = os.path.join(script_dir, test[xmlfile_arg])
test_arguments.append('--' + xmlfile_arg)
test_arguments.append(xml_path)
if test['kind'] == 'publisher':
pub_args.append(test_arguments)
elif test['kind'] == 'subscriber':
sub_args.append(test_arguments)
elif test['kind'] == 'pubsub':
pubsub_args.append(test_arguments)
else:
print('ARGUMENT ERROR : '
'<kind> field can be publisher/subscriber/pubsub')
return pub_args, sub_args, pubsub_args
def define_commands(pub_args, sub_args, pubsub_args):
"""Create commands for each test addind executable to args."""
# Publisher executable
publisher_command = os.environ.get(
'DDS_SIMPLE_COMMUNICATION_PUBLISHER_BIN')
if not publisher_command:
publisher_files = glob.glob(
os.path.join(
script_dir,
'**/DDSCommunicationPublisher*'),
recursive=True)
pf = iter(publisher_files)
publisher_command = next(pf, None)
while publisher_command and \
(not os.path.isfile(publisher_command)
or not os.access(publisher_command,
os.X_OK)):
publisher_command = next(pf, None)
assert publisher_command
# Subscriber executable
subscriber_command = os.environ.get(
'DDS_SIMPLE_COMMUNICATION_SUBSCRIBER_BIN')
if not subscriber_command:
subscriber_files = glob.glob(
os.path.join(
script_dir,
'**/DDSCommunicationSubscriber*'),
recursive=True)
pf = iter(subscriber_files)
subscriber_command = next(pf, None)
while subscriber_command and \
(not os.path.isfile(subscriber_command)
or not os.access(subscriber_command,
os.X_OK)):
subscriber_command = next(pf, None)
assert subscriber_command
# Pub Sub executable
pubsub_command = os.environ.get('DDS_SIMPLE_COMMUNICATION_PUBSUB_BIN')
if not pubsub_command:
pubsub_files = glob.glob(
os.path.join(
script_dir,
'**/DDSCommunicationPubSub*'),
recursive=True)
pf = iter(pubsub_files)
pubsub_command = next(pf, None)
while pubsub_command and \
(not os.path.isfile(pubsub_command)
or not os.access(pubsub_command,
os.X_OK)):
pubsub_command = next(pf, None)
assert pubsub_command
# Add executable to each command
return (
[[publisher_command] + args for args in pub_args],
[[subscriber_command] + args for args in sub_args],
[[pubsub_command] + args for args in pubsub_args]
)
def execute_command(command):
"""Execute command after possibly waiting some time."""
sleep_tag = '--sleep_before_exec'
if sleep_tag in command:
time.sleep(int(command.pop(command.index(sleep_tag) + 1)))
command.remove(sleep_tag)
return subprocess.Popen(command)
def execute_commands(pub_commands, sub_commands, pubsub_commands, logger):
"""Get test definitions in command lists and execute each process."""
pubs_proc = []
subs_proc = []
pubsubs_proc = []
for subscriber_command in sub_commands:
logger.info(f'Executing subcriber: {subscriber_command}')
subs_proc.append(execute_command(subscriber_command))
for pubsub_command in pubsub_commands:
logger.info(f'Executing pubsub: {pubsub_command}')
pubsubs_proc.append(execute_command(pubsub_command))
for publisher_command in pub_commands:
logger.info(f'Executing publisher: {publisher_command}')
pubs_proc.append(execute_command(publisher_command))
ret_value = 0
for proc in subs_proc:
proc.communicate()
ret_value = ret_value + proc.returncode
for proc in pubsubs_proc:
proc.communicate()
ret_value = ret_value + proc.returncode
for proc in pubs_proc:
proc.kill()
return ret_value
if __name__ == '__main__':
logger = logging.getLogger('DDS COMMUNICATION TEST')
logger.setLevel(logging.INFO)
logger.error("TEST RUNNING")
args = sys.argv[1:]
if len(args) != 1:
logger.error('ARGUMENTS ERROR : 1 argument required: '
'path to .json file with test definition')
sys.exit(1)
test_definitions = test_definition(args[0])
logger.error(test_definitions)
pub_args, sub_args, pubsub_args = define_args(test_definitions)
logger.error(pub_args)
logger.error(sub_args)
logger.error(pubsub_args)
pub_commands, sub_commands, pubsub_commands = \
define_commands(pub_args, sub_args, pubsub_args)
logger.error(pub_commands)
logger.error(sub_commands)
logger.error(pubsub_commands)
test_value = execute_commands(
pub_commands,
sub_commands,
pubsub_commands,
logger)
logger.error(test_value)
if test_value != 0:
sys.exit(1)
else:
sys.exit(0)
| [
"noreply@github.com"
] | eProsima.noreply@github.com |
fb48fd9656915149d8133355706be99ed2db0469 | a31de016611f3b4efc7a576e7113cad1a738419b | /_string_monster2.py | ba71783722b858478094721a871a759c7c6dd5c1 | [] | no_license | Ing-Josef-Klotzner/python | 9d4044d632672fff966b28ab80e1ef77763c78f5 | 3913729d7d6e1b7ac72b46db7b06ca0c58c8a608 | refs/heads/master | 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 | Python | UTF-8 | Python | false | false | 1,363 | py | #!/usr/bin/python3
from sys import stdin
def match (ssof, ss):
if ss == "": return True
#print (ssof, ss, end = " ")
for st in ssof:
if ss.startswith (st):
return match (ssof - {st}, ss [len (st):])
return False
# this works with testcases, because strings are included
# in order in sleepy string (hackerearth testcases)
# fails for sample test case where sleepy string chars are scrumbled
def main ():
read = stdin.readline
t = int (read ())
for t_ in range (t):
n = int (read ())
sof = [] # list of strings on floor
lns = [] # list of the string lengths
for n_ in range (n):
s = read ().rstrip ()
sof.append (s)
lns.append (len (s))
ss = read ().rstrip () # sleepy string
lnss = len (ss)
mnl = min (lns)
mxl = max (lns)
justone = 0
allother_max = 0
for n_ in range (n):
if lns [n_] == mnl: justone += 1
elif lns [n_] == mxl: allother_max += 1
if lnss < mnl or lnss > mnl and lnss < 2 * mnl or mnl == mxl and lnss % mnl or justone == 1 and allother_max == n - 1 and lnss % mxl not in {0, mnl}:
print ("NO")
continue
ssof = set (sof)
print ("YES" if match (ssof, ss) else "NO")
if __name__ == "__main__": main ()
| [
"noreply@github.com"
] | Ing-Josef-Klotzner.noreply@github.com |
391c160e66432620a1d0f5b7efc59ce2320d27bd | 0f7f2dbda8619c5105c23fc1249b7e5e2e7beb1e | /2015/day19.py | f9abc7e27b9fc453c39d30ef752d898e2d1bf5b7 | [] | no_license | Wojonatior/AdventOfCode | bb305887ff31c12048904147ea123d913c92badb | 78925ab9a3f70ba077db26b0463d3ad01438f8a3 | refs/heads/master | 2021-01-10T14:36:53.460498 | 2017-03-13T08:30:11 | 2017-03-13T08:30:11 | 48,130,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | import re
calibration = "ORnPBPMgArCaCaCaSiThCaCaSiThCaCaPBSiRnFArRnFArCaCaSiThCaCaSiThCaCaCaCaCaCaSiRnFYFArSiRnMgArCaSiRnPTiTiBFYPBFArSiRnCaSiRnTiRnFArSiAlArPTiBPTiRnCaSiAlArCaPTiTiBPMgYFArPTiRnFArSiRnCaCaFArRnCaFArCaSiRnSiRnMgArFYCaSiRnMgArCaCaSiThPRnFArPBCaSiRnMgArCaCaSiThCaSiRnTiMgArFArSiThSiThCaCaSiRnMgArCaCaSiRnFArTiBPTiRnCaSiAlArCaPTiRnFArPBPBCaCaSiThCaPBSiThPRnFArSiThCaSiThCaSiThCaPTiBSiRnFYFArCaCaPRnFArPBCaCaPBSiRnTiRnFArCaPRnFArSiRnCaCaCaSiThCaRnCaFArYCaSiRnFArBCaCaCaSiThFArPBFArCaSiRnFArRnCaCaCaFArSiRnFArTiRnPMgArF"
#calibration = "HOH"
replacements = []
#replacements = [("H", "HO"),("H", "OH"),("O", "HH")]
resultingMolecules = {}
moleculeCount = 0
for line in open("day19input.txt", "r"):
match = re.match(R"(\w+) => (\w+)", line)
replacements.append((str(match.group(1)),str(match.group(2))))
for replacement in replacements:
for i in range(len(calibration)-len(replacement[0])+1):
if(calibration[i:i+len(replacement[0])] == replacement[0]):
firstHalf = calibration[:i]
secondHalf = calibration[i+len(replacement[0]):]
middle = replacement[1]
resultingMolecules[firstHalf + middle + secondHalf] = 1
print (len(resultingMolecules.items())) | [
"jarekwojo@gmail.com"
] | jarekwojo@gmail.com |
1844307fa070223924ce07cc389c398066e09a3f | e292a4be6ddfa7ebe90ceb5ad55502971d6dc578 | /Django_music_app/pydjangopractice/wsgi.py | 29b47002f256182daee185b3083f0b4805b76135 | [] | no_license | sandeepvempati/Django | fb87309a66a5792678dadb63d00140980a17ca5f | 750bf5d3083503a46ccc2bc222b6c4b1b0cefdd4 | refs/heads/master | 2020-12-02T07:47:50.430587 | 2017-07-10T02:47:56 | 2017-07-10T02:47:56 | 96,728,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for pydjangopractice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pydjangopractice.settings")
application = get_wsgi_application()
| [
"vempatisandeep189@gmail.com"
] | vempatisandeep189@gmail.com |
ac73ecf0179370acf915cd0e0a943ce57c4ca9e0 | d5b434e2fd3996ff7d64cfbea66b614e9e65589e | /tests/Pegasus/tokens_list.py | 4ca67ed05e64d59a7b6c4c4b734575fb0f855363 | [] | no_license | ttran4info/Baby_Boss | 1e7637f1824dc68571a3b7daa00482199b69fa83 | 728881002ed99ea5b762bf302f4b94297b12251d | refs/heads/master | 2023-04-06T22:34:56.386454 | 2018-08-24T20:35:46 | 2018-08-24T20:35:46 | 123,638,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py |
def get_tokens_list(s):
import re, string, textwrap
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespace
s1 = re.sub(r"\s+", '', s)
# Split a string into N equal parts
tokens=textwrap.wrap(s1, 32)
return tokens
| [
"noreply@github.com"
] | ttran4info.noreply@github.com |
6dc00598c711ec5f7e2bff0bfce460629db84485 | c4460fe6d7171469705405d1893713f35580f135 | /Man-bot5.py | d340bcf9998eada8459f6a21cca3d591b3c52c68 | [] | no_license | dew2564/thirdza056 | 28cc8ffdc2576fc641bb9bfe82df38d4330af533 | d6bc6aa56ec571d2c70cfe74b30296bca1a1d788 | refs/heads/master | 2021-09-11T13:42:45.672316 | 2018-04-08T05:39:23 | 2018-04-08T05:39:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115,785 | py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from io import StringIO
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,sys
import re,string,os
import os.path,sys,urllib,shutil,subprocess,pickle
cl = LINETCR.LINE()
cl.login(token="ใส่โทเค็น")
cl.loginResult()
ki = LINETCR.LINE()
ki.login(token="ใส่โทเค็น k1")
ki.loginResult()
kk = LINETCR.LINE()
kk.login(token="ใส่โทเค็น k2")
kk.loginResult()
ks = LINETCR.LINE()
ks.login(token="ใส่โทเค็น k3")
ks.loginResult()
kc = LINETCR.LINE()
kc.login(token="ใส่โทเค็น k4")
kc.loginResult()
ka = LINETCR.LINE()
ka.login(token="ใส่โทเค็น k5")
ka.loginResult()
with open('profileSave.pkl') as f:
save1 = pickle.load(f)
print "login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""Mr. Bots…⛿
╔══╦═╦═╗
║║║║║║║║ ✯✯❇ᵀᴴᴬᴵᴸᴬᴺᴰ❇✯✯
║║║║╦║║║ ❇͜͡C͜͡r͜͡e͜͡a͜͡t͜͡o͜͡r✯͜͡$͜͡ë͜͡I͜͡F͜͡-͜͡฿͜͡o͜͡Ŧ❇
╚╩╩╩╩╩╩╝️ နับთิஏთั้ଏบਹທ SirichanV⒑
📧https://line.me/R/ti/p/%40uvh1233u
╔══════════════════════
║❂➣[Id]
║❂➣[Mid]
║❂➣[All mid]
║❂➣[Me]
║❂➣[You @]
║❂➣[Mybot]
║❂➣[Name Bot (Text)]
║❂➣[Sendcontact]
║❂➣[K1/K2/K3 join]
║❂➣[K1/K2/K3/]
║❂➣[K1/K2/K3 fuck:]
║❂➣[K1/K2/K3 gift
║❂➣[Allgift]
║❂➣[Group Id]
║❂➣[TL:"Text"]
║❂➣[Clock:]
║❂➣[Up clock]
║❂➣[Name:'text']
║❂➣[Mic]:"mid"]
║❂➣[Mc @]
║❂➣[Rejectall]
║❂➣[Massage add:"text"]
║❂➣[Add confirmasi]
║❂➣[Comment set:"Text"]
║❂➣[Comment check]
║❂➣[Clock: on]
║❂➣[Clock: off]
║❂➣[Ban]:
║❂➣[Unban]:
║❂➣[Conban]
║❂➣[Banlist]:
║❂➣[Allgiftt]
║❂➣[Test]
║❂➣[Copy @]
║❂➣[Save]
║❂➣[Load]
║❂➣[Spam on (Number) (Text)
║❂➣[Spam off (Number) (Text)
║❂➣[Gcreator]
║❂➣[Covergroup]
║❂➣[Tagall]
║❂➣[Kicker]
║❂➣[Setpoint]
║❂➣[Setcheck]
║❂➣[Kick"@tag]]
╠══════════════════════
║ ✥(sᴇᴛ)ᴄᴏᴍᴍᴀɴᴅ✥
╠══════════════════════
║❂➣[Contact: on/off]
║❂➣[Auto join: on/off]
║❂➣[Cancel Invite: 1 on/off]
║❂➣[Auto share: on/off]
║❂➣[Auto leave: on/off]
║❂➣[Comment: on/off]
║❂➣[Auto add: on/off]
║❂➣[Auto like: on/off]
╠══════════════════════
║✥ᴄᴏᴍᴍᴀɴᴅ ɪɴ ᴛʜᴇ ɢʀᴏᴜᴘ✥
╠══════════════════════
║❂➣[Ban"@Tag]
║❂➣[Unban"@Tag]
║❂➣[Urlon]:
║❂➣[Urloff]:
║❂➣[Url]:
║❂➣[Ginfo]:
║❂➣[Invite:"mid"]
║❂➣[Say:"Text"]:
║❂➣[Cancel]:
║❂➣[Gn:"name"]:
║❂➣[NK @tag]:
║❂➣[Dead]
╠══════════════════════
║•─ ͜͡✫ѕєʟғвот[ᴍ̶̲̅ᴀ̶̲̅ɴ̶̲̅]κɪcκєʀ ͜͡✫─•
╚══════════════════════
Message Protect [Help2]
"""
helpMessage2 ="""•─ ͜͡✫ѕєʟғвот[ᴍ̶̲̅ᴀ̶̲̅ɴ̶̲̅]κɪcκєʀ ͜͡✫─•
╔═══════════════════════
║❂➣[PROTECT: ON/OFF]ชุดล็อกกลุ่ม
║❂➣[BLOCK URL: ON/OFF] ล็อกลิงก์
║❂➣[NAMELOCK: ON/OFF] ล็อกชื่อกลุ่ม
║❂➣[BLOCKINVITE: ON/OFF]ล็อกเชิญ
╚═══════════════════════
"""
KAC = [cl,ki,kk,ks,kc,ka]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
kimid = kk.getProfile().mid
ki2mid = ks.getProfile().mid
Cmid = kc.getProfile().mid
Emid = ka.getProfile().mid
admin = ["ud24af63fd62d14c3bf8f719df80c3745"]
me = cl.getProfile().mid
bot1 = cl.getProfile().mid
main = cl.getProfile().mid
kicker1 = ki.getProfile().mid
bots = me + kicker1
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
admins = ["ud24af63fd62d14c3bf8f719df80c3745"]
Rx5 = ["u406133ad4d3fbe50a2f4d51ea081d050"]
Rx4 = ["u406133ad4d3fbe50a2f4d51ea081d050"]
Rx3 = ["u406133ad4d3fbe50a2f4d51ea081d050"]
Rx2 = ["ua51ba06b0dd18c0bfe2cc6caa3458202"]
Rx1 = ["uc7f32bb28dc009916d40af87c9910ddc"]
Administrator = admins + Rx5 + Rx4 + Rx3 + Rx2 + Rx1
AS = Rx2 + Rx1 + Rx3 + Rx4 + Rx5
adminsA = admins + Rx3 + Rx5
omikuzi = ["大吉","中吉","小吉","末吉","大凶","凄1�7"]
wait = {
'contact':False,
'autoJoin':False,
'autoCancel':{"on":False,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"""──────┅═ইई═┅──────
နับთิஏთั้ଏบਹທ Sirichan V⒑ ชุดบอทป้องกัน
สนใจติดต่อที่ ѕєʟғвот[ᴍ̶̲̅ᴀ̶̲̅ɴ̶̲̅]κɪcκєʀ
http://line.me/ti/p/~1ove..neverdie
──────┅═ইई═┅──────
Thank For Add Me Creator Selfbot
""",
"lang":"JP",
"comment":"Auto like By.[ᴍ̶̲̅ᴀ̶̲̅ɴ̶̲̅] \n\nနับთิஏთั้ଏบਹທ Sirichan V⒑ ชุดบอทป้องกัน\n📧https://line.me/R/ti/p/%40uvh1233u \n",
"likeOn":True,
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"blacklist":{},
"wblacklist":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"dblacklist":False
}
wait2 = {
'readMember':{},
'readPoint':{},
'ROM':{},
'setTime':{}
}
setTime = {}
setTime = wait2["setTime"]
res = {
'num':{},
'us':{},
'au':{},
}
save1 = {
"Saved":False,
"displayName":"",
"statusMessage":"",
"pictureStatus":""
}
def Cmd(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = [""]
for texX in tex:
for command in commands:
if string ==texX + command:
return True
return False
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
kk.cancelGroupInvitation(op.param1, matched_list)
ks.cancelGroupInvitation(op.param1, matched_list)
if op.type == 17:
if mid in op.param3:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist user flushing is complete")
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ka.updateGroup(G)
except:
pass
if op.param2 in ken:
pass
else:
try:
koutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ka.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"Group name lock")
ki.sendText(op.param1,"Haddeuh dikunci Pe'a")
kk.sendText(op.param1,"Wekawekaweka Har Har")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 17:
if mid in op.param3:
if wait["blacklist"] == True:
cl.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
if op.type == 32:
if mid in op.param3:
wait["blacklist"][op.param2] == True
if op.type == 32:
if mid in op.param3:
if wait["blacklist"] == True:
cl.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
if op.type == 25:
if mid in op.param3:
wait["blacklist"][op.param2] == True
if op.type == 25:
if mid in op.param3:
if wait["blacklist"] == True:
cl.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.param3 == "4":
if op.param1 in protecturl:
group = cl.getGroup(op.param1)
if group.preventJoinByTicket == False:
group.preventJoinByTicket = True
cl.updateGroup(group)
cl.sendText(op.param1,"URL can not be changed")
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
else:
pass
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "ud24af63fd62d14c3bf8f719df80c3745":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
ki.acceptGroupInvitationByTicket(list_[1],list_[2])
kk.acceptGroupInvitationByTicket(list_[1],list_[2])
ks.acceptGroupInvita
X = cl.getGroup(list_[1])
X = ki.getGroup(list_[1])
X = kk.getGroup(list_[1])
X = ks.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
ki.updateGroup(X)
kk.updateGroup(X)
ks.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1002)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"It's included in a blacklist already〄1�7")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"I decided not to make a comment〄1�7")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"It was eliminated from a blacklist〄1�7")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It isn't included in a blacklist〄1�7")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"It's included in a blacklist already.〄1�7")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"It was added to the blacklist.〄1�7")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"It was eliminated from a blacklist〄1�7")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It isn't included in a blacklist〄1�7")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help","HELP"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Help2","Key","KEY"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage2)
else:
cl.sendText(msg.to,helpt)
elif ("Gn:"in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("ki1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("ki1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("ki2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("ki2 gn ","")
ki2.updateGroup(X)
else:
ki2.sendText(msg.to,"It can't be used besides the group.")
elif "kick:" in msg.text:
midd = msg.text.replace("kick:","")
cl.kickoutFromGroup(msg.to,[midd])
elif "Invite:" in msg.text:
midd = msg.text.replace("Invite:","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "K1 invite:" in msg.text:
midd = msg.text.replace("K1 invite:","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Me" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif "K1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif "K2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
kk.sendMessage(msg)
elif "K3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ks.sendMessage(msg)
elif "K4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
elif "K5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
ka.sendMessage(msg)
elif "Mybot" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': kimid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': ki2mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': Cmid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': Emid}
cl.sendMessage(msg)
elif "Sendcontact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentMetadata = {'mid': kimid}
kk.sendMessage(msg)
msg.contentMetadata = {'mid': ki2mid}
ks.sendMessage(msg)
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentMetadata = {'mid': Emid}
ka.sendMessage(msg)
# "Gift"/"Man gift": the main account sends 8 theme-gift messages
# (contentType 9), same product id, MSGTPL 1..8.
elif msg.text in ["Gift","Man gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '1'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '2'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '3'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '4'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '5'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '6'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '7'}
    msg.text = None
    cl.sendMessage(msg)
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '8'}
    msg.text = None
    cl.sendMessage(msg)
# "K1 gift".."K5 gift": one theme gift from each bot account.
# NOTE(review): the first trigger string is identical (mojibake included) in all
# five lists below, so only the first branch can ever match it — the "K2 gift"
# .."K5 gift" text triggers still work; confirm whether this is intended.
elif msg.text in ["愛のプレゼンツ1�7","K1 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '4'}
    msg.text = None
    ki.sendMessage(msg)
elif msg.text in ["愛のプレゼンツ1�7","K2 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '6'}
    msg.text = None
    kk.sendMessage(msg)
elif msg.text in ["愛のプレゼンツ1�7","K3 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '3'}
    msg.text = None
    ks.sendMessage(msg)
elif msg.text in ["愛のプレゼンツ1�7","K4 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '3'}
    msg.text = None
    kc.sendMessage(msg)
elif msg.text in ["愛のプレゼンツ1�7","K5 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '3'}
    msg.text = None
    ka.sendMessage(msg)
# "Allgift": every account sends the same theme gift once.
elif msg.text in ["Allgift","All Gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                         'PRDTYPE': 'THEME',
                         'MSGTPL': '12'}
    msg.text = None
    cl.sendMessage(msg)
    ki.sendMessage(msg)
    kk.sendMessage(msg)
    ks.sendMessage(msg)
    kc.sendMessage(msg)
    ka.sendMessage(msg)
# "cancel"/"Cancel": revoke all pending invitations in this group (main account).
# Only valid in a group chat (toType 2).
elif msg.text in ["cancel","Cancel"]:
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        if group.invitee is not None:
            gInviMids = [contact.mid for contact in group.invitee]
            cl.cancelGroupInvitation(msg.to, gInviMids)
        else:
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"There isn't an invited person〄1�7")
            else:
                cl.sendText(msg.to,"you Sato face-like person absence〄1�7")
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It can't be used besides the group〄1�7")
        else:
            cl.sendText(msg.to,"Impossible use besides")
# "K1 cancel": same, executed by bot account K1 (ki).
elif msg.text in ["K1 cancel"]:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        if group.invitee is not None:
            gInviMids = [contact.mid for contact in group.invitee]
            ki.cancelGroupInvitation(msg.to, gInviMids)
        else:
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"There isn't an invited person〄1�7")
            else:
                ki.sendText(msg.to,"you Sato face-like person absence〄1�7")
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It can't be used besides the group〄1�7")
        else:
            cl.sendText(msg.to,"Impossible use besides")
# "Comment set:<text>": change the auto-comment text stored in wait["comment"].
elif "Comment set:" in msg.text:
    c = msg.text.replace("Comment set:","")
    if c in [""," ","\n",None]:
        cl.sendText(msg.to,"Error")
    else:
        wait["comment"] = c
        cl.sendText(msg.to,"It was changed。\n\n" + c)
# "Comment check": show the current auto-comment text.
elif msg.text in ["Comment check"]:
    cl.sendText(msg.to,"An automatic comment is established as follows at present。\n\n" + str(wait["comment"]))
# "Comment:on"/"Comment:off": toggle the auto-comment feature.
# NOTE(review): the "Done"/"Already" replies look swapped relative to the state
# check (replies "Done" when already on) — confirm intent before changing.
elif msg.text in ["コメント:オン","Comment:on"]:
    if wait["commentOn"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Done〄1�7")
    else:
        wait["commentOn"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already〄1�7")
elif msg.text in ["コメント:オフ","Comment:off"]:
    if wait["commentOn"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Done〄1�7")
    else:
        wait["commentOn"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already〄1�7")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
# "Block url:on": add this chat to the url-protection list.
# NOTE(review): "on" has no admin check while "off" requires Administrator —
# confirm the asymmetry is intended.
elif msg.text in ["Block url:on"]:
    protecturl.append(msg.to)
    cl.sendText(msg.to,"ƊƠƝЄ")
# "Block url:off": Administrators only — remove this chat from the list.
elif msg.text in ["Block url:off"]:
    if msg.from_ in Administrator:
        protecturl.remove(msg.to)
        cl.sendText(msg.to,"ƛԼԼƠƜЄƊ")
    else:
        cl.sendText(msg.to,"ƛԼƦЄƛƊƳ")
# "Urlon": allow joining this group via its ticket URL
# (preventJoinByTicket = False opens the link).
elif msg.text in ["Urlon"]:
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        group.preventJoinByTicket = False
        cl.updateGroup(group)
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƲƦԼ ƠƝ ƛԼƦЄƛƊƳ〄1�7")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƲƦԼ。")
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It can't be used besides the group。")
        else:
            cl.sendText(msg.to,"Impossible use besides")
# "Urloff": close the group's ticket URL again.
elif msg.text in ["Urloff"]:
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        group.preventJoinByTicket = True
        cl.updateGroup(group)
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƲƦԼ ƇԼƠƧЄƊ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƲƦԼ。")
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It can't be used besides the group。")
        else:
            cl.sendText(msg.to,"Impossible use besides")
# "ginfo"/"Ginfo": report group name, id, creator, picture URL, member and
# pending-invite counts, and whether the ticket URL is open.
elif msg.text in ["ginfo","Ginfo"]:
    if msg.toType == 2:
        ginfo = cl.getGroup(msg.to)
        try:
            gCreator = ginfo.creator.displayName
        except:
            # creator can be missing on old groups; fall back to a placeholder
            gCreator = "Error"
        if wait["lang"] == "JP":
            if ginfo.invitee is None:
                sinvitee = "0"
            else:
                sinvitee = str(len(ginfo.invitee))
            if ginfo.preventJoinByTicket == True:
                u = "ปิดอยู่"
            else:
                u = "เปิดอยู่"
            cl.sendText(msg.to,"[กลุ่ม]\n" + str(ginfo.name) + "\n\n[ไอดีกลุ่ม]\n" + msg.to + "\n\n[ผู้สร้างกลุ่ม]\n" + gCreator + "\n\n[รูปโปรไฟล์กลุ่ม]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nสมาชิก:" + str(len(ginfo.members)) + " ท่าน\nเชิญ:" + sinvitee + " ท่าน\nURL:" + u + "")
        else:
            cl.sendText(msg.to,"[名字]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[小组的作成者]\n" + gCreator + "\n[小组图标]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It can't be used besides the group。")
        else:
            cl.sendText(msg.to,"Impossible use besides")
# "Id": echo the chat/group id.
elif "Id" == msg.text:
    cl.sendText(msg.to,msg.to)
# "Mid": echo the main account's mid.
elif "Mid" == msg. text:
    cl.sendText(msg.to,mid)
# "All mid": each account posts its own mid.
elif "All mid" == msg.text:
    cl.sendText(msg.to,mid)
    ki.sendText(msg.to,Amid)
    kk.sendText(msg.to,kimid)
    ks.sendText(msg.to,ki2mid)
# Thai keyword triggers: bot ks replies with a built-in sticker
# (contentType 7; STKID/STKPKGID select the sticker).
elif "ฮ่าๆ" == msg.text:
    msg.contentType = 7
    msg.text = None
    msg.contentMetadata = {
        "STKID": "100",
        "STKPKGID": "1",
        "STKVER": "100" }
    ks.sendMessage(msg)
elif "โกรธ" == msg.text:
    msg.contentType = 7
    msg.text = None
    msg.contentMetadata = {
        "STKID": "105",
        "STKPKGID": "1",
        "STKVER": "100" }
    ks.sendMessage(msg)
elif "ยินดีต้อนรับ" == msg.text:
    msg.contentType = 7
    msg.text = None
    msg.contentMetadata = {
        "STKID": "247",
        "STKPKGID": "3",
        "STKVER": "100" }
    ks.sendMessage(msg)
# "TL:<text>": post <text> to the timeline and reply with a link to the post.
elif "TL:" in msg.text:
    tl_text = msg.text.replace("TL:","")
    cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
# "Name:<name>": rename the main account (LINE caps display names at 20 chars).
elif "Name:" in msg.text:
    string = msg.text.replace("Name:","")
    if len(string.decode('utf-8')) <= 20:
        profile = cl.getProfile()
        profile.displayName = string
        cl.updateProfile(profile)
        cl.sendText(msg.to,"The name " + string + " I did NI change。")
# "Name Bot<name>": rename all five bot accounts to the same display name
# (taken from the main account's profile object with displayName swapped).
elif "Name Bot" in msg.text:
    string = msg.text.replace("Name Bot","")
    if len(string.decode('utf-8')) <= 20:
        profile = cl.getProfile()
        profile.displayName = string
        ki.updateProfile(profile)
        kk.updateProfile(profile)
        ks.updateProfile(profile)
        kc.updateProfile(profile)
        ka.updateProfile(profile)
        cl.sendText(msg.to,"The name " + string + " I did NI change。")
#---------------------------------------------------------
elif "K1 upname:" in msg.text:
string = msg.text.replace("K1 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K2 upname:" in msg.text:
string = msg.text.replace("K2 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K3 upname:" in msg.text:
string = msg.text.replace("K3 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
# "K1/K2/K3 upstatus: <text>": set the status message of the given bot
# account (LINE caps status messages at 500 chars).
elif "K1 upstatus: " in msg.text:
    string = msg.text.replace("K1 upstatus: ","")
    if len(string.decode('utf-8')) <= 500:
        profile_B = ki.getProfile()
        profile_B.statusMessage = string
        ki.updateProfile(profile_B)
        ki.sendText(msg.to,"display message " + string + " done")
elif "K2 upstatus: " in msg.text:
    string = msg.text.replace("K2 upstatus: ","")
    if len(string.decode('utf-8')) <= 500:
        profile_C = kk.getProfile()
        profile_C.statusMessage = string
        kk.updateProfile(profile_C)
        kk.sendText(msg.to,"display message " + string + " done")
elif "K3 upstatus: " in msg.text:
    string = msg.text.replace("K3 upstatus: ","")
    if len(string.decode('utf-8')) <= 500:
        profile_C = ks.getProfile()
        profile_C.statusMessage = string
        ks.updateProfile(profile_C)
        ks.sendText(msg.to,"display message " + string + " done")
# "Mic:<mid>": send the contact card of an arbitrary mid.
elif "Mic:" in msg.text:
    mmid = msg.text.replace("Mic:","")
    msg.contentType = 13
    msg.contentMetadata = {"mid":mmid}
    cl.sendMessage(msg)
# "Contact:on"/"Contact:off": toggle the contact-echo feature flag.
elif msg.text in ["Contact:on"]:
    if wait["contact"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƇƠƝƬƛƇƬ ƠƝ ƛԼƦЄƛƊƳ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ。")
    else:
        wait["contact"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƜƛƧ ƬƲƦƝЄƊ ƠƝ。")
        else:
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ。")
elif msg.text in ["Contact:off"]:
    if wait["contact"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƇƠƝƬƛƇƬ ƠƑƑ ƛԼƦЄƛƊƳ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ。")
    else:
        wait["contact"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƜƛƧ ƬƲƦƝЄƊ ƠƑƑ。")
        else:
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƑƑ。")
# "Auto join:on"/"Auto join:off": toggle auto-accepting group invitations.
elif msg.text in ["Auto join:on"]:
    if wait["autoJoin"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ʆƠƖƝ ƠƝ ƛԼƦЄƛƊƳ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ。")
    else:
        wait["autoJoin"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƜƛƧ ƬƲƦƝЄƊ ƠƝ。")
        else:
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ。")
elif msg.text in ["Auto join:off"]:
    if wait["autoJoin"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ʆƠƖƝ ƠƑƑ ƛԼƦЄƛƊƳ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ。")
    else:
        wait["autoJoin"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƜƛƧ ƬƲƦƝЄƊ ƠƑƑ。")
        else:
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƑƑ。")
# "Cancel invite:<n|off>": auto-refuse invitations from groups with <= n
# members; "off" disables. Non-numeric input falls into the except branch.
elif "Cancel invite:" in msg.text:
    try:
        strnum = msg.text.replace("Cancel invite:","")
        if strnum == "off":
            wait["autoCancel"]["on"] = False
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Invitation refusal was turned off。\non, please designate and send the number of people.")
            else:
                cl.sendText(msg.to,"number of people")
        else:
            num = int(strnum)
            wait["autoCancel"]["on"] = True
            if wait["lang"] == "JP":
                cl.sendText(msg.to,strnum + "A group below the person made sure that I'll refuse invitation automatically。")
            else:
                cl.sendText(msg.to,strnum + "Self- you for below shinin-like small.")
    except:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"The price is wrong。")
        else:
            cl.sendText(msg.to,"key is wrong。")
# "Auto leave:on"/"Auto leave:off": toggle auto-leaving 1:1 rooms.
elif msg.text in ["Auto leave:on"]:
    if wait["leaveRoom"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ԼЄƛƔЄ ƠƝ ƛԼƦЄƛƊƳ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ。")
    else:
        wait["leaveRoom"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƜƛƧ ƬƲƦƝЄƊ ƠƝ。")
        else:
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ。")
elif msg.text in ["Auto leave:off"]:
    if wait["leaveRoom"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ԼЄƛƔЄ ƠƑƑ ƛԼƦЄƛƊƳ。")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ。")
    else:
        wait["leaveRoom"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƜƛƧ ƬƲƦƝЄƊ ƠƑƑ。")
        else:
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƑƑ。")
# "Auto share:on"/"Auto share:off": toggle the timeline auto-share flag.
elif msg.text in ["共有:オン","共有:オン","Auto share:on"]:
    if wait["timeline"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
        else:
            cl.sendText(msg.to,"done")
    else:
        wait["timeline"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"done")
        else:
            cl.sendText(msg.to,"Already。")
elif msg.text in ["共有:オフ","共有:オフ","Auto share:off"]:
    if wait["timeline"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
        else:
            cl.sendText(msg.to,"done")
    else:
        wait["timeline"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"done")
        else:
            cl.sendText(msg.to,"Already。")
# "Set": report the on/off state of every feature flag in one message.
elif "Set" == msg.text:
    md = ""
    if wait["contact"] == True: md+="✔ Contact → on \n"
    else: md+="✖ Contact → off \n"
    if wait["autoJoin"] == True: md+="✔ Auto join → on \n"
    else: md +="✖ Auto join → off \n"
    if wait["autoCancel"]["on"] == True:md+="✔ Cancel Invite → " + str(wait["autoCancel"]["members"]) + " \n"
    else: md+= "✖ Cancel Invite → off \n"
    if wait["leaveRoom"] == True: md+="✔ Auto leave → on \n"
    else: md+="✖ Auto leave → off \n"
    if wait["timeline"] == True: md+="✔ Auto Share → on \n"
    else:md+="✖ Auto Share → off \n"
    if wait["commentOn"] == True: md+="✔ Comment → on \n"
    else:md+="✖ Comment → off \n"
    if wait["autoAdd"] == True: md+="✔ Auto add → on \n"
    else:md+="✖ Auto add → off \n"
    if wait["likeOn"] == True: md+="✔ Auto like → on \n"
    else:md+="✖ Auto like → off \n"
    cl.sendText(msg.to,md)
# "Group id": list every joined group as "[name]:gid".
elif msg.text in ["Group id","group id"]:
    gid = cl.getGroupIdsJoined()
    g = ""
    for i in gid:
        g += "[%s]:%s\n" % (cl.getGroup(i).name,i)
    cl.sendText(msg.to,g)
# "Rejectall": decline every pending group invitation.
elif msg.text in ["Rejectall"]:
    gid = cl.getGroupIdsInvited()
    for i in gid:
        cl.rejectGroupInvitation(i)
    if wait["lang"] == "JP":
        cl.sendText(msg.to,"Completion。")
    else:
        cl.sendText(msg.to,"key is wrong。")
# "Auto like:on"/"Auto like:off": toggle timeline auto-like.
# NOTE(review): as in the Comment toggle, the "Done"/"Already" replies look
# swapped relative to the state check — confirm intent before changing.
elif msg.text in ["Auto like:on"]:
    if wait["likeOn"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Done。")
    else:
        wait["likeOn"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already。")
elif msg.text in ["いいね:オフ","Auto like:off"]:
    if wait["likeOn"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Done。")
    else:
        wait["likeOn"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already。")
# "Auto add:on"/"Auto add:off": toggle auto-adding people who add the bot.
elif msg.text in ["Auto add:on"]:
    if wait["autoAdd"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It's on already。")
        else:
            cl.sendText(msg.to,"on already。")
    else:
        wait["autoAdd"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It was turned on。")
        else:
            cl.sendText(msg.to,"Turned on。")
elif msg.text in ["Auto add:off"]:
    if wait["autoAdd"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It's off already。")
        else:
            cl.sendText(msg.to,"off already。")
    else:
        wait["autoAdd"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It was turned off。")
        else:
            cl.sendText(msg.to,"Turned off。")
# "Massage add:<text>" / "Auto addition→<text>": set the greeting sent to
# newly added friends.
elif "Massage add:" in msg.text:
    wait["message"] = msg.text.replace("Massage add:","")
    cl.sendText(msg.to,"The message was changed。")
elif "Auto addition→" in msg.text:
    wait["message"] = msg.text.replace("Auto addition→","")
    if wait["lang"] == "JP":
        cl.sendText(msg.to,"The message was changed。")
    else:
        cl.sendText(msg.to,"was change already。")
# "Add confirmasi": show the current greeting message.
elif msg.text in ["Add confirmasi","自動追加問候語確認"]:
    if wait["lang"] == "JP":
        cl.sendText(msg.to,".automatic message is established as follows。\n\n" + wait["message"])
    else:
        cl.sendText(msg.to,"One of weeds on the surface below the self- additional breath image。\n\n" + wait["message"])
# "CHANGE": flip the reply language between "JP" and "TW".
elif msg.text in ["CHANGE","言語變更"]:
    if wait["lang"] =="JP":
        wait["lang"] = "TW"
        cl.sendText(msg.to,"ƇƠƲƝƬƦƳ ԼƛƝƓƲƛƓЄ ƊƲƦƖƝƓ ƛ ƇHƛƝƓЄ。")
    else:
        wait["lang"] = "JP"
        cl.sendText(msg.to,". The language was made English。")
# "Url": open the group ticket (if closed) and post the join link.
elif msg.text in ["Url"]:
    if msg.toType == 2:
        x = cl.getGroup(msg.to)
        if x.preventJoinByTicket == True:
            x.preventJoinByTicket = False
            cl.updateGroup(x)
        gurl = cl.reissueGroupTicket(msg.to)
        cl.sendText(msg.to,"line://ti/g/" + gurl)
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƖƬ ƇƛƝ'Ƭ ƁЄ ƲƧЄƊ ƁЄƧƖƊЄƧ ƬHЄ ƓƦƠƲƤ.。")
        else:
            cl.sendText(msg.to,"ƖMƤƠƧƧƖƁԼЄ ƲƧЄ ƁЄƧƖƊЄƧ ƬHЄ ƓƦƠƲƤ. ")
# "gurl:<gid>": post the join link of another group by gid.
elif "gurl:" in msg.text:
    if msg.toType == 2:
        gid = msg.text.replace("gurl:","")
        gurl = cl.reissueGroupTicket(gid)
        cl.sendText(msg.to,"line://ti/g/" + gurl)
    else:
        cl.sendText(msg.to,"ƖƬ ƇƛƝ'Ƭ ƁЄ ƲƧЄƊ ƁЄƧƖƊЄƧ ƬHЄ ƓƦƠƲƤ。")
# "Gurl": same as "Url" but through bot account K1 (ki).
elif msg.text in ["Gurl"]:
    if msg.toType == 2:
        x = ki.getGroup(msg.to)
        if x.preventJoinByTicket == True:
            x.preventJoinByTicket = False
            ki.updateGroup(x)
        gurl = ki.reissueGroupTicket(msg.to)
        ki.sendText(msg.to,"line://ti/g/" + gurl)
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"ƇƛƝ ƝƠƬ ƁЄ ƲƧЄƊ ƠƲƬƧƖƊЄ ƬHЄ ƓƦƠƲƤ")
        else:
            cl.sendText(msg.to,"ƝƠƬ ƑƠƦ ƲƧЄ ԼЄƧƧ ƬHƛƝ ƓƦƠƲƤ")
# "cb"/"cbd": arm the add/delete comment-blacklist mode; the next message
# (handled elsewhere via the wait flags) supplies the target.
elif msg.text in ["cb"]:
    wait["wblack"] = True
    cl.sendText(msg.to,"Please send the phone number of the person who adds it to the blacklist.")
elif msg.text in ["cbd"]:
    wait["dblack"] = True
    cl.sendText(msg.to,"Please send the phone number of the person who adds it to the blacklist.")
# "cbc": list everyone on the comment blacklist by display name.
elif msg.text in ["cbc"]:
    if wait["commentBlack"] == {}:
        cl.sendText(msg.to,"There isn't a person made a blacklist。")
    else:
        cl.sendText(msg.to,"Below is a blacklist。")
        mc = ""
        for mi_d in wait["commentBlack"]:
            mc += "・" +cl.getContact(mi_d).displayName + "\n"
        cl.sendText(msg.to,mc)
# "Clock:on"/"Clock:off": toggle the name-clock (display name suffixed
# with the current HH:MM).
elif msg.text in ["Clock:on"]:
    if wait["clock"] == True:
        cl.sendText(msg.to,"It's on already。")
    else:
        wait["clock"] = True
        now2 = datetime.now()
        nowT = datetime.strftime(now2,"(%H:%M)")
        profile = cl.getProfile()
        profile.displayName = wait["cName"] + nowT
        cl.updateProfile(profile)
        cl.sendText(msg.to,"It was turned on")
elif msg.text in ["Clock:off"]:
    if wait["clock"] == False:
        cl.sendText(msg.to,"It's off already.。")
    else:
        wait["clock"] = False
        cl.sendText(msg.to,"It was tuned off。")
# "Clock:<name>": set the base name used by the clock (max 13 chars so the
# "(HH:MM)" suffix still fits the 20-char name limit).
elif "Clock:" in msg.text:
    n = msg.text.replace("Clock:","")
    if len(n.decode("utf-8")) > 13:
        cl.sendText(msg.to,"Last name clock。")
    else:
        wait["cName"] = n
        cl.sendText(msg.to,"It was renewed\n\n" + n)
# "Up clock": refresh the time suffix immediately.
elif msg.text in ["Up clock"]:
    if wait["clock"] == True:
        now2 = datetime.now()
        nowT = datetime.strftime(now2,"(%H:%M)")
        profile = cl.getProfile()
        profile.displayName = wait["cName"] + nowT
        cl.updateProfile(profile)
        cl.sendText(msg.to,"It was renewed。")
    else:
        cl.sendText(msg.to,"Please turn on a name clock.。")
# "Tagall": mention every group member, batched 100 per message (the LINE
# mention limit). Each mention occupies 9 chars of text: "@Krampus" (8) + "\n".
elif "Tagall" in msg.text:
    group = cl.getGroup(msg.to)
    k = len(group.members)//100
    for j in xrange(k+1):
        # fresh Message per batch; note this rebinds the local `msg`
        msg = Message(to=msg.to)
        txt = u''
        s=0
        d=[]
        for i in group.members[j*100 : (j+1)*100]:
            # S/E are the start/end offsets of the mention inside txt
            d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
            s += 9
            txt += u'@Krampus\n'
        msg.text = txt
        msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
        cl.sendMessage(msg)
# "Kicker": temporarily open the group's ticket URL, pull all five bot
# accounts in via the ticket, then close the URL again.
elif "Kicker" in msg.text:
    X = cl.getGroup(msg.to)
    X.preventJoinByTicket = False
    cl.updateGroup(X)
    invsend = 0
    Ti = cl.reissueGroupTicket(msg.to)
    ki.acceptGroupInvitationByTicket(msg.to,Ti)
    kk.acceptGroupInvitationByTicket(msg.to,Ti)
    ks.acceptGroupInvitationByTicket(msg.to,Ti)
    kc.acceptGroupInvitationByTicket(msg.to,Ti)
    ka.acceptGroupInvitationByTicket(msg.to,Ti)
    G = cl.getGroup(msg.to)
    G.preventJoinByTicket = True
    cl.updateGroup(G)
    Ticket = cl.reissueGroupTicket(msg.to)
# "K1 join"/"K2 join"/"K3 join": same flow for a single bot account.
elif msg.text in ["K1 join"]:
    X = cl.getGroup(msg.to)
    X.preventJoinByTicket = False
    cl.updateGroup(X)
    invsend = 0
    Ti = cl.reissueGroupTicket(msg.to)
    ki.acceptGroupInvitationByTicket(msg.to,Ti)
    G = ki.getGroup(msg.to)
    G.preventJoinByTicket = True
    ki.updateGroup(G)
    Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["K2 join"]:
    X = cl.getGroup(msg.to)
    X.preventJoinByTicket = False
    cl.updateGroup(X)
    invsend = 0
    Ti = cl.reissueGroupTicket(msg.to)
    kk.acceptGroupInvitationByTicket(msg.to,Ti)
    G = kk.getGroup(msg.to)
    G.preventJoinByTicket = True
    kk.updateGroup(G)
    Ticket = kk.reissueGroupTicket(msg.to)
elif msg.text in ["K3 join"]:
    X = cl.getGroup(msg.to)
    X.preventJoinByTicket = False
    cl.updateGroup(X)
    invsend = 0
    Ti = cl.reissueGroupTicket(msg.to)
    ks.acceptGroupInvitationByTicket(msg.to,Ti)
    G = ks.getGroup(msg.to)
    G.preventJoinByTicket = True
    ks.updateGroup(G)
    Ticket = ks.reissueGroupTicket(msg.to)
# "Bye": all five bot accounts leave the current group; errors (e.g. a bot
# not being in the group) are deliberately ignored.
elif msg.text in ["Bye"]:
    if msg.toType == 2:
        X = cl.getGroup(msg.to)
        try:
            ki.leaveGroup(msg.to)
            kk.leaveGroup(msg.to)
            ks.leaveGroup(msg.to)
            kc.leaveGroup(msg.to)
            ka.leaveGroup(msg.to)
        except:
            pass
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
kc.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
kc.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
#-----------------------------------------------------------
# "Kick @mention...": kick every @-mentioned member.
# NOTE(review): eval() on message metadata executes attacker-controlled text —
# json.loads would be the safe equivalent; flagged, not changed here.
elif "Kick" in msg.text:
    if msg.contentMetadata is not None:
        targets = []
        key = eval(msg.contentMetadata["MENTION"])
        key["MENTIONEES"][0]["M"]
        for x in key["MENTIONEES"]:
            targets.append(x["M"])
        for target in targets:
            try:
                cl.kickoutFromGroup(msg.to,[target])
            except:
                # one blind retry on failure
                cl.kickoutFromGroup(msg.to,[target])
    else:
        pass
# "K1/K2/K3 fuck <name>": the given bot kicks every member whose display name
# contains <name>, unless the sender is the hard-coded OWN mid (substring
# check on a string).
# NOTE(review): `sendMessage(...)` below is a bare name — every sibling branch
# uses cl.sendText; confirm whether a module-level sendMessage exists.
# NOTE(review): the K2 and K3 branches fetch members via ki.getGroup but kick
# via kk/ks — looks like a copy-paste slip; confirm before changing.
elif "K1 fuck" in msg.text:
    OWN = "u9fee8ed8e746cc6134346e37f672cbb3"
    if msg.from_ in OWN:
        pass
    else:
        nk0 = msg.text.replace("K1 fuck","")
        nk1 = nk0.lstrip()
        nk2 = nk1.replace("@","")
        nk3 = nk2.rstrip()
        _name = nk3
        gs = ki.getGroup(msg.to)
        targets = []
        for h in gs.members:
            if _name in h.displayName:
                targets.append(h.mid)
        if targets == []:
            sendMessage(msg.to,"ƲƧЄƦ ƊƠЄƧ ƝƠƬ ЄҲƖƧƬ")
            pass
        else:
            for target in targets:
                try:
                    if msg.from_ not in target:
                        ki.kickoutFromGroup(msg.to, [target])
                except:
                    ki.kickoutFromGroup(msg.to, [target])
                    pass
elif "K2 fuck" in msg.text:
    OWN = "u49e3ce7e546c60d2f5a38afe264fd1e9"
    if msg.from_ in OWN:
        pass
    else:
        nk0 = msg.text.replace("K2 fuck","")
        nk1 = nk0.lstrip()
        nk2 = nk1.replace("@","")
        nk3 = nk2.rstrip()
        _name = nk3
        gs = ki.getGroup(msg.to)
        targets = []
        for h in gs.members:
            if _name in h.displayName:
                targets.append(h.mid)
        if targets == []:
            sendMessage(msg.to,"ƲƧЄƦ ƊƠЄƧ ƝƠƬ ЄҲƖƧƬ")
            pass
        else:
            for target in targets:
                try:
                    if msg.from_ not in target:
                        kk.kickoutFromGroup(msg.to, [target])
                except:
                    kk.kickoutFromGroup(msg.to, [target])
                    pass
elif "K3 fuck" in msg.text:
    OWN = "uc903012b76390e088c772b21062a3b20"
    if msg.from_ in OWN:
        pass
    else:
        nk0 = msg.text.replace("K3 fuck","")
        nk1 = nk0.lstrip()
        nk2 = nk1.replace("@","")
        nk3 = nk2.rstrip()
        _name = nk3
        gs = ki.getGroup(msg.to)
        targets = []
        for h in gs.members:
            if _name in h.displayName:
                targets.append(h.mid)
        if targets == []:
            sendMessage(msg.to,"ƲƧЄƦ ƊƠЄƧ ƝƠƬ ЄҲƖƧƬ")
            pass
        else:
            for target in targets:
                try:
                    if msg.from_ not in target:
                        ks.kickoutFromGroup(msg.to, [target])
                except:
                    ks.kickoutFromGroup(msg.to, [target])
                    pass
# "Ban <name>": admin-only — add every member whose display name contains
# <name> to the persistent blacklist (st2__b.json).
elif "Ban " in msg.text:
    if msg.toType == 2:
        if msg.from_ in admin:
            ban0 = msg.text.replace("Ban ","")
            ban1 = ban0.lstrip()
            ban2 = ban1.replace("@","")
            ban3 = ban2.rstrip()
            _name = ban3
            gs = cl.getGroup(msg.to)
            targets = []
            for s in gs.members:
                if _name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                cl.sendText(msg.to,"ƲƧЄƦ ƊƠЄƧ ƝƠƬ ЄҲƖƧƬ")
                pass
            else:
                for target in targets:
                    try:
                        wait["blacklist"][target] = True
                        # persist the blacklist to disk after each addition
                        f=codecs.open('st2__b.json','w','utf-8')
                        json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                        cl.sendText(msg.to,"ヽ( ^ω^)ノ ƧƲƇƇЄƧƧ")
                    except:
                        cl.sendText(msg.to,"ヽ( ^ω^)ノ ƧƲƇƇЄƧƧ")
#-----------------------------------------------------------
# "Mid @<name>": reply with the mid of the member whose display name matches
# exactly.
elif "Mid @" in msg.text:
    _name = msg.text.replace("Mid @","")
    _nametarget = _name.rstrip(' ')
    gs = cl.getGroup(msg.to)
    for g in gs.members:
        if _nametarget == g.displayName:
            cl.sendText(msg.to, g.mid)
        else:
            pass
#-----------------------------------------------------------
# "Unban <name>": admin-only — remove matching members from the persistent
# blacklist and rewrite st2__b.json.
elif "Unban " in msg.text:
    if msg.toType == 2:
        if msg.from_ in admin:
            unb0 = msg.text.replace("Unban ","")
            unb1 = unb0.lstrip()
            unb2 = unb1.replace("@","")
            unb3 = unb2.rstrip()
            x_name = unb3
            gs = cl.getGroup(msg.to)
            targets = []
            for s in gs.members:
                if x_name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                cl.sendText(msg.to,"ƲƧЄƦ ƊƠЄƧ ƝƠƬ ЄҲƖƧƬ")
                pass
            else:
                for target in targets:
                    try:
                        del wait["blacklist"][target]
                        f=codecs.open('st2__b.json','w','utf-8')
                        json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                        cl.sendText(msg.to,"ヽ( ^ω^)ノ ƧƲƇƇЄƧƧ")
                    except:
                        cl.sendText(msg.to,"ヽ( ^ω^)ノ ƧƲƇƇЄƧƧ")
#-----------------------------------------------------------
# "Protect:on"/"Protect:off": toggle group protection; the list is also
# persisted to pnharfbot.json. "off" requires Administrator.
elif "Protect:on" == msg.text:
    if msg.to in protection:
        cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
    else:
        wait["pnharfbot"][msg.to] = cl.getGroup(msg.to).name
        f=codecs.open('pnharfbot.json','w','utf-8')
        json.dump(wait["pnharfbot"], f, sort_keys=True, indent=4,ensure_ascii=False)
        protection.append(msg.to)
        cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ")
elif "Protect:off" == msg.text:
    try:
        if msg.from_ in Administrator:
            protection.remove(msg.to)
            cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƑƑ")
        else:
            cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
    except:
        pass
# "Namelock:on"/"Namelock:off": lock the current group name (the locked name
# is kept in wait['pro_name']).
# NOTE(review): the on-branch replies look swapped ("TURNED ON" when already
# locked) — confirm intent before changing.
elif "Namelock:on" in msg.text:
    if msg.to in wait['pname']:
        cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
    else:
        cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
        wait['pname'][msg.to] = True
        wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
    if msg.to in wait['pname']:
        cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
        del wait['pname'][msg.to]
    else:
        cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
# "Blockinvite:on"/"Blockinvite:off": auto-cancel invitations in this group.
elif "Blockinvite:on" == msg.text:
    gid = msg.to
    autocancel[gid] = "poni"
    cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
    try:
        del autocancel[msg.to]
        cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
    except:
        pass
#-----------------------------------------------------------
# "Delete chat": every account clears the message history of op.param2.
elif msg.text in ["Delete chat"]:
    cl.removeAllMessages(op.param2)
    ki.removeAllMessages(op.param2)
    kk.removeAllMessages(op.param2)
    ks.removeAllMessages(op.param2)
    kc.removeAllMessages(op.param2)
    ka.removeAllMessages(op.param2)
    cl.sendText(msg.to,"Delete Chat")
    cl.sendText(msg.to,"Success...")
#-----------------------------------------------------------
# "Spam @<name>": flood the matching member's 1:1 chat with the banner text
# from every account (hard-coded repetition, ~6 rounds of 6 accounts).
elif "Spam @" in msg.text:
    _name = msg.text.replace("Spam @","")
    _nametarget = _name.rstrip(' ')
    gs = cl.getGroup(msg.to)
    for g in gs.members:
        if _nametarget == g.displayName:
            cl.sendText(msg.to,"Spam Start")
            ki.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kk.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kc.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ka.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            cl.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ki.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kk.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kc.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ka.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            cl.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ki.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kk.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kc.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ka.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            cl.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ki.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kk.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kc.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ka.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            cl.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ki.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kk.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kc.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ka.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            cl.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ki.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kk.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            kc.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ka.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            ks.sendText(g.mid,"•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•")
            cl.sendText(msg.to, "Ok success")
            print "Done spam"
#----------------------------------------------------------
# "Conban": post the contact card of every blacklisted user.
elif msg.text in ["Conban","Contactban","Contact ban"]:
    if wait["blacklist"] == {}:
        cl.sendText(msg.to,"Tidak Ada Blacklist")
    else:
        cl.sendText(msg.to,"Daftar Blacklist")
        h = ""
        for i in wait["blacklist"]:
            h = cl.getContact(i)
            M = Message()
            M.to = msg.to
            M.contentType = 13
            M.contentMetadata = {'mid': i}
            cl.sendMessage(M)
#----------------------------------------------------------
# "Test": liveness check — every bot account sends a growing dot bar.
elif msg.text in ["Test"]:
    ki.sendText(msg.to,"●")
    ki.sendText(msg.to,"●●")
    ki.sendText(msg.to,"●●●")
    ki.sendText(msg.to,"●●●●")
    ki.sendText(msg.to,"●●●●●")
    ki.sendText(msg.to,"●●●●●●")
    kk.sendText(msg.to,"●●●●●●●")
    kk.sendText(msg.to,"●●●●●●●●")
    kk.sendText(msg.to,"●●●●●●●●●")
    kk.sendText(msg.to,"●●●●●●●●●●")
    kk.sendText(msg.to,"●●●●●●●●●●●")
    kk.sendText(msg.to,"●●●●●●●●●●●●")
    ks.sendText(msg.to,"●●●●●●●●●●●●●")
    ks.sendText(msg.to,"●●●●●●●●●●●●●●")
    ks.sendText(msg.to,"●●●●●●●●●●●●●●●")
    ks.sendText(msg.to,"●●●●●●●●●●●●●●●●")
    ks.sendText(msg.to,"●●●●●●●●●●●●●●●●●")
    ks.sendText(msg.to,"●●●●●●●●●●●●●●●●●●")
    kc.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●")
    kc.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●")
    kc.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●●")
    kc.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●●●")
    kc.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●●●●")
    kc.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●●●●●")
    ka.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●●●●●●")
    ka.sendText(msg.to,"●●●●●●●●●●●●●●●●●●●●●●●●●●")
    ka.sendText(msg.to,"●Bot Working●")
    cl.sendText(msg.to,"●( ^^)人(^^ )●")
#----------------------------------------------------------
# "Setpoint": admin-only — mark this message as the read-tracking anchor
# (used later by "Setcheck" to report who read past this point).
elif msg.text == "Setpoint":
    if msg.from_ in admin:
        cl.sendText(msg.to, "sᴇᴛ ᴛʜᴇ ʟᴀsᴛsᴇᴇɴs' ᴘᴏɪɴᴛ(`・ω・´)")
        try:
            # drop any previous anchor for this chat
            del wait2['readPoint'][msg.to]
            del wait2['readMember'][msg.to]
        except:
            pass
        now2 = datetime.now()
        wait2['readPoint'][msg.to] = msg.id
        wait2['readMember'][msg.to] = ""
        wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
        wait2['ROM'][msg.to] = {}
        print wait2
elif msg.text == "Setcheck":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, " %s\n\n\nPeople who have ignored reads\n(`・ω・´)\n%s\n\nThese anu anu uesrs have seen at the lastseen point(`・ω・´)\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Sider ga bisa di read cek setpoint dulu bego tinggal ketik\nSetlastpoint\nkalo mau liat sider ketik\nViewlastseen")
#-----------------------------------------------------------speed
# "Ban"/"Unban": arm blacklist add/remove mode; the next contact message
# (handled elsewhere via the wait flags) supplies the target.
elif msg.text in ["Ban"]:
    wait["wblacklist"] = True
    cl.sendText(msg.to,"ƤԼЄƛƧЄ ƧЄƝƊ ƬHЄ ƛƇƇƠƲƝƬ ƦЄƓƖƧƬЄƦЄƊ ƜƖƬH ƛ ƁԼƛƇƘԼƖƧƬ。")
elif msg.text in ["Unban"]:
    wait["dblacklist"] = True
    cl.sendText(msg.to,"ƤԼЄƛƧЄ ƧЄƝƊ ƬHЄ ƛƇƇƠƲƝƬ ƦЄƓƖƧƬЄƦЄƊ ƜƖƬH ƛ ƁԼƛƇƘԼƖƧƬ。")
# "Banlist": list every blacklisted user by display name.
elif msg.text in ["Banlist"]:
    if wait["blacklist"] == {}:
        cl.sendText(msg.to,"ƬHЄƦЄ ƖƧƝ'Ƭ ƛ ƤЄƦƧƠƝ MƛƊЄ ƛ ƁԼƛƇƘԼƖƧƬ.。")
    else:
        cl.sendText(msg.to,"ƁЄԼƠƜ ƖƧ ƛ ƁԼƛƇƘԼƖƧƬ。")
        mc = ""
        for mi_d in wait["blacklist"]:
            mc += "・" +cl.getContact(mi_d).displayName + "\n"
        cl.sendText(msg.to,mc)
# "Blist": list blacklisted users who are currently members of this group.
elif msg.text in ["Blist"]:
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        gMembMids = [contact.mid for contact in group.members]
        matched_list = []
        for tag in wait["blacklist"]:
            matched_list+=filter(lambda str: str == tag, gMembMids)
        cocoa = ""
        for mm in matched_list:
            cocoa += "・" +cl.getContact(mm).displayName + "\n"
        cl.sendText(msg.to,cocoa + "But it's a blacklist.。")
# "Kill ban": kick every blacklisted member, using a randomly chosen account
# per kick. The bare `return` exits the handler early when nobody matches.
elif msg.text in ["Kill ban"]:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        gMembMids = [contact.mid for contact in group.members]
        matched_list = []
        for tag in wait["blacklist"]:
            matched_list+=filter(lambda str: str == tag, gMembMids)
        if matched_list == []:
            ki.sendText(msg.to,"There wasn't a blacklist user。")
            return
        for jj in matched_list:
            try:
                klist=[cl,ki,kk,ks,kc,ka]
                kicker=random.choice(klist)
                kicker.kickoutFromGroup(msg.to,[jj])
                print (msg.to,[jj])
            except:
                pass
elif msg.text in ["single"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I have feigned and have canceled it。")
elif "random:" in msg.text:
if msg.toType == 2:
strnum = msg.text.replace("random:","")
source_str = '•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"ЄƦƦƠƦ")
elif "Album making" in msg.text:
try:
albumtags = msg.text.replace("Album making","")
gid = albumtags[:33]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "An album was made。")
except:
cl.sendText(msg.to,"ЄƦƦƠƦ")
elif "FAK" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
amid = msg.text.replace("fakec→","")
cl.sendText(msg.to,str(cl.channel.createAlbumF(msg.to,name,amid)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#-----------------------------------------------
elif msg.text in ["Cb","Clearban"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
#-----------------------------------------------
elif "Me @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("Me @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
#-----------------------------------------------
elif "Say " in msg.text:
string = msg.text.replace("Say ","")
if len(string.decode('utf-8')) <= 50:
ki.sendText(msg.to," " + string + " ")
kk.sendText(msg.to," " + string + " ")
ks.sendText(msg.to," " + string + " ")
kc.sendText(msg.to," " + string + " ")
ka.sendText(msg.to," " + string + " ")
#-----------------------------------------------
elif msg.text in ["Group creator","Gc","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"""╔══════════════
║ผู้สร้างกลุ่ม Creator Group
╚══════════════""")
#-----------------------------------------------
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ka.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
kc.kickuotFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
kc.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
#-----------------------------------------------
elif "Covergroup" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("•─ ͜͡ᴛᴇᴀᴍ ᴛᴇsᴛ ʙᴏᴛ͜͡ ─•", mi_d)
cl.sendText(msg.to,"Cover Group")
#-----------------------------------------------
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
text = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (text+"\n")
if txt[1] == "on":
if jmlh <= 1000:
for x in range(jmlh):
cl.sendText(msg.to, text)
else:
cl.sendText(msg.to, "Out Of Range!")
elif txt[1] == "off":
if jmlh <= 1000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
#-----------------------------------------------
elif "Mc " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Mc: " in msg.text:
mmid = msg.text.replace("Mc: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
ki.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
kk.sendMessage(msg)
ks.sendMessage(msg)
ks.sendMessage(msg)
kc.sendMessage(msg)
kc.sendMessage(msg)
ka.sendMessage(msg)
ka.sendMessage(msg)
#-----------------------------------------------
elif "Speed" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ka.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif "Sp" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif msg.text == "Save":
me = cl.getProfile()
save1["displayName"] = me.displayName
save1["statusMessage"] = me.statusMessage
save1["pictureStatus"] = me.pictureStatus
save1["Saved"] = True
cl.sendText(msg.to,"บันทึกสถานะบัญชีเรียบร้อยแล้ว")
elif msg.text == "Load":
if save1["Saved"]:
me = cl.getProfile()
me.displayName = save1["displayName"]
me.statusMessage = save1["statusMessage"]
me.pictureStatus = save1["pictureStatus"]
cl.updateDisplayPicture(me.pictureStatus)
cl.updateProfile(me)
cl.sendText(msg.to,"โหลดสถานะบัญชีเรียบร้อยแล้ว")
else:
cl.sendText(msg.to,"ก่อนหน้านี้ยังไม่ได้มีการบันทึกสถานะบัญชี")
elif msg.text == "Copy":
if msg.toType == 0:
targ = cl.getContact(msg.to)
me = cl.getProfile()
me.displayName = targ.displayName
me.statusMessage = targ.statusMessage
me.pictureStatus = targ.pictureStatus
cl.updateDisplayPicture(me.pictureStatus)
cl.updateProfile(me)
cl.sendText(msg.to,"สำเร็จแล้ว")
else:
cl.sendText(msg.to,"คำสั่งนี้ใช้ได้เฉพาะในแชทส่วนตัวเท่านั้น")
elif "Copy " in msg.text:
if msg.toType == 2:
red = re.compile(re.escape('Copy '),re.IGNORECASE)
tname = red.sub('',msg.text)
tname = tname.lstrip()
tname = tname.replace(" @","$spliter$")
tname = tname.rstrip()
tname = tname.split("$spliter$")
tname = tname[0]
tname = tname[1:]
clist = {
"Founded":False,
"displayName":"",
"statusMessage":"",
"pictureStatus":""
}
mems = cl.getGroup(msg.to).members
for targ in mems:
if targ.displayName == tname:
clist["displayName"] = targ.displayName
clist["statusMessage"] = targ.statusMessage
clist["pictureStatus"] = targ.pictureStatus
clist["Founded"] = True
if clist["Founded"]:
me = cl.getProfile()
me.displayName = clist["displayName"]
me.statusMessage = clist["statusMessage"]
me.pictureStatus = clist["pictureStatus"]
cl.updateDisplayPicture(me.pictureStatus)
cl.updateProfile(me)
cl.sendText(msg.to,"สำเร็จแล้ว")
elif "Steal dp " in msg.text:
if msg.toType == 2:
red = re.compile(re.escape('steal dp '),re.IGNORECASE)
namel = red.sub('',msg.text)
namel = namel.lstrip()
namel = namel.replace(" @","$spliter$")
namel = namel[1:]
namel = namel.rstrip()
namel = namel.split("$spliter$")
gmem = cl.getGroup(msg.to).members
for targ in gmem:
if targ.displayName in namel:
if targ.displayName != '':
cl.sendText(msg.to,targ.displayName)
try:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+targ.pictureStatus)
except:
pass
elif "Steal home " in msg.text:
if msg.toType == 2:
red = re.compile(re.escape('steal home '),re.IGNORECASE)
namel = red.sub('',msg.text)
namel = namel.lstrip()
namel = namel.replace(" @","$spliter$")
namel = namel[1:]
namel = namel.rstrip()
namel = namel.split("$spliter$")
gmem = cl.getGroup(msg.to).members
for targ in gmem:
if targ.displayName in namel:
if targ.displayName != '':
cl.sendText(msg.to,targ.displayName)
try:
cl.sendImageWithURL(msg.to,cl.channel.getCover(targ.mid))
except:
pass
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
wait["blacklist"][op.param2] = False
elif op.param3 in op.param3:
if op.param1 in protection:
OWN = "u2144f4eca089e5888899ad5d0551c085","u406133ad4d3fbe50a2f4d51ea081d050","ua51ba06b0dd18c0bfe2cc6caa3458202","u34a9af3a18784280147fc413a68a77fd"
if op.param2 in OWN:
kicker1 = [cl,ki,kk,ks,kc,ka,km,kn,ko]
G = random.choice(kicker1).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(kicker1).updateGroup(G)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
random.choice(kicker1).updateGroup(G)
else:
G = random.choice(kicker1).getGroup(op.param1)
random.choice(kicker1).kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
random.choice(kicker1).updateGroup(G)
Ticket = random.choice(kicker1).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
random.choice(kicker1).updateGroup(G)
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
elif op.param3 in Amid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = cl.getGroup(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = False
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
if op.type == 19:
try:
if op.param3 in Amid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = cl.getGroup(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
elif op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
wait["blacklist"][op.param2] = False
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
if op.type == 19:
try:
if op.param3 in kimid:
if op.param2 in ki2mid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
else:
G = ks.getGroup(op.param1)
ks.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
elif op.param3 in Amid:
if op.param2 in kimid:
G = kk.getGroup(op.param1)
G.preventJoinByTicket = False
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kk.updateGroup(G)
else:
G = kk.getGroup(op.param1)
kk.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kk.updateGroup(G)
wait["blacklist"][op.param2] = False
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
if op.type == 19:
try:
if op.param3 in ki2mid:
if op.param2 in kimid:
if op.param4 in Cmid:
if op.param5 in Emid:
G = kk.getGroup(op.param1)
G.preventJoinByTicket = False
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kk.updateGroup(G)
else:
G = kk.getGroup(op.param1)
kk.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kk.updateGroup(G)
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
else:
G = ks.getGroup(op.param1)
ks.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
wait["blacklist"][op.param2] = False
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in ki2mid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
else:
G = ks.getGroup(op.param1)
ks.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
wait["blacklist"][op.param2] = False
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
elif op.param3 in ki2mid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = cl.getGroup(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
ks.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
ka.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = False
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
"""if op.type == 32:
OWN = "ua7fc5964d31f45ac75128fc2b8deb842","u406133ad4d3fbe50a2f4d51ea081d050","ua51ba06b0dd18c0bfe2cc6caa3458202","uc7f32bb28dc009916d40af87c9910ddc"
if op.param2 in OWN:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ka.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)"""
#------------------------------------------------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n☑" + Name
wait2['ROM'][op.param1][op.param2] = "☑" + Name
else:
cl.sendText
except:
pass
#-----------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
def autoSta():
    """Daemon worker: auto-like and auto-comment on timeline posts.

    Polls cl.activity() forever; after 50 accumulated errors the whole
    process is terminated via sys.exit(0).
    """
    failures = 1
    while True:
        try:
            for post in cl.activity(1)["result"]["posts"]:
                info = post["postInfo"]
                writer = post["userInfo"]["writerMid"]
                # Only touch posts we have not liked yet.
                if info["liked"] is not False:
                    continue
                if wait["likeOn"] == True:
                    cl.like(writer, info["postId"], 1001)
                if wait["commentOn"] == True:
                    # Skip users on the comment blacklist.
                    if writer not in wait["commentBlack"]:
                        cl.comment(writer, info["postId"], wait["comment"])
        except:
            failures += 1
            if failures == 50:
                sys.exit(0)
# Start the auto-like/auto-comment worker as a daemon thread so it is
# killed automatically when the main process exits.
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2(now=None):
    """Return False when the minute is an exact multiple of ten
    ("00", "10", ..., "50"), else True.

    :param now: datetime to test; defaults to the current time.  The
        parameter was added with a default so existing ``a2()`` callers
        are unaffected while the check becomes testable.
    """
    if now is None:
        now = datetime.now()
    nowT = datetime.strftime(now, "%M")
    # BUG FIX: the original tested nowT[14:], but "%M" produces a
    # 2-character string, so the slice was always "" and the function
    # unconditionally returned True.
    if nowT in ["10", "20", "30", "40", "50", "00"]:
        return False
    else:
        return True
def nameUpdate():
    """Daemon worker: while wait["clock"] is enabled, append the current
    (HH:MM) time to the display name and refresh it every 10 minutes."""
    while True:
        try:
            if wait["clock"] == True:
                stamp = datetime.strftime(datetime.now(), "(%H:%M)")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + stamp
                cl.updateProfile(profile)
                # NOTE(review): the sleep sits inside the if-branch, so the
                # loop spins without pausing while the clock feature is off —
                # confirm intent before changing.
                time.sleep(600)
        except:
            pass
#----------------------------------------
#-------------------------------
# Start the clock-in-display-name worker as a daemon thread.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
try:
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
except Exception as e:
print e
with open('profileSave.pkl', 'w') as f:
pickle.dump([save1], f)
| [
"noreply@github.com"
] | dew2564.noreply@github.com |
ddd6e203de85cd145ace2229c6c14f6d58edfe3c | d5e24c3586e5f1104727fceb94616023d995b8ec | /traffic_bot/register_client.py | 997975041016bb001bf8c7a8afc408cea505ae6f | [
"Apache-2.0"
] | permissive | unclejay80/matrix-traffic-bot | c53475e7af02f1200f658447b89c0cb7b59c0f23 | 9a22654b246877f6b1618c7bed99bb9d4142af6f | refs/heads/master | 2023-05-04T22:15:57.971583 | 2021-05-18T21:21:27 | 2021-05-18T21:21:27 | 368,287,582 | 0 | 0 | Apache-2.0 | 2021-05-17T20:47:36 | 2021-05-17T18:38:33 | Python | UTF-8 | Python | false | false | 3,306 | py | #!/usr/bin/env python3
import asyncio
import logging
import sys
from time import sleep
from aiohttp import ClientConnectionError, ServerDisconnectedError
from nio import (
AsyncClient,
AsyncClientConfig,
InviteMemberEvent,
LocalProtocolError,
LoginError,
RegisterResponse,
MegolmEvent,
RoomMessageText,
UnknownEvent,
)
from traffic_bot.config import Config
from traffic_bot.storage import Storage
logger = logging.getLogger(__name__)
class RegisterClient:
    """Registers a fresh bot account on the homeserver, performs one
    initial sync plus E2E key upload, then closes the connection."""

    def __init__(
        self,
        store: Storage,
        config: Config,
        user_id: str,
        username: str,
        password: str
    ):
        # Imported here to avoid a circular import with traffic_bot.callbacks.
        from traffic_bot.callbacks import Callbacks

        self.config = config
        self.store = store

        # Configuration options for the AsyncClient.
        self.client_config = AsyncClientConfig(
            max_limit_exceeded=0,
            max_timeouts=0,
            store_sync_tokens=True,
            encryption_enabled=True,
        )

        # NOTE(review): the full username is kept as user_id while the bare
        # localpart goes into user_id_without_host — confirm against callers.
        self.user_id = username
        self.user_password = password
        self.user_id_without_host = user_id

        # Initialize the matrix client.
        self.client = AsyncClient(
            self.config.homeserver_url,
            self.user_id,
            device_id=self.config.device_id + self.user_id,
            store_path=self.config.store_path,
            config=self.client_config,
            ssl=False
        )

        # Wire up the event callbacks.
        event_handlers = Callbacks(self.client, store, config, False)
        self.client.add_event_callback(event_handlers.message, (RoomMessageText,))
        self.client.add_event_callback(event_handlers.invite, (InviteMemberEvent,))
        self.client.add_event_callback(event_handlers.decryption_failure, (MegolmEvent,))
        self.client.add_event_callback(event_handlers.unknown, (UnknownEvent,))

    async def start(self):
        """Register the account, sync once and upload encryption keys.

        Returns False on registration/login failure; otherwise None after
        the connection has been closed.
        """
        logger.info(f"Start {self.user_id}")
        try:
            register_response = await self.client.register(
                self.user_id_without_host,
                self.user_password,
                self.config.device_name
            )
            if type(register_response) != RegisterResponse:
                logger.error("Failed to register: %s", register_response.message)
                return False
        except LocalProtocolError as e:
            # There's an edge case here where the user hasn't installed the correct C
            # dependencies. In that case, a LocalProtocolError is raised on login.
            logger.fatal(
                "Failed to login. Have you installed the correct dependencies? "
                "https://github.com/poljar/matrix-nio#installation "
                "Error: %s",
                e,
            )
            return False
        try:
            # Try to login with the configured username/password.
            logger.info(f"Logged in as {self.user_id}")
            await self.client.sync(timeout=30000, full_state=True)
            await self.client.keys_upload()
        except (ClientConnectionError, ServerDisconnectedError):
            logger.warning("Unable to connect to homeserver...")
        finally:
            logger.info(f"Close connection {self.user_id}")
            await self.client.close()
"juergen.wischer@bwi.de"
] | juergen.wischer@bwi.de |
1f3f8ad62b3bff4ac0821b0fc51593df8ce0d5ce | c61c9bedba1968bfaf571ac3996b696fc35890a6 | /Chapter12/has_match.py | 00b6ca1068d542e225e1be731b69d6152b593ec3 | [] | no_license | ArunRamachandran/ThinkPython-Solutions | 497b3dbdeba1c64924fe1d9aa24204a9ca552c5b | 1a0872efd169e5d39b25134960168e3f09ffdc99 | refs/heads/master | 2020-04-01T10:23:20.255132 | 2014-11-07T17:04:52 | 2014-11-07T17:04:52 | 25,806,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # has_match takes two sequences, t1 and t2, and returns True, if there is
# an index i such that t1[i] == t2[i]
def has_match(t1, t2):
    """Return True if there is an index i such that t1[i] == t2[i].

    Only positions up to the length of the shorter sequence are compared.
    """
    for x, y in zip(t1, t2):
        if x == y:
            return True
    # BUG FIX: the original had "else: return False" inside the loop, which
    # returned on the very first mismatching pair, so later matching
    # positions were never examined.
    return False
t1 = "banana"
t2 = "sequence"
print "Given sequences are : "
print t1
print t2
case = has_match(t1,t2)
if case == True:
print "Yeah..!! Two sequences have a matching index "
if case == False:
print "Nope... It doesn't have a matching index !! "
| [
"arunkramachandran92@gmail.com"
] | arunkramachandran92@gmail.com |
12f4246c32a81d71e4f0c71a5bf049fbb48cc540 | 9dcfe6e3120316d49ef2d5e13d4ccf877a8a9a27 | /myRedis.py | 4e40b9abb9d1e495117dae252a6c7200cae30a19 | [] | no_license | sunchao1024/mime-server | e754d9b29e406d76a15fff473f6c1e046e419ecf | 1db9a59ed4d7c5ad73648fa17c1299dd4c9382f0 | refs/heads/master | 2021-01-21T21:38:37.114870 | 2016-10-12T07:29:02 | 2016-10-12T07:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | #!/usr/bin/env python
# coding=utf-8
'''
desc: redis相关的方法
author: jsongo
'''
import tornadis
from tornado import gen
# Maximum number of pooled connections.
pool = 10
# host = '127.0.0.1'
# Redis server location and password (empty string = no auth).
host = 'redis-server'
port = 6379
pwd = ''
# Shared connection pool; autoconnect opens connections lazily on first use.
POOL = tornadis.ClientPool(max_size=pool, host=host, port=port, autoconnect=True)
# Async context-manager factory that yields a connected client from the pool.
getRedis = POOL.connected_client
@gen.coroutine
def execRedis(*args, **kwargs):
result = None
with (yield getRedis()) as redis:
if not isinstance(redis, tornadis.TornadisException):
data = kwargs.get('data')
if args[0] == 'hmset' and data:
params = []
[params.extend(x) for x in data.items()]
tmpList = list(args)
tmpList.extend(params)
args = tuple(tmpList)
print args
result = yield redis.call(*args)
expire = kwargs.get('expire')
if expire and len(args) > 1:
key = args[1]
yield redis.call('expire', key, expire)
if args[0] == 'hgetall' and result and type(result)==list:
from itertools import izip
i = iter(result)
result = dict(izip(i, i))
raise gen.Return(result)
| [
"jsongo@qq.com"
] | jsongo@qq.com |
class Person:
    """A person with a private name, year of birth and height."""

    def __init__(self, person_name: str, year_of_birth: int, height: int):
        self.__name = person_name
        self.__date_of_birth = year_of_birth
        self.__height = height
        # Unused placeholder attribute, kept for compatibility.
        self.abc = None

    def get_year_of_birth(self):
        """Return the stored year of birth."""
        return self.__date_of_birth

    def get_name(self):
        """Return the current name."""
        return self.__name

    def set_name(self, newName):
        """Replace the stored name."""
        self.__name = newName

    def get_summery(self):
        """Print a marker line, then return a one-line summary string."""
        print("Method in Person class:")
        return f"name:{self.__name} DOB: {self.__date_of_birth} Height: {self.__height}"
class Student(Person):
    """A Person who additionally has an email address and a student id."""

    def __init__(self, person_name: str, year_of_birth: int, height: int, email: str, student_id: str):
        super().__init__(person_name, year_of_birth, height)
        self.id = student_id
        self.email = email

    def get_summery(self):
        # Overrides Person.get_summery with student-specific fields.
        print("Method in Student class:")
        return f"name:{self.get_name()} Email: {self.email} Year: {self.get_year_of_birth()}"

    def __str__(self):
        # Lets print(student) show the summary text directly; every class
        # inherits a default __str__, this replaces it.
        return f"name:{self.get_name()} Email: {self.email} Year: {self.get_year_of_birth()}"
# Demo: exercise the Student subclass, then show a class used as a plain
# attribute namespace (struct-like usage).
student = Student("hadi", 2020, 55, "hadiuzzaman@gmail.com", "1702020")
print(student.get_summery())
print(student)  # __str__ makes the object printable directly


class Book:
    """Empty class used like a record: attributes are attached later."""
    pass


obj = Book
obj.name = "Java"
obj.price = "230"
obj.author = "Hadi"
print(obj.author)
"52348628+hadiuzzaman524@users.noreply.github.com"
] | 52348628+hadiuzzaman524@users.noreply.github.com |
7eb562b71baf118ff5c51e211f5f193073246c96 | 5a2cc432d4a458bc3d5c310c18f70bfbf0b02342 | /专利查询和搜集/spider.py | 3f696ffd16add0793dda46494ad19b4b097e7dd4 | [] | no_license | blime4/hieyoung | d901e0e9ed002f4c3c4777c011bc0805e46ded08 | 80bc9b04cd852b68522a57f8c4580c3987d8d500 | refs/heads/main | 2023-01-03T15:53:50.471188 | 2020-10-29T04:06:25 | 2020-10-29T04:06:25 | 302,527,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,200 | py | from tkinter import *
from tkinter import messagebox
import os
import requests
import time
import pandas as pd
from bs4 import BeautifulSoup
from contextlib import closing
import threading
# from pdf2image import convert_from_path
from PIL import Image, ImageEnhance,ImageTk
from tkinter.ttk import Treeview
import xlsxwriter
import xlrd
import aiohttp
import asyncio
import nest_asyncio
nest_asyncio.apply()
import fitz
from shutil import copyfile
import threading
import webbrowser
import aiomultiprocess
key_words,done_words=[],[]
SURL_2,SURL = "",""
time_sacle = ""
def spider():
global key_words,done_words
if len(key_words)==0:
messagebox.showinfo("提示","关键词为空")
else:
showinfo("[ run ]---开始爬取数据")
showinfo("[ tip ]---这一步需要的时间,和爬取的时间间隔和爬取的内容数量有关,请耐心等待。")
for_key_words = [key for key in key_words if key not in done_words]
key_urls = {}
if not os.path.exists("原始数据"):
os.mkdir("原始数据")
if len(key_words)==0:
messagebox.showinfo("提示","关键词为空")
for key_word in for_key_words:
if "&" in key_word:
term_1 = key_word.split("&")[0]
term_2 = key_word.split("&")[1]
key_words_url = SURL_2
key_words_url = key_words_url.replace("search_change",str(term_1))
key_words_url = key_words_url.replace("term2_change",str(term_2))
url_1 = key_words_url
url_1 = url_1.replace("page_change","1")
key_urls[key_word]=url_1
else:
key_words_url = SURL
key_words_url = key_words_url.replace("search_change",str(key_word))
url_1 = key_words_url
url_1 = url_1.replace("page_change","1")
key_urls[key_word]=url_1
url_lst_failed = []
url_lst_successed = []
global time_sacle
async def get_page(session,key):
async with session.get(key_urls[key], timeout=300) as resp:
if resp.status != 200:
url_lst_failed.append(key_urls[key])
else:
url_lst_successed.append(key_urls[key])
return await resp.text(),key
async def parser(html,key):
page_a = []
page_img = []
page_href = []
soup = BeautifulSoup(html, 'html.parser')
try:
total_len = soup.find("body").find_all("i")[1].find_all("strong")[2].get_text()
except:
total_len = 0
if total_len == 0:
showinfo(key+"---没有找到匹配专利")
done_words.append(key)
else:
page_num = int(float(total_len)/50.5)+2
showinfo(key+"一共"+str(total_len)+"个专利,共"+str(page_num-1)+"页")
urls = []
if page_num >= 2:
for i in range(2,page_num):
key_words_url = SURL
key_words_url = key_words_url.replace("search_change",str(key))
url_i = key_words_url
url_i = url_i.replace("page_change",str(i))
urls.append(url_i)
soup_tb = soup.find_all("table")
for tb in soup_tb:
for tr in tb.find_all("tr"):
valign_top = list(tr.find_all("td",attrs={"valign":"top"}))
if len(valign_top)>=2:
num = valign_top[1].get_text().replace(",","")
img = "https://pdfpiw.uspto.gov/.piw?Docid="+str(num)
top = valign_top[2]
href = "http://patft.uspto.gov/"+str(top.a.get("href"))
a = top.get_text()
a = a.replace("\n"," ")
page_a.append(a)
page_img.append(img)
page_href.append(href)
page_dict = {"标题":page_a,"专利链接":page_href,"图片链接":page_img}
page_df = pd.DataFrame(page_dict)
page_df.to_csv("./原始数据/"+key+".csv",index=None,header=False)
showinfo("导出"+key+"第一页")
return urls
async def get_page_s(session,key,url):
async with session.get(url, timeout=60) as resp:
if resp.status != 200:
url_lst_failed.append(url)
else:
url_lst_successed.append(url)
return await resp.text(),key
async def parser_s(html,key):
page_a = []
page_img = []
page_href = []
soup = BeautifulSoup(html, 'html.parser')
soup_tb = soup.find_all("table")
for tb in soup_tb:
for tr in tb.find_all("tr"):
valign_top = list(tr.find_all("td",attrs={"valign":"top"}))
if len(valign_top)>=2:
num = valign_top[1].get_text().replace(",","")
img = "https://pdfpiw.uspto.gov/.piw?Docid="+str(num)
top = valign_top[2]
href = "http://patft.uspto.gov/"+str(top.a.get("href"))
a = top.get_text()
a = a.replace("\n"," ")
page_a.append(a)
page_img.append(img)
page_href.append(href)
page_dict = {"标题":page_a,"专利链接":page_href,"图片链接":page_img}
page_df = pd.DataFrame(page_dict)
page_df.to_csv("./原始数据/"+key+".csv",index=None,mode='a',header=False)
showinfo(key+" +1")
async def download_s(kurl):
key,url = kurl
async with aiohttp.ClientSession() as session:
html,key = await get_page_s(session,key,url)
await parser_s(html,key)
await asyncio.sleep(int(time_sacle))
async def download(key):
async with aiohttp.ClientSession() as session:
html,key = await get_page(session,key)
urls = await parser(html,key)
urls = [[key,i] for i in urls]
async with aiomultiprocess.Pool() as pool:
await pool.map(download_s,urls)
await asyncio.sleep(int(time_sacle))
start = time.time()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = [asyncio.ensure_future(download(key)) for key in key_urls]
if len(tasks) != 0:
loop.run_until_complete(asyncio.wait(tasks))
showinfo("[ tip ]---所有第一页已经爬取完成")
showinfo("#"*40)
end = time.time()
showinfo('总共耗时{}秒'.format(end-start))
showinfo("[ ok ]---全部导出完毕,保存在“原始数据”文件夹中")
| [
"13192551898@163.com"
] | 13192551898@163.com |
faefbf8c3dbb1076390463c58cb0b0e98d8bafc4 | 6bc77013027222ffc4ecb1524729a2ef580d78b3 | /olfactory/detection/tools.py | 4424b816678df0c2767b6341701db54629cecdbf | [
"Apache-2.0"
] | permissive | OctaveLauby/olfactory | 819ae759637e36df460daa6447ca6f5b990fea2f | 679b67459c12002041a8f77e1bdffe33d776500b | refs/heads/master | 2020-04-30T22:11:04.845805 | 2019-03-22T14:38:39 | 2019-03-22T14:38:39 | 177,112,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | import numpy as np
def diff(list1, list2):
    """Compute the set-wise difference between two lists.

    Returns a dict with keys:
        'common': elements present in both lists,
        'minus':  elements only in ``list1``,
        'plus':   elements only in ``list2``.

    About:
        Because 0 == False and 1 == True, diff may not work as wanted with
        list mixing booleans and integers.
    """
    left, right = set(list1), set(list2)
    shared = left & right
    return {
        'common': shared,
        'minus': left - shared,
        'plus': right - shared,
    }
def group_consecutives(a, step=1):
    """Split a sequence into runs of step-consecutive elements.

    Example:
        >> group_consecutives([1, 2, 4, 5, 6, 9], step=1)
        [[1, 2], [4, 5, 6], [9]]
    """
    if not len(a):
        return []
    # Indices where the gap to the previous element differs from `step`
    # mark the start of a new run.
    break_points = np.where(np.diff(a) != step)[0] + 1
    return np.split(a, break_points)
def linearize(a, index=-1):
    """Linearize vector in 2 linear segments.

    Assumption: a is based on regular step (evenly spaced samples).

    Args:
        a (np.ndarray): input vector.
        index (int): index where to split linearization; if index is out of
            bounds (<= 0 or >= len(a) - 1), a single segment from a[0] to
            a[-1] is returned instead.

    Return:
        (np.ndarray): piecewise-linear approximation, same length as ``a``.
    """
    # Degenerate split point: one straight line from a[0] to a[-1].
    if index <= 0 or index >= (len(a) - 1):
        return ((a[-1] - a[0]) / (len(a)-1)) * np.arange(0, len(a)) + a[0]
    y = a[index]
    # First segment: a[0] .. a[index], inclusive (index + 1 points).
    fst_seg = ((y - a[0]) / index) * np.arange(index+1) + a[0]
    # Second segment: a[index] .. a[-1]; rindex is its number of steps.
    rindex = len(a) - index - 1
    lst_seg = ((a[-1] - y) / rindex) * np.arange(rindex+1) + y
    # Drop the duplicated join point a[index] from the second segment.
    return np.concatenate([fst_seg, lst_seg[1:]])
| [
"WX5812@D70.tes.local"
] | WX5812@D70.tes.local |
8f18a7a3cb0b452be92e2c21ca740144639a7e69 | 7e4a1838dbcbe0526f20b4b49f88a3f213dbc712 | /npcaller/fasta.py | 7d1d78befe1990ff329540e7f2e2e5f87acb256e | [
"MIT"
] | permissive | grst/nanopore_pkg | c5c8ee940ddd9218c08846ba5e5884c697914ca6 | e13ccfae0be79f23ae3270b09744726504b0e58f | refs/heads/master | 2023-04-02T14:38:52.410352 | 2020-11-06T19:34:37 | 2020-11-06T19:34:37 | 48,172,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | """
Since skbio and Biopython are overkill and slightly to complicated most of the time
I came up with this really simple fasta-io class.
"""
from itertools import groupby
class FastaReader(object):
    """Minimal reader for FASTA files.

    Accepts either a path or an already-open file-like object.
    """

    def __init__(self, file):
        # Anything exposing .read() is treated as an open handle;
        # everything else is assumed to be a path and opened for reading.
        self.file = file if hasattr(file, 'read') else open(file, 'r')

    def get_entries(self):
        """Yield (header, sequence) tuples, one per FASTA record.

        The header keeps its trailing newline (only the leading '>' is
        stripped); the sequence is the concatenation of all stripped
        sequence lines following the header.
        """
        for is_header, lines in groupby(self.file, lambda line: line[0] == ">"):
            if is_header:
                # Of a run of consecutive header lines, only the first
                # is kept (matches the original behaviour).
                header = next(lines)[1:]
            else:
                yield header, "".join(line.strip() for line in lines)

    def close(self):
        """Close the underlying file handle."""
        self.file.close()
class FastaWriter(object):
    """Minimal FASTA writer that wraps sequences at a fixed line width."""

    # Maximum number of sequence characters per output line.
    SPLIT = 80

    def __init__(self, file):
        # Accept either an open writable handle or a path to create.
        self.file = file if hasattr(file, 'write') else open(file, 'w')

    def write_entry(self, header, sequence):
        """Write one FASTA record.

        Args:
            header: header text, written as ``>header``
            sequence: sequence string, wrapped every ``SPLIT`` characters
        """
        self.file.write(">{0}\n".format(header))
        for start in range(0, len(sequence), self.SPLIT):
            self.file.write(sequence[start:start + self.SPLIT] + "\n")

    def flush(self):
        """Flush pending writes to the underlying file."""
        self.file.flush()

    def close(self):
        """Close the underlying file handle."""
        self.file.close()
| [
"mail@gregor-sturm.de"
] | mail@gregor-sturm.de |
fc77466e30f68146a40c8d3ba3b858f15859ddb5 | 19ddab74600f71700a6b693281d0180d5271f295 | /程序员面试金典/03_03_堆盘子.py | 2f96f3b2e8fb699bf5461a949729ba6f932d252c | [] | no_license | zhulf0804/Coding.Python | 4d55a430da1a8077c81feba65c13ac654aaf094a | 46ab03e23d15ebd5434ef4dd5ae99130000b00a5 | refs/heads/master | 2022-09-14T18:40:59.880941 | 2022-08-20T08:25:51 | 2022-08-20T08:25:51 | 213,113,482 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | class StackOfPlates:
    def __init__(self, cap: int):
        """Create a stack of plates where each sub-stack holds at most ``cap``."""
        self.stack = []  # list of sub-stacks, each a list of pushed values
        self.cap = cap   # per-sub-stack capacity; 0 disables all pushes
    def push(self, val: int) -> None:
        """Push ``val`` onto the last sub-stack, opening a new one when full."""
        # A capacity of 0 means nothing can ever be stored.
        if self.cap == 0:
            return
        # Open a fresh sub-stack when none exists or the last one is full.
        if len(self.stack) == 0 or len(self.stack[-1]) == self.cap:
            self.stack.append([])
        self.stack[-1].append(val)
    def pop(self) -> int:
        """Pop from the last sub-stack; return -1 when nothing can be popped."""
        if self.cap == 0 or len(self.stack) == 0:
            return -1
        val = self.stack[-1].pop()
        # Drop the last sub-stack once it becomes empty.
        if len(self.stack[-1]) == 0:
            self.stack = self.stack[:-1]
        return val
    def popAt(self, index: int) -> int:
        """Pop from the sub-stack at position ``index``; -1 if impossible."""
        # Reject zero capacity or an out-of-range sub-stack index.
        if self.cap == 0 or index >= len(self.stack):
            return -1
        val = self.stack[index].pop()
        # Remove the emptied sub-stack so the remaining indices stay contiguous.
        if len(self.stack[index]) == 0:
            self.stack = self.stack[:index] + self.stack[index+1:]
        return val
# Your StackOfPlates object will be instantiated and called as such:
# obj = StackOfPlates(cap)
# obj.push(val)
# param_2 = obj.pop()
# param_3 = obj.popAt(index) | [
"zhulf0804@gmail.com"
] | zhulf0804@gmail.com |
c4fd6afe113c170e2b3985c756cac05390668ae8 | e04dbc32247accf073e3089ed4013427ad182c7c | /hhkb2020/C TLE.py | 61c4d78700c4375a274fc85a2aa4fa2d73278a89 | [] | no_license | twobooks/atcoder_training | 9deb237aed7d9de573c1134a858e96243fb73ca0 | aa81799ec87cc9c9d76de85c55e99ad5fa7676b5 | refs/heads/master | 2021-10-28T06:33:19.459975 | 2021-10-20T14:16:57 | 2021-10-20T14:16:57 | 233,233,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import numpy as np # np.lcm(),np.gcd()
# Find the smallest value in 0..200000 that does not occur in the input list.
# N is read only to consume the count line; it is not used afterwards.
N = int(input())
arrP = np.array(input().split(),dtype=np.int64)
# Candidate values and a boolean mask of "still available" candidates.
arrAll = np.arange(200000+1,dtype=np.int64)
mask = np.ones(200000+1,dtype=np.int64) == 1
for p in arrP:
    mask[p] = False
# The first remaining candidate is the answer.
print(arrAll[mask][0])
"twobookscom@gmail.com"
] | twobookscom@gmail.com |
0039c651448039361b81905af4f22f0c708cdfb3 | 7c2a13aedf2e959453713108d7eac70bd46a328e | /cgi-bin/p220.py | 4a748dadf8fede07e9f585365f46abfcc9253ffa | [] | no_license | uesin/python | bc7a1d89d3edb5a9d42c37ecb4ca7b114c481906 | 13945cb4000ab2970df99584ea7300e0e4ed44ba | refs/heads/master | 2020-11-29T05:56:17.494331 | 2020-01-06T06:53:18 | 2020-01-06T06:53:18 | 230,038,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #URLパラメーター値取得
#!/usr/bin/env python3
# CGI script: echo the "mode" URL parameter and then dump all parameters.
import cgi
# HTTP response header, blank line, then <pre> so output renders verbatim.
print("Content-Type: text/html; charset=utf-8")
print("")
print("<pre>")
# Parse the query-string / form parameters.
form = cgi.FieldStorage()
# Fetch the "mode" parameter (empty string when absent) and echo it.
mode = form.getvalue("mode", default="")
print("mode", mode)
# Dump every received parameter as key = value.
print("---- all params ----------")
for k in form.keys():
    print(k,"=",form.getvalue(k))
"kazukihana87@gmail.com"
] | kazukihana87@gmail.com |
c30a552b0b096eb2c4c08d9b42ab374586647447 | 49ac1eb5c640f9256f56954b14b6630405811b7f | /src/python/ema/ema.py | 668ad5776a1180d6f302635b8e5090334a4d80d5 | [] | no_license | trulsmag/lpctddf | 5701bd7f297766b482b4fc7e9ad3d8dabacba4a4 | 41671f4f4bb76a3ba3ccaa6fe01c3e34dee8d8b4 | refs/heads/master | 2020-12-24T12:06:31.947705 | 2013-11-26T13:59:07 | 2013-11-26T13:59:07 | 7,489,746 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | #Python script for generating test vectors for EMA filter simulation.
import random #For generating random numbers.
import time #For timing each sort function with "time.clock()".
#Function for converting integer to binary.
def int2bin(n, count=128):
    """Return the binary representation of ``n`` as a string of ``count`` bits.

    Args:
        n: integer to convert; negative values are wrapped modulo 2**count,
           i.e. rendered in two's complement.
        count: number of bits / output characters (MSB first).

    Returns:
        A string of exactly ``count`` '0'/'1' characters.
    """
    # Masking to the low `count` bits makes a single format() call
    # equivalent to sampling bits count-1..0 of n's (infinite) two's
    # complement representation, as the original per-bit shift-and-join
    # loop did -- but in one C-level operation.
    return format(n & ((1 << count) - 1), "0{}b".format(count))
length = 8 #Number of bits, length of testvectors.
X = 255 #Max value.
N = 100 #Number of testcases.
M = 3 #Number of input vectors.
# NOTE(review): 'list' shadows the builtin; consider renaming if this grows.
list = []
#Erase file content.
w = open('_input.dat', 'w')
w.close()
w = open('_output.dat', 'w')
w.close()
#Open files.
input_file = open('_input.dat', 'a+')
output_file = open('_output.dat', 'a+')
#Generate N input and output test vectors sets.
for k in range(0, N):
    #Append M random input test vectors (SAM_i EMA_{i-1} ALPHA)to list.
    for i in range(0, M):
        list.append(random.randint(0, X-1))
    #Write input test vectors in binary to "input_file".
    for j in range(0, M):
        input_file.write(int2bin(list[j], length))
    input_file.write("\n")
    #Compute "EMA_i".
    # Fixed-point EMA update: ema_new = ema_old + alpha*(sam - ema_old),
    # where ">> length" is a floor division by 2**length (alpha scaling).
    sam = list[0]
    emaold = list[1]
    alpha = list[2]
    tmp = sam - emaold
    tmp = alpha*tmp
    tmp = tmp >> length
    tmp = emaold + tmp
    emanew = tmp
    #Write output test vectors in binary to "output_file".
    output_file.write(int2bin(emanew, length))
    output_file.write("\n")
    #Clean list.
    for j in range(0, M):
        list.pop()
#Close files.
input_file.close()
output_file.close()
| [
"truls88@gmail.com"
] | truls88@gmail.com |
3b42b2004ad1a8926033063a87025b6b469c2212 | d72a480f653736ab124ccb39ff131c7933a8419f | /desafio033_maior_menor_valores.py | dd3d70653f1764f09449f10288a5db02a51b12af | [] | no_license | joaabjb/curso_em_video_python_3 | fb3b7e22583a4f81140d763f463c20e65bd3a4d0 | 060d07436618d677e747f1d8683750f10066c96a | refs/heads/master | 2022-12-13T09:55:48.489897 | 2020-09-05T02:03:41 | 2020-09-05T02:03:41 | 290,318,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | print('Digite três números inteiros diferentes')
# Read three integers and report which is the largest and which the smallest
# ("maior"/"menor"), using nested comparisons only (no max()/min()).
n1 = int(input('Digite o primeiro numero: '))
n2 = int(input('Digite o segundo numero: '))
n3 = int(input('Digite o terceiro número: '))
# Case n1 > n2: the maximum is n1 or n3.
if n1 > n2:
    if n1 > n3:
        if n2 > n3:
            print(f'{n1} é o maior e {n3} é o menor')
        else:
            print(f'{n1} é o maior e {n2} é o menor')
    else:
        print(f'{n3} é o maior e {n2} é o menor')
# Case n2 >= n1: the maximum is n2 or n3.
else:
    if n2 > n3:
        if n1 > n3:
            print(f'{n2} é o maior e {n3} é o menor')
        else:
            print(f'{n2} é o maior e {n1} é o menor')
    else:
        print(f'{n3} é o maior e {n1} é o menor')
| [
"48816108+joaabjb@users.noreply.github.com"
] | 48816108+joaabjb@users.noreply.github.com |
284681300a16787eff81ba74aed127883382f932 | 6d9c75de320fb984e163d8af55d99ba8f7eff816 | /run.py | 7f89fdff30c339a7a32a7dcfb037a74f8187d4b7 | [] | no_license | szmer/BERTPolishWSD | 3b261e459c9cef45c6ea03062b479435e62e9c55 | 1f110753024f29999b1d07c10947de14a9b9ebdb | refs/heads/master | 2021-04-14T04:30:39.885162 | 2020-05-19T15:08:40 | 2020-05-19T15:08:40 | 249,206,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,694 | py | import argparse
import copy
import logging
import pickle
from local_settings import (
bert_model_path, bert_tokenizer_name, pl_wordnet_path, annot_corpus_path, nkjp_path,
is_tokenizer_cased
)
from wsd.corpora import load_annotated_corpus, wordnet_corpus_for_lemmas, load_nkjp_ambiguous
from wsd.bert import bert_model, bert_tokenizer
from wsd.embedding_dict import build_embedding_dict
from wsd.evaluate import embedding_dict_accuracy, compare_predictions
argparser = argparse.ArgumentParser(description=
    'Run WSD experiments with estimating senses from examples with BERT.')
argparser.add_argument('--load', help='Load a premade embeddings dictionary.')
argparser.add_argument('--load2', help=
    'Load a second premade embeddings dictionary. You have to also compare.')
argparser.add_argument('--save', help='Save embeddings dictionary to the file.')
argparser.add_argument('--extend',
    help='Extend the trained embedding dictionary with additional, similar cases taken from '
    'the NKJP corpus.',
    action='store_true')
argparser.add_argument('--incremental',
    help='Use incremental strategy when extending dictionaries with corpora of ambiguous texts.',
    action='store_true')
argparser.add_argument('--compare',
    help='Compare a newly made embedding dictionary\'s predictions with the loaded one\'s',
    action='store_true')
argparser.add_argument('--load_test_lemmas',
    help='Load Wordnet glosses also for lemmas from the test corpus to simulate a whole Wordnet'
    ' information run. Used only when building a dictionary from scratch. Will not use these '
    'lemmas to extend.',
    action='store_true')
argparser.add_argument('--weigh_wordpieces',
    help='Use NKJP to weight BERT tokenization wordpieces, weighing down the most frequent ones'
    'in every form.',
    action='store_true')
argparser.add_argument('--cut_wordpieces',
    help='Ignore BERT tokenization wordpieces beyond the fourth one in each form',
    action='store_true')
argparser.add_argument('--case',
    help='Use the prediction with average (default), best or worst case. The "average" case '
    'compares the embedding to average embedding of each sense; the "best" one selects the sense '
    'with one nearest embedding; the "worst" case selects the sense where the farther embedding '
    'is still nearer than farther ones of other senses.',
    default='average')
args = argparser.parse_args()
# Validate mutually-dependent flag combinations up front.
if args.compare and not args.load:
    raise ValueError(
        'You cannot compare unless you load and train another dictionary at the same time.')
if not args.extend and args.incremental:
    raise ValueError('You cannot use the incremental strategy when not extending.')
if not args.compare and args.load and (args.incremental or args.load_test_lemmas):
    raise ValueError(
        'You cannot use the incremental or load_test_lemmas option when only loading an '
        'embeddings dictionary.')
if args.load2 and not args.compare:
    raise ValueError(
        'You have to compare embedding dictionaries when you load two of them.')
if args.case and not args.case in ['average', 'best', 'worst']:
    raise ValueError('The case has to be one of average, best and worst.')
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')
model = bert_model(bert_model_path)
tokenizer = bert_tokenizer(bert_tokenizer_name)
logging.info('Loading the annotated corpus...')
train_corp, test_corp = load_annotated_corpus(annot_corpus_path, test_ratio=7)
# embeddings_dict1 is trained or load2 if present, embeddings_dict2 can be only from --load
embeddings_dict1, embeddings_dict2 = None, None
# Training a new embeddings dictionary.
if (not args.load or args.compare) and not args.load2:
    logging.info('Loading wordnet...')
    if args.load_test_lemmas:
        all_lemmas = copy.copy(train_corp.lemmas)
        all_lemmas.update(test_corp.lemmas)
    wordnet_corp = wordnet_corpus_for_lemmas(pl_wordnet_path,
        train_corp.lemmas if not args.load_test_lemmas else all_lemmas,
        model, tokenizer)
    if args.extend or args.weigh_wordpieces:
        logging.info('Loading NKJP...')
        nkjp_corp = load_nkjp_ambiguous(nkjp_path, lowercase=not is_tokenizer_cased)
    if args.weigh_wordpieces:
        logging.info('Counting wordpieces and building the embedding dictionary...')
        embeddings_dict1 = build_embedding_dict(model, tokenizer, train_corp, wordnet_corp,
            count_wordpieces_in=nkjp_corp, cut_wordpieces=args.cut_wordpieces)
    else:
        logging.info('Building the embedding dictionary...')
        embeddings_dict1 = build_embedding_dict(model, tokenizer, train_corp, wordnet_corp,
            cut_wordpieces=args.cut_wordpieces)
    # Extending, if desired.
    if args.extend:
        logging.info('Extending embeddings with NKJP... (incremental: {})'.format(
            args.incremental))
        embeddings_dict1.extend_with_ambiguous_corpus(nkjp_corp, incremental=args.incremental)
    # Saving the embedding dictionary.
    if args.save:
        # Fixed log message: this branch pickles the dictionary to disk.
        logging.info('Saving the dictionary to disk...')
        with open(args.save, 'wb') as out_file:
            pickle.dump(embeddings_dict1, out_file)
# Loading a premade embeddings dictionary.
# Bug fix: this used to be an "else", so with --compare --load (and no
# --load2) embeddings_dict2 was never loaded and .predict() below crashed
# on None.  Loading whenever --load is given covers that path; all other
# paths behave exactly as before (--load2 implies --compare implies --load).
if args.load:
    logging.info('Loading the premade dictionary...')
    with open(args.load, 'rb') as premade_file:
        embeddings_dict2 = pickle.load(premade_file)
# load2, if requested, takes the place of the dictionary that would be trained.
if args.load2:
    logging.info('Loading the premade dictionary...')
    with open(args.load2, 'rb') as premade_file:
        embeddings_dict1 = pickle.load(premade_file)
# Normal accuracy evaluation.
if not args.compare:
    logging.info('Accuracy evaluation (the {} case variant)...'.format(args.case))
    result = embedding_dict_accuracy(
        embeddings_dict1 if embeddings_dict2 is None else embeddings_dict2,
        test_corp, case=args.case)
    logging.info(result)
# Comparing responses.
else:
    logging.info('Comparing {} (left) and {} (right), the {} case variant...'.format(
        args.load2 if args.load2 is not None else 'currently built',
        args.load, args.case))
    test_corp_ambiguous = test_corp.get_ambiguous_version()
    prediction1 = embeddings_dict1.predict(test_corp_ambiguous, case=args.case)
    prediction2 = embeddings_dict2.predict(test_corp_ambiguous, case=args.case)
    for item in compare_predictions(prediction1, prediction2):
        print(item)
| [
"szymon@szymonrutkowski.pl"
] | szymon@szymonrutkowski.pl |
a9f4b7a28904f996db7f087c513dc0ecbbe8b08b | e750a3ce806e018678ddcd79912893ffbd07bd43 | /kyu-05/valid-date-regex.py | 88bd4d91ad6d8a49d773620e3a80b87799b3eb76 | [] | permissive | Ristani/Codewars-Katas | b16b6186ba51c5e7292875d4576234d15d037d33 | f6226947c1bd1c010d0e34158e39e3a5b76fa9b8 | refs/heads/main | 2023-08-13T10:04:27.942826 | 2021-09-19T05:34:45 | 2021-09-19T05:34:45 | 398,741,152 | 0 | 0 | MIT | 2021-09-04T03:09:19 | 2021-08-22T07:34:31 | Python | UTF-8 | Python | false | false | 114 | py | valid_date = compile('\[((?!02-(?:29|30))(?:0[1-9]|1[012])-(?:0[1-9]|1[0-9]|2[0-9]|30)|(?:0[13578]|1[02])-31)\]')
| [
"ristani@gmail.com"
] | ristani@gmail.com |
60f824b28d0b87883df4ae5c1a9bc4856a0705de | 8f06d70c8a56be5ee9825234bfa6f53481cda63b | /Kumpulan Tugas/reduce.py | 40b426dcebc79746e2b0bda7f837f2f3480e47cd | [] | no_license | dimaswahabp/Kumpulan-Catatan-Fundamental | 2fadc1623888e95b81dcbfa5e6e4d6de06fb86cd | c7a1bed237492b23ead7947f5ca0e42021ad415c | refs/heads/master | 2020-09-08T01:34:42.212823 | 2019-11-11T12:23:02 | 2019-11-11T12:23:02 | 220,972,788 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | number = [1,2,3,4]
from functools import reduce  # Fix: reduce is not a builtin in Python 3.

# Product of all elements via reduce (uses 'number' defined above).
y = reduce(lambda a, b: a * b, number)
print(y)
# The same product computed with an explicit loop.
number = [1,2,3,4]
hasil = 1
for i in number:
    hasil *= i
print(hasil)
# String concatenation via reduce.
kata = ['ini', 'ibu', 'budi']
b = reduce(lambda a, b: a + b, kata)
print(b) | [
"noreply@github.com"
] | dimaswahabp.noreply@github.com |
3f34150915354d47c739a9c429ea364e4e22cf30 | f33df559f9c02efeaf29fc9a4be58653867a4efc | /Python/Greedy/change.py | 757a9f32828d7b1595ba2b6e955006735c9dbb21 | [] | no_license | Jiseok97/AlgoriJSm_Phython | 79b66bc582ec24776e55b46a3817afcdb1f21bac | 225370e005d237657a08fd441d79bcdc7770d321 | refs/heads/master | 2023-05-03T22:28:08.636401 | 2021-05-09T02:48:26 | 2021-05-09T02:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # 거스름돈
# 내가 푼 코드
price = 1260
have = [500, 100, 50, 10]
count = 0
for i in range(4):
if i == 0:
count += price // have[i]
else:
count += price % have[i - 1] // have[i]
print(count)
# 동빈나 코드
n = 1260
count = 0
coin_types = [500,100,50,10]
for coin in coin_types:
count += n // coin
n %= coin
print(count) | [
"wltjr1426@naver.com"
] | wltjr1426@naver.com |
038995c7a3f973178d63ec73cbc9bdf3ad967831 | eebad484eff57521e97357c8f1f7913598176134 | /pokemon/main_app/models.py | 00b516e62a9f9d68dd6b2805e799347f9c2b6220 | [] | no_license | alxalves/django-pokemon | d319d469938ee7320d251e528877524d5d3aa51e | 3cbdb533177c65a134cce5a9add18ed9ef97f4d0 | refs/heads/main | 2023-08-18T15:05:23.021226 | 2021-10-08T03:15:02 | 2021-10-08T03:15:02 | 408,672,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
# Create your models here.
# Choice pairs for Move.attribute: (stored value, human-readable label).
MOVEATTRIBUTE = (
    ('Phys', 'Physical'),
    ('Spec', 'Special'),
    ('Stat', 'Status')
)
class Ribbon(models.Model):
    """An award ribbon; attached to Pokemon via a many-to-many relation."""
    name = models.CharField(max_length=100)
    color = models.CharField(max_length=100)
    def __str__(self):
        # NOTE(review): get_name_display() only exists for fields declared
        # with choices, and 'name' has none -- this likely raises
        # AttributeError at runtime.  Looks copy-pasted; confirm intent.
        return f"{self.get_name_display()} on {self.name}"
    def get_absolute_url(self):
        # NOTE(review): reverses the 'detail' route with this ribbon's own
        # id as 'pokemon_id' -- verify this is the intended target.
        return reverse('detail', kwargs={'pokemon_id': self.id})
class Pokemon(models.Model):
    """A pokemon owned by a user; may carry many ribbons (and moves via FK)."""
    name = models.CharField(max_length=100)
    type = models.CharField(max_length=100)
    description = models.TextField(max_length=250)
    level = models.IntegerField()
    ribbons = models.ManyToManyField(Ribbon)
    # Deleting the owning user also deletes their pokemon.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Used by Django after create/update to redirect to this pokemon's page.
        return reverse('detail', kwargs={'pokemon_id': self.id})
class Move(models.Model):
    """A move belonging to a single Pokemon."""
    name = models.CharField(max_length=100)
    type = models.CharField(max_length=100)
    power = models.IntegerField()
    attribute = models.CharField(
        max_length=4,
        # Fix: choices is required for the get_attribute_display() call in
        # __str__ -- Django only generates that method for fields declared
        # with choices; without it __str__ raised AttributeError.
        choices=MOVEATTRIBUTE,
        default=MOVEATTRIBUTE[0][0]
    )
    # Deleting the owning pokemon also deletes its moves.
    pokemon = models.ForeignKey(Pokemon, on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.get_attribute_display()} on {self.name}"
| [
"alxalves2@gmail.com"
] | alxalves2@gmail.com |
732c1c0d174dda04b095303aad10890036f6c911 | 80b69acd5a85b612603ef06e5df94cb19d5ff672 | /portal/views.py | 20ff17b6ce60edf82d843f1c2ea50c74ef41582d | [
"MIT"
] | permissive | zenpassvpn/bbgo | 710145b0e49945dc79678f0e4693f7448968a16c | f16a8e1ca48aad01669d96d0660c37da12f014bb | refs/heads/master | 2020-03-15T04:26:28.771806 | 2018-03-21T01:04:48 | 2018-03-21T01:04:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from boards.models import Board
from boards.table import BoardTable
from django.db.models import Q
from django.shortcuts import redirect, render
def portal(request, page=''):
    """Redirect legacy portal page slugs to their blog posts.

    Any unknown (or empty) slug falls through to the blog home page.
    """
    # Legacy slug -> blog post id (replaces the former if/elif chain).
    redirects = {
        '1019': 43,
        '1039': 44,
        '1044': 45,
        '1064': 46,
        '1080': 47,
        '1318': 48,
        '1364': 50,
        '1374': 52,
        '1168': 53,
        '1260': 54,
    }
    if page in redirects:
        return redirect('blogs:show_post', id=redirects[page])
    return redirect('blogs:blog_home')
def bbgo(request):
    """Show board samples on the bbgo landing page."""
    board_table = BoardTable()
    # Desktop and mobile sample sizes come from the board table config.
    sample_limit, sample_limit_mobile = board_table.get_sample_limit()
    # Visible statuses: normal posts plus notices and warnings.
    qs = Q(status='1normal') | Q(status='3notice') | Q(status='4warning')
    # Only boards 3 and 12 are sampled here.
    table = Q(table=3) | Q(table=12)
    bbs = Board.objects.filter(qs).filter(table).order_by('-id')[
        0:sample_limit]
    return render(
        request,
        "portal/bbgo.html",
        {
            'bbs': bbs,
            'sample_limit_mobile': sample_limit_mobile,
            'app': 'bbgo',
        }
    )
| [
"genonfire@gmail.com"
] | genonfire@gmail.com |
c371789ea1a9647fb9a12254f62d0e48858cc2e5 | 4c8b66ddd046b8e0ddb8610db9eb910fd99c1dfc | /app/models/mart/askue_balance_reg_table.py | 121e2bb552db7612ede0d721a213a3b4dc3fe456 | [] | no_license | giveyourtears/electroComputationServer | 941bcb9fcd3903dadb8a7060a60b93709df1a25e | fcdd44c145eaddd0eb7d93a99cc7064a825623f6 | refs/heads/master | 2022-10-30T22:42:17.643439 | 2020-06-19T13:36:11 | 2020-06-19T13:36:11 | 272,920,520 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from sqlalchemy import Column, String, Integer, Float, DateTime
from app.models.mart.core import MartBase
class BalanceRegTable(MartBase):
    """SQLAlchemy mapping for the 'calc_reg_balance' mart table."""
    __tablename__ = 'calc_reg_balance'
    id = Column(String(100), primary_key=True)
    id_tu = Column(Integer, nullable=False)
    start_period = Column(Float, nullable=False)
    time_start_write = Column(DateTime, nullable=False)
    def __repr__(self):
        return 'id: {}'.format(self.id)
| [
"giveyourtears@gmail.com"
] | giveyourtears@gmail.com |
27b8f49cb7a0e85b1fe35959e45a5d9c84dcb57b | dfb53581b4e6dbdc8e3789ea2678de1e1c4b5962 | /Django/mydjango01/news/views.py | 21a263f4be374c6a40d7fe19b8fd65329d2cf18d | [] | no_license | biabulinxi/Python-ML-DL | 7eff6d6898d72f00575045c5aa2acac45b4b0b82 | 217d594a3c0cba1e52550f74d100cc5023fb415b | refs/heads/master | 2020-06-01T09:13:17.314121 | 2019-06-08T03:59:36 | 2019-06-08T03:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Serve the news app landing page as a plain-text HttpResponse."""
    return HttpResponse("这是news的首页")
| [
"biabu1208@163.com"
] | biabu1208@163.com |
1c8ece7d4249fe611627d70002ee43ad9d5fac23 | fc7bbfdaebb856a908df639f2f2ae1f15e1d84a3 | /app.py | 2ec9085e214c9b9f661ddee1c3ac01f0153191c4 | [] | no_license | Fazza92/stripe-flask-app | 26afb2a03bbfdf178bec79f624a92ee6f90b4627 | 1121c45c497251697380462c072c1cd1ee96670c | refs/heads/master | 2020-04-08T05:31:11.006645 | 2018-11-27T01:58:54 | 2018-11-27T01:58:54 | 159,063,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | from flask import Flask, render_template,request,redirect,url_for
import stripe
app = Flask(__name__)
public_key = "pk_test_TYooMQauvdEDq54NiTphI7jx SECRET_KEY=sk_test_4eC39HqLyjWDarjtT1zdp7dc"
stripe.api_key = "sk_test_BQokikJOvBiI2HlWgH4olfQ2"
@app.route('/')
def index():
return render_template('index.html', public_key=public_key)
@app.route('/thankyou')
def thankyou():
return render_template('thankyou.html')
@app.route('/payment', methods=['POST'])
def payment():
customer = stripe.Customer.create(email=request.form['stripeEmail'],
source=request.form['stripeToken'])
charge = stripe.Charge.create(
customer=customer.id,
amount=1999,
currency='usd',
description='Donation'
)
return redirect(url_for('thankyou'))
if __name__ == '__main__':
app.run(debug=True) | [
"oscarwasil@gmail.com"
] | oscarwasil@gmail.com |
3b199477395e73ead41b6374f4f1e0d538de6b1a | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /homophonous_logography/neural/transformer_model.py | 9264e7380f16b34f04cdfb65679049c04562b23b | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 21,306 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sequence-to-sequence transformer model.
Loosely based on:
https://blog.tensorflow.org/2019/05/transformer-chatbot-tutorial-with-tensorflow-2.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import tensorflow as tf # tf
import homophonous_logography.neural.corpus as data
import homophonous_logography.neural.utils as utils
# Force graph-mode (TF1-style) execution: tf.functions are traced, not run
# eagerly, and eager execution is disabled globally.
tf.config.run_functions_eagerly(False)
tf.compat.v1.disable_eager_execution()
def _create_padding_mask(x):
  """Mark zero-valued (padding) token ids with 1.0 and real tokens with 0.0.

  Returns a float tensor of shape (batch_size, 1, 1, sequence_length),
  ready to broadcast over attention logits.
  """
  pad = tf.cast(tf.math.equal(x, 0), tf.float32)
  return pad[:, tf.newaxis, tf.newaxis, :]
def _create_look_ahead_mask(x):
  """Combine a causal (strictly upper-triangular) mask with x's padding mask."""
  seq_len = tf.shape(x)[1]
  # 1.0 above the diagonal: position i may not attend to positions > i.
  causal = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
  return tf.maximum(causal, _create_padding_mask(x))
def _scaled_dot_product_attention(query, key, value, mask):
  """Compute attention-weighted values plus the attention weight tensor."""
  scores = tf.matmul(query, key, transpose_b=True)
  # Scale by sqrt(d_k) to keep the softmax in a well-conditioned range.
  scores = scores / tf.math.sqrt(tf.cast(tf.shape(key)[-1], tf.float32))
  if mask is not None:
    # Push masked positions to a large negative value so softmax ~zeroes them.
    scores += (mask * -1e9)
  weights = tf.nn.softmax(scores, axis=-1)
  return tf.matmul(weights, value), weights
class MultiHeadAttention(tf.keras.layers.Layer):
  """Multi-head attention implementation.

  Projects query/key/value into num_heads subspaces of size
  d_model / num_heads, applies scaled dot-product attention per head,
  then concatenates the heads and applies a final linear projection.
  """
  def __init__(self, d_model, num_heads, name="multi_head_attention"):
    super(MultiHeadAttention, self).__init__(name=name)
    self.num_heads = num_heads
    self.d_model = d_model
    # d_model must split evenly across heads.
    assert d_model % self.num_heads == 0
    self.depth = d_model // self.num_heads
    self.query_dense = tf.keras.layers.Dense(units=d_model)
    self.key_dense = tf.keras.layers.Dense(units=d_model)
    self.value_dense = tf.keras.layers.Dense(units=d_model)
    # Output projection applied after head concatenation.
    self.dense = tf.keras.layers.Dense(units=d_model)
  def split_heads(self, inputs, batch_size):
    # (batch, seq, d_model) -> (batch, num_heads, seq, depth).
    inputs = tf.reshape(
        inputs, shape=(batch_size, -1, self.num_heads, self.depth))
    return tf.transpose(inputs, perm=[0, 2, 1, 3])
  def call(self, inputs):
    """inputs is a dict with 'query', 'key', 'value' and 'mask' tensors."""
    query, key, value, mask = inputs["query"], inputs["key"], inputs[
        "value"], inputs["mask"]
    batch_size = tf.shape(query)[0]
    # linear layers
    query = self.query_dense(query)
    key = self.key_dense(key)
    value = self.value_dense(value)
    # split heads
    query = self.split_heads(query, batch_size)
    key = self.split_heads(key, batch_size)
    value = self.split_heads(value, batch_size)
    scaled_attention, attention_weights = _scaled_dot_product_attention(
        query, key, value, mask)
    # Undo the head split: back to (batch, seq, d_model).
    scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
    concat_attention = tf.reshape(scaled_attention,
                                  (batch_size, -1, self.d_model))
    outputs = self.dense(concat_attention)
    return outputs, attention_weights
class PositionalEncoding(tf.keras.layers.Layer):
  """Trigonometric positional encoding.

  Precomputes sin/cos position signals for up to `position` positions and
  adds them to the layer input in call().  Note the sines and cosines are
  concatenated along the feature axis (not interleaved).
  """
  def __init__(self, position, d_model):
    super(PositionalEncoding, self).__init__()
    self.pos_encoding = self.positional_encoding(position, d_model)
  def get_angles(self, position, i, d_model):
    # angle(pos, i) = pos / 10000^(2*(i//2) / d_model).
    angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
    return position * angles
  def positional_encoding(self, position, d_model):
    angle_rads = self.get_angles(
        position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],
        i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
        d_model=d_model)
    # apply sin to even index in the array
    sines = tf.math.sin(angle_rads[:, 0::2])
    # apply cos to odd index in the array
    cosines = tf.math.cos(angle_rads[:, 1::2])
    pos_encoding = tf.concat([sines, cosines], axis=-1)
    # Add a leading batch axis for broadcasting in call().
    pos_encoding = pos_encoding[tf.newaxis, Ellipsis]
    return tf.cast(pos_encoding, tf.float32)
  def call(self, inputs):
    # Add encodings for exactly as many positions as the input has.
    return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]
def _encoder_layer(units, d_model, num_heads, dropout, name="encoder_layer"):
  """One layer of the encoder.

  Self-attention followed by a position-wise feed-forward network, each
  sub-layer wrapped with dropout, a residual connection and layer
  normalization.
  """
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")

  # Self-attention sub-layer: queries, keys and values are all the inputs.
  attn_out, _ = MultiHeadAttention(d_model, num_heads, name="attention")({
      "query": inputs,
      "key": inputs,
      "value": inputs,
      "mask": padding_mask,
  })
  attn_out = tf.keras.layers.Dropout(rate=dropout)(attn_out)
  attn_out = tf.keras.layers.LayerNormalization(epsilon=1e-6)(
      inputs + attn_out)

  # Position-wise feed-forward sub-layer.
  ffn_out = tf.keras.layers.Dense(units=units, activation="relu")(attn_out)
  ffn_out = tf.keras.layers.Dense(units=d_model)(ffn_out)
  ffn_out = tf.keras.layers.Dropout(rate=dropout)(ffn_out)
  ffn_out = tf.keras.layers.LayerNormalization(epsilon=1e-6)(
      attn_out + ffn_out)

  return tf.keras.Model(
      inputs=[inputs, padding_mask], outputs=ffn_out, name=name)
# Limit the lengths of input sequences.
# Used as the table size for PositionalEncoding in both encoder and decoder.
_MAX_SEQUENCE_LENGTH = 500
def _encoder(vocab_size,
             num_layers,
             units,
             d_model,
             num_heads,
             dropout,
             name="encoder"):
  """Encoder component.

  Embeds the input ids, adds positional encodings and stacks `num_layers`
  encoder layers.

  Args:
    vocab_size: size of the input vocabulary.
    num_layers: number of stacked encoder layers.
    units: hidden size of the feed-forward sublayers.
    d_model: model (embedding) dimensionality.
    num_heads: number of attention heads.
    dropout: dropout rate.
    name: model name.

  Returns:
    A Keras model mapping [inputs, padding_mask] to the encoder outputs.
  """
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
  embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
  # Scale embeddings by sqrt(d_model) before adding positional encodings.
  embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
  embeddings = PositionalEncoding(_MAX_SEQUENCE_LENGTH, d_model)(embeddings)
  outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
  for i in range(num_layers):
    outputs = _encoder_layer(
        units=units,
        d_model=d_model,
        num_heads=num_heads,
        dropout=dropout,
        name="encoder_layer_{}".format(i),
    )([outputs, padding_mask])
  return tf.keras.Model(
      inputs=[inputs, padding_mask], outputs=outputs, name=name)
def _decoder_layer(units, d_model, num_heads, dropout, name="decoder_layer"):
  """Single decoder layer.

  Two attention blocks (masked self-attention, then attention over the
  encoder outputs), followed by a position-wise feed-forward network, each
  with a residual connection and layer normalization.
  """
  inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
  enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
  look_ahead_mask = tf.keras.Input(
      shape=(1, None, None), name="look_ahead_mask")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
  # Block 1: masked self-attention over the decoder inputs.
  attention1, attention_weights_block1 = MultiHeadAttention(
      d_model, num_heads, name="attention_1")(inputs={
          "query": inputs,
          "key": inputs,
          "value": inputs,
          "mask": look_ahead_mask
      })
  # NOTE(review): unlike the other sub-layers below, no dropout is applied to
  # attention1 before the residual/norm -- confirm this is intentional.
  attention1 = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(attention1 + inputs)
  # Block 2: attend over the encoder outputs, using block-1 output as query.
  attention2, attention_weights_block2 = MultiHeadAttention(
      d_model, num_heads, name="attention_2")(inputs={
          "query": attention1,
          "key": enc_outputs,
          "value": enc_outputs,
          "mask": padding_mask
      })
  attention2 = tf.keras.layers.Dropout(rate=dropout)(attention2)
  attention2 = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(attention2 + attention1)
  # Position-wise feed-forward network.
  outputs = tf.keras.layers.Dense(units=units, activation="relu")(attention2)
  outputs = tf.keras.layers.Dense(units=d_model)(outputs)
  outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
  outputs = tf.keras.layers.LayerNormalization(
      epsilon=1e-6)(outputs + attention2)
  return tf.keras.Model(
      inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
      outputs=[outputs, attention_weights_block1, attention_weights_block2],
      name=name)
def _decoder(vocab_size,
             num_layers,
             units,
             d_model,
             num_heads,
             dropout,
             name="decoder"):
  """Decoder component.

  Embeds the target ids, adds positional encodings and stacks `num_layers`
  decoder layers, collecting each layer's attention weights.

  Returns:
    A Keras model mapping [inputs, encoder_outputs, look_ahead_mask,
    padding_mask] to (decoder outputs, dict of per-layer attention weights).
  """
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
  look_ahead_mask = tf.keras.Input(
      shape=(1, None, None), name="look_ahead_mask")
  padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
  embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
  # Scale embeddings by sqrt(d_model) before adding positional encodings.
  embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
  embeddings = PositionalEncoding(_MAX_SEQUENCE_LENGTH, d_model)(embeddings)
  outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
  attention_weights = {}
  for i in range(num_layers):
    outputs, attn_w_block1, attn_w_block2 = _decoder_layer(
        units=units,
        d_model=d_model,
        num_heads=num_heads,
        dropout=dropout,
        name="decoder_layer_{}".format(i),
    )(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])
    # These keys match the lookup in Seq2SeqTransformerModel._get_attention.
    attention_weights["decoder_layer{}_block1".format(i+1)] = attn_w_block1
    attention_weights["decoder_layer{}_block2".format(i+1)] = attn_w_block2
  return tf.keras.Model(
      inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
      outputs=[outputs, attention_weights],
      name=name)
def _transformer(input_vocab_size,
                 target_vocab_size,
                 num_layers,
                 units,
                 d_model,
                 num_heads,
                 dropout,
                 name="transformer"):
  """Transformer network.

  Wires the encoder and decoder together with the three masks and a final
  projection to target-vocabulary logits. The mask helpers
  (_create_padding_mask, _create_look_ahead_mask) are defined elsewhere in
  this module.

  Returns:
    A Keras model mapping [inputs, dec_inputs] to
    (target-vocabulary logits, decoder attention weights).
  """
  inputs = tf.keras.Input(shape=(None,), name="inputs")
  dec_inputs = tf.keras.Input(shape=(None,), name="dec_inputs")
  enc_padding_mask = tf.keras.layers.Lambda(
      _create_padding_mask, output_shape=(1, 1, None),
      name="enc_padding_mask")(inputs)
  # mask the future tokens for decoder inputs at the 1st attention block
  look_ahead_mask = tf.keras.layers.Lambda(
      _create_look_ahead_mask,
      output_shape=(1, None, None),
      name="look_ahead_mask")(dec_inputs)
  # mask the encoder outputs for the 2nd attention block
  dec_padding_mask = tf.keras.layers.Lambda(
      _create_padding_mask, output_shape=(1, 1, None),
      name="dec_padding_mask")(inputs)
  enc_outputs = _encoder(
      vocab_size=input_vocab_size,
      num_layers=num_layers,
      units=units,
      d_model=d_model,
      num_heads=num_heads,
      dropout=dropout,
  )(inputs=[inputs, enc_padding_mask])
  dec_outputs, attention_weights = _decoder(
      vocab_size=target_vocab_size,
      num_layers=num_layers,
      units=units,
      d_model=d_model,
      num_heads=num_heads,
      dropout=dropout,
  )(inputs=[dec_inputs, enc_outputs, look_ahead_mask, dec_padding_mask])
  outputs = tf.keras.layers.Dense(units=target_vocab_size, name="outputs")(
      dec_outputs)
  model = tf.keras.Model(inputs=[inputs, dec_inputs],
                         outputs=[outputs, attention_weights], name=name)
  # Print the architecture summary at construction time.
  model.summary()
  return model
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Learning rate schedule.

  Implements the warmup schedule from "Attention Is All You Need":
  lr = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5).
  """

  def __init__(self, d_model, warmup_steps=4000):
    """Args:
      d_model: model dimensionality used to scale the learning rate.
      warmup_steps: number of linear warmup steps.
    """
    super(CustomSchedule, self).__init__()
    self.d_model = d_model
    self.d_model = tf.cast(self.d_model, tf.float32)
    self.warmup_steps = warmup_steps

  def __call__(self, step):
    # Keras passes the optimizer iteration counter as an integer tensor, but
    # tf.math.rsqrt only accepts floating-point inputs -- cast first.
    step = tf.cast(step, tf.float32)
    arg1 = tf.math.rsqrt(step)
    arg2 = step * (self.warmup_steps ** -1.5)
    return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
# Input signature of _train_step: (inputs, targets), both int32 id matrices
# of shape (batch, seq_len).
_TRAIN_STEP_SIGNATURE = [
    tf.TensorSpec(shape=(None, None), dtype=tf.int32),
    tf.TensorSpec(shape=(None, None), dtype=tf.int32),
]
class Seq2SeqTransformerModel(object):
  """Full transformer model.

  Bundles the transformer network with its optimizer, loss, accuracy metric
  and checkpointing, and exposes train() and decode() entry points for
  sequence-to-sequence conversion.
  """

  def __init__(self,
               batch_size=64,
               num_heads=8,
               ff_dim=512,
               num_layers=4,
               model_dim=128,
               input_symbols=None,
               output_symbols=None,
               multihead_retrieval_strategy="AVERAGE",
               model_dir=".",
               name="model"):
    """Constructs the model.

    Args:
      batch_size: training batch size.
      num_heads: number of attention heads.
      ff_dim: hidden size of the position-wise feed-forward sublayers.
      num_layers: number of encoder and decoder layers.
      model_dim: model (embedding) dimensionality.
      input_symbols: input symbol table (must support len() and find()).
      output_symbols: output symbol table (must support len() and find()).
      multihead_retrieval_strategy: "AVERAGE" or "MAX"; how attention heads
        are combined in _get_attention().
      model_dir: directory under which the checkpoint subdirectory is made.
      name: model name; also the checkpoint subdirectory name.
    """
    self._batch_size = batch_size
    self._input_symbols = input_symbols
    self._input_vocab_size = len(input_symbols)
    self._output_symbols = output_symbols
    self._output_vocab_size = len(output_symbols)
    self._num_heads = num_heads
    self._num_layers = num_layers
    self._multihead_retrieval = multihead_retrieval_strategy
    self._transformer = _transformer(
        input_vocab_size=self._input_vocab_size,
        target_vocab_size=self._output_vocab_size,
        num_layers=num_layers,
        units=ff_dim,
        d_model=model_dim,
        num_heads=num_heads,
        dropout=0.1)
    # Warmup learning-rate schedule with Adam, as in the transformer paper.
    self._learning_rate = CustomSchedule(model_dim)
    self._optimizer = tf.keras.optimizers.Adam(
        self._learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
    # Per-token loss; masking and reduction happen in _loss_function.
    self._loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction="none")
    self._train_accuracy = tf.keras.metrics.Mean(name="train_accuracy")
    self._name = name
    self._checkpoint_dir = os.path.join(model_dir, self._name)
    self._checkpoint_prefix = os.path.join(self._checkpoint_dir, "ckpt")
    self._checkpoint = tf.train.Checkpoint(optimizer=self._optimizer,
                                           transformer=self._transformer)
    # Length of the current output tensor (for eval).
    # Both start at -1 and are expected to be set (e.g. via update_property)
    # before decode() is called.
    self._input_length = -1
    self._output_length = -1

  def _loss_function(self, y_true, y_pred):
    """Masked cross-entropy: padding positions (y_true == 0) are zeroed.

    NOTE(review): the mean is taken over all positions including the masked
    ones, so the loss scale depends on the amount of padding -- confirm
    intentional.
    """
    loss = self._loss_object(y_true, y_pred)
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    loss = tf.multiply(loss, mask)
    return tf.reduce_mean(loss)

  def _accuracy_function(self, real, pred):
    """Token accuracy over non-padding positions only."""
    accuracies = tf.equal(real, tf.argmax(pred, output_type=tf.int32, axis=2))
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    accuracies = tf.math.logical_and(mask, accuracies)
    accuracies = tf.cast(accuracies, dtype=tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)

  @tf.function(input_signature=_TRAIN_STEP_SIGNATURE)
  def _train_step(self, inputs, targets):
    """One step of the training."""
    # Teacher forcing: decoder sees the target sequence shifted right.
    target_inputs = targets[:, :-1]
    target_real = targets[:, 1:]
    with tf.GradientTape() as tape:
      predictions, _ = self._transformer(
          inputs=[inputs, target_inputs], training=True)
      loss = self._loss_function(target_real, predictions)
    gradients = tape.gradient(loss, self._transformer.trainable_variables)
    self._optimizer.apply_gradients(zip(gradients,
                                        self._transformer.trainable_variables))
    self._train_accuracy(self._accuracy_function(target_real, predictions))
    return loss

  def train(self, corpus, epochs=10, direction="pronounce", window=-1):
    """Runs training.

    Args:
      corpus: corpus object consumed by the `data` module.
      epochs: number of training epochs.
      direction: conversion direction, forwarded to data.batchify().
      window: context window size, forwarded to data.batchify().
    """
    # Create training log that also redirects to stdout.
    stdout_file = sys.stdout
    logfile = os.path.join(self._checkpoint_dir, "train.log")
    print("Training log: {}".format(logfile))
    sys.stdout = utils.DualLogger(logfile)
    # Dump some parameters.
    print(" Direction: {}".format(direction))
    print(" # Epochs: {}".format(epochs))
    print(" Batch size: {}".format(self._batch_size))
    print(" Window size: {}".format(window))
    print(" Max written len: {}".format(corpus.max_written_len))
    print(" Max pron len: {}".format(corpus.max_pronounce_len))
    print("Max written word len: {}".format(corpus.max_written_word_len))
    print(" Max pron word len: {}".format(corpus.max_pronounce_word_len))
    # Perform training.
    best_total_loss = 1000000  # Sentinel "infinity".
    nbatches = data.num_batches(corpus, self._batch_size, direction=direction,
                                window=window)
    for epoch in range(epochs):
      self._train_accuracy.reset_states()
      start = time.time()
      total_loss = 0
      steps = 0
      batches = data.batchify(corpus, self._batch_size, direction,
                              window=window)
      batch, (inputs, targ) = next(batches)
      # Iterate until the generator reports a negative batch index.
      while batch > -1:
        # Prepend the start-of-sequence symbol to every target row.
        bos = np.expand_dims(
            [self._output_symbols.find("<s>")] * np.shape(targ)[0], 1)
        targets = np.concatenate((bos, targ), axis=-1)
        batch_loss = self._train_step(inputs, targets)
        total_loss += batch_loss
        if batch % 10 == 0:
          print("Epoch {} Batch {} (/{}) Loss {:.4f}".format(
              epoch + 1,
              batch,
              nbatches,
              batch_loss))
        steps += 1
        batch, (inputs, targ) = next(batches)
      total_loss /= steps
      print("Epoch {} Loss {:.4f} Accuracy {:.4f}".format(
          epoch + 1, total_loss, self._train_accuracy.result()))
      # Checkpoint whenever this epoch improves on the best loss so far.
      if total_loss < best_total_loss:
        self._checkpoint.save(file_prefix=self._checkpoint_prefix)
        print("Saved checkpoint to {}".format(self._checkpoint_prefix))
        best_total_loss = total_loss
      print("Time taken for 1 epoch {} sec\n".format(
          time.time() - start))
    print("Best total loss: {:.4f}".format(best_total_loss))
    # Restore stdout.
    sys.stdout = stdout_file

  def _get_attention(self, attention_weights):
    """Prepare attention for consumption.

    Args:
      attention_weights: tensor with shape:
        (batch=1, num_heads, seq_len_q, seq_len_k).

    Returns:
      Accumulated attention.
    """
    # Use the decoder's final-layer attention over the encoder outputs.
    attention_heads = tf.squeeze(  # Remove batch dimension.
        attention_weights["decoder_layer%d_block2" % self._num_layers], 0)
    # Basic sanity checks.
    if len(attention_heads) != self._num_heads:
      raise ValueError("Invalid number of attention heads: {}".format(
          len(attention_heads)))
    if len(attention_heads.shape) != 3:
      raise ValueError("Invalid shape of attention weights: {}".format(
          len(attention_heads.shape)))
    if attention_heads.shape[1] > self._output_length:
      raise ValueError("Expected output length <= {} for dim 1, got {}".format(
          self._output_length, attention_heads.shape[1]))
    elif attention_heads.shape[1] < self._output_length:
      # Pad the query dimension up to the expected output length.
      output_len_diff = self._output_length - attention_heads.shape[1]
      attention_heads = tf.pad(attention_heads,
                               [[0, 0], [0, output_len_diff], [0, 0]])
    if attention_heads.shape[2] != self._input_length:
      raise ValueError("Expected input length {} for dim 2, got {}".format(
          self._input_length, attention_heads.shape[2]))
    # Combine.
    if self._multihead_retrieval == "AVERAGE":
      attention = tf.reduce_sum(attention_heads, axis=0) / self._num_heads
    elif self._multihead_retrieval == "MAX":
      attention = tf.reduce_max(attention_heads, axis=0)
    else:
      raise ValueError("Unknown retrieval strategy: {}".format(
          self._multihead_retrieval))
    return attention

  @tf.function(reduce_retracing=True)
  def _predict_step(self, encoder_input, output):
    """One prediction step."""
    return self._transformer(
        inputs=[encoder_input, output], training=False)

  def decode(self, inputs, joiner=""):
    """Decodes the inputs.

    Greedy autoregressive decoding for up to self._output_length steps, so
    _output_length must hold a positive value before calling.

    Args:
      inputs: sequence of input symbol ids.
      joiner: string used to join the decoded output symbols.

    Returns:
      Tuple of (decoded string, attention matrix as a numpy array).
    """
    encoder_input = tf.convert_to_tensor([inputs], dtype=tf.int32)
    # The first input to the transformer will be the start token.
    start = [self._output_symbols.find("<s>")]
    output = tf.convert_to_tensor(start, dtype=tf.int32)
    output = tf.expand_dims(output, 0)
    result = []
    for _ in range(self._output_length):
      # predictions.shape == (batch_size, seq_len, vocab_size)
      predictions, attention_weights = self._predict_step(
          encoder_input, output)
      # select the last word from the seq_len dimension
      predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
      predicted_id = tf.argmax(predictions, axis=-1, output_type=tf.int32)
      # concatentate the predicted_id to the output which is given to the
      # decoder as its input.
      output = tf.concat([output, predicted_id], axis=-1)
      outsym = self._output_symbols.find(int(predicted_id.numpy()))
      # Stop on either end-of-sequence marker; the marker is not emitted.
      if outsym == "</s>" or outsym == "</targ>":
        break
      else:
        result.append(outsym)
    # Accumulate attention over all the heads.
    attention = self._get_attention(attention_weights)
    return joiner.join(result), attention.numpy()

  def update_property(self, property_name, value):
    """Sets attribute `property_name` to `value` (e.g. _output_length)."""
    setattr(self, property_name, value)

  @property
  def checkpoint(self):
    # tf.train.Checkpoint wrapping the optimizer and the network.
    return self._checkpoint

  @property
  def checkpoint_dir(self):
    return self._checkpoint_dir

  @property
  def input_symbols(self):
    return self._input_symbols

  @property
  def output_symbols(self):
    return self._output_symbols

  @property
  def input_length(self):
    return self._input_length

  @property
  def eval_mode(self):
    # Suffix identifying the head-combination strategy, e.g. "_average".
    return "_%s" % self._multihead_retrieval.lower()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
a3ca40b86bede1189a60cae636d70d7118b96c07 | 3fdb357a807ea488e81c8eb87f84df95ed3f2c43 | /futionapi_intro_7.1.1.py | 0e4b1a4d88c83d7ddb3ff306e954a6e021c7512d | [] | no_license | GdRaymond/DLWP | 18e975055d5c7fc7680f7b0d43eab063244ad518 | 2f3ac5771c5d10484fe2d337fecf8d323122b832 | refs/heads/master | 2020-04-16T12:56:23.069487 | 2019-02-22T06:43:46 | 2019-02-22T06:43:46 | 165,602,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from keras import Input,layers
from keras.models import Model,Sequential

# --- Functional API: build the graph by calling layers on tensors. ---
input_tensor=Input(shape=(64,))
x=layers.Dense(32,activation='relu')(input_tensor)
x=layers.Dense(32,activation='relu')(x)
output_tensor=layers.Dense(10,activation='softmax')(x)
# A Model is created from its input and output tensors.
model=Model(input_tensor,output_tensor)
print('model of manually assemble is:')
model.summary()

# --- Sequential API: the equivalent model built layer by layer. ---
seq_model=Sequential()
seq_model.add(layers.Dense(32,activation='relu',input_shape=(64,)))
seq_model.add(layers.Dense(32,activation='relu'))
seq_model.add(layers.Dense(10,activation='softmax'))
print('Sequential model is ')
seq_model.summary()

# Train the functional model on random data to demonstrate compile/fit.
model.compile(optimizer='rmsprop',loss='categorical_crossentropy')
import numpy as np
x_train=np.random.random((1000,64))
print('x_train top 2',x_train[:2])
y_train=np.random.random((1000,10))
print('y_train top 2',y_train[:2])
model.fit(x_train,y_train,epochs=10,batch_size=128)
score=model.evaluate(x_train,y_train)
print('score=',score)
| [
"gd.raymond@gmail.com"
] | gd.raymond@gmail.com |
class Pokemons():
    """Registry of the pokemons available in the game."""

    # Maps a pokemon's numeric id to its display name.
    PokeList = {
        1: "Magmar", 2: "Bulbasaur", 3: "Squirtle", 4: "Pidgeotto",
        5: "Zapdos", 6: "Sandshrew", 7: "Mew", 8: "Moltres",
        9: "Psyduck", 10: "Butterfree", 11: "Exeggutor", 12: "Electabuzz",
        13: "Cubone", 14: "Hunter", 15: "Charmendar", 16: "Gloom",
        17: "Krabby", 18: "Spearow", 19: "Pikachu", 20: "Golem",
        21: "Abra", 22: "Vulpix", 23: "Paras", 24: "Staryu",
        25: "Golbat", 26: "Electrode", 27: "Diglet", 28: "Drowzee",
    }

    # Scratch list kept for existing callers that populate it externally.
    liste = []

    def showPokemons(self):
        """Print the raw id -> name dictionary."""
        print(self.PokeList)

    def pokemon_listem(self):
        """Print a header followed by one 'id name' line per pokemon."""
        print("Pokemon Listesi : ")
        for number, title in self.PokeList.items():
            print(number, title)
"noreply@github.com"
] | Slmslg.noreply@github.com |
5358bc181b74ca3442b7da55c0da316a0c8b3765 | ff831ee8b1ef51abbf23468b511dae4a7d369c86 | /Projects/maps/tests/02.py | eff8bc6d0f0740a5634c427ebdf3eee4c3c7f91e | [] | no_license | mazshahbaz/CS61A | 4ca03eadbe3f5360d75948cba85816d91a63b4fb | df04e9a30c3ee44c4ab5962b9c0324b520068db2 | refs/heads/master | 2020-04-28T18:02:02.955806 | 2019-03-26T23:42:57 | 2019-03-26T23:42:57 | 175,465,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | test = {
'name': 'Problem 2',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> soda_reviews = [make_review('Soda', 4.5),
... make_review('Soda', 4)]
>>> soda = make_restaurant('Soda', [127.0, 0.1],
... ['Restaurants', 'Breakfast & Brunch'],
... 1, soda_reviews)
>>> restaurant_name(soda)
72915c31b7b5a29ae54d3c5fd964368f
# locked
>>> restaurant_location(soda)
45013648009573e47bea9c4f0f933977
# locked
>>> restaurant_categories(soda)
b7a0d141faa2b17caf8ff9b1a32b45cf
# locked
>>> restaurant_price(soda)
0371813f881bf637f2dca7a167d20c45
# locked
>>> restaurant_ratings(soda)
a131ee26d99ec1bd2a9cfdf6ef591a32
# locked
""",
'hidden': False,
'locked': True
}
],
'scored': True,
'setup': r"""
>>> from abstractions import *
>>> import abstractions
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> test.swap_implementations(abstractions, rest=False)
>>> make_review = abstractions.make_review
>>> soda_reviews = [make_review('Soda', 4.5),
... make_review('Soda', 4)]
>>> soda = make_restaurant('Soda', [127.0, 0.1],
... ['Restaurants', 'Breakfast & Brunch'],
... 1, soda_reviews)
>>> restaurant_name(soda)
'Soda'
>>> restaurant_location(soda)
[127.0, 0.1]
>>> restaurant_categories(soda)
['Restaurants', 'Breakfast & Brunch']
>>> restaurant_price(soda)
1
>>> restaurant_ratings(soda)
[4.5, 4]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from abstractions import *
>>> import abstractions
>>> import tests.test_functions as test
""",
'teardown': r"""
>>> test.restore_implementations(abstractions)
""",
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> cory_reviews = [make_review('Cory', 2),
... make_review('Cory', 4.5),
... make_review('Cory', 1)]
>>> cory = make_restaurant('Cory', [128.0, 0.1],
... ['Cafe', 'Boba', 'Tea'],
... 2, cory_reviews)
>>> restaurant_name(cory)
'Cory'
>>> restaurant_location(cory)
[128.0, 0.1]
>>> restaurant_categories(cory)
['Cafe', 'Boba', 'Tea']
>>> restaurant_price(cory)
2
>>> restaurant_ratings(cory)
[2, 4.5, 1]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from abstractions import *
>>> import abstractions
""",
'teardown': '',
'type': 'doctest'
}
]
}
| [
"mazen-chaaraoui@hotmail.com"
] | mazen-chaaraoui@hotmail.com |
99b1f62912fb80c7e719697e2f9075d4fd505216 | 15b12d69ac3123d1562986970ce01d7a47d171de | /typings/nltk/translate/__init__.pyi | 79712704c982cb5c2d56cec50d1fde99fb9fb8ad | [
"Apache-2.0"
] | permissive | simplymanas/python-learning | 9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37 | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | refs/heads/master | 2021-07-11T06:40:24.803589 | 2021-06-20T12:06:02 | 2021-06-20T12:06:02 | 241,769,614 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 768 | pyi | """
This type stub file was generated by pyright.
"""
from nltk.translate.api import AlignedSent, Alignment, PhraseTable
from nltk.translate.ibm_model import IBMModel
from nltk.translate.ibm1 import IBMModel1
from nltk.translate.ibm2 import IBMModel2
from nltk.translate.ibm3 import IBMModel3
from nltk.translate.ibm4 import IBMModel4
from nltk.translate.ibm5 import IBMModel5
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.translate.ribes_score import sentence_ribes as ribes
from nltk.translate.meteor_score import meteor_score as meteor
from nltk.translate.metrics import alignment_error_rate
from nltk.translate.stack_decoder import StackDecoder
"""
Experimental features for machine translation.
These interfaces are prone to change.
"""
| [
"manas.dash@tesco.com"
] | manas.dash@tesco.com |
65841d7b34c5d983cb0b2ce76425250eba816af0 | 7711d649865cbb05d86b5985e6a1ee99a38df3b3 | /thirdparty-cpp/boost_1_62_0/tools/build/src/tools/stage.py | e6a7cb2effb9072e5575da7225f06e1b12edd191 | [
"BSL-1.0",
"Apache-2.0"
] | permissive | nxplatform/nx-mobile | 0ae4ba8b5b20cf06a9396d685a35b4651c0eab02 | 0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5 | refs/heads/master | 2021-01-17T07:35:33.860681 | 2017-04-25T10:12:30 | 2017-04-25T10:12:30 | 83,760,335 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,664 | py | # Status: ported.
# Base revision 64444.
#
# Copyright 2003 Dave Abrahams
# Copyright 2005, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006, 2010 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This module defines the 'install' rule, used to copy a set of targets to a
# single location.
import b2.build.feature as feature
import b2.build.targets as targets
import b2.build.property as property
import b2.build.property_set as property_set
import b2.build.generators as generators
import b2.build.virtual_target as virtual_target
from b2.manager import get_manager
from b2.util.sequence import unique
from b2.util import bjam_signature
import b2.build.type
import os.path
import re
import types
# Features controlling the behaviour of the 'install'/'stage' rules below.
feature.feature('install-dependencies', ['off', 'on'], ['incidental'])
feature.feature('install-type', [], ['free', 'incidental'])
feature.feature('install-source-root', [], ['free', 'path'])
feature.feature('so-version', [], ['free', 'incidental'])

# If 'on', version symlinks for shared libraries will not be created. Affects
# Unix builds only.
feature.feature('install-no-version-symlinks', ['on'], ['optional', 'incidental'])
class InstallTargetClass(targets.BasicTarget):
    """Main target class for the 'install' (a.k.a. 'stage') rule.

    Copies its sources to the location given by the <location> property,
    optionally relinking executables/shared libraries and optionally
    following dependencies.
    """

    def update_location(self, ps):
        """If <location> is not set, sets it based on the project data."""
        loc = ps.get('location')
        if not loc:
            # Default to a subdirectory of the project named after the target.
            loc = os.path.join(self.project().get('location'), self.name())
            ps = ps.add_raw(["<location>" + loc])
        return ps

    def adjust_properties(self, target, build_ps):
        """Returns the property set used to stage `target`.

        Starts from the properties of the action that produced `target` and
        merges in the install-relevant properties from the current build
        request (`build_ps`).
        """
        a = target.action()
        properties = []
        if a:
            ps = a.properties()
            properties = ps.all()

            # Unless <hardcode-dll-paths>true is in properties, which can happen
            # only if the user has explicitly requested it, nuke all <dll-path>
            # properties.
            if build_ps.get('hardcode-dll-paths') != ['true']:
                properties = [p for p in properties if p.feature().name() != 'dll-path']

            # If any <dll-path> properties were specified for installing, add
            # them.
            properties.extend(build_ps.get_properties('dll-path'))

            # Also copy <linkflags> feature from current build set, to be used
            # for relinking.
            properties.extend(build_ps.get_properties('linkflags'))

        # Remove the <tag> feature on original targets.
        # And <location>. If stage target has another stage target in
        # sources, then we shall get virtual targets with the <location>
        # property set.
        properties = [p for p in properties
                      if not p.feature().name() in ['tag', 'location']]

        properties.extend(build_ps.get_properties('dependency'))
        properties.extend(build_ps.get_properties('location'))
        properties.extend(build_ps.get_properties('install-no-version-symlinks'))

        d = build_ps.get_properties('install-source-root')

        # Make the path absolute: we shall use it to compute relative paths and
        # making the path absolute will help.
        if d:
            p = d[0]
            properties.append(property.Property(p.feature(), os.path.abspath(p.value())))

        return property_set.create(properties)

    def construct(self, name, source_targets, ps):
        """Generates staged versions of all sources; returns (ps, targets)."""
        source_targets = self.targets_to_stage(source_targets, ps)
        ps = self.update_location(ps)

        # The <name> property renames the (single) installed file.
        ename = ps.get('name')
        if ename:
            ename = ename[0]
        if ename and len(source_targets) > 1:
            get_manager().errors()("When <name> property is used in 'install', only one source is allowed")

        result = []

        for i in source_targets:
            staged_targets = []
            new_ps = self.adjust_properties(i, ps)

            # See if something special should be done when staging this type. It
            # is indicated by the presence of a special "INSTALLED_" type.
            t = i.type()
            if t and b2.build.type.registered("INSTALLED_" + t):
                if ename:
                    get_manager().errors()("In 'install': <name> property specified with target that requires relinking.")
                else:
                    (r, targets) = generators.construct(self.project(), name, "INSTALLED_" + t,
                                                        new_ps, [i])
                    assert isinstance(r, property_set.PropertySet)
                    staged_targets.extend(targets)
            else:
                staged_targets.append(copy_file(self.project(), ename, i, new_ps))

            if not staged_targets:
                get_manager().errors()("Unable to generate staged version of " + i)

            result.extend(get_manager().virtual_targets().register(t) for t in staged_targets)

        return (property_set.empty(), result)

    def targets_to_stage(self, source_targets, ps):
        """Given the list of source targets explicitly passed to 'stage', returns the
        list of targets which must be staged."""
        result = []

        # Traverse the dependencies, if needed.
        if ps.get('install-dependencies') == ['on']:
            source_targets = self.collect_targets(source_targets)

        # Filter the target types, if needed.
        included_types = ps.get('install-type')
        for r in source_targets:
            ty = r.type()
            if ty:
                # Do not stage searched libs.
                if ty != "SEARCHED_LIB":
                    if included_types:
                        if self.include_type(ty, included_types):
                            result.append(r)
                    else:
                        result.append(r)
            elif not included_types:
                # Don't install typeless target if there is an explicit list of
                # allowed types.
                result.append(r)

        return result

    # CONSIDER: figure out why we can not use virtual-target.traverse here.
    #
    def collect_targets(self, targets):
        """Returns `targets` plus every target they (transitively) reference."""
        s = [t.creating_subvariant() for t in targets]
        s = unique(filter(lambda l: l != None,s))

        result = set(targets)
        for i in s:
            i.all_referenced_targets(result)

        # Unwrap 'use' properties back to their target values; drop the rest.
        result2 = []
        for r in result:
            if isinstance(r, property.Property):
                if r.feature().name() != 'use':
                    result2.append(r.value())
            else:
                result2.append(r)
        result2 = unique(result2)
        return result2

    # Returns true iff 'type' is subtype of some element of 'types-to-include'.
    #
    def include_type(self, type, types_to_include):
        return any(b2.build.type.is_subtype(type, ti) for ti in types_to_include)
# Creates a copy of target 'source'. The 'properties' object should have a
# <location> property which specifies where the target must be placed.
#
def copy_file(project, name, source, ps):
    """Creates a virtual target that copies `source`.

    Args:
        project: project the new target belongs to.
        name: name for the copy; defaults to the source's own name.
        source: virtual target to copy.
        ps: property set; must contain <location> and may contain
            <install-source-root> to preserve the directory structure.
    """
    if not name:
        name = source.name()

    relative = ""

    new_a = virtual_target.NonScanningAction([source], "common.copy", ps)
    source_root = ps.get('install-source-root')
    if source_root:
        source_root = source_root[0]

        # Get the real path of the target. We probably need to strip relative
        # path from the target name at construction.
        path = os.path.join(source.path(), os.path.dirname(name))

        # Make the path absolute. Otherwise, it would be hard to compute the
        # relative path. The 'source-root' is already absolute, see the
        # 'adjust-properties' method above.
        path = os.path.abspath(path)

        relative = os.path.relpath(path, source_root)

    name = os.path.join(relative, os.path.basename(name))
    return virtual_target.FileTarget(name, source.type(), project, new_a, exact=True)
def symlink(name, project, source, ps):
    """Creates a virtual target that symlinks `source` under `name`."""
    link_action = virtual_target.Action([source], "symlink.ln", ps)
    return virtual_target.FileTarget(
        name, source.type(), project, link_action, exact=True)
def relink_file(project, source, ps):
    """Relinks the target produced by `source`'s action under properties `ps`.

    Used when installing changes the dll-path, so the binary must be
    re-linked with the new rpath rather than just copied.
    """
    action = source[0].action()
    cloned_action = virtual_target.clone_action(action, project, "", ps)
    targets = cloned_action.targets()
    # We relink only on Unix, where exe or shared lib is always a single file.
    assert len(targets) == 1
    return targets[0]
# Declare installed version of the EXE type. Generator for this type will cause
# relinking to the new location.
b2.build.type.register('INSTALLED_EXE', [], 'EXE')
class InstalledExeGenerator(generators.Generator):
    """Generator that installs an EXE, relinking it when its rpath changes."""

    def __init__(self):
        generators.Generator.__init__(
            self, "install-exe", False, ['EXE'], ['INSTALLED_EXE'])

    def run(self, project, name, ps, source):
        """Returns the staged executable as a single-element list."""
        on_windows = (ps.get('os') in ['NT', 'CYGWIN']
                      or ps.get('target-os') in ['windows', 'cygwin'])
        need_relink = False
        if not on_windows:
            # Relink only when installing changes the dll-path properties;
            # otherwise a plain copy is enough. Windows never relinks.
            action = source[0].action()
            need_relink = action and ps.get('dll-path') != action.properties().get('dll-path')
        if need_relink:
            return [relink_file(project, source, ps)]
        return [copy_file(project, None, source[0], ps)]
generators.register(InstalledExeGenerator())
# Installing a shared link on Unix might cause a creation of versioned symbolic
# links.
b2.build.type.register('INSTALLED_SHARED_LIB', [], 'SHARED_LIB')
class InstalledSharedLibGenerator(generators.Generator):
    """Installs a shared library, creating version symlinks on Unix."""

    def __init__(self):
        generators.Generator.__init__(self, 'install-shared-lib', False, ['SHARED_LIB'], ['INSTALLED_SHARED_LIB'])

    def run(self, project, name, ps, source):
        """Returns the installed library plus any version symlinks."""
        source = source[0]
        if ps.get('os') in ['NT', 'CYGWIN'] or ps.get('target-os') in ['windows', 'cygwin']:
            # On Windows a plain copy suffices: no rpath/soname machinery.
            copied = copy_file(project, None, source, ps)
            return [get_manager().virtual_targets().register(copied)]
        else:
            a = source.action()
            if not a:
                # Non-derived file, just copy.
                copied = copy_file(project, None, source, ps)
            else:
                # Relink only when installing changes the dll-path properties.
                need_relink = ps.get('dll-path') != source.action().properties().get('dll-path')
                if need_relink:
                    # Rpath changed, need to relink.
                    copied = relink_file(project, source, ps)
                else:
                    copied = copy_file(project, None, source, ps)

            result = [get_manager().virtual_targets().register(copied)]
            # If the name is in the form NNN.XXX.YYY.ZZZ, where all 'X', 'Y' and
            # 'Z' are numbers, we need to create NNN.XXX and NNN.XXX.YYY
            # symbolic links.
            m = re.match("(.*)\\.([0123456789]+)\\.([0123456789]+)\\.([0123456789]+)$",
                         copied.name());
            if m:
                # Symlink without version at all is used to make
                # -lsome_library work.
                result.append(symlink(m.group(1), project, copied, ps))

                # Symlinks of some libfoo.N and libfoo.N.M are used so that
                # library can found at runtime, if libfoo.N.M.X has soname of
                # libfoo.N. That happens when the library makes some binary
                # compatibility guarantees. If not, it is possible to skip those
                # symlinks.
                if ps.get('install-no-version-symlinks') != ['on']:
                    result.append(symlink(m.group(1) + '.' + m.group(2), project, copied, ps))
                    result.append(symlink(m.group(1) + '.' + m.group(2) + '.' + m.group(3),
                                          project, copied, ps))

            return result
generators.register(InstalledSharedLibGenerator())
# Main target rule for 'install'.
#
@bjam_signature((["name"], ["sources", "*"], ["requirements", "*"],
                 ["default_build", "*"], ["usage_requirements", "*"]))
def install(name, sources, requirements=[], default_build=[], usage_requirements=[]):
    """Declares an 'install' main target.

    Args:
        name: target name.
        sources: targets to install.
        requirements: build requirements; <hardcode-dll-paths>false is added
            unless the user explicitly requested hardcoding.
        default_build: default build properties.
        usage_requirements: usage requirements propagated to dependents.
    """
    # Import before first use. Previously this local import appeared *after*
    # the get_manager() call in the <tag> check below; because a local import
    # makes the name function-local everywhere in the function, that call
    # raised UnboundLocalError instead of reporting the error.
    from b2.manager import get_manager

    requirements = requirements[:]
    # Unless the user has explicitly asked us to hardcode dll paths, add
    # <hardcode-dll-paths>false in requirements, to override default value.
    if not '<hardcode-dll-paths>true' in requirements:
        requirements.append('<hardcode-dll-paths>false')

    if any(r.startswith('<tag>') for r in requirements):
        get_manager().errors()("The <tag> property is not allowed for the 'install' rule")

    t = get_manager().targets()

    project = get_manager().projects().current()

    return t.main_target_alternative(
        InstallTargetClass(name, project,
                           t.main_target_sources(sources, name),
                           t.main_target_requirements(requirements, project),
                           t.main_target_default_build(default_build, project),
                           t.main_target_usage_requirements(usage_requirements, project)))
get_manager().projects().add_rule("install", install)
get_manager().projects().add_rule("stage", install)
| [
"narongrit@3dsinteractive.com"
] | narongrit@3dsinteractive.com |
2c997c25a12ff9e725bba502df98445432fea978 | cd8748198985f46fafb7b732d822e4c12d9ffabf | /bookstore/cart/views.py | 70adafafd1c7a131844f7f8f84bc2b9ef210dab6 | [] | no_license | sundaming/bookstore | 68c382b7c0de81a40e9870c00f9a60f939d9144a | 640bcdc4584fe351b93fd3730029f33f9de9c514 | refs/heads/master | 2021-09-06T00:46:09.206845 | 2018-02-01T02:10:12 | 2018-02-01T02:10:12 | 119,686,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,687 | py | from django.shortcuts import render
from django.http import JsonResponse
from books.models import Books
from utils.decorators import login_required
from django_redis import get_redis_connection
# Create your views here.
# 前端发过来的数据:商品id 商品数目 books_id books_count
# 涉及到数据的修改,使用post方式
def cart_add(request):
'''向购物车中添加数据'''
# 判断用户是否登录
if not request.session.has_key('islogin'):
return JsonResponse({'res':0, 'errmsg':'请先登录'})
# 接收数据
books_id = request.POST.get('books_id')
books_count = request.POST.get('books_count')
# 进行数据校验
if not all([books_id, books_count]):
return JsonResponse({'res':1 , 'errmsg':'数据不完整'})
books = Books.objects.get_books_by_id(books_id=books_id)
if books is None:
# 商品不存在
return JsonResponse({'res':2, 'errmsg':'商品不存在'})
try:
count = int(books_count)
except Exception as e:
# 商品数目不合法
return JsonResponse({'res':3, 'errmsg':'商品数量必须为数字'})
# 添加商品到购物车
# 每个用户的购物车记录用一条hash数据保存,格式:cart_用户id: 商品id 商品数量
conn = get_redis_connection('default')
cart_key = 'cart_%d' % request.session.get('passport_id')
res = conn.hget(cart_key, books_id)
if res is None:
# 如果用户的购车中没有添加过该商品,则添加数据
res = count
else:
# 如果用户的购车中已经添加过该商品,则累计商品数目
res = int(res) + count
# 判断商品的库存
if res > books.stock:
# 库存不足
return JsonResponse({'res': 4, 'errmsg': '商品库存不足'})
else:
conn.hset(cart_key, books_id, res)
# 返回结果
return JsonResponse({'res': 5})
def cart_count(request):
'''获取用户购物车中商品的数目'''
# 判断用户是否登录
if not request.session.has_key('islogin'):
return JsonResponse({'res': 0})
# 计算用户购物车商品的数量
conn = get_redis_connection('default')
cart_key = 'cart_%d' % request.session.get('passport_id')
# res = conn.hlen(cart_key) 显示商品的条目数
res = 0
res_list = conn.hvals(cart_key)
for i in res_list:
res += int(i)
# 返回结果
return JsonResponse({'res': res})
# http://127.0.0.1:8000/cart/
@login_required
def cart_show(request):
'''显示用户购物车页面'''
passport_id = request.session.get('passport_id')
# 获取用户购物车的记录
conn = get_redis_connection('default')
cart_key = 'cart_%d' % passport_id
res_dict = conn.hgetall(cart_key)
books_li = []
# 保存所有商品的总数
total_count = 0
# 保存所有商品的总价格
total_price = 0
# 遍历res_dict获取商品的数据
for id, count in res_dict.items():
# 根据id获取商品的信息
books = Books.objects.get_books_by_id(books_id=id)
# 保存商品的数目
books.count = count
# 保存商品的小计
books.amount = int(count) * books.price
# books_li.append((books, count))
books_li.append(books)
total_count += int(count)
total_price += int(count) * books.price
# 定义模板上下文
context = {
'books_li': books_li,
'total_count': total_count,
'total_price': total_price,
}
return render(request, 'cart/cart.html', context)
# 前端传过来的参数:商品id books_id
# post
# /cart/del/
def cart_del(request):
'''删除用户购物车中商品的信息'''
# 判断用户是否登录
if not request.session.has_key('islogin'):
return JsonResponse({'res': 0, 'errmsg': '请先登录'})
# 接收数据
books_id = request.POST.get('books_id')
# 校验商品是否存放
if not all([books_id]):
return JsonResponse({'res': 1, 'errmsg': '数据不完整'})
books = Books.objects.get_books_by_id(books_id=books_id)
if books is None:
return JsonResponse({'res': 2, 'errmsg': '商品不存存'})
# 删除购物车商品信息
conn = get_redis_connection('default')
cart_key = 'cart_%d' % request.session.get('passport_id')
conn.hdel(cart_key, books_id)
# 返回信息
return JsonResponse({'res': 3})
# 前端传过来的参数:商品id books_id 更新数目 books_count
# post
# /cart/update/
def cart_update(request):
'''更新购物车商品数目'''
# 判断用户是否登录
if not request.session.has_key('islogin'):
return JsonResponse({'res': 0, 'errmsg':'请先登录'})
# 接收数据
books_id = request.POST.get('books_id')
books_count = request.POST.get('books_count')
# 数据的校验
if not all([books_id, books_count]):
return JsonResponse({'res': 1, 'errmsg': '数据不完整'})
books = Books.objects.get_books_by_id(books_id=books_id)
if books is None:
return JsonResponse({'res': 2, 'errmsg': '商品不存在'})
try:
books_count = int(books_count)
except Exception as e:
return JsonResponse({'res': 3, 'errmsg': '商品数目必须为数字'})
# 更新操作
conn = get_redis_connection('default')
cart_key = 'cart_%d' % request.session.get('passport_id')
# 判断商品库存
if books_count > books.stock:
return JsonResponse({'res': 4, 'errmsg': '商品库存不足'})
conn.hset(cart_key, books_id, books_count)
return JsonResponse({'res': 5})
| [
"35587284+sundaming@users.noreply.github.com"
] | 35587284+sundaming@users.noreply.github.com |
0ed4bd0af7f19138ff52fe3e4cbfb37773d4c346 | 2e456c4832d7cc88ebea7a8232d5963a040aea14 | /accounts/views.py | 14827b9175ace2b05348e1aaeec55e00223bccd0 | [
"MIT"
] | permissive | ksudarshan26/Product-Review-and-Rating-using-Django-Frameworks | fa7b20547073c1572a2e88874ae8efd00fd6dff4 | 88793f38138366564b6636301d5c6d1a03a52e7b | refs/heads/main | 2023-03-04T18:55:13.528818 | 2021-02-16T07:51:22 | 2021-02-16T07:51:22 | 337,077,965 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | from django.shortcuts import render, redirect
from .forms import *
from django.contrib.auth import authenticate, login, logout
# Create your views here.
def register(request):
if request.user.is_authenticated:
return redirect("main:home")
else:
if request.method == "POST":
form = RegistrationForm(request.POST or None)
if form.is_valid():
user = form.save()
raw_password =form.cleaned_data.get('password1')
user = authenticate(username=user.username, password=raw_password)
login(request,user)
return redirect("main:home")
else:
form = RegistrationForm()
return render(request,"accounts/register.html",{"form":form})
def login_user(request):
if request.user.is_authenticated:
return redirect("main:home")
else:
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username,password=password)
if user is not None:
if user.is_active:
login(request,user)
return redirect("main:home")
else:
return render(request,'accounts/login.html',{"error":"Your account has been disabled"})
else:
return render(request,'accounts/login.html',{"error":"Invalid username or password, try again!"})
return render(request,'accounts/login.html')
def logout_user(request):
if request.user.is_authenticated:
logout(request)
return redirect("accounts:login")
else:
return redirect("accounts:login")
| [
"noreply@github.com"
] | ksudarshan26.noreply@github.com |
1ea3c90b11bae470d1bcb576bac7a4804df1f284 | 63f27bce26d956e686f3c18701b92ff8c0921cb6 | /binarysearch.py | 5efd033b4b91635f9f35fc83f3b9eb2bbeb557c1 | [] | no_license | nqureshi/python-data-structures | c03edd13dd20e4e6fb53aa9df85caefcae18a118 | f568b1641ee814de6a537cd67a4ea9b2c5f1f9eb | refs/heads/main | 2023-01-30T04:01:08.222642 | 2020-12-13T18:30:27 | 2020-12-13T18:30:27 | 321,132,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,775 | py | import time
import random
def binarySearch(alist, item):
first = 0
last = len(alist)-1
found = False
while first<=last and not found:
midpoint = (first + last)//2
if alist[midpoint] == item:
found = True
else:
if item < alist[midpoint]:
last = midpoint-1
else:
first = midpoint+1
return found
# Recursive
def binarySearch2(alist, item):
if len(alist) == 0:
return False
else:
midpoint = len(alist)//2
if alist[midpoint]==item:
return True
else:
if item<alist[midpoint]:
return binarySearch(alist[:midpoint],item)
else:
return binarySearch(alist[midpoint+1:],item)
"""
We have two implementations of binary search, a recursive one and an iterative
one. Idea is to benchmark these against an ordered list, so I picked a
Fibonacci sequence as an example.
"""
# generate fibonnacci
fib = [1]
i=1
while i<10000000000:
fib.append(i)
i = i + fib[-2]
s = len(fib)
iterativeTimes = []
recursiveTimes = []
for i in range(1000):
k = fib[random.randint(0,s-1)]
start1=time.time()
binarySearch(fib, k)
end1=time.time()
t = (end1-start1)*1000
iterativeTimes.append(t)
for i in range(1000):
k = fib[random.randint(0,s-1)]
start2=time.time()
binarySearch2(fib, k)
end2=time.time()
t = (end2-start2)*1000
recursiveTimes.append(t)
def Average(lst):
return sum(lst) / len(lst)
print("Average for iteratives: %f" % Average(iterativeTimes))
print("Average for recursives: %f" % Average(recursiveTimes))
# Recursive is slower than iterative by ~2x. I think this is because the
# recursive implementation is using Python's "slice" operator, which is O(k). To
# prove this, let's try a third implementation of binary search that is
# recursive but doesn't use slice.
def binarySearch3(alist, item, *args):
if args:
if len(alist[args[0]:args[1]]) == 0:
return False
if len(alist) == 0:
return False
else:
if args:
start = args[0]
end = args[1]
midpoint = (start+end)//2
else:
start = 0
end = len(alist)-1
midpoint = (start+end)//2
if alist[midpoint]==item:
return True
else:
if item<alist[midpoint]:
return binarySearch3(alist,item,start,midpoint-1)
else:
return binarySearch3(alist,item,midpoint+1,end)
recursiveTimes2 = []
for i in range(1000):
k = fib[random.randint(0,s-1)]
start3=time.time()
binarySearch3(fib, k)
end3=time.time()
t = (end3-start3)*1000
recursiveTimes2.append(t)
print("Average for recursives 2: %f" % Average(recursiveTimes2))
# This implementation of the recursive binary search beats the other two!
| [
"nabeelqureshi67@gmail.com"
] | nabeelqureshi67@gmail.com |
ee75934b54a7c419ea4df630c94ae680bfee4f92 | ba0e07b34def26c37ee22b9dac1714867f001fa5 | /azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/error_detail.py | 08890398d70c2163092510b29f7f60ffe5e56300 | [
"MIT"
] | permissive | CharaD7/azure-sdk-for-python | b11a08ac7d24a22a808a18203072b4c7bd264dfa | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | refs/heads/master | 2023-05-12T12:34:26.172873 | 2016-10-26T21:35:20 | 2016-10-26T21:35:20 | 72,448,760 | 1 | 0 | MIT | 2023-05-04T17:15:01 | 2016-10-31T15:14:09 | Python | UTF-8 | Python | false | false | 1,024 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ErrorDetail(Model):
"""ErrorDetail.
:param code:
:type code: str
:param message:
:type message: str
:param target:
:type target: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, code=None, message=None, target=None):
self.code = code
self.message = message
self.target = target
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
c2991b2bf462c17dd248db335305e4195ccdc8e3 | d40ee63566975dd11ae6ba6ea1c2889680c47c90 | /workspace/ros/aerostack_catkin_ws/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_FileRemoveDir.py | 38c5a47514ff4a963c7222853176f534895d0c59 | [] | no_license | la16k/TFG_Laura | 45e9df0f60ef94572260f14346c47969ab2c73b3 | f5e0661aa7ccd200ba056a40beb9e687f5f0d06e | refs/heads/master | 2022-12-27T02:49:05.549777 | 2020-10-05T10:48:57 | 2020-10-05T10:48:57 | 301,374,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,221 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/FileRemoveDirRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FileRemoveDirRequest(genpy.Message):
_md5sum = "401d5cf5f836aaa9ebdc0897f75da874"
_type = "mavros_msgs/FileRemoveDirRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# FTP::RemoveDir
#
# :success: indicates success end of request
# :r_errno: remote errno if applicapable
string dir_path
"""
__slots__ = ['dir_path']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
dir_path
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FileRemoveDirRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.dir_path is None:
self.dir_path = ''
else:
self.dir_path = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.dir_path
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.dir_path = str[start:end].decode('utf-8')
else:
self.dir_path = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.dir_path
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.dir_path = str[start:end].decode('utf-8')
else:
self.dir_path = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/FileRemoveDirResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FileRemoveDirResponse(genpy.Message):
_md5sum = "85394f2e941a8937ac567a617f06157f"
_type = "mavros_msgs/FileRemoveDirResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool success
int32 r_errno
"""
__slots__ = ['success','r_errno']
_slot_types = ['bool','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,r_errno
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FileRemoveDirResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.r_errno is None:
self.r_errno = 0
else:
self.success = False
self.r_errno = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.success, _x.r_errno))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.success, _x.r_errno,) = _get_struct_Bi().unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.success, _x.r_errno))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.success, _x.r_errno,) = _get_struct_Bi().unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Bi = None
def _get_struct_Bi():
global _struct_Bi
if _struct_Bi is None:
_struct_Bi = struct.Struct("<Bi")
return _struct_Bi
class FileRemoveDir(object):
_type = 'mavros_msgs/FileRemoveDir'
_md5sum = 'f140c5ef05b00c3cfc30d5a2061b4d63'
_request_class = FileRemoveDirRequest
_response_class = FileRemoveDirResponse
| [
"kunito.laura.ac@gmail.com"
] | kunito.laura.ac@gmail.com |
33161c34e78739d53ded91e468cf82f429dfef1d | b170d37a81c09fd0dbb0edf3cff6296084b32af9 | /cexbot/command_utils.py | 7d0382b5e4f8d343853e41df961287aa984532fe | [
"MIT"
] | permissive | metaperl/cexbot | 8e17a7d5063a82675e002d926324e3c4a6eb6745 | 0dd0b60415afd9c1feb959186d32b1a683887975 | refs/heads/master | 2020-12-29T01:11:50.768031 | 2013-12-10T17:13:18 | 2013-12-10T17:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | #!/usr/bin/env python
""" cexbot - command_utils.py
Default command line utitlities to run cexbot
"""
import os, sys, logging
import cexbot, config, parser, db, cexapi, updater, timer, cex
def main(argv=[]):
args = parser.get_parser()
verbose = 1
if args.verbose:
verbose = 2
if args.debug:
verbose = 3
if verbose>2:
log_level=logging.DEBUG
elif verbose==2:
log_level=logging.INFO
elif verbose==1:
log_level=logging.WARNING
elif verbose<1:
log_level=logging.ERROR
logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s: %(message)s")
if args.command == 'version':
print cexbot.get_version()
return True
# make sure this is always above command parsing
# print config
config.first_run()
if verbose == 3:
print args
if args.command == 'config':
if args.list:
return config.list()
elif args.edit:
return config.edit_config()
elif args.testauth:
return config.test_auth()
elif args.name and args.value:
v = config.set(args.name, args.value)
return config.cprint(args.name)
elif args.name:
return config.cprint(args.name)
logging.error('Invalid config option')
return 1
elif args.command == 'update':
return updater.check_update()
# not implemented
elif args.command == 'cleardata':
return config.clear_userdata()
ac = cexapi.CexAPI(config.get('cex.username'), config.get('cex.apikey'), config.get('cex.secret'))
dbi = db.DbManager()
cx = CexMethods(ac, dbi)
if args.command == 'balance':
print "Balance: %s BTC" % ac.get_balance()
return True
elif args.command == 'initdb':
return dbi.initdb()
elif args.command == 'getmarket':
return ac.get_market()
elif args.command == 'getprice':
return ac.get_market_quote()
elif args.command == 'order':
amount = args.amount
price = args.price
r = ac.place_order(amount, price)
logging.info("Ordered: %s" % r)
elif args.command == 'updatequotes':
logging.info('Running updatequotes')
ticker_timer = timer.ReqTimer(2, cx.update_ticker)
ticker_timer.start()
elif args.command == 'buybalance':
logging.info('Running buybalance')
balance_timer = timer.ReqTimer(5, ac.buy_balance)
balance_timer.start()
# @TODO __import__
# if args.task in cexbot.tasks:
# cexbot.tasks[args.task]()
def cl_error(msg=""):
print >> sys.stderr, msg
def run_cl(argv=[]):
try:
raise SystemExit(main(sys.argv))
except KeyboardInterrupt:
cl_error('Interrupted.')
raise SystemExit(-1)
def run_gui(argv=[]):
print "GUI coming soon."
# return None
try:
import cexbot.gui
cexbot.gui.main()
except Exception, e:
print "Error: %s" % str(e)
| [
"nikcub@gmail.com"
] | nikcub@gmail.com |
c6910ab68ad6229bf80b24479eb1c72fa5afb7b0 | 00e4e930bf49d413ca69ffd12a4c0fe1005f8f24 | /library/urls.py | 26453cdc09049fa59265c42867aeeb863a91ed73 | [] | no_license | ChenQQ96/library-management-system | 6c7130f4a24d2c42142377403a89043dd6640d7f | c8e03c86c476952821859b5f9134aca3799b2358 | refs/heads/master | 2022-11-26T12:18:50.661981 | 2020-08-08T07:03:49 | 2020-08-08T07:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | """library URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from mylibrary.views import *
urlpatterns = [
path('admin/', admin.site.urls),
path('', IndexView.as_view(), name='index'),
path('login/', LoginView.as_view(), name='login'),
path('logout/', LogoutView.as_view(), name='logout'),
path('register/', RegisterView.as_view(), name='register'),
path('home/', HomeView.as_view(), name='home'),
path('search/', SearchView.as_view(), name='search'),
path('borrow/', BorrowView.as_view(), name='borrow'),
path('return/', ReturnView.as_view(), name='return'),
path('test/', TestView.as_view(), name='test'),
]
| [
"dyh0701@gmail.com"
] | dyh0701@gmail.com |
8ff11f80a9d114c4bc8bf1308b3959c25a622ffc | 39e2cb86b667012939ec8ed54c5827386f6ede32 | /GenProduction/python/pycharm_debug_example.py | 005cdd2c02c0aa2da964767a1a740b15b1c029b9 | [] | no_license | TillArndt/MyPackage | 8cc7968267e3973e4559f22fe9b193bd07b9c29d | dbffe36390ec3b01c5e231553b0e12307e96e980 | refs/heads/master | 2021-01-14T19:27:22.938927 | 2014-06-17T14:23:15 | 2014-06-17T14:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from pydev import pydevd
pydevd.settrace('localhost', port=22072, stdoutToServer=True, stderrToServer=True)
import cmstoolsac3b.main
import cmstoolsac3b.sample as smp
samples = smp.generate_samples(
("tta0.lhef", "tta01.lhef", "tta025.lhef", "tta05.lhef", "tta075.lhef", "tta1.lhef"),
"/user/backes/share/",
"/user/tholen/eventFiles/"
)
if __name__ == '__main__':
cmstoolsac3b.main.main(
samples=samples,
cfg_main_import_path="MyPackage.GenProduction.Hadronizer_MgmMatchTune4C_7TeV_madgraph_pythia8_cff_py_GEN_FASTSIM_HLT_PU_START42"
)
| [
""
] | |
037f9be5bf80317ae74a07e45127ecc157c518d3 | e7736337b1b2b9c18263a15940f5fc6941cb5cca | /pantra/models/choicefield.py | a2a28e1f5480f3f7b6dff87bbf51b0a2530b9c9d | [
"Apache-2.0"
] | permissive | TheSpitefulOctopus/pantra | 00f2e1fda794c9f442d2f97f82e1077a0de8d969 | e68196489a2da0f46fba2f54473762b3b0b15fa7 | refs/heads/master | 2023-04-02T21:49:55.983504 | 2020-09-29T20:32:53 | 2020-09-29T20:32:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | from typing import Mapping
from pony.orm import Required, dbapiprovider
from pony.orm.core import throw
# next code based on example from https://gist.github.com/erickmendonca/c260cc945201e8ab31ebabb47d33c2a0
# Choice field for PonyORM similar to Django https://docs.djangoproject.com/en/2.0/ref/models/fields/#choices
# Use it like this:
# class SomeModel(db.Entity):
# some_field = Choice(str, choices={
# 'key': 'Value',
# 'som': 'Something',
# 'ano': 'Another thing',
# })
class Choice(Required):
__slots__ = ('__choices',)
def __init__(self, *args, choices=None, **kwargs):
if not choices or not isinstance(choices, Mapping):
throw(
ValueError,
'Choices argument must be a Mapping (dict) of sql_value: display_value instance'
)
if any(not isinstance(value, str) for value in choices):
throw(
ValueError,
'Choices only support strings for sql_value',
)
super().__init__(*args, **kwargs)
self.__choices = dict(**choices)
def validate(self, val, *args, **kwargs):
val = super().validate(val, *args, **kwargs)
if val not in self.__choices.values():
throw(
ValueError,
'Choice {} is not valid. Valid choices are {}.'.format(
val, self.__choices.values(),
)
)
return val
def get_display_value(self, sql_value):
return self.__choices[sql_value]
def get_sql_value(self, display_value):
try:
value = next(
value for key, value in self.__choices.items()
if value == display_value
)
return value
except StopIteration:
return None
class ChoiceConverter(dbapiprovider.StrConverter):
def validate(self, val, obj=None):
if not isinstance(val, Choice):
throw(ValueError, 'Must be a Choice. Got {}'.format(type(val)))
return val
def py2sql(self, val):
return val.name
def sql2py(self, value):
# Any enum type can be used, so py_type ensures the correct one is used to create the enum instance
return self.py_type[value]
# monkey patching
try:
from pony.orm.dbproviders.postgres import PGProvider
except:
PGProvider = None
try:
from pony.orm.dbproviders.mysql import MySQLProvider
except:
MySQLProvider = None
try:
from pony.orm.dbproviders.oracle import OraProvider
except:
OraProvider = None
from pony.orm.dbproviders.sqlite import SQLiteProvider
for provider in (PGProvider, MySQLProvider, OraProvider, SQLiteProvider):
if provider:
provider.converter_classes.append((Choice, ChoiceConverter))
| [
"invent@zergos.ru"
] | invent@zergos.ru |
20faeb3af99098aeae7f42e863b981e32e75deb0 | f8a053f287c66652adffd15624c85dcc0850d898 | /setup.py | 424d2c9837ce0ca5390c3445ddf06d2283a94b46 | [
"MIT"
] | permissive | heyongwei/zvt | cce9e9bac78c6acc5e73b517f80d1fa464342817 | 051106955a6a01707847ee56a447e2502a25ff46 | refs/heads/master | 2023-04-23T16:36:58.631045 | 2021-05-16T16:01:18 | 2021-05-16T16:01:18 | 363,716,402 | 0 | 0 | MIT | 2021-05-16T16:01:19 | 2021-05-02T17:59:26 | Python | UTF-8 | Python | false | false | 2,508 | py | #!/usr/bin/env python
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
try:
# for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
# for pip <= 9.0.3
from pip.req import parse_requirements
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
install_reqs = parse_requirements("requirements.txt", session=False)
try:
requirements = [str(ir.req) for ir in install_reqs]
except:
requirements = [str(ir.requirement) for ir in install_reqs]
setup(
name='zvt',
version='0.9.3',
description='unified,modular quant framework for human beings ',
long_description=long_description,
url='https://github.com/zvtvz/zvt',
author='foolcage',
author_email='5533061@qq.com',
classifiers=[ # Optional
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
keywords='quant stock finance fintech big-data zvt technical-analysis trading-platform pandas fundamental-analysis',
packages=find_packages(include=['zvt.*', 'zvt']),
python_requires='>=3.5, <4',
include_package_data=True,
install_requires=requirements,
project_urls={ # Optional
'Bug Reports': 'https://github.com/zvtvz/zvt/issues',
'Funding': 'https://www.foolcage.com/zvt',
'Say Thanks!': 'https://saythanks.io/to/foolcage',
'Source': 'https://github.com/zvtvz/zvt',
},
long_description_content_type="text/markdown",
entry_points={
'console_scripts': [
'zvt = zvt.main:main',
'zvt_plugin = zvt.plugin:main',
'zvt_export = zvt.plugin:export',
],
},
)
| [
"5533061@qq.com"
] | 5533061@qq.com |
55cc7d562159c367091e1e40ec9f93092edf7587 | b7827696e69040eaad15d8b16d62dce2c457c82b | /tools/workspace/optitrack_driver/BUILD.bazel | 0d557cca48ace2e9ae1e208ab9de4e71acaf05b3 | [
"BSD-3-Clause"
] | permissive | osrf/drake-release | def0c54e353dfb27f7ee4623e5fb01f9ab1cbf03 | 7439308734194bb8c99dd7aeac66b068c40abd38 | refs/heads/master | 2021-01-01T16:36:44.948209 | 2018-02-15T01:36:05 | 2018-02-15T01:36:05 | 97,869,230 | 1 | 1 | null | 2017-08-10T18:46:23 | 2017-07-20T19:02:09 | C++ | UTF-8 | Python | false | false | 1,549 | bazel | # -*- python -*-
load(
"@drake//tools/install:install.bzl",
"cmake_config",
"install",
"install_cmake_config",
"install_files",
)
load("//tools/lint:lint.bzl", "add_lint_tests")
package(default_visibility = ["//visibility:public"])
install_files(
name = "install_optitrack_client",
dest = "bin",
files = ["optitrack_client"],
)
CMAKE_PACKAGE = "optitrack"
cmake_config(
cps_file_name = "package.cps",
package = CMAKE_PACKAGE,
)
install_cmake_config(
package = CMAKE_PACKAGE,
versioned = 0,
)
OPTITRACK_LICENSE_DOCS = ["@optitrack_driver//:LICENSE"]
OPTITRACK_TARGETS = [
"@optitrack_driver//lcmtypes:lcmtypes_optitrack",
"@optitrack_driver//lcmtypes:optitrack_lcmtypes",
"@optitrack_driver//lcmtypes:py_optitrack_lcmtypes",
"@optitrack_driver//src:optitrack_client_py",
]
install(
name = "install",
workspace = CMAKE_PACKAGE,
targets = OPTITRACK_TARGETS,
java_strip_prefix = ["**/"],
py_dest = "lib/python2.7/site-packages/optitrack",
py_strip_prefix = ["**/"],
hdr_dest = "include/lcmtypes",
guess_hdrs = "PACKAGE",
docs = OPTITRACK_LICENSE_DOCS,
doc_strip_prefix = ["**/"],
rename = {
"share/java/liblcmtypes_optitrack.jar": "lcmtypes_optitrack.jar",
},
allowed_externals = OPTITRACK_LICENSE_DOCS + OPTITRACK_TARGETS,
visibility = ["//:__subpackages__"],
deps = [
":install_cmake_config",
":install_optitrack_client",
],
)
add_lint_tests(python_lint_extra_srcs = ["optitrack_client"])
| [
"jrivero@osrfoundation.org"
] | jrivero@osrfoundation.org |
0a261a997e8b133dd2f20809de2b05a9df10aa1a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03041/s690420831.py | d69751f59907935676518728b9785bda095c49de | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | n, k = map(int, input().split())
s = str(input())
ans = ''
if s[k-1] == 'A':
ans = s[:k-1] + 'a' + s[k:]
print(ans)
exit()
elif s[k-1] == 'B':
ans = s[:k-1] + 'b' + s[k:]
print(ans)
exit()
elif s[k-1] == 'C':
ans = s[:k-1] + 'c' + s[k:]
print(ans)
exit() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6a383d9d4e5b16b886251e649c817d67092982c8 | 38f154c7f8eb8c7092a40a7f8b48f4846ed83033 | /hacker_rank/sorting/order_check.py | c568de18dae268c3e7365539cdc51e9f99bdd5b1 | [] | no_license | ginalamp/Coding_Challenges | 174bfe7794056e6fa646e025cc51817b0fb75da3 | d6a00298ac68ed50c73693f9081ae1fc156d7b9d | refs/heads/master | 2022-06-20T22:55:02.506910 | 2020-05-05T21:07:48 | 2020-05-05T21:07:48 | 259,048,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | '''
Given a list of integers, determine amount of elements not
in the correct order (the list should be in assending order)
'''
def main(height):
'''
@param list of integers
@return number of elements out of order
'''
count = 0
sorted_height = sorted(height)
for i in range(len(height)):
if height[i] != sorted_height[i]:
count += 1
print(count)
return(count)
if __name__ == '__main__':
height = [1,1,3,3,4,1] #should return 3
main()
| [
"gina.n.lamprecht@gmail.com"
] | gina.n.lamprecht@gmail.com |
88e0ef59855ab306106f1985a9b5d1c4cc398679 | 8bebf6b1fbd833de10450385668e416cba3d0f8e | /Generating_datasets_in_real_time.py | 2715fda92f1fda2e8651bdd445b4bb92ddfcfc7f | [] | no_license | rajneesh44/Face_Recognition | 6eb625bfa506d35a67d6f44dd363d4278b74ec30 | 99286af9994ffd5343093d7ab878e7143353c4a1 | refs/heads/master | 2020-06-25T19:45:37.576776 | 2019-10-27T04:01:20 | 2019-10-27T04:01:20 | 199,405,414 | 1 | 1 | null | 2019-10-27T04:01:21 | 2019-07-29T07:53:21 | Jupyter Notebook | UTF-8 | Python | false | false | 1,922 | py | import cv2
def generate_dataset(img,id,img_id):
cv2.imwrite("D:/images/user."+str(id)+"."+str(img_id)+".jpg",img)
# Method to draw boundary around the detected feature
def draw_boundary(img, classifier, scaleFactor, minNeighbors, color, text):
# Converting image to gray-scale
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# detecting features in gray-scale image, returns coordinates, width and height of features
features = classifier.detectMultiScale(gray_img, scaleFactor, minNeighbors)
coords = []
# drawing rectangle around the feature and labeling it
for (x, y, w, h) in features:
cv2.rectangle(img, (x,y), (x+w, y+h), color, 2)
cv2.putText(img,text, (x, y-4), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 1, cv2.LINE_AA)
coords = [x, y, w, h]
return coords
# Method to detect the features
def detect(img, faceCascade,img_id):
color = {"blue":(255,0,0), "red":(0,0,255), "green":(0,255,0), "white":(255,255,255)}
coords = draw_boundary(img, faceCascade, 1.1, 10, color['white'], "New_person")
# If feature is detected, the draw_boundary method will return the x,y coordinates and width and height of rectangle else the length of coords will be 0
if len(coords)==4:
roi_img = img[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]
user_id =4
generate_dataset(roi_img,user_id,img_id)
return img
# Loading face classifier
faceCascade = cv2.CascadeClassifier('C:/Users/Rajneesh/Downloads/Compressed/FaceDetection-master/frontalface.xml')
video_capture = cv2.VideoCapture(0)
img_id=0
while True:
# Reading image from video stream
_, img = video_capture.read()
# Call method we defined above
img = detect(img, faceCascade,img_id)
cv2.imshow("face detection", img)
img_id+=1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
| [
"rajneeshsharma338@gmail.com"
] | rajneeshsharma338@gmail.com |
b63e70420a9ec408b9cd00a2fc8aa9fe8994fa8a | 31c433902500b67d55bf68579482dde270aa084e | /main.py | 859d28993a916659f46c421ae8757621e6061daf | [] | no_license | legonzalezn/cajero-api | 9f4096bc67372e151811853d56a9c05b2e9a93eb | 10d438b3c7ea3eb50977a4195bdd888bace2902a | refs/heads/main | 2023-02-01T11:00:13.124588 | 2020-12-17T07:10:12 | 2020-12-17T07:10:12 | 320,278,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | from db.user_db import UserInDB
from db.user_db import update_user, get_user
from db.transaction_db import TransactionInDB
from db.transaction_db import save_transaction
from models.user_models import UserIn, UserOut
from models.transaction_models import TransactionIn, TransactionOut
import datetime
from fastapi import FastAPI
from fastapi import HTTPException
api = FastAPI()
from fastapi.middleware.cors import CORSMiddleware
url_real =[
[
"http://localhost.tiangolo.com", "https://localhost.tiangolo.com",
"http://localhost", "http://localhost:8080","http://127.0.0.1:8000"
],
"https://cajero-app-unal.herokuapp.com"
];
current = 1;
origins = url_real[current];
api.add_middleware(
CORSMiddleware, allow_origins=origins,
allow_credentials=True, allow_methods=["*"], allow_headers=["*"],
)
@api.post("/user/auth/")
async def auth_user(user_in:UserIn):
user_in_db = get_user(user_in.username)
if user_in_db == None:
raise HTTPException(status_code=404, detail="El usuario no existe!")
if user_in_db.password != user_in.password:
return {"Autenticado":False}
return{"Autenticado":True}
@api.get("/user/balance/{username}")
async def get_balance(username:str):
user_in_db = get_user(username)
if user_in_db == None:
raise HTTPException(status_code=404, detail="El usuario no existe!")
user_out = UserOut(**user_in_db.dict())
return user_out
@api.put("/user/transaction/")
async def make_transaction(transaction_in:TransactionIn):
user_in_db = get_user(transaction_in.username)
if user_in_db == None:
raise HTTPException(status_code=404, detail="El usuario no existe!")
if user_in_db.balance < transaction_in.value:
raise HTTPException(status_code=400, detail="Sin fondos suficientes")
user_in_db.balance = user_in_db.balance - transaction_in.value
update_user(user_in_db)
transaction_in_db = TransactionInDB(**transaction_in.dict(),actual_balance = user_in_db.balance)
transaction_in_db = save_transaction(transaction_in_db)
transaction_out = TransactionOut(**tratransaction_in_db.dict())
return transaction_out
| [
"legonzalezn@gmail.com"
] | legonzalezn@gmail.com |
8d46088e29502efcf6dd9e83c9e7bd681a4d2731 | 91f79f5970f34828db4a68b76245a98b2f64c3c5 | /ghpkg.py | cbd0b9a2a53005b72c5c08b2c2233ca4ee9ed2ee | [] | no_license | LuanDaSilva/Optmization | d08ce5d38c9f02a65ddde5ef66be062679756d0c | f3e350d7ccd70e20645e694313ecb663b8433718 | refs/heads/master | 2023-03-11T00:08:13.187574 | 2021-03-03T21:03:49 | 2021-03-03T21:03:49 | 273,323,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 08:22:11 2019
@author: Luan
"""
import numpy as np
def basis(i,n):
d = np.zeros(n)
d[i-1] = 1
return d
def step(v,h):
return [v+h*basis(i,len(v)) for i in range(1,len(v)+1)]
def gradient(f,v,h):
return np.array([(f(step(v,h)[i])-f(step(v,-h)[i]))/(2*h) for i in range(len(v))])
def Hessian(f,v,h):
H = np.zeros([len(v),len(v)])
for i in range(len(v)):
for j in range(i,len(v)):
if i==j:
H[i,i] = (f(step(v,h)[i])+f(step(v,-h)[i])-2*f(v))/(h**2)
else:
H[i,j] = H[j,i] = (f(step(v,h)[i]+step(v,h)[j])+f(step(v,-h)[i]+step(v,-h)[j])-f(step(v,-h)[i]+step(v,h)[j])-f(step(v,h)[i]+step(v,-h)[j]))/(4*h**2)
return H
def Jacobian(F,v,h):
m = len(F)
n = len(v)
J = np.zeros([m,n])
for i in range(m):
J[i,:] = (gradient(F[i],v,h))
return np.matrix(J)
#print(Jacobian(F,v,h))
f1 = lambda x: np.linalg.norm(x)#x[0]**2+x[1]**2+x[2]**2
f2 = lambda x: np.prod(x)#x[0]*x[1]
f3 = lambda x: x[0]
F = np.array([f1,f2,f3])
x0 = np.array([1,1,2])
| [
"noreply@github.com"
] | LuanDaSilva.noreply@github.com |
8eb8befca2042eed6d3b2420afd635376bafecec | 82e8bb2bc1a8b383837da5b563f7637ba9f74fe7 | /see_diagonals.py | 39480ba14ca3447133d53c7144bfc4e969d82c66 | [] | no_license | archanray/low_embed | b760d03713d77b6eda5502f63182cb94cf9bba91 | 15f5219b54d8a844f576ac114da13c29555ceccc | refs/heads/master | 2023-05-06T13:42:48.265172 | 2021-05-27T22:49:14 | 2021-05-27T22:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | import numpy as np
import matplotlib.pyplot as plt
import sys
import seaborn as sns
from matplotlib.colors import ListedColormap
from tqdm import tqdm
from utils import read_file, is_pos_def, is_pos_semi_def, viz_eigenvalues, is_real_eig, norm_diag, viz_diagonal
from Nystrom import simple_nystrom
from copy import deepcopy
import scipy.misc as scm
from scipy.io import savemat
id_count = 1118
similarity_matrix = read_file(pred_id_count=id_count, file_="predicts_1.npy")
# check for similar rows or columns
unique_rows, indices = np.unique(similarity_matrix, axis=0, return_index=True)
similarity_matrix = similarity_matrix[indices][:, indices]
sym_similarity_matrix = similarity_matrix + similarity_matrix.T
diag = np.diagonal(sym_similarity_matrix)
viz_diagonal(sym_similarity_matrix, mat_type="symmetrized_matrix_1")
# normalize diagonal
K = norm_diag(sym_similarity_matrix)
viz_diagonal(K, mat_type="normalized_symmetrized_matrix_1")
| [
"ray@cs.umass.edu"
] | ray@cs.umass.edu |
feb5b5b9942b836a874b3a07264b9012e4b7df0b | 3f9bec3201cc255c5ad6023cc746488306224015 | /Chapter 13/Example_13-2.py | 08ddcf523baaba0c0e1dc8735da55bee0e9ae257 | [] | no_license | afettouhi/FluentPython-py37 | 64927a908c5804d8970ea3f4b667c109c5867a6a | a14a721d738b8908f9e8e78552d70fbb2d6dd74f | refs/heads/master | 2022-06-14T18:26:47.456090 | 2020-05-08T04:13:51 | 2020-05-08T04:13:51 | 259,222,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import decimal
ctx = decimal.getcontext()
ctx.prec = 40
one_third = decimal.Decimal('1') / decimal.Decimal('3')
one_third
one_third == +one_third
ctx.prec = 28
one_third == +one_third
+one_third
| [
"A.Fettouhi@gmail.com"
] | A.Fettouhi@gmail.com |
ea566c781d0e6f7ed3612211e0138868a141630c | 780af071416ece1e1f6ead426e95155c3de209e9 | /notebooks/rv/__init__.py | 5918d5994e584d77b4e31b849f510e080fa8203b | [] | no_license | o-smirnov/docker-notebook | b2afd38cf16a1db9d3049c4ce79f7bc61c6183fb | 9cdb1f3fbaaca8edb94d9706a1e62410942a2f1a | refs/heads/master | 2021-01-22T17:14:03.346539 | 2015-05-07T12:31:01 | 2015-05-07T12:31:01 | 35,032,895 | 0 | 0 | null | 2015-05-04T12:15:27 | 2015-05-04T12:15:27 | null | UTF-8 | Python | false | false | 7,750 | py | import os, time, math, astropy, pyfits, traceback, fnmatch
from pandas import DataFrame, Series
import IPython.display
from IPython.display import Image, HTML, display
from rv.FITSFile import FITSFile
from rv.ImageFile import ImageFile
import matplotlib.pyplot as plt
NOTEBOOK_DIR = os.environ.get('RVNB_NOTEBOOK_DIR', '/notebooks')
RESULTDIR = os.environ.get('RVNB_DATA_DIR', '/notebooks/data')
ORIGINAL_RESULTDIR = os.environ.get('RVNB_ORIGINAL_DIR', '/notebooks/data')
WIDTH = None # globally fix a plot width (inches)
MINCOL = 2 # default min # of columns to display in thumbnail view
MAXCOL = 4 # default max # of columns to display in thumbnail view
MAXWIDTH = 16 # default width of thumbnail view (inches)
DPI = 80 # screen DPI
TIMEFORMAT = "%H:%M:%S %b %d"
astropy.log.setLevel('ERROR')
import os, time, math, astropy, pyfits, traceback, fnmatch
from pandas import DataFrame, Series
import IPython.display
from IPython.display import Image, HTML, display
import matplotlib.pyplot as plt
from rv.File import DataFile
from rv.Render import renderTitle,renderTable
class FileList(list):
_sort_attributes=dict(x="ext",n="basename",s="size",t="mtime")
def __init__(self, files=[], extcol=True, thumbs=None, title="", sort="xnt"):
list.__init__(self, files)
self._extcol = extcol
self._thumbs = thumbs
self._title = title
if sort:
self.sort(sort)
def sort(self, opt="xnt"):
"""Sort the filelist by name, eXtension, Time, Size, optionally Reverse"""
opt = opt.lower()
# build up order of comparison
cmpattr = []
for attr in opt:
if attr in self._sort_attributes:
cmpattr.append(self._sort_attributes[attr])
def compare(a, b, attrs=cmpattr):
for attr in attrs:
result = cmp(getattr(a,attr),getattr(b,attr))
if result:
return result
return 0
list.sort(self, cmp=compare, reverse='r' in opt)
self._init_df()
return self
def _init_df(self):
if self._extcol:
df_files = [(f.basename, f.ext, f.size, f.mtime_str) for f in self]
self._df = DataFrame(df_files,
columns=('name', 'ext', 'size',
'modified')) if df_files else None
else:
df_files = [(f.name, f.size, f.mtime_str) for f in self]
self._df = DataFrame(
df_files,
columns=('name', 'size', 'modified')) if df_files else None
def _repr_html_(self,ncol=1):
html = renderTitle(self._title)
if self._extcol:
labels = "name", "ext", "size", "modified"
data = [ (df.basename, df.ext, df.size_str, df.mtime_str) for df in self ]
links = [ (df.fullpath, df.fullpath, None, None) for df in self ]
else:
labels = "name", "size", "modified"
data = [ (df.basename, df.size_str, df.mtime_str) for df in self ]
links = [ (df.fullpath, None, None) for df in self ]
html += renderTable(data,labels,links=links,ncol=ncol)
return html
def show(self,ncol=1):
return IPython.display.display(HTML(self._repr_html_(ncol=ncol)))
def show_all(self):
for f in self:
f.show()
def __call__(self, pattern):
files = [f for f in self if fnmatch.fnmatch(f.name, pattern)]
return FileList(files,
extcol=self._extcol,
thumbs=self._thumbs,
title=os.path.join(self._title, pattern))
def thumbs(self, **kw):
kw['title'] = self._title
return self._thumbs(self, **kw) if self._thumbs else None
def __getslice__(self, *slc):
return FileList(list.__getslice__(self, *slc),
extcol=self._extcol,
thumbs=self._thumbs,
title="%s[%s]"%(self._title,":".join(map(str,slc))))
class DataDir(object):
"""This class represents a directory in the data folder"""
def __init__(self, name, files=[], root=""):
self.fullpath = name
if root and name.startswith(root):
name = name[len(root):]
if name.startswith("/"):
name = name[1:]
name = name or "."
self.name = self.path = name
self.mtime = os.path.getmtime(self.fullpath)
files = [ f for f in files if not f.startswith('.') ]
# our title, in HTML
self._title = os.path.join(ORIGINAL_RESULTDIR, self.path
if self.path is not "." else "")
# make list of DataFiles and sort by time
self.files = FileList([ DataFile(os.path.join(self.fullpath, f),
root=root) for f in files],
title=self._title)
# make separate lists of fits files and image files
self.fits = FileList([ f for f in self.files
if type(f) is FITSFile],
extcol=False,
thumbs=FITSFile._show_thumbs,
title="FITS files, " + self._title);
self.images = FileList([ f for f in self.files
if type(f) is ImageFile],
extcol=False,
thumbs=ImageFile._show_thumbs,
title="Images, " + self._title)
def sort(self, opt):
for f in self.files, self.fits, self.images:
f.sort(opt)
return self
def show(self):
return IPython.display.display(self)
def _repr_html_(self):
return renderTitle(self._title) + self.files._repr_html_()
class DirList(list):
def __init__(self, rootfolder=None, pattern="*", scan=True, title=None):
self._root = rootfolder = rootfolder or RESULTDIR
self._title = title or ORIGINAL_RESULTDIR
if scan:
for dir_, _, files in os.walk(rootfolder):
basename = os.path.basename(dir_)
if fnmatch.fnmatch(basename, pattern) and not basename.startswith("."):
self.append(DataDir(dir_, files, root=rootfolder))
self._sort()
def _sort(self):
self.sort(cmp=lambda x, y: cmp(x.name, y.name))
def _repr_html_(self):
html = renderTitle(self._title)
dirlist = []
for dir_ in self:
nfits = len(dir_.fits)
nimg = len(dir_.images)
nother = len(dir_.files) - nfits - nimg
dirlist.append(
(dir_.name, nfits, nimg, nother, time.strftime(TIMEFORMAT,time.localtime(dir_.mtime))))
html += renderTable(dirlist,
labels=("name", "# FITS", "# img", "# others", "modified"))
return html
def show(self):
return IPython.display.display(self)
def __call__(self, pattern):
return DirList(self._root, pattern,
title=os.path.join(self._title, pattern))
def __getslice__(self, *slc):
newlist = DirList(self._root, scan=False,
title="%s[%s]"%(self._title,":".join(map(str,slc))))
newlist += list.__getslice__(self, *slc)
newlist._sort()
return newlist
# def scandirs (datafolder=DATAFOLDER):
# """Scans all directories under datafolder and populates the DIRS list"""
# global DIRS;
# DIRS = DirList(datafolder);
# for name,ds in sorted(all_dirs):
# print "Contents of",name
# display(d)
| [
"osmirnov@gmail.com"
] | osmirnov@gmail.com |
99cbf86713b07499e57c02d95ba061f54909e2b4 | 0aa150f1bfe3fdbdeaaeeaef5754c3e90378e935 | /yearapp/migrations/0034_auto_20191008_0609.py | 75095d6fe84241d240057f54d63809fb82a11f8f | [] | no_license | arshpreetsingh12/yearbook | 6232eba52330b36a7404317985aea4482befd101 | dac303e3cc448985256b44baae6e9baa4c8d8292 | refs/heads/master | 2020-08-07T19:57:00.281613 | 2019-10-11T13:41:49 | 2019-10-11T13:41:49 | 213,571,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | # Generated by Django 2.2.5 on 2019-10-08 06:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yearapp', '0033_sale'),
]
operations = [
migrations.AlterField(
model_name='invitation',
name='address',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='invitation',
name='name_of_venue',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='sale',
name='description',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| [
"director@avioxtechnologies.com"
] | director@avioxtechnologies.com |
c9df5a62127dfac76653865a0129f9b10b63072f | f47e5f9c08b691b2444ec97adbcdc4022fbdcc75 | /py_0710_1.py | ddd4c012c5c25b806919e7696d36b78d8f13f2fc | [] | no_license | nvcoden/working-out-problems | 893066f646a0babb3e03982b6042aa19442785aa | 0db5131b74f903e0cd3aa0bdec37b659abe63da6 | refs/heads/master | 2023-01-04T11:48:52.626980 | 2020-11-06T07:36:19 | 2020-11-06T07:36:19 | 297,917,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | '''
Create a function that returns true if a string contains any spaces.
'''
def hasSpaces (string):
return ' ' in string
print(hasSpaces("hello"))
print(hasSpaces("hello, world"))
print(hasSpaces(" "))
print(hasSpaces(""))
print(hasSpaces(",./!@#")) | [
"navneeths55555@gmail.com"
] | navneeths55555@gmail.com |
862491768d6eba456ebf0e1ea79d633839949c26 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/TauES_test/nom/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374658142/HTT_24Jul_newTES_manzoni_Nom_Jobs/Job_149/run_cfg.py | 36401bb8144102988ca277182f23311dd0e887ef | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,049 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/TauES_test/nom/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374658142/HTT_24Jul_newTES_manzoni_Nom_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_5.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_50.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_500.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
ebbe93d928c04185948fa9f6113846b7f0317e43 | e0a475d2ee6d029f2e933010c96b9b8d48786bcb | /seg_models/losses.py | 7708e8f1545587bf344daf7a4c6af1d793cd3a77 | [
"MIT"
] | permissive | Ayat-Abedalla/Ens4B-UNet | 9d837244b8241fae01cf49464ec2156c9af2409a | 38c64aabe4001bf4d41e1c969ebf1f3ea89877b1 | refs/heads/main | 2023-07-12T07:17:48.002386 | 2021-08-24T19:44:42 | 2021-08-24T19:44:42 | 312,731,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,834 | py | from .base import Loss
from .base import functional as F
SMOOTH = 1e-5
class DiceLoss(Loss):
    r"""Dice loss, expressed through the F-beta score:

    .. math:: L(tp, fp, fn) = 1 - \frac{(1 + \beta^2) \cdot tp}
        {(1 + \beta^2) \cdot tp + \beta^2 \cdot fn + fp}

    where tp, fp and fn are true positives, false positives and false
    negatives respectively.

    Args:
        beta: Float or integer coefficient balancing precision and recall.
        class_weights: Optional array of per-class weights
            (``len(weights) = num_classes``).
        class_indexes: Optional integer or list of integers selecting the
            classes to score; all classes are used when ``None``.
        per_image: If ``True`` the loss is computed per image and averaged,
            otherwise it is computed over the whole batch.
        smooth: Small constant that avoids division by zero.

    Returns:
        A callable ``dice_loss`` instance. Can be passed to
        ``model.compile(...)`` or combined with other losses.

    Example:

    .. code:: python

        loss = DiceLoss()
        model.compile('SGD', loss=loss)
    """
    def __init__(self, beta=1, class_weights=None, class_indexes=None, per_image=False, smooth=SMOOTH):
        super().__init__(name='dice_loss')
        self.beta = beta
        # Fall back to a uniform weight of 1 when no class weights are given.
        self.class_weights = 1 if class_weights is None else class_weights
        self.class_indexes = class_indexes
        self.per_image = per_image
        self.smooth = smooth
    def __call__(self, gt, pr):
        # Dice loss is one minus the (weighted) F-beta score.
        score = F.f_score(
            gt,
            pr,
            beta=self.beta,
            class_weights=self.class_weights,
            class_indexes=self.class_indexes,
            smooth=self.smooth,
            per_image=self.per_image,
            threshold=None,
            **self.submodules
        )
        return 1 - score
class BinaryCELoss(Loss):
    r"""Creates a criterion that measures the Binary Cross Entropy between the
    ground truth (gt) and the prediction (pr).
    .. math:: L(gt, pr) = - gt \cdot \log(pr) - (1 - gt) \cdot \log(1 - pr)
    Returns:
        A callable ``binary_crossentropy`` instance. Can be used in ``model.compile(...)`` function
        or combined with other losses.
    Example:
    .. code:: python
        loss = BinaryCELoss()
        model.compile('SGD', loss=loss)
    """
    def __init__(self):
        super().__init__(name='binary_crossentropy')
    def __call__(self, gt, pr):
        # NOTE(review): 'bianary_crossentropy' looks misspelled but must match
        # the helper's actual name in .base.functional — confirm before renaming.
        return F.bianary_crossentropy(gt, pr, **self.submodules)
# aliases
# Ready-to-use module-level instances with default parameters.
dice_loss = DiceLoss()
binary_crossentropy = BinaryCELoss()
# loss combinations
# Uses `+` on Loss instances — presumably Loss.__add__ builds a summed loss;
# verify in .base.
bce_dice_loss = binary_crossentropy + dice_loss
| [
"noreply@github.com"
] | Ayat-Abedalla.noreply@github.com |
3a5b94e7131990cbea8bb74c7ffa251444708848 | 440f518020b428085f8b68b4f947bc07245281d6 | /the first method/model_exp11.py | bf6a07a30d716a6313c0ccc89a9a3a9531f06d8c | [] | no_license | pyjsnnu/Scene-change-detection-1 | 5250c30984ce93901ad1a635fb67fe2e0ce241f9 | cd15f9c1a4268a22c44855455999c85f3792e689 | refs/heads/master | 2021-05-26T03:21:24.326910 | 2019-03-30T13:02:50 | 2019-03-30T13:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | from keras.applications.resnet50 import ResNet50
import keras.layers as layers
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D,concatenate, Flatten, Dropout, Activation, Input,Dense,GlobalAveragePooling2D
from keras import metrics, losses
import keras
def getModel():
    """Build and compile the siamese change-detection model.

    Two 256x256x3 image inputs share one ImageNet-pretrained ResNet50
    backbone. Each branch gets its own auxiliary sigmoid head (out1/out2),
    and the concatenated branch features feed the main sigmoid head (out).

    Returns:
        A compiled Keras ``Model`` with three binary cross-entropy losses
        weighted 0.7 (main) / 0.15 / 0.15 (auxiliary heads).
    """
    # ResNet50 feature extractor without its classification top.
    resnet_model_notop = ResNet50(include_top=False,
                                  weights='imagenet',
                                  input_tensor=None,
                                  input_shape=(256, 256, 3)
                                  )
    out = resnet_model_notop.get_layer(index=-1).output
    # Fix: use the `inputs`/`outputs` keywords consistently; the legacy
    # `input=`/`output=` spellings are deprecated and rejected by newer Keras.
    model_resnet = Model(inputs=resnet_model_notop.input, outputs=out)
    img1 = Input((256, 256, 3))
    img2 = Input((256, 256, 3))
    # Shared weights: the same backbone is applied to both images.
    feature1 = model_resnet(img1)
    feature2 = model_resnet(img2)
    feature1 = layers.GlobalAveragePooling2D(name='avg_pool1')(feature1)
    out1 = layers.Dense(1, activation='sigmoid', name='medense_3')(feature1)
    feature2 = layers.GlobalAveragePooling2D(name='avg_pool2')(feature2)
    out2 = layers.Dense(1, activation='sigmoid', name='medense_4')(feature2)
    feature = concatenate([feature1, feature2], axis=-1)
    out = layers.Dense(512, activation='relu', name='medense_1')(feature)
    out = layers.Dense(1, activation='sigmoid', name='medense_2')(out)
    model = Model(inputs=[img1, img2], outputs=[out, out1, out2])
    model.summary()
    adam = keras.optimizers.Adam(lr=0.0001)
    model.compile(optimizer=adam,
                  loss=[losses.binary_crossentropy, losses.binary_crossentropy, losses.binary_crossentropy],
                  loss_weights=[0.7, 0.15, 0.15],
                  metrics=[metrics.binary_accuracy])
    return model
"pangchao@whu.edu.cn"
] | pangchao@whu.edu.cn |
ef94b32dec93fe156549f2e821e7e2798f65812c | 5b0aebb53c33124b87c8655a5923858d6a2a5bc7 | /bm_preproc.py | 266dc2e049dc3c7569d58d10d38f24412cdec468 | [] | no_license | corylstewart/DNA-Class | 440e8c0304ea568347d2dad77424ee77a74f9e01 | 5706b95181ef7dd73a6a9d97cc879a50663ca60a | refs/heads/master | 2021-01-10T13:18:07.538528 | 2016-03-29T18:50:26 | 2016-03-29T18:50:26 | 55,001,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | """bm_preproc.py: Boyer-Moore preprocessing."""
__author__ = "Ben Langmead"
def z_array(s):
    """ Use Z algorithm (Gusfield theorem 1.4.1) to preprocess s.

    Returns z where z[k] is the length of the longest substring of s that
    starts at offset k and matches a prefix of s (z[0] is defined as len(s)).
    """
    assert len(s) > 1
    z = [len(s)] + [0] * (len(s)-1)
    # Initial comparison of s[1:] with prefix
    for i in range(1, len(s)):
        if s[i] == s[i-1]:
            z[1] += 1
        else:
            break
    # (l, r) delimits the rightmost "Z box": an interval known to match a prefix.
    r, l = 0, 0
    if z[1] > 0:
        r, l = z[1], 1
    for k in range(2, len(s)):
        assert z[k] == 0
        if k > r:
            # Case 1
            # k lies outside every Z box: compare characters explicitly.
            for i in range(k, len(s)):
                if s[i] == s[i-k]:
                    z[k] += 1
                else:
                    break
            r, l = k + z[k] - 1, k
        else:
            # Case 2
            # k lies inside the Z box [l, r]; reuse z[k - l] where possible.
            # Calculate length of beta
            nbeta = r - k + 1
            zkp = z[k - l]
            if nbeta > zkp:
                # Case 2a: zkp wins
                z[k] = zkp
            else:
                # Case 2b: Compare characters just past r
                nmatch = 0
                for i in range(r+1, len(s)):
                    if s[i] == s[i - k]:
                        nmatch += 1
                    else:
                        break
                l, r = k, r + nmatch
                z[k] = r - k + 1
    return z
def n_array(s):
    """Compile the N array (Gusfield theorem 2.2.2): the Z array of the
    reversed string, read back in original orientation."""
    reversed_z = z_array(s[::-1])
    return list(reversed(reversed_z))
def big_l_prime_array(p, n):
    """Compile the L' array (Gusfield theorem 2.2.2) from p and its N array.

    L'[i] is the largest index j less than n such that N[j] = |P[i:]|.
    """
    m = len(p)
    lp = [0 for _ in range(m)]
    for j in range(m - 1):
        idx = m - n[j]
        if idx < m:
            lp[idx] = j + 1
    return lp
def big_l_array(p, lp):
    """Compile the L array (Gusfield theorem 2.2.2) from p and the L' array.

    L[i] is the largest index j less than n such that N[j] >= |P[i:]|,
    i.e. a running maximum of L' from the left.
    """
    result = [0, lp[1]]
    best = lp[1]
    for i in range(2, len(p)):
        best = max(best, lp[i])
        result.append(best)
    return result
def small_l_prime_array(n):
    """Compile the l' array (Gusfield theorem 2.2.4) from the N array."""
    length = len(n)
    small_lp = [0] * length
    for i, ni in enumerate(n):
        if ni == i + 1:  # a prefix of p matches the suffix ending at i
            small_lp[length - i - 1] = i + 1
    # "Smear" values leftward: each empty slot takes the nearest value to its right.
    for i in reversed(range(length - 1)):
        if not small_lp[i]:
            small_lp[i] = small_lp[i + 1]
    return small_lp
def good_suffix_table(p):
    """Return the (L', L, l') tables needed to apply the good suffix rule."""
    n = n_array(p)
    big_lp = big_l_prime_array(p, n)
    big_l = big_l_array(p, big_lp)
    small_lp = small_l_prime_array(n)
    return big_lp, big_l, small_lp
def good_suffix_mismatch(i, big_l_prime, small_l_prime):
    """Given a mismatch at offset i and the L'/l' arrays, return the shift
    dictated by the good suffix rule."""
    length = len(big_l_prime)
    assert i < length
    if i == length - 1:
        return 0  # mismatch at the last position: this rule gives no shift
    j = i + 1  # j points to the leftmost matching position of P
    shift_source = big_l_prime[j] if big_l_prime[j] > 0 else small_l_prime[j]
    return length - shift_source
def good_suffix_match(small_l_prime):
    """Amount to shift after a full match of P to T, per the good suffix rule."""
    pattern_len = len(small_l_prime)
    return pattern_len - small_l_prime[1]
def dense_bad_char_tab(p, amap):
    """Build the dense bad-character table for pattern p.

    ``amap`` maps each alphabet character to a dense integer index. The
    returned table is indexed first by pattern offset, then by character
    index, and stores 1 + the offset of the previous occurrence (0 if none).
    """
    tab = []
    last_seen = [0] * len(amap)
    for offset, ch in enumerate(p):
        assert ch in amap
        tab.append(list(last_seen))
        last_seen[amap[ch]] = offset + 1
    return tab
class BoyerMoore(object):
    """Holds a pattern together with its Boyer-Moore preprocessing tables."""
    def __init__(self, p, alphabet='ACGT'):
        # Map each alphabet character to a dense integer index.
        self.amap = {ch: idx for idx, ch in enumerate(alphabet)}
        # Bad character rule table.
        self.bad_char = dense_bad_char_tab(p, self.amap)
        # Good suffix rule tables (L' itself is not needed at match time).
        _, self.big_l, self.small_l_prime = good_suffix_table(p)
    def bad_character_rule(self, i, c):
        """Number of skips given by the bad character rule at offset i."""
        assert c in self.amap
        assert i < len(self.bad_char)
        prev_occurrence = self.bad_char[i][self.amap[c]]
        return i - (prev_occurrence - 1)
    def good_suffix_rule(self, i):
        """Shift dictated by the (weak) good suffix rule for a mismatch at i."""
        length = len(self.big_l)
        assert i < length
        if i == length - 1:
            return 0
        j = i + 1  # j points to the leftmost matching position of P
        if self.big_l[j] > 0:
            return length - self.big_l[j]
        return length - self.small_l_prime[j]
    def match_skip(self):
        """Amount to shift in the case where P matches T exactly."""
        return len(self.small_l_prime) - self.small_l_prime[1]
def naive_find_matches_with_counter(p, t):
    """Naive exact matching of pattern p in text t.

    Returns a tuple (total_comps, matches) where total_comps is the number
    of character comparisons performed and matches lists every offset of t
    at which p occurs.
    """
    matches = list()
    total_comps = 0
    # Bug fix: `xrange` does not exist in Python 3; `range` works everywhere.
    for i in range(len(t) - len(p) + 1):
        matched = True
        for j in range(len(p)):
            total_comps += 1
            if p[j] != t[i + j]:
                matched = False
                break
        if matched:
            matches.append(i)
    return (total_comps, matches)
def boyer_moore_with_counter(p, p_bm, t):
    """ Do Boyer-Moore matching. p=pattern, t=text,
        p_bm=BoyerMoore object for p.

    Returns the number of alignments (windows) tried. Note that despite its
    name, `total_comps` counts alignments — it is incremented once per
    window, not per character comparison — and match offsets are not
    recorded.
    """
    i = 0
    total_comps = 0
    while i < len(t) - len(p) + 1:
        total_comps += 1  # one alignment attempted at offset i
        shift = 1
        mismatched = False
        # Compare the pattern to the text right-to-left.
        for j in range(len(p)-1, -1, -1):
            if p[j] != t[i+j]:
                # Shift by the larger of the two rule skips (at least 1).
                skip_bc = p_bm.bad_character_rule(j, t[i+j])
                skip_gs = p_bm.good_suffix_rule(j)
                shift = max(shift, skip_bc, skip_gs)
                mismatched = True
                break
        if not mismatched:
            # Full match: shift by the good-suffix match skip.
            skip_gs = p_bm.match_skip()
            shift = max(shift, skip_gs)
        i += shift
    return total_comps
| [
"corylstewart@gmail.com"
] | corylstewart@gmail.com |
554a4731361f55d2ba8209fc56d025e787dd641e | f4e36d286419b2a297524d4e9a67ee7036702e1a | /Edge.py | b02afef09fa6abdaab37f54fa3e1137cccb8b3dd | [] | no_license | nygupta/Edge-Detector | 36d5617983105db5d682b4c66eb23168ad6db827 | 0b94b47e4e3d71204654df7531c33adf8bfb283b | refs/heads/master | 2023-04-29T15:41:59.843295 | 2021-05-18T10:20:38 | 2021-05-18T10:20:38 | 368,134,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | import cv2
from matplotlib import pyplot as plt
def image(url):
    """Load the image at `url` as grayscale, run Canny edge detection and
    display the original next to the detected edges."""
    gray = cv2.imread(url, 0)  # 0 -> load as grayscale
    edges = cv2.Canny(gray, 100, 200)
    panels = [('grayscale', gray), ('edge', edges)]
    for idx, (panel_title, panel_img) in enumerate(panels, start=1):
        plt.subplot(1, 2, idx)
        plt.imshow(panel_img, 'gray')
        plt.title(panel_title)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def camcapture():
    """Stream frames from the default webcam, showing the live feed and its
    Canny edges in two windows until the 'x' key is pressed."""
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        cv2.imshow('Original', frame)
        cv2.imshow('Edges', cv2.Canny(frame, 100, 200))
        key = cv2.waitKey(1) & 0xFF
        if key == ord('x'):
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Interactive entry point: choose between file-based edge detection (1)
    and live webcam edge detection (2)."""
    print("1: if you have the image")
    print("2: if you want to start the camera")
    # NOTE(review): int() raises ValueError on non-numeric input, and any
    # other value silently does nothing — consider validating.
    val = int(input())
    if val == 1:
        url = input("url of the image:")
        image(url)
    elif val == 2:
        camcapture()
main() | [
"nygupta21@gmail.com"
] | nygupta21@gmail.com |
92a0e721e3eda4875af9bb32e92fe8fa1ed99fea | e9fbb4718f6b68bf73ca6acd63fa068169e53e28 | /src/python/com/expleague/media_space/publisher_parser.py | da21d0a828959559849f39e87251f23d237e82d6 | [] | no_license | mrMakaronka/topic_modeling | 940f752953acf5c8ef18f811933fbedbab3f40d1 | 15ccd3473cd85ec7472a2b00fc1ac21109fdba13 | refs/heads/master | 2020-05-03T05:10:39.935471 | 2020-02-15T00:54:31 | 2020-02-15T00:54:31 | 178,441,410 | 0 | 0 | null | 2019-10-19T15:44:23 | 2019-03-29T16:34:40 | Python | UTF-8 | Python | false | false | 704 | py | from typing import Optional
from urllib.parse import urlparse
class PublisherParser:
    """Extracts a normalized publisher identifier from an article URL."""

    @staticmethod
    def parse(url: str) -> Optional[str]:
        """Return the publisher id for `url`, or None if it has no hostname
        or is a feed proxy.

        For vk.com the whole path is kept; for t.me and twitter.com only the
        first path segment (channel / account) is appended.
        """
        parsed = urlparse(url)
        if parsed.hostname is None:
            return None
        publisher = parsed.hostname
        # Strip only a leading 'www.' — a plain replace() would also mangle
        # hostnames containing 'www.' elsewhere.
        if publisher.startswith('www.'):
            publisher = publisher[len('www.'):]
        if publisher == 'feedproxy.google.com':
            # Proxy host carries no publisher information.
            return None
        if publisher == 'vk.com':
            publisher += parsed.path
        elif publisher in ('t.me', 'twitter.com'):
            segments = parsed.path.split('/')
            # Guard against URLs with an empty path, which previously
            # raised IndexError.
            if len(segments) > 1:
                publisher += ('/' + segments[1])
        return publisher
| [
"trofimov9artem@gmail.com"
] | trofimov9artem@gmail.com |
3e5340cc5926effd53458f38b91b0e5c0faac3a2 | 63eecf0ae9dc4db0e3667635d7070623a031fc59 | /TestAPI/venv/Scripts/pip-script.py | 45b35a1dd9d7e9151dbd67070e5b4900ae38cd78 | [] | no_license | howtodefault/unitest_pro | 528c17d2130695d0c57b4d13cdfc861b08b958d3 | 9d35fe8ac752f545ed8fe462ab3fa9e06303dccb | refs/heads/master | 2023-06-29T08:34:24.917053 | 2021-08-02T09:11:57 | 2021-08-02T09:11:57 | 391,882,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #!C:\TestAPI\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools wrapper that dispatches to pip's console entry point.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] reads as plain 'pip'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"v_guoqzou@tencent.com"
] | v_guoqzou@tencent.com |
099667299286cf88413adc62ba733f68c1b6a527 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/smm/mat/A_02_02_9.py | 7a41146d9d9f5b3170add4863afcb1b9d7b5f894 | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,302 | py | A_02_02_9 = {0: {'A': -0.145, 'C': 0.221, 'E': 0.72, 'D': 0.844, 'G': 0.058, 'F': -0.922, 'I': -0.135, 'H': 0.116, 'K': -0.195, 'M': -0.461, 'L': -0.138, 'N': 0.087, 'Q': 0.011, 'P': 0.503, 'S': -0.089, 'R': 0.099, 'T': 0.161, 'W': -0.221, 'V': 0.035, 'Y': -0.547}, 1: {'A': 0.108, 'C': 0.324, 'E': 0.89, 'D': 0.324, 'G': -0.085, 'F': -0.094, 'I': -0.572, 'H': 0.05, 'K': 0.233, 'M': -1.25, 'L': -1.345, 'N': 0.41, 'Q': -0.308, 'P': 1.043, 'S': -0.004, 'R': 0.877, 'T': -0.128, 'W': -0.272, 'V': -0.341, 'Y': 0.14}, 2: {'A': -0.513, 'C': 0.144, 'E': 0.353, 'D': 0.04, 'G': 0.163, 'F': -0.354, 'I': -0.132, 'H': 0.102, 'K': 0.352, 'M': -0.561, 'L': 0.233, 'N': -0.217, 'Q': 0.135, 'P': 0.1, 'S': -0.352, 'R': 0.425, 'T': 0.128, 'W': 0.149, 'V': -0.037, 'Y': -0.157}, 3: {'A': -0.172, 'C': -0.042, 'E': -0.216, 'D': -0.315, 'G': -0.157, 'F': 0.003, 'I': 0.129, 'H': 0.033, 'K': 0.103, 'M': 0.093, 'L': 0.145, 'N': 0.118, 'Q': 0.037, 'P': -0.045, 'S': -0.121, 'R': 0.226, 'T': 0.118, 'W': 0.026, 'V': 0.092, 'Y': -0.056}, 4: {'A': 0.035, 'C': -0.054, 'E': 0.023, 'D': 0.049, 'G': 0.109, 'F': -0.272, 'I': -0.3, 'H': -0.127, 'K': 0.131, 'M': 0.092, 'L': -0.107, 'N': 0.122, 'Q': 0.034, 'P': 0.264, 'S': 0.04, 'R': 0.161, 'T': 0.195, 'W': 0.052, 'V': -0.097, 'Y': -0.351}, 5: {'A': 0.099, 'C': -0.034, 'E': 0.087, 'D': 0.139, 'G': 0.167, 'F': -0.218, 'I': -0.196, 'H': 0.144, 'K': 0.449, 'M': -0.138, 'L': -0.265, 'N': -0.078, 'Q': -0.003, 'P': 0.028, 'S': -0.151, 'R': 0.218, 'T': -0.17, 'W': 0.112, 'V': -0.145, 'Y': -0.044}, 6: {'A': -0.116, 'C': 0.037, 'E': -0.098, 'D': -0.071, 'G': 0.241, 'F': -0.355, 'I': 0.156, 'H': -0.175, 'K': 0.554, 'M': -0.063, 'L': 
0.183, 'N': -0.031, 'Q': 0.062, 'P': 0.19, 'S': -0.029, 'R': 0.47, 'T': -0.083, 'W': -0.39, 'V': -0.06, 'Y': -0.422}, 7: {'A': -0.048, 'C': 0.154, 'E': -0.175, 'D': 0.432, 'G': -0.001, 'F': -0.374, 'I': 0.173, 'H': 0.007, 'K': 0.243, 'M': 0.1, 'L': -0.233, 'N': -0.014, 'Q': -0.004, 'P': -0.08, 'S': -0.086, 'R': 0.077, 'T': 0.143, 'W': -0.157, 'V': 0.264, 'Y': -0.42}, 8: {'A': -0.423, 'C': 0.65, 'E': -0.065, 'D': -0.186, 'G': -0.273, 'F': 0.009, 'I': -0.619, 'H': 0.454, 'K': 0.779, 'M': -0.252, 'L': -0.945, 'N': -0.315, 'Q': 0.288, 'P': -0.101, 'S': 0.282, 'R': 0.578, 'T': 0.148, 'W': 0.44, 'V': -1.051, 'Y': 0.602}, -1: {'con': 4.16801}} | [
"schubert@informatik.uni-tuebingen.de"
] | schubert@informatik.uni-tuebingen.de |
567cb97bfd38e9390ec153a2cb1562ba54d4c807 | 89f49d4788a1fa32e249a4a1c9f95ea8e5deaf9c | /AJAXHandlers/IAJAXHandler.py | e6bb3e64a489472f82e012da44628908a2697559 | [] | no_license | ramvijay/VivaManagementSystem | c7a61d39833f2f517c30d979e9b53e063e2ac53d | e67038f93270b05c15c8a2b4d95959c17e85ff33 | refs/heads/master | 2020-06-23T12:38:57.843965 | 2016-12-30T09:08:28 | 2016-12-30T09:08:28 | 74,648,521 | 2 | 1 | null | 2016-12-30T09:18:11 | 2016-11-24T07:28:21 | JavaScript | UTF-8 | Python | false | false | 201 | py | """
Interface used for all AJAX Calls
"""
from abc import ABCMeta, abstractmethod
class IAJAXHandler(metaclass=ABCMeta):
    """Abstract interface that every AJAX request handler must implement."""

    @abstractmethod
    def handle_request(self, http_request):
        """Process `http_request` and produce the handler's response."""
| [
"ramvijay35@gmail.com"
] | ramvijay35@gmail.com |
8a52d3c92c69cc00748921fe893c5598258899b6 | 3afc028773b238c6fd58eee8afcf0dd8455d3968 | /100-example-using-dummies.py | c345d5a7c043536962e784a0c5c92fe683d0f27a | [] | no_license | JohnAskew/titanic | ebd26cf9b65dacfc95907fe5c160203cb4a3c881 | 40c080e2668677a99d5fd348b3d8da67c4859d59 | refs/heads/master | 2020-06-19T15:06:31.561938 | 2019-08-06T19:33:12 | 2019-08-06T19:33:12 | 196,755,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | #!/usr/bin/env python
import os, sys
from askew_utils import DF_Magic as dfm
import pandas as pd
# Titanic train/test sets fetched over HTTP via the DF_Magic helper.
train = dfm.get_df('http://bit.ly/kaggletrain')
test = dfm.get_df('http://bit.ly/kaggletest')
train_test_dataset = [train, test]
# Print one sample value from every non-numeric column of the training set.
for column in train.columns:
    if (train[column].dtype != 'int64') and (train[column].dtype != 'float64'):
        print(train[column].sample(n=1))
category_columns = ['sex','embarked']
# One-hot encode each categorical column of both datasets.
# NOTE(review): `dataset = pd.concat(...)` only rebinds the loop variable —
# the dummy columns never land on the `train`/`test` frames stored in
# train_test_dataset. Confirm whether that is intended.
for dataset in train_test_dataset:
    for category_column in category_columns:
        df= pd.get_dummies(dataset[category_column], prefix = category_column)
        dataset = pd.concat([dataset, df], axis = 1,)
        dataset.drop(columns = [category_column], axis = 1, inplace = True)
        print("#------------------------------------#")
        print("# ", category_column, )
        print("#------------------------------------#")
        print(dataset.info())
| [
"noreply@github.com"
] | JohnAskew.noreply@github.com |
0ac66c93fc56a51233a0653392ab650a75169c12 | 8ee8aaa89fe4861d1f9c8261620de735044ac72d | /submission/dag.py | b5576401ea7d9803aaac4d892c253427ada365f5 | [] | no_license | Jekyll1021/170_final_proj | 4e7385ef98daaaee8bf07bafebc16084d8ade78c | b6f029ac27271d65a2d88fc1081fcc184f99e379 | refs/heads/master | 2021-03-27T15:45:44.025914 | 2016-12-06T02:46:50 | 2016-12-06T02:46:50 | 75,233,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,182 | py | """
Exact DAG Algorithm:
1. Run on Graphs that are exactly DAGs
2. File contains a DAG class and a run() script
"""
import data_structure as ds
import copy
import time
import dfs
# Transform a graph into a dag of SCC
class DAG(ds.Graph):
    """Graph condensed into its strongly connected components (SCCs).

    After construction, `self.sccs` holds the SCCs in topological-ish order
    (reverse of discovery), `self.scc_neighbors[i]` lists the SCC indices
    reachable from SCC i, and `solve()` runs an interval DP over the SCC
    ordering.
    """
    def __init__(self, filename):
        # Build the underlying graph, then condense it into SCCs.
        ds.Graph.__init__(self, filename)
        self.sccs = []
        self.scc_neighbors = []
        self.sub_graphs = []
        self.find_sccs()
        self.scc_size = len(self.sccs)
    def find_sccs(self):
        """
        Find all SCCs (Kosaraju-style: DFS on the reversed graph for post
        numbers, then explore in decreasing post-number order).
        """
        self.reverse()
        dfs_helper = dfs.DFS(self)
        post_nums = dfs_helper.dfs()[1]
        # Graph is connected iff one explore() from the minimum-post vertex
        # reaches every vertex of the reversed graph.
        self.connected = len(dfs_helper.explore(post_nums.index(min(post_nums)))) == self.size
        self.reverse()
        # Explore in decreasing post number order
        to_explore = post_nums
        # indices of removed vertices
        removed = set()
        # removed vertices
        removed_vertices = set()
        scc_index = 0
        while (len(to_explore) > 0):
            # Each explore() from the highest remaining post number yields one SCC.
            scc_indices = dfs_helper.explore(post_nums.index(max(to_explore)))
            removed = removed.union(scc_indices)
            to_explore = [post_nums[i] for i in range(len(post_nums)) if i not in removed]
            scc_vertices = [vertex for vertex in self.vertices if vertex.index in scc_indices]
            scc_vertices = set(scc_vertices).difference(removed_vertices)
            new_scc = ds.SCC(self, list(scc_vertices), scc_index)
            scc_index += 1
            removed_vertices = removed_vertices.union(scc_vertices)
            self.sccs.append(new_scc)
        # Index sccs
        self.sccs.reverse()
        for i in range(len(self.sccs)):
            self.sccs[i].index = i
        # Find the neighbor sccs of all sccs
        for i in range(len(self.sccs)):
            neighbors = []
            for vertex in self.sccs[i].vertices:
                for vertex_neighbor in vertex.neighbors:
                    # Edges leaving the SCC identify neighboring SCCs.
                    if vertex_neighbor not in self.sccs[i].internals:
                        neighbors.append(self.which_scc(vertex_neighbor))
            self.scc_neighbors.append(neighbors)
    def which_scc(self, vertex_index):
        """Given a index of a vertex, return the index of the SCC it belongs to
        (-1 when the vertex is in no SCC)."""
        for scc in self.sccs:
            if vertex_index in scc.internals:
                return scc.index
        return -1
    def is_dag(self):
        """Return if the original graph is a dag or not (every SCC is a
        single vertex)."""
        return len(self.sccs) == len(self.vertices)
    def delete_path(self, path):
        """
        Use in longest_path.py

        Removes the vertices in `path` from the graph: zeroes their values,
        clears their adjacency, and drops references to them from their SCC
        and from every SCC's neighbor list.
        """
        if path == []:
            return
        for v in self.vertices:
            if v.index in path:
                self.vertices[v.index].value = 0
                self.vertices[v.index].neighbors = []
            else:
                for neighbor in v.neighbors:
                    if neighbor in path:
                        v.neighbors.remove(neighbor)
        # All path vertices are assumed to belong to the SCC of path[0] —
        # TODO(review) confirm this invariant holds for every caller.
        scc = self.sccs[self.which_scc(path[0])]
        for vertex in scc.in_vertices:
            if vertex in path:
                scc.in_vertices.remove(vertex)
        for vertex in scc.out_vertices:
            if vertex in path:
                scc.out_vertices.remove(vertex)
        for scc in self.sccs:
            for neighbor in scc.neighbors:
                if neighbor in path:
                    scc.neighbors.remove(neighbor)
    def get_score(self, assignment):
        """Score an assignment (list of SCC-index paths): each path adds the
        sum of its SCC values multiplied by its length."""
        score = 0
        for path in assignment:
            values = [self.sccs[i].value for i in path]
            score += sum(values) * len(values)
        return score
    def solve(self):
        """Interval DP over the SCC ordering.

        sub[i][m] holds [best score, best assignment] for SCCs i..m. Returns
        (best score, assignment) with each path translated to representative
        vertex indices.
        """
        sub = [[[0, []] for _ in range(self.scc_size)] for _ in range(self.scc_size)]
        # Base cases: a single SCC forms a one-element path.
        for i in range(self.scc_size):
            sub[i][i][0] = self.sccs[i].value
            sub[i][i][1].append([self.sccs[i].index])
        for m in range(1, self.scc_size):
            sub_range = list(range(m))
            sub_range.reverse()
            for i in sub_range:
                max_val = -1
                max_assignment = []
                # Try attaching SCC i in front of the solution starting at k.
                for k in range(i+1, m+1):
                    assignment = copy.deepcopy(sub[k][m][1])
                    if k in self.scc_neighbors[i]:
                        # i can extend the first path (edge i -> k exists).
                        assignment[0].insert(0,i)
                    else:
                        # Otherwise i starts its own path.
                        assignment.insert(0,[i])
                    if k > i + 1:
                        # SCCs strictly between i and k form their own subproblem.
                        value = self.get_score(assignment) + sub[i + 1][k - 1][0]
                        assignment = assignment + sub[i+1][k-1][1]
                    else:
                        value = self.get_score(assignment)
                    if (value > max_val):
                        max_val = value
                        max_assignment = copy.deepcopy(assignment)
                sub[i][m] = [max_val, max_assignment]
        max_assignment = sub[0][self.scc_size - 1][1]
        max_assignment_vertices = []
        for path in max_assignment:
            # Represent each SCC by its first vertex's index.
            max_assignment_vertices.append([self.sccs[scc_index].vertices[0].index for scc_index in path])
        return sub[0][self.scc_size - 1][0], max_assignment_vertices
# Test
# def run(p):
#
# result = open('new_dag.txt', "a")
# for i in range(p, p+1):
#
# try:
# g = ds.Graph("../inputs/dag_exact/" + str(i) + ".in")
# if (len(g.vertices) in range(0, 501)):
# start_time = time.time()
# print str(i)+".in"
# g = DAG("../inputs/dag_exact/"+str(i)+".in")
# print("--- %s seconds to process DAG ---" % (time.time() - start_time))
#
# if g.is_dag():
# print str(i)+" is a DAG"
# start_time = time.time()
# soln = g.solve()[1]
# result.write(str(i) + ". " + str(soln))
# result.write("\n")
# print(i, soln)
# print("--- %s seconds to solve DAG---" % (time.time() - start_time))
# result = open('new_dag.txt', "a")
# except (IOError):
# pass
# except (IndexError):
# pass | [
"yujialuo@berkeley.edu"
] | yujialuo@berkeley.edu |
4fbe20ecec8e14cfb3c78833c50936a9f7650146 | aebfac46798783a877db752573b243d4fde67c61 | /RestServer/apps.py | a1f43317779ff0d31bc3653a48fe6a2b09dc0f7e | [] | no_license | markovsergey/rest_server | 7ad31a8c3400d0b09286a830619935c69bcfbffd | a2d47fb1baf0581f5a7696ad8d0cf77a3ee576d6 | refs/heads/master | 2021-08-16T07:04:20.583523 | 2017-11-19T07:45:23 | 2017-11-19T07:45:23 | 111,275,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class RestserverConfig(AppConfig):
    """Django AppConfig registering the RestServer application."""
    name = 'RestServer'
| [
"bestbest@yandex.ru"
] | bestbest@yandex.ru |
faa6b2da86c679c89330398595fa654c00ef4d33 | a5a9d8d11320e1a9a96b7f27b1bba8583c989504 | /src/ethereum/frontier/__init__.py | 11b11bc41de51c5267e75080418384451fdf1d6d | [
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
] | permissive | KenMan79/execution-specs | 3c36fde2e3b96a61d4d623211d73abacbf2f7d52 | ac8e65a422032a8ebb4077068627bbfb2fce6eda | refs/heads/master | 2023-08-14T13:03:52.633736 | 2021-10-06T15:47:33 | 2021-10-06T15:47:33 | 414,630,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | """
Ethereum Frontier Hardfork
^^^^^^^^^^^^^^^^^^^^^^^^^^
The first Ethereum hardfork.
"""
# Mainnet block number at which the Frontier fork rules take effect.
MAINNET_FORK_BLOCK = 1
| [
"sam.wilson@mesh.xyz"
] | sam.wilson@mesh.xyz |
3b33c6da73e70bcb25b56b4fd175de4ac366f2a8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9S8qp4XKG2qwQMdrb_2.py | 07908c297beae33944959e2c40e6e492d0f35bf6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | """
Write a function that returns the number of ways a person can climb **n
stairs** , where the person may only climb **1** or **2** steps at a time.
To illustrate, if **n = 4** there are **5** ways to climb:
[1, 1, 1, 1]
[2, 1, 1]
[1, 2, 1]
[1, 1, 2]
[2, 2]
### Examples
ways_to_climb(1) ➞ 1
ways_to_climb(2) ➞ 2
ways_to_climb(5) ➞ 8
### Notes
A staircase of height `0` should return `1`.
"""
def ways_to_climb(n):
r=(1+5**.5)/2
return round((r**(n+1)-(1-r)**(n+1))/(5**.5))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9fe4cb94c81a6b0a10f86ec898adfb99833b6625 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_nicking.py | 8ade774452ec36eabf9b8b12da80103b68a5a982 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.verbs._nick import _NICK
#calss header
class _NICKING(_NICK, ):
def __init__(self,):
_NICK.__init__(self)
self.name = "NICKING"
self.specie = 'verbs'
self.basic = "nick"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
44e5115d831d8f11ee4ec8b575906d3138700fbf | 348aeccddd5fdb48fb91a63d170b7f0453f70e36 | /libcloud/utils/files.py | 201e94a4e3a873553fc3a035aa2b8953785c0c0e | [
"Apache-2.0"
] | permissive | lelou6666/libcloud | 4eb08e236cb9f4b787fa73ce963347f708faf092 | bff26fe27fdd53979e32e08038ecd2fc108b6083 | refs/heads/trunk | 2021-01-14T14:02:16.661579 | 2013-10-28T11:18:08 | 2013-10-28T11:18:08 | 55,902,523 | 0 | 0 | null | 2016-04-10T14:08:20 | 2016-04-10T14:08:20 | null | UTF-8 | Python | false | false | 3,437 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mimetypes
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b
CHUNK_SIZE = 8096
if PY3:
from io import FileIO as file
def read_in_chunks(iterator, chunk_size=None, fill_size=False):
    """
    Return a generator which yields data in chunks.
    :type iterator: :class:`object` which implements iterator interface.
    :param iterator: An object which implements an iterator interface
                     or a File like object with read method.
    :type chunk_size: ``int``
    :param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
    :type fill_size: ``bool``
    :param fill_size: If True, make sure chunks are chunk_size in length
                      (except for last chunk).
    TODO: At some point in the future we could use byte arrays here if version
    >= Python 3. This should speed things up a bit and reduce memory usage.
    """
    chunk_size = chunk_size or CHUNK_SIZE
    # File-like objects and HTTP responses are read(); everything else is
    # driven through next().
    if isinstance(iterator, (file, httplib.HTTPResponse)):
        get_data = iterator.read
        args = (chunk_size, )
    else:
        get_data = next
        args = (iterator, )
    data = b('')
    empty = False
    while not empty or len(data) > 0:
        if not empty:
            try:
                chunk = b(get_data(*args))
                if len(chunk) > 0:
                    data += chunk
                else:
                    empty = True
            except StopIteration:
                empty = True
        if len(data) == 0:
            # Bug fix (PEP 479): raising StopIteration inside a generator
            # becomes RuntimeError on Python 3.7+; a bare return terminates
            # the generator identically on all versions.
            return
        if fill_size:
            if empty or len(data) >= chunk_size:
                yield data[:chunk_size]
                data = data[chunk_size:]
        else:
            yield data
            data = b('')
def exhaust_iterator(iterator):
    """
    Exhaust an iterator and return all data returned by it.

    Iteration stops either when the iterator is exhausted or when it yields
    an empty chunk.
    :type iterator: :class:`object` which implements iterator interface.
    :param iterator: An object which implements an iterator interface
                     or a File like object with read method.
    :rtype ``str``
    :return Data returned by the iterator.
    """
    pieces = []
    while True:
        try:
            piece = b(next(iterator))
        except StopIteration:
            break
        if len(piece) == 0:
            # An empty chunk terminates accumulation, matching read() semantics.
            break
        pieces.append(piece)
    return b('').join(pieces)
def guess_file_mime_type(file_path):
    """Guess ``(mime_type, encoding)`` for ``file_path`` from its file name."""
    name = os.path.basename(file_path)
    mime_type, encoding = mimetypes.guess_type(name)
    return mime_type, encoding
| [
"tomaz@apache.org"
] | tomaz@apache.org |
b068c9ec4338a441747459ee6ea0a3d51fa55ab0 | ee7c034def8d44564184034fb0450bb7be5adbcd | /EMSCalc/wsgi.py | 1760c228a81301fffa276c563917dd218708ce20 | [] | no_license | IOnvin/EMS_Calc | feb7604746f66a5b74ba44de2475abc7cd5785aa | 3650b71db6db16c4d2719459596f4e26e84b464a | refs/heads/master | 2023-04-04T10:08:34.629205 | 2021-04-01T01:14:24 | 2021-04-01T01:14:24 | 299,623,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for EMSCalc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the WSGI app is constructed.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EMSCalc.settings')
# Module-level WSGI callable that application servers load.
application = get_wsgi_application()
| [
"vbolisetti@opentext.com"
] | vbolisetti@opentext.com |
613925b034412d3c86b6051a5087e78c483496be | 4acfafc2823826be09375e6db98d4b0cf9a7839b | /main.py | df48c30f7a6b6a22210d923bf8d6dd17789acd67 | [
"MIT"
] | permissive | edwinmillan/TrelloAttachmentCleanup | 0508da65ebcb6c7297f45a62d75aabdf5de827b3 | 6c6246bcd485cc87c9a999c1954b733356f94038 | refs/heads/main | 2023-03-27T06:03:07.693747 | 2021-03-23T03:35:25 | 2021-03-23T03:35:25 | 350,567,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,485 | py | import requests
import re
import json
import configparser
from trello import TrelloApi, Cards
from typing import List, Optional, Iterable, NoReturn
class TrelloCards(Cards):
    """Cards API client extended with support for updating attachments."""
    def __init__(self, apikey, token=None):
        super(TrelloCards, self).__init__(apikey, token)
    def update_attachment(self, card_id_or_shortlink: str, attachment_id: str, data: dict):
        """PUT new field values for an attachment on a card and return the
        parsed JSON response."""
        url = (f"https://trello.com/1/cards/{card_id_or_shortlink}"
               f"/attachments/{attachment_id}")
        credentials = {"key": self._apikey, "token": self._token}
        resp = requests.put(url, params=credentials, data=data)
        return self.raise_or_json(resp)
class Trello(TrelloApi):
    """TrelloApi client whose `cards` attribute is the extended TrelloCards."""
    def __init__(self, apikey, token=None):
        super(Trello, self).__init__(apikey, token)
        # Replace the stock cards client with the attachment-aware subclass.
        self.cards = TrelloCards(apikey, token)
def get_target_board(trello: Trello, board_name: str) -> Optional[dict]:
    """Return the board dict named `board_name` among the caller's boards.

    Returns None when no board matches. (Previously an empty tuple was
    returned, contradicting the Optional[dict] annotation; None is equally
    falsy for the existing `if board_info:` call sites.)
    """
    my_boards = trello.members.get_board('me')
    for board in my_boards:
        if board.get('name') == board_name:
            return board
    return None
def filter_target_list(board_lists: List[dict], board_name: str) -> Optional[dict]:
    """Return the first list dict whose 'name' equals board_name, or None."""
    matches = (candidate for candidate in board_lists if candidate.get('name') == board_name)
    return next(matches, None)
def get_list_info(trello: Trello, api_board_info: dict, target_list_name: str) -> Optional[dict]:
    """Fetch all lists on the given board and return the one named target_list_name (or None)."""
    lists_on_board = trello.boards.get_list(api_board_info.get('id'))
    return filter_target_list(lists_on_board, target_list_name)
def remove_file_extension(file_name: str) -> str:
    """Strip a trailing '.ext' (extension must contain no whitespace) from file_name.

    Returns the name unchanged when no extension-like suffix is present.
    """
    stripped = re.search(r'(.+)\.\S+', file_name)
    return stripped[1] if stripped else file_name
def update_board_attachments(trello: Trello, board_name: str, target_list_names: Iterable) -> NoReturn:
board_info = get_target_board(trello, board_name=board_name)
if board_info:
print(f"Working on Board: {board_name}")
# Go through each list names and update each card's attachments
for list_name in target_list_names:
# Get the dict holding the list ID using the board.
list_info = get_list_info(trello=trello, api_board_info=board_info, target_list_name=list_name)
if list_info:
print(f"Working on List: {list_info.get('name')}")
list_id = list_info.get('id')
# Get the list of cards
list_cards = trello.lists.get_card(list_id)
# Iterates over each card and gets the attachments.
for card in list_cards:
print(f"\tLooking through card: {card.get('name')}")
card_id = card.get('id')
attachments = trello.cards.get_attachment(card_id)
for attachment in attachments:
attachment_id = attachment.get('id')
raw_name = attachment.get('name')
# If the name has an ext, return a version without the ext.
parsed_name = remove_file_extension(raw_name)
# If it's not already fixed, go update it via the API.
if raw_name and parsed_name != raw_name:
print(f"\t\tUpdating attachment: {raw_name} -> {parsed_name}")
payload = {'name': parsed_name}
trello.cards.update_attachment(card_id_or_shortlink=card_id,
attachment_id=attachment_id, data=payload)
else:
print('No Board info found')
def load_credentials(credential_json: str) -> (str, str):
    """Read the Trello API key and token out of a JSON credentials file."""
    with open(credential_json, 'r') as handle:
        parsed = json.load(handle)
    return parsed.get('key'), parsed.get('token')
def load_config_settings(config_filename: str) -> (str, Iterable):
    """Load the target board name and an iterable of list names from an INI file.

    Expects a [settings] section with 'board_name' and a comma-separated
    'list_names' entry; surrounding whitespace on each name is stripped.
    """
    parser = configparser.ConfigParser()
    parser.read(config_filename)
    section = parser['settings']
    names = (name.strip() for name in section['list_names'].split(','))
    return section['board_name'], names
def main() -> NoReturn:
key, token = load_credentials('token.json')
target_board_name, target_list_names = load_config_settings(config_filename='config.ini')
trello = Trello(apikey=key, token=token)
update_board_attachments(trello, target_board_name, target_list_names)
if __name__ == '__main__':
main()
| [
"bassgs3000@gmail.com"
] | bassgs3000@gmail.com |
e7c303e74cb2ca4aa5d425d66ae106adb6d62157 | d6fac648ddfd6c42a566affa9ad0d153e6856793 | /test_transaccion.py | 3185e092535234d03b37e59b878c245e0324d309 | [] | no_license | LucianoBartomioli/-EDU-Parcial-Integrador | 199f6516b598f6773d183eb5cb740b17315833d6 | f9f1dca03b34474b41eb09d9f09a8ac0bc728799 | refs/heads/main | 2023-01-06T04:46:30.148733 | 2020-11-06T01:26:45 | 2020-11-06T01:26:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from transaccion import Transaccion
import json
def creacion_de_archivo():
    """Persist three sample transactions to the archive file."""
    muestras = [
        Transaccion(45990339, "CONSUMO", 2000, "RECHAZADO", "MUSIMUNDO"),
        Transaccion(45990339, "CONSUMO", 2000, "APROBADO", "MUSIMUNDO"),
        Transaccion(30949303, "CASH_IN", 50000, "APROBADO", "PAGOFACIL"),
    ]
    for transaccion in muestras:
        # The project API expects the instance to be passed back in explicitly.
        transaccion.cargar_archivo(transaccion)
def test_monto_movimiento():
    # 200000 exceeds the 100000 cap, exercising the amount-validation path.
    transaccion_a = Transaccion(dni_cliente=45990339, tipo_movimiento="CONSUMO", monto_movimiento=200000, estado="APROBADO", nombre_comercio="DISCO")
    transaccion_a.monto_menor_a_100000()
def test_json_movimiento():
    # Round-trip a transaction through its JSON representation and print the parts.
    transaccion_a = Transaccion(30949303, "CASH_IN", 500, "APROBADO", "PAGOFACIL")
    movimiento_to_dict = json.loads(transaccion_a.toJSON())
    keys = movimiento_to_dict.keys()
    items = movimiento_to_dict.values()
    print()
    print(f"Las keys son: {keys}")
    print()
    print(f"Los items son: {items}")
    print()
    # NOTE(review): return_key_tipo_movimiento presumably reads the movement-type
    # key back off the object -- confirm against the Transaccion class.
    key_tipo_movimiento = transaccion_a.return_key_tipo_movimiento()
    print(f"El tipo de movimiento es {key_tipo_movimiento}")
# Manual test driver: runs all three checks on import/execution.
creacion_de_archivo()
test_monto_movimiento()
test_json_movimiento()
| [
"69654179+LucianoBartomioli@users.noreply.github.com"
] | 69654179+LucianoBartomioli@users.noreply.github.com |
cead28e09d8898e94fd635d1ede4ab5cabf171fe | 16b77438b7a7923a391a12f1f4bc12b49429bb73 | /src/PIPE/PIPE.py | afa369355271987d911ce5454c61b803916fa8aa | [] | no_license | OpenJ92/zebra | eb582c36fd7110ccf5866eb34418ff9e725efd5d | 2d3d3d42bb0461901f2418069a55e47cf8450c50 | refs/heads/master | 2020-11-29T14:29:37.279589 | 2020-01-18T19:54:07 | 2020-01-18T19:54:07 | 230,138,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | from src.NODE.NODE import NODE
class PIPE(object):
    """Builds a chain of NODE transforms from a one-entry config dict and runs it.

    The constructor immediately executes the pipeline on a hard-coded sample
    input {'Data1': 1, 'Data2': 1}.
    """
    def __init__(self, form):
        # Assumes `form` has exactly one key: its key becomes the pipe name and
        # its value the list of node specs -- TODO(review) confirm with callers.
        self._name, self._kwargs = *form.keys(), *form.values()
        self.__gen_nodes__();
        self._transformed = self.__execute__({'Data1':1, 'Data2':1})
    def __gen_nodes__(self):
        """Instantiate the NODEs and key them by '<pipe-name>_<node-name>'."""
        self._nodes = [NODE(kw) for kw in self._kwargs]
        self._nodes = {f"{self._name}_{node._name}": node \
                for node in self._nodes}
    def __execute__(self, Xs):
        """Walk nodes from HEAD to TAIL, applying each node's map to the inputs it names.

        next(node) presumably yields the following node's name (NODE appears to
        be an iterator) -- TODO(review) confirm against NODE.
        """
        node = self._nodes[f"{self._name}_HEAD"]
        while True:
            # Debug trace left in by the author.
            print(Xs, node._name)
            Xs = { \
                    name: \
                    (node._map._apply_(data) if name in node._on else data)\
                    for name, data in Xs.items() \
                 }
            if "TAIL" in node._name:
                return Xs
            node = self._nodes[f"{self._name}_{next(node)}"]
        # NOTE(review): unreachable -- the while True above only exits via return.
        return Xs
| [
"jacob.vartuli.92@gmail.com"
] | jacob.vartuli.92@gmail.com |
19b365204ddcf74e34ab42a5f2b0d756622c9ad5 | ca55dcaa64ea9db4068e13091321cfebecc0ff41 | /codeUp/codeUpBasic/1990.py | bca5b69987f830843fdbdeecd27fbb8549319697 | [] | no_license | gomtinQQ/algorithm-python | 8fb8343594b945099ae2a4dfa794ecb47e54ab0b | 751562922b66e335f621d366bb73dacdc7125140 | refs/heads/master | 2022-12-07T23:05:44.535593 | 2020-08-21T12:29:58 | 2020-08-21T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | '''
1990 : 3의 배수 판별하기
자연수 n이 입력되면 3의 배수인지 아닌지 판별하시오.
3의 배수이면 1을 출력하고, 아니면 0을 출력한다.
'''
n = int(input())
if(n%3==0):
print(1)
else:
print(0) | [
"minhyeonlee1@gmail.com"
] | minhyeonlee1@gmail.com |
a8d857fd952c7045995da00ae55c5829465182b6 | 042de81f5de5c3c6fd936183e29dee1c279b0234 | /flask_assignment4/google_test.py | f146fddabb9396279ea1aa19d532cc109042b02e | [] | no_license | wcarruthers/flask_hw4 | 00f3a1f4d210484eb0d3823b48e1206479abb940 | 0395a83ac30a9dc8ac1423f26aea9ad7c6ea4894 | refs/heads/main | 2023-01-09T19:42:52.752618 | 2020-11-14T17:26:57 | 2020-11-14T17:26:57 | 312,848,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | from PIL import Image
import numpy as np
from google_pred import predict_uva_landmark
import os
# Point the Google client libraries at the local service-account key file.
credential_path = 'lucid-honor-295522-305e6cacd6aa.json'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credential_path
# Load a sample image and resize it to the model's expected 224x224 input.
img = Image.open('0000002780.jpg')
img = img.resize((224,224))
img_array = np.array(img)
print(img_array.shape)
# Prepend a batch axis so a single image can be fed to the predictor.
img_array = np.expand_dims(img_array,0)
print(img_array.shape)
print('Predicted Landmark: ' + str(predict_uva_landmark(img_array)))
| [
"noreply@github.com"
] | wcarruthers.noreply@github.com |
cb213fab46a81136900649c6e2d647f07a2d0d1b | 635fdb159643744e28e750f6b664190939ce0b49 | /Ex1.py | 6948edbd2cc76dc0fa36ed8207c8c3755c18f1e8 | [] | no_license | EdgarUrias/Aprendiendo-Python | 6e0fac8ab584ac91d0d31e2047702b6f8631b14c | beb99e693724102d0902d33b9470d9bf484357ac | refs/heads/master | 2020-04-25T23:01:10.544266 | 2019-02-28T14:54:44 | 2019-02-28T14:54:44 | 173,130,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | print("Hola Mundo!")
print("Hello Again")
print("I like typing this.!")
print("This is fun")
print('Yay! Printing')
print("I'd much rather You 'not'.")
print('I "said" do not touch this.')
| [
"aurias@superdelnorte.com.mx"
] | aurias@superdelnorte.com.mx |
446e54a2b55b65fc7e01ae925b12b57e624e134e | 590aac0a99d3430a1cc549301ce4dfcc7ce3b6ff | /HW4/trials/search.py | 0b9e955157c9a77d1f720f6dd96e2e9dfd7189d9 | [] | no_license | ianngiaw/CS3245-Homework | c06708a7ee3cecd578b0ea11fa136077479621ba | 0e82d4465582db97478c50998bf922abe38d07bd | refs/heads/master | 2021-01-10T10:12:40.748330 | 2016-04-15T06:45:14 | 2016-04-15T06:45:14 | 50,405,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,901 | py | #!/usr/bin/python
import re
import nltk
import sys
import getopt
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
stemmer = nltk.stem.porter.PorterStemmer()
from math import log10, sqrt
import xml.etree.ElementTree as ET
# Pseudo-relevance-feedback / Rocchio parameters. All stay None until the
# command-line parser in the __main__ block fills them in (-t/-v thresholds
# and -i/-r/-n weights).
pseudo_relevance_threshold = None
pseudo_non_relevance_threshold = None
original_weight = relevant_weight = non_relevant_weight = None
# ============================
# Initialisation functions
# ============================
def build_dict(input_dict_file):
    """
    Builds the in-memory lookup tables from the dictionary file. Kept in memory.

    The file has three sections introduced by the marker lines '## Term ##',
    '## Doc ID ##' and '## Fields ##'.

    Returns a 3-tuple of dicts:
      term_dict:       term   -> (byte_offset, idf)
      docs_dict:       doc_id -> (byte_offset, total_term_count)
      doc_fields_dict: doc_id -> byte_offset
    (The old docstring claimed a document count plus one dictionary, which is
    not what callers unpack.)
    """
    mode = -1 # 0: term_dict, 1: doc_dict, 2: fields_dict
    term_dict = {} # key: term, value: (byte_offset, idf)
    docs_dict = {} # key: doc_id, value: (byte_offset, total_term_count)
    doc_fields_dict = {} # key: doc_id, value: byte_offset
    # open() + with replaces the py2-only file() builtin and guarantees the
    # handle is closed even if parsing raises.
    with open(input_dict_file, 'r') as dict_file:
        for line in dict_file:
            line = line.strip()
            if not line:
                # Skip blank lines instead of crashing on split_line[1] below.
                continue
            if line == "## Term ##":
                mode = 0
            elif line == "## Doc ID ##":
                mode = 1
            elif line == "## Fields ##":
                mode = 2
            elif mode == 0:
                split_line = line.split(" ")
                token = split_line[0]
                byte_offset = int(split_line[1])
                idf = float(split_line[2])
                term_dict[token] = (byte_offset, idf)
            elif mode == 1:
                split_line = line.split(" ")
                doc_id = split_line[0]
                byte_offset = int(split_line[1])
                term_count = int(split_line[2])
                docs_dict[doc_id] = (byte_offset, term_count)
            elif mode == 2:
                split_line = line.split(" ")
                doc_id = split_line[0]
                byte_offset = int(split_line[1])
                doc_fields_dict[doc_id] = byte_offset
    return (term_dict, docs_dict, doc_fields_dict)
def execute_query(input_post_file, input_query_file, output_file, term_dict, docs_dict, doc_fields_dict):
    """
    Tests the queries in the input_query_file based on the dictionary and postings.
    Writes results into output_file.

    Combines a language-model ranking and a VSM ranking, takes the top/bottom
    slices of each (module-level pseudo_*_threshold globals) as pseudo-relevant/
    non-relevant sets, expands the relevant set via patent citation fields, and
    finally ranks documents against a Rocchio-combined query vector.

    NOTE(review): file() and len() over a map() result are Python-2-only; this
    module is py2 code throughout. The `postings` handle is never closed.
    """
    # Initialisation
    tree = ET.parse(input_query_file)
    root = tree.getroot()
    postings = file(input_post_file, 'r')
    output = file(output_file, 'w')
    # Treat query as a string (concatenate title and description)
    # Also removes "Relevant documents will describe" from start of description
    query = root[0].text.strip() + " " + root[1].text.strip()[33:].strip()
    # TODO: Perform LM query
    # Still experimental
    # print docs_dict
    # print term_dict
    LM_results = [x[0] for x in language_model_query(query.strip(), docs_dict, term_dict, postings)]
    lm_rel_threshold = int(len(LM_results) * pseudo_relevance_threshold)
    lm_non_threshold = int(len(LM_results) * pseudo_non_relevance_threshold)
    lm_rel = LM_results[:lm_rel_threshold]
    lm_non = LM_results[len(LM_results) - lm_non_threshold:]
    # print LM_results
    # Perform VSM query
    vsm_results = map(lambda x: x[0], vsm_query(query.strip(), term_dict, postings))
    vsm_rel_threshold = int(len(vsm_results) * pseudo_relevance_threshold)
    vsm_non_threshold = int(len(vsm_results) * pseudo_non_relevance_threshold)
    vsm_rel = vsm_results[:vsm_rel_threshold]
    vsm_non = vsm_results[len(vsm_results) - vsm_non_threshold:]
    # Make use of Patent's Family and Cites fields to find relevant documents
    relevant_documents = list(set(lm_rel) | set(vsm_rel))
    relevant_documents = find_more_relevant_documents(relevant_documents, doc_fields_dict, postings)
    # Find irrelevant docs (documents that are not returned by the query)
    non_relevant_documents = list(set(lm_non) | set(vsm_non))
    # Generate relevant docs vector
    relevant_vector = generate_average_document_vector(relevant_documents, term_dict, docs_dict, postings)
    # Generate non-relevant docs vector
    non_relevant_vector = generate_average_document_vector(non_relevant_documents, term_dict, docs_dict, postings)
    # Generate query vector
    query_vector = generate_query_vector(query, term_dict)
    # Generate document vectors
    doc_tf_dict = get_document_normalized_term_freq(query_vector.keys(), term_dict, postings)
    # Generate rocciho vector
    rocchio_vector = combine_vectors(query_vector, relevant_vector, non_relevant_vector)
    results = score_documents(rocchio_vector, doc_tf_dict)
    #results = filter(lambda x: x[1] > 0, results)
    # Output is a single line of doc_ids in ranked order (reduce is a py2 builtin).
    output_line = reduce(lambda x, y: x + str(y[0]) + " ", results, "").strip()
    output.write(output_line)
    output.close()
# ============================
# Language Model querying
# ============================
# Convenience method
# python -i search.py -d dictionary.txt -p postings.txt -q q1.xml -o output1.txt
# python search.py -d dictionary.txt -p postings.txt -q q1.xml -o output1.txt
def language_model_query(query, docs_dict, term_dict, postings):
    """Rank documents for `query` with an interpolated unigram language model.

    Per (token, doc) pair the per-term probability is
        lambda * tf(t, d) / |d|  +  (1 - lambda) * cf(t) / |collection|
    and a document's score is the product over query tokens that appear in it.
    Returns a list of (doc_id, score) pairs sorted by descending score.

    NOTE(review): under py2 the operator precedence (weight * tf) / length keeps
    every division in floats, so integer truncation does not occur here.
    """
    scored_docs = {}
    probability = 1 # P(t|d)
    lambda_weight = 0.5 # hard-coded, probably need to find the optimal weight based on how long the query is
    collection_length = get_collection_length(docs_dict)
    for token in word_tokenize(query):
        token = stemmer.stem(token).lower()
        # To remove stop words and punctuation from the query (since neither of which are indexed)
        if token not in term_dict:
            continue
        # For each token, find the list of documents in which the term appears and the token's number of occurrences
        doc_term_frequency = build_doc_term_frequency(token, term_dict, postings) # a dictionary
        docs = [k for k,v in doc_term_frequency.items()]
        collection_term_frequency = sum([v for k,v in doc_term_frequency.items()])
        for doc in docs:
            # Formula: weight * n(token appears in doc)/doc_length + (1-weight) * n(token appears in collection)/collection_length
            term_frequency = doc_term_frequency[doc]
            document_length = get_document_length(docs_dict, doc)
            probability = lambda_weight * term_frequency/document_length + (1-lambda_weight) * collection_term_frequency/collection_length
            if doc in scored_docs:
                scored_docs[doc] *= probability
            else:
                scored_docs[doc] = probability
    # print scored_docs
    output = [(k, v) for k, v in scored_docs.items()]
    output.sort(key=lambda x: x[1], reverse=True)
    return output
# return sorted([(k,v) for k, v in scored_docs.items()], lambda x: x[1], reverse=True)
def build_doc_term_frequency(token, term_dict, postings):
    """Map doc_id -> raw term frequency of `token`, read off its postings list."""
    reader = get_term_postings_reader(token, term_dict, postings)
    frequencies = {}
    while True:
        # Each entry is (doc_id, raw_tf, normalized_log_weighted_tf) until "END".
        entry = reader.next()
        if entry == "END":
            break
        frequencies[entry[0]] = entry[1]
    return frequencies
def get_document_length(docs_dict, doc):
    """Return the stored token count for `doc`.

    docs_dict maps doc_id -> (byte_offset, document_length),
    e.g. {'US5132543': (2451368, 139), ...}.
    """
    byte_offset, document_length = docs_dict[doc]
    return document_length
def get_collection_length(docs_dict):
    """Total number of indexed words in the collection (sum of per-document lengths).

    For this index, a document's length counts only its Title and Abstract terms.
    """
    return sum(entry[1] for entry in docs_dict.values())
# ================================
# Vector Space Model querying
# ================================
def vsm_query(query, dictionary, postings_file):
    """Rank documents for a free-text query with the vector space model.

    Returns (doc_id, score) pairs in descending order of relevance, as
    produced by score_documents.
    """
    query_vector = generate_query_vector(query, dictionary)
    doc_vectors = get_document_normalized_term_freq(query_vector.keys(), dictionary, postings_file)
    return score_documents(query_vector, doc_vectors)
# ================================
# Query Expansion
# ================================
def find_more_relevant_documents(relevant_documents, doc_fields_dict, postings_file):
    """Expand a relevant-document set via each patent's stored citation fields.

    For every given document that has a fields postings list, every document
    it references is added to the set. Returns the expanded set as a list.
    """
    expanded = set(relevant_documents)
    for doc in relevant_documents:
        if doc not in doc_fields_dict:
            continue
        reader = get_doc_fields_postings_reader(doc, doc_fields_dict, postings_file)
        entry = reader.next()
        while entry != "END":
            expanded.add(entry[0])
            entry = reader.next()
    return list(expanded)
def generate_query_vector(query, dictionary):
    """
    Returns a dictionary keyed by the (stemmed, lower-cased) tokens of the
    query, with length-normalized tf-idf values.

    Tokens absent from the index are dropped. If nothing remains -- or every
    remaining weight is zero -- an empty vector is returned; previously this
    case divided by a zero normalizer and raised ZeroDivisionError.
    """
    # Raw term frequencies of the query tokens
    tokens = {}
    for token in word_tokenize(query):
        token = stemmer.stem(token).lower()
        if token not in tokens:
            tokens[token] = 0
        tokens[token] += 1
    # tf-idf: (1 + log10 tf) * idf, only for tokens known to the index
    token_tfidf = {}
    for token in tokens:
        term_freq = tokens[token]
        log_weighted_tf = 1 + log10(term_freq)
        if token in dictionary:
            idf = dictionary[token][1]
            token_tfidf[token] = log_weighted_tf * idf
    # Cosine (length) normalization
    normalizer = sqrt(reduce(lambda x, y: x + y**2, token_tfidf.values(), 0))
    if normalizer == 0:
        # No indexed query token carries weight: return an empty vector.
        return {}
    token_normalized = {}
    for token in token_tfidf:
        token_normalized[token] = token_tfidf[token] / normalizer
    return token_normalized
def generate_average_document_vector(doc_ids, term_dict, docs_dict, postings_file):
    """Component-wise average of the normalized tf-idf vectors of the given documents.

    Each document vector is summed into an accumulator, then every component
    is divided by the number of documents.
    """
    summed = {}
    for doc_id in doc_ids:
        doc_vector = generate_document_vector(doc_id, term_dict, docs_dict, postings_file)
        for token, weight in doc_vector.items():
            summed[token] = summed.get(token, 0) + weight
    count = len(doc_ids)
    return {token: weight / count for token, weight in summed.items()}
def combine_vectors(query_vector, relevant_vector, non_relevant_vector):
    """Rocchio combination: weighted sum of query, relevant and non-relevant vectors.

    The weights come from the module-level original_weight / relevant_weight /
    non_relevant_weight settings populated by the command-line parser.
    Returns the expanded query vector.
    """
    weighted_pairs = [
        (query_vector, original_weight),
        (relevant_vector, relevant_weight),
        (non_relevant_vector, non_relevant_weight),
    ]
    combined = {}
    for vector, weight in weighted_pairs:
        for token, value in vector.items():
            combined[token] = combined.get(token, 0) + weight * value
    return combined
def generate_document_vector(doc_id, term_dict, docs_dict, postings_file):
    """
    Generates a length-normalized, idf-scaled log-weighted tf vector for a
    single document, read from its postings list.

    Returns {} for a document whose postings list is empty; previously that
    case divided by a zero normalizer and raised ZeroDivisionError.
    """
    log_tf_dict = {}
    reader = get_docs_postings_reader(doc_id, docs_dict, postings_file)
    while True:
        next_token = reader.next()
        if next_token == "END":
            break
        (token, tf) = next_token
        log_tf_dict[token] = (1 + log10(tf)) * term_dict[token][1]
    normalizer = sqrt(reduce(lambda x, y: x + y**2, log_tf_dict.values(), 0))
    if normalizer == 0:
        return {}
    normalized_vector = {}
    for token in log_tf_dict:
        normalized_vector[token] = float(log_tf_dict[token]) / float(normalizer)
    return normalized_vector
def get_document_normalized_term_freq(tokens, dictionary, postings_file):
    """Collect per-document normalized term weights for every indexed token.

    The weights were already log-weighted and length-normalized at indexing
    time, so this simply walks each token's postings list via PostingReader.

    Returns a dict of dicts: {doc_id: {token: normalized_log_weighted_tf}},
    covering every document that contains at least one of the tokens.
    """
    per_document = {}
    for token in tokens:
        if token not in dictionary:
            continue
        reader = get_term_postings_reader(token, dictionary, postings_file)
        while True:
            posting = reader.next()
            if posting == "END":
                break
            doc_id = posting[0]
            per_document.setdefault(doc_id, {})[token] = posting[2]
    return per_document
def score_documents(query_freqs, doc_freqs):
    """
    Scores each document in doc_freqs against query_freqs by cosine
    similarity (dot product of the already-normalized weight vectors).

    Returns a list of (doc_id, score) 2-tuples sorted by descending score,
    ties broken by ascending doc_id. (The old docstring claimed a list of
    bare doc_ids, which is not what callers receive.)
    """
    scored_docs = []
    for doc_id in doc_freqs:
        score = 0
        for token in doc_freqs[doc_id]:
            score += doc_freqs[doc_id][token] * query_freqs[token]
        scored_docs.append((doc_id, score))
    # Sort by descending score, then ascending doc_id for deterministic ties.
    scored_docs.sort(key=lambda x: (-x[1], x[0]))
    return scored_docs
# =========================================
# Helper functions for PostingsReader
# =========================================
def get_term_postings_reader(token, term_dict, postings_file):
    """Reader over a term's postings: entries of (doc_id, raw_tf, normalized_tf).

    The normalized tf is stored as the digits after the decimal point, so the
    converter prefixes "0." before parsing it as a float.
    """
    normalized_tf_parser = lambda x: float("0." + x)
    return PostingReader(postings_file, term_dict[token][0], 3, [str, int, normalized_tf_parser])
def get_docs_postings_reader(doc_id, docs_dict, postings_file):
    """Reader over a document's postings: entries of (token, raw_tf)."""
    return PostingReader(postings_file, docs_dict[doc_id][0], 2, [str, int])
def get_doc_fields_postings_reader(doc_id, doc_fields_dict, postings_file):
    """Reader over a document's citation fields: entries of (doc_id,) as strings."""
    return PostingReader(postings_file, doc_fields_dict[doc_id], 1)
# ===================
# PostingReader
# ===================
class PostingReader:
    """
    Sequentially decodes one postings list from the postings file, starting at
    a byte offset supplied by a dictionary. Entries are space-separated groups
    of num_vals fields; the list ends at a newline.
    """
    def __init__(self, postings_file, byte_offset, num_vals, type_converters = None):
        assert type(num_vals) is int and num_vals > 0
        self.postings_file = postings_file
        self.byte_offset = byte_offset
        self.num_vals = num_vals
        # Fall back to identity converters when none (or the wrong number) are given.
        if type_converters and len(type_converters) == num_vals:
            self.type_converters = type_converters
        else:
            self.type_converters = [lambda x: x for _ in range(num_vals)]
        self.current = 0  # offset added to byte_offset when seeking
        self.end = False  # True once the newline terminating the list was read

    def next(self):
        """
        Decode and return the next entry as a num_vals-tuple (each field passed
        through its converter), or the string "END" once the list is exhausted.
        """
        if self.end:
            return "END"
        cursor = self.current
        raw_fields = []
        for _ in range(self.num_vals):
            chars = []
            while True:
                self.postings_file.seek(self.byte_offset + cursor)
                ch = self.postings_file.read(1)
                cursor += 1
                if ch == "\n":
                    self.end = True
                    break
                if ch == " ":
                    break
                chars.append(ch)
            raw_fields.append("".join(chars))
        self.current = cursor
        return tuple(conv(raw) for conv, raw in zip(self.type_converters, raw_fields))
def usage():
    """Print the command-line usage string for this script."""
    # print was a py2 statement here; the parenthesized single-argument form
    # is valid on both Python 2 and 3.
    print("usage: " + sys.argv[0] + " -d dictionary-file -p postings-file -q file-of-queries -o output-file-of-results")
if __name__ == "__main__":
input_dict_file = input_post_file = input_query_file = output_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:i:r:n:t:v:')
except getopt.GetoptError, err:
usage()
sys.exit(2)
for o, a in opts:
if o == '-d':
input_dict_file = a
elif o == '-p':
input_post_file = a
elif o == '-q':
input_query_file = a
elif o == '-o':
output_file = a
elif o == '-i':
original_weight = float(a)
elif o == '-r':
relevant_weight = float(a)
elif o == '-n':
non_relevant_weight = float(a)
elif o == '-t':
pseudo_relevance_threshold = float(a)
elif o == '-v':
pseudo_non_relevance_threshold = float(a)
else:
assert False, "unhandled option"
if input_dict_file == None or input_post_file == None or input_query_file == None or output_file == None:
usage()
sys.exit(2)
(term_dict, docs_dict, doc_fields_dict) = build_dict(input_dict_file)
execute_query(input_post_file, input_query_file, output_file, term_dict, docs_dict, doc_fields_dict) | [
"ianngiawtingan@gmail.com"
] | ianngiawtingan@gmail.com |
8f24a9a6ea5019f3fd050ea11dd90611d80dceed | ddd00d6de378e674635d1eb1c72441948524b6ea | /P133_20-04__name__.py | 63f2861dcd63326fb25eb0862db48cf1797d21c5 | [
"MIT"
] | permissive | Aloof-0/Code-area | b00464a5759d15f9645cb85b080cf5bb8d17bb7b | 9c3a189213981477cea29d8119d8dea25e580b21 | refs/heads/main | 2023-01-29T10:55:36.740741 | 2020-12-05T12:40:51 | 2020-12-05T12:40:51 | 318,759,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,841 | py | # -*- coding: utf-8 -*-
# @Time : 2019/11/21 7:29
# @Author : 高冷
# @FileName : P133_20-04__name__.py
"""
python中的模块(.py文件)在创建之初会自动加载一些内建变量,__name__就是其中之一。Python模块中通常会定义很多变量和函数,这些变量和函数相当于模块中的一个功能,模块被导入到别的文件中,可以调用这些变量和函数。那么这时 __name__ 的作用就彰显了,它可以标识模块的名字,可以显示一个模块的某功能是被自己执行还是被别的文件调用执行,假设模块A、B,模块A自己定义了功能C,模块B调用模块A,现在功能C被执行了:
如果C被A自己执行,也就是说模块执行了自己定义的功能,那么 __name__=='__main__'
如果C被B调用执行,也就是说当前模块调用执行了别的模块的功能,那么__name__=='A'(被调用模块的名字)
其实换一种说法也就是表示当前程序运行在哪一个模块中
如果C被A自己执行,也就是说模块执行了自己定义的功能,那么 __name__=='__main__' 本文件自己执行 __name__=='__main__'
如果C被B调用执行,也就是说当前模块调用执行了别的模块的功能,那么__name__=='A'(被调用模块的名字) 被其他文件掉用 __name__=='A'(被调用模块的名字)
作用为 不会重复调用
"""
def jojo():
    # NOTE(review): "hellow" is how the original prints it; left untouched.
    print("hellow world")
if __name__ == '__main__': # __name__ is '__main__' only when this file is run directly
    jojo() # runs on direct execution; when imported, __name__ equals the module name and this call is skipped
| [
"noreply@github.com"
] | Aloof-0.noreply@github.com |
cd86d6a2444404b88ab73eadb4dd12a2babf07c7 | 291dde70bdfc65a1023598957d33c0b15f3fde42 | /12_dictionary.py | 74a9f670f69a2855c03a096539d569cbc2b2b7e2 | [] | no_license | romalagu92/Python-Basics | 8590b8c2a4704d9e89968166ef5e4ea931d54907 | 5b61398dc9f8e30f49dd976e1e31b9fe54217bdb | refs/heads/master | 2022-07-04T02:40:55.323387 | 2020-05-13T11:56:26 | 2020-05-13T11:56:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | # Hash
# Python dicts are hash maps; a duplicate literal key keeps the LAST value,
# so "q" maps to "kyu" below.
alpha = {
    "a": "A",
    "b": "B",
    "c": "C",
    "q": "Q",
    "q": "kyu"
}

# TypeError: unhashable type -- a dict (mutable) cannot be used as a key
# beta = {
#     {}: "a"
# }
# print(beta)

if "a" in alpha:
    print(alpha["a"])
print()

for key in alpha:
    print("Key", key, "Value", alpha[key])
print()

print("Ordered Dict")
un_ordered_dict = {
    3: "Three",
    2: "Two",
    1: "One"
}
un_ordered_dict[4] = "Four"
print("Unordered ", un_ordered_dict)

# dict_values: a live view of all values, in the dict's insertion order.
print("Unordered Values ", un_ordered_dict.values())

# dict_keys
ordered_keys = list(un_ordered_dict.keys())
ordered_keys.sort()
print("Ordered ", un_ordered_dict)
for key in ordered_keys:
    print(un_ordered_dict[key])
print()

# dict_items
un_ordered_dict_tuple = un_ordered_dict.items()
print("dict items --> tuple", un_ordered_dict_tuple)
print()

print("dic to tuple, by keys", tuple(un_ordered_dict))
print("tuple to dict ", dict(un_ordered_dict_tuple))
print()

delete = {
    "a": "A"
}
del delete["a"]
print("Delete by key ", delete)
print()

# Dict comprehension
string = "abcdefghi"
# BUG FIX: `string` here is a local str, not the `string` module, so
# string.ascii_lowercase raised AttributeError. Iterate the string itself.
dict_comp = {k: v**3 for (k, v) in zip(string, range(26))}
print("dict_comp ", dict_comp)
print()

# del removes the NAME; referencing it afterwards raises NameError. The
# original crashed here -- catch the error so the demonstration still runs.
del delete
try:
    print("Delete ", delete)
except NameError as err:
    print("Delete ", err)
print()
| [
"satvikpurohit92@gmail.com"
] | satvikpurohit92@gmail.com |
42dffd0c7b9052ada3ac5a0ff8bde48f27d5b316 | 6b792704cf7f29f87fe810f524a00c59af5b65a9 | /06/p06_01.py | 8b7b2ac102b787eabeb2b726be757b919feaa852 | [] | no_license | canislatranscoxus/leetcode | d850662ab2f6edf768ec76529c6862e3266c33e2 | a5842a029dfa88298cab0306605b7521667e1efd | refs/heads/master | 2021-05-25T10:03:58.898222 | 2020-07-03T17:57:57 | 2020-07-03T17:57:57 | 127,052,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | '''
-------------------------------------------------------------------------------
Problem : 6. ZigZag Conversion
Description :
The string "PAYPALISHIRING" is written in a zigzag pattern
on a given number of rows like this:
(you may want to display this pattern in a fixed font for better legibility)
P A H N
A P L S I I G
Y I R
And then read line by line: "PAHNAPLSIIGYIR"
Write the code that will take a string and make this conversion given a number of rows:
string convert(string text, int nRows);
convert("PAYPALISHIRING", 3) should return "PAHNAPLSIIGYIR".
Algorithm : Use a hash table and fill it up-down-up, up-down-up, ...
Author : AAT.
-------------------------------------------------------------------------------
'''
from collections import defaultdict
class Solution:
    def convert(self, s, numRows):
        """
        :type s: str
        :type numRows: int
        :rtype: str

        Writes s row-by-row in the zigzag pattern (straight down through the
        numRows rows, then diagonally back up through the interior rows) and
        reads the rows back in order. The debug printing and the dead
        `space = '' * n` computation (always the empty string) from the
        original were removed; the returned string is unchanged.
        """
        n = len(s)
        rows = defaultdict(list)
        i = 0
        while i < n:
            # Vertical pass: top row down to the bottom row.
            row = 0
            while i < n and row < numRows:
                rows[row].append(s[i])
                i += 1
                row += 1
            # Diagonal pass: back up through the interior rows only
            # (rows numRows-2 .. 1), skipping top and bottom.
            row = numRows - 2
            while i < n and 0 < row:
                rows[row].append(s[i])
                i += 1
                row -= 1
        return ''.join(''.join(rows[row]) for row in range(numRows))
# -------------------------------------------------------------------------------
# Demo: run the LeetCode example input through Solution.convert and show both.
# -------------------------------------------------------------------------------
s = 'PAYPALISHIRING'
numRows = 3
sol = Solution()
result = sol.convert( s, numRows )
print( 's: {}'.format( s ) )
print( 'result: {}'.format( result ) ) | [
"noreply@github.com"
] | canislatranscoxus.noreply@github.com |
dcfe921846e90921b926fed1aa706a3fa700de5c | ca603d1544ed3f558e968dd72ae224f4ae85464c | /PythonLab Assignment 1/SumDigitInNumber_FOUR.py | ad0443c02f358815c875cb35921828a6f5153ac5 | [] | no_license | bhanugudheniya/Python-Program-Directory | d6c71effec2240c00d1f1356e5ece1e3154a7d9a | c42cb321d94605eb99c11ad04e08f8511ea91107 | refs/heads/main | 2023-07-14T11:37:54.133529 | 2021-08-03T20:43:30 | 2021-08-03T20:43:30 | 378,257,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | numberValue = int(input("Enter a number : "))
sum = 0
copy = numberValue
# Peel off the last decimal digit each pass and accumulate it into `sum`;
# `copy` keeps the original number for the final report line.
while numberValue > 0:
    numberValue, temp = divmod(numberValue, 10)
    sum = sum + temp
print("Sum of digit : ", copy, "is : ", sum) | [
"bhanugudheniya1409@gmail.com"
] | bhanugudheniya1409@gmail.com |
256bfe2c0fd60786ded8064eb6e99b336021b73c | 47af7a5ccf1f75e9057c6061fa64c314d8fc6633 | /mysite/settings.py | 7a473711d3f4a40606417cafd5bb81f42d3aa9a2 | [] | no_license | jmeneb/mysite | 061c5e71f7e706755254c35cbeff311584a9fcec | f04a8f4200ade22c848f75faa0fc350c0b3006af | refs/heads/master | 2021-09-28T17:15:47.949325 | 2015-01-16T09:28:53 | 2015-01-16T09:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the DJANGO_SECRET_KEY environment variable; the literal below is a
# development fallback only (NOTE(review): this key is committed to version
# control and should be rotated before any production use).
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    '8c2qrpzt(@#$)@qu4e03d!99*n%np)g%vml932$gje2ihdg*my',
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'polls',
)

# Order matters: middleware runs top-down on requests, bottom-up on responses.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'mysite.urls'

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# NOTE(review): database credentials are hard-coded and committed; move them to
# environment variables or a non-versioned local settings file.
DATABASES = {
    'default': {
        'ENGINE': 'mysql.connector.django',
        'NAME': 'm14015016',
        'USER': 'm14015016',
        'PASSWORD': 'm8fh=!',
        'HOST': 'dbs-perso.luminy.univmed.fr',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'
| [
"m14015016@tempo21-2.luminy.univ-amu.fr"
] | m14015016@tempo21-2.luminy.univ-amu.fr |
1a32163b0190c3c69f2385daa82e7b1b0e818a4b | dcfba9fa0012bc267146774f1cb249e70457367a | /medx/admin.py | caad6d22e7f344a779820920dcf9572e49a92dd0 | [] | no_license | ag602/medstartup | 0ce4d68f4af8337c6ec223c62aae4497ce8f4e29 | 76c95d9bec5b3811c0607f44dddb8ae45d9d6ef7 | refs/heads/master | 2021-06-23T04:20:28.083129 | 2020-02-22T09:43:40 | 2020-02-22T09:43:40 | 226,587,306 | 0 | 1 | null | 2021-06-10T22:22:30 | 2019-12-07T23:04:51 | Python | UTF-8 | Python | false | false | 144 | py | from django.contrib import admin
# Import the registered models explicitly rather than via a wildcard, so the
# registrations below are traceable to their definitions (PEP 8 discourages
# `from module import *` in code).
from .models import contact, diabetes

# Register your models here.
admin.site.register(contact)
admin.site.register(diabetes) | [
"akul.gupta.phe17@itbhu.ac.in"
] | akul.gupta.phe17@itbhu.ac.in |
a47f26082d2d2749fd94345ceee1afa04c5030da | 97813689e4fa48b74950bec8c2d6588fdc756f0e | /Hydrogel pore size- source code- R1.py | 1900534e78aa466c94ee31e974189bcf94ca6887 | [
"MIT",
"CC0-1.0"
] | permissive | Eng-RSMY/Hydrogel-pore-size | 0b3990aa926ee7af8d508c30cdba05daf883afb2 | cdfc9080401ed7be5be658d32321127321e0112c | refs/heads/main | 2023-03-27T15:34:16.877982 | 2021-03-25T07:49:46 | 2021-03-25T07:49:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,026 | py | ##Authors: Maryam Jamshidi (nasstaran.jamshidi@gmail.com) , Cavus Falamaki (c.falamaki@aut.ac.ir)
###Authors Acknowledge Niloufar Jamshidi for her helps (niloufar.jamshidy@gmail.com) Nov. 2020
import os
import cv2 as cv
import skimage
import glob
import csv
import numpy as np
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
from scipy import ndimage, misc
from skimage import io, color, measure
from skimage.measure import label, regionprops, regionprops_table
from skimage.data import gravel
from skimage import filters
from skimage.segmentation import clear_border
# from skimage.filters import difference_of_gaussians, meijering, sato, frangi, hessian, window
from skimage.feature import hessian_matrix, hessian_matrix_eigvals
from scipy.fftpack import fftn, fftshift
from mpl_toolkits.axes_grid1 import make_axes_locatable
# 1: reading and loading the image from directory ##############################################################
PATH_in = 'E:/1- Image Analysis/python images/python images/C40i.tif'
name = os.path.split(PATH_in)[-1]
PATH = 'E:/1- Image Analysis/python images/Results-R1/' #### specify the path to save the images
pixel_to_um = 1 #### specify scale of the image
# Flag 0 loads the image as single-channel grayscale.
image = cv.imread(PATH_in,0)
# 2: Normalize the image #######################################################################################
# Clip intensities to mean +/- offset*std before stretching to [0, 255], so the
# contrast stretch is not dominated by a few outlier pixels.
mean, STD = cv.meanStdDev(image)
offset = 1
clipped = np.clip(image, mean - offset*STD, mean + offset*STD).astype(np.uint8)
Nimage = cv.normalize(clipped, None, 0, 255, cv.NORM_MINMAX)
cv.imwrite(os.path.join(PATH+'1-Normalized_'+name), Nimage)
# 3: Gaussian Blure and Edge detection filtering ###############################################################
# NOTE(review): `kwargs` is only consumed by the commented-out hessian() call
# below; it is currently unused.
kwargs = {'sigmas': [0.7], 'mode': 'reflect'}
Gimage = filters.gaussian(Nimage, sigma=0.7)
cv.imwrite(os.path.join(PATH+'2-Gaussian filter_'+name), Gimage)
GN = cv.normalize(Gimage, None, 0, 255, cv.NORM_MINMAX)
GNN = GN.astype(np.uint8)
# Sobel edge magnitude of the blurred image; the edges are then added back onto
# the normalized image to sharpen pore boundaries.
Eimage = filters.sobel(GN)
cv.imwrite(os.path.join(PATH+'3- Sobel Edge detection_'+name), Eimage)
Eimage += Nimage
EN = cv.normalize(Eimage, None, 0, 255, cv.NORM_MINMAX)
ENN = EN.astype(np.uint8)
# Alternative edge enhancers kept for experimentation (currently disabled):
# DoG = filters.difference_of_gaussians(Gimage,0.7)
# # cv.imwrite(os.path.join(PATH+'DoG_'+name), DoG)
# DoG += Nimage
# DoGN = cv.normalize(DoG, None, 0, 255, cv.NORM_MINMAX)
# DoGNN = DoGN.astype(np.uint8)
# Laplacian-of-Gaussian variant (computed but only LoGNN is kept; not used further below).
LoG = ndimage.gaussian_laplace(Gimage, sigma=0.7)
# cv.imwrite(os.path.join(PATH+'LoG_'+name), LoG)
LoG += Nimage
LoGN = cv.normalize(LoG, None, 0, 255, cv.NORM_MINMAX)
LoGNN = LoGN.astype(np.uint8)
# Gaussian gradient magnitude variant (same pattern as above).
GGM = ndimage.gaussian_gradient_magnitude(Gimage, sigma=0.7)
# cv.imwrite(os.path.join(PATH+'GGM_'+name), GGM)
GGM += Nimage
GGMN = cv.normalize(GGM, None, 0, 255, cv.NORM_MINMAX)
GGMNN = GGMN.astype(np.uint8)
# eigen = np.linalg.eigvals(Gimage[0:image.ndim, 0:image.ndim])
# HoEig= filters.hessian(Gimage, **kwargs)
# # cv.imwrite(os.path.join(PATH+'HoEig_'+name), HoEig)
# HoEig += Nimage
# HoEigN = cv.normalize(HoEig, None, 0, 255, cv.NORM_MINMAX)
# HoEigNN = HoEigN.astype(np.uint8)
# 4: FFT Bandpass Filter########################################################################################
rows, cols = ENN.shape
# NOTE(review): crow/ccol (image centre) are computed but not used afterwards.
crow, ccol = int(rows / 2), int(cols / 2)
# apply mask and inverse DFT
f = np.fft.fft2(ENN) #fourier transform
fshift1 = np.fft.fftshift(f) #shift the zero to the center
# Keep only frequency components whose magnitude lies in (1000, 1e15): an
# amplitude band-pass in the Fourier domain.
maskNew = np.zeros((rows, cols), np.uint8)
maskNew[(np.abs(fshift1) > 1000)&(np.abs(fshift1) < 10**15)] = 1
fshift = fshift1 * maskNew #Apply the mask
fftmask = np.abs(fshift)
# NOTE(review): fshift_mask_mag is never used; np.log of the masked (zeroed)
# spectrum also emits divide-by-zero warnings — confirm whether this line is needed.
fshift_mask_mag = np.log(fshift)
f_ishift = np.fft.ifftshift(fshift) #inverse shift
img_back = np.fft.ifft2(f_ishift) #inverse fourier transform
img_back2 = np.abs(img_back)
fftimage = np.abs(fshift1)
# 2x2 diagnostic figure: original, log-spectrum, filtered result, log-mask.
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
Original = ax[0,0].imshow(image, cmap = plt.cm.gray)
ax[0,0].set_title('Original Image')
FFT = ax[0,1].imshow(np.log(fftimage), cmap=plt.cm.jet)
ax[0,1].set_title('Original FFT Magnitude (log)')
Bandpass = ax[1,0].imshow(img_back2, cmap=plt.cm.gray)
ax[1,0].set_title('Bandpass Filter Result')
FFTmask = ax[1,1].imshow(np.log(fftmask), cmap=plt.cm.jet)
ax[1,1].set_title('FFT Mask Magnitude (log)')
BPFN = cv.normalize(img_back2, None, 0, 255, cv.NORM_MINMAX)
BPFNN = BPFN.astype(np.uint8)
fig.colorbar(Original, ax=ax[0,0])
fig.colorbar(FFT, ax=ax[0,1])
fig.colorbar(FFTmask, ax=ax[1,1])
fig.colorbar(Bandpass, ax=ax[1,0])
cv.imwrite(os.path.join(PATH+'4- BandPassFilter_'+name), img_back2)
# 5: Threshold, morphological correction and Label particles ##################################################
# retG,thG = cv.threshold(BPFNN , 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# Adaptive Gaussian threshold (block size 25, constant 15) — replaces the
# global Otsu threshold that is kept commented out above.
thG = cv.adaptiveThreshold(BPFNN , 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 25, 15)
# NOTE(review): se2 and kernel are defined but not used in this section.
se1 = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3,3))
se2 = cv.getStructuringElement(cv.MORPH_RECT, (2,2))
se3 = cv.getStructuringElement(cv.MORPH_CROSS, (3,3))
kernel = np.ones((3,3), np.uint8)
# One erosion followed by five dilate/erode (closing) passes to smooth the mask.
Morphimage = cv.erode(thG , se1, iterations = 1)
for number in range(5):
    Morphimage = cv.dilate(Morphimage , se1, iterations = 1)
    Morphimage = cv.erode(Morphimage , se1, iterations = 1)
# Drop objects touching the image border (they would bias the size statistics).
clearimage = clear_border(Morphimage)
cv.imshow('Morph', Morphimage)
cv.imwrite(os.path.join(PATH+'5- Threshold_'+name), thG)
cv.imwrite(os.path.join(PATH+'7- Cleared_'+name), clearimage)
cv.imwrite(os.path.join(PATH+'6- Morphological correction_'+name), Morphimage)
# 6: Aplying watershed to separate pores #######################################################################
# Standard marker-based watershed: sure background from dilation, sure
# foreground from a thresholded distance transform, unknown = the difference.
sure_bg = cv.dilate(Morphimage , se3, iterations = 2)
DT = cv.distanceTransform (Morphimage , cv.DIST_L2, 3)
DT = DT.astype(np.uint8)
ret2, sure_fg = cv.threshold (DT, 0.1* DT.max(), 255, 0)
unknown = cv.subtract(sure_bg, sure_fg)
ret3, markers = cv.connectedComponents (sure_fg)
# Shift labels by 10 so background is not 0, then mark the unknown region 0
# (the value cv.watershed treats as "to be decided").
markers = markers + 10
markers [unknown == 255] = 0
# watershed needs a 3-channel image; boundaries are written as -1 into markers.
ENN = cv.cvtColor(ENN, cv.COLOR_GRAY2BGR)
print (ENN.shape)
markers = cv.watershed (ENN, markers)
ENN [markers == -1] = [0, 255, 255]
seg = color.label2rgb(markers, bg_label=0)
cv.imshow('overlay', ENN)
cv.imshow('Segmented pores', seg)
cv.imwrite(os.path.join(PATH+'8- overlay_'+name), ENN)
cv.imwrite(os.path.join(PATH+'9- Segmented pores_'+name), seg)
# 7: Saving pores measured features in csv file ################################################################
pores = measure.regionprops(markers , intensity_image=image)
# CSV column headers; values below are emitted in exactly this order.
proplist = ['Area', 'Centroid X', 'Centroid Y', 'equivalent_diameter', 'Orientation',
            'MajorAxisLength', 'MinorAxisLength', 'Perimeter',
            'MinIntensity', 'MeanIntensity', 'MaxIntensity']
# Use a context manager so the CSV is flushed and closed even on error (the
# file handle was previously left open for the interpreter to clean up).
# NOTE(review): the CSV is written to the current working directory, not PATH —
# confirm that is intended.
with open('Pore Features.csv', 'w', newline='') as OutputResult:
    OutputResult.write((',' + ",".join(proplist) + '\n'))
    for pores_prop in pores:
        OutputResult.write(str(pores_prop['Label']))
        for i, prop in enumerate(proplist):
            if prop == 'Area':
                to_print = pores_prop.area * pixel_to_um ** 2   # pixel^2 -> um^2
            elif prop == 'Centroid X':
                to_print = pores_prop.centroid[0]
            elif prop == 'Centroid Y':
                to_print = pores_prop.centroid[1]
            elif prop == 'Orientation':
                to_print = pores_prop.orientation * 57.2958     # radians -> degrees
            elif prop.find('Intensity') < 0:
                to_print = pores_prop[prop] * pixel_to_um       # length-like props: pixel -> um
            else:
                to_print = pores_prop[prop]                     # intensity props are unit-less
            OutputResult.write(',' + str(to_print))
        OutputResult.write('\n')
cv.waitKey(0)
| [
"noreply@github.com"
] | Eng-RSMY.noreply@github.com |
0f035ba1c524afe06432726820c34655340ac8c6 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GL/ARB/texture_storage_multisample.py | fdcdbc160823f7a5f0c538918cf1a7c652b4e9a0 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
# End users want this...
from OpenGL.raw.GL import _errors
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# Registry name of the OpenGL extension wrapped by this (autogenerated) module.
_EXTENSION_NAME = 'GL_ARB_texture_storage_multisample'
def _f(function):
    # Wrap *function* as a lazily-resolved GL entry point for this extension,
    # with the standard glGetError checker attached.
    return _p.createFunction(function, _p.PLATFORM.GL, 'GL_ARB_texture_storage_multisample',
                             error_checker=_errors._error_checker)
# Thin ctypes stubs: @_p.types declares (return type, argument types); the
# Python bodies are empty because the decorated wrapper does the actual call.
@_f
@_p.types(None, _cs.GLenum, _cs.GLsizei, _cs.GLenum, _cs.GLsizei, _cs.GLsizei, _cs.GLboolean)
def glTexStorage2DMultisample(target, samples, internalformat, width, height, fixedsamplelocations): pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLsizei, _cs.GLenum, _cs.GLsizei, _cs.GLsizei, _cs.GLsizei, _cs.GLboolean)
def glTexStorage3DMultisample(target,samples,internalformat,width,height,depth,fixedsamplelocations):pass
| [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
bbeb25988bf5b7b74f12b1665fe09e7aa5d17f91 | 94ea88a0e1bc662443a37d6e6c71fdbdc258131a | /Game/FlyPlane/Panel/StartPanel.py | 5ac8d442cc578e5a8de660e211412af39165c9fe | [] | no_license | 1549469775/Blog | 0c291aa6b7d9dc6182ed046b37ed47ec290b758b | fdaf99cafb13b737dfc570ae34193b3a2a361445 | refs/heads/master | 2021-04-12T08:00:46.184419 | 2018-04-24T00:01:11 | 2018-04-24T00:01:11 | 125,953,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,710 | py | import math
import os
import time
import pygame as pg
from Base.Panel import Panel
from UI.Image import Image
from UI.Button import Button
class StartPanel(Panel):
    # Panel shown at game start: the player moves a plane sprite around a
    # bounded area with the W/A/S/D keys.
    #
    # The attributes below are class-level defaults; per-instance assignments
    # such as `self.up = True` in event_loop shadow them.
    first=True            # True until the one-time layout in update() has run
    player=None           # the plane Image sprite (created in __init__)
    up=False              # movement flags driven by event_loop
    down=False
    left=False
    right=False
    rect=(0,0,800,600)    # playable area (left, top, right, bottom); recomputed by set_limte()
    speed=2               # movement in pixels per frame and per axis
def __init__(self,sprites_path,sprites,img=[],voice=[],extra=[],*args, **kwargs):
Panel.__init__(self,sprites_path,sprites,img,voice,extra,*args, **kwargs)
self.player=Image(sprites_path['plane'], (0, 0, 0), (0, 0))
def event_loop(self,events):
for event in events:
if event.type == pg.KEYDOWN:
if event.key == pg.K_w:
self.up=True
if event.key == pg.K_s:
self.down=True
if event.key == pg.K_a:
self.left=True
if event.key == pg.K_d:
self.right=True
if event.key == pg.K_SPACE:
self.get_bomb.play()
if event.type == pg.KEYUP:
if event.key == pg.K_w:
self.up=False
if event.key == pg.K_s:
self.down=False
if event.key == pg.K_a:
self.left=False
if event.key == pg.K_d:
self.right=False
def move_hero(self,plane):
if self.up:
plane.rect = (plane.rect[0], plane.rect[1] - self.speed)
if self.down:
plane.rect = (plane.rect[0], plane.rect[1] + self.speed)
if self.left:
plane.rect = (plane.rect[0] - self.speed, plane.rect[1])
if self.right:
plane.rect = (plane.rect[0] + self.speed, plane.rect[1])
self.limteInSide(plane)
if self.up==True or self.down==True or self.left==True or self.right==True:
self.player.set_image(self.my_extra['hero2'].image)
elif self.up==False and self.down==False and self.left==False and self.right==False:
self.player.set_image(self.my_extra['hero1'].image)
def limteInSide(self,plane):
if plane.rect[1] <= self.rect[1]:
plane.rect = (plane.rect[0], self.rect[1])
if plane.rect[1] >= self.rect[3] - plane.get_height():
plane.rect = (plane.rect[0], self.rect[3] - plane.get_height())
if plane.rect[0] <= self.rect[0]:
plane.rect = (self.rect[0], plane.rect[1])
if plane.rect[0] >= self.rect[2] - plane.get_width():
plane.rect = (self.rect[2] - plane.get_width(), plane.rect[1])
def set_limte(self):
self.rect = (self.bg.get_width() / 2 - self.background.get_width() / 2, 200,
self.bg.get_width() / 2 + self.background.get_width() / 2, 600)
    def update(self,screen):
        # Per-frame update: run one-time layout on the first call, then move
        # the player and redraw the sprite group.
        if self.first:
            self.first=False
            self.player.set_image(self.my_extra['hero1'].image)
            self.group.add(self.player)
            # self.game_music.play(loops=100000000)
            # Centre the static UI elements horizontally on the window.
            self.background.rect = (self.bg.get_width() / 2 - self.background.get_width() / 2, 0)
            self.name.rect = (self.bg.get_width() / 2 - self.name.get_width() / 2, 10)
            self.player.rect = (self.bg.get_width() / 2 - self.name.get_width() / 2 + 30, 400)
            self.btn_finish.rect = (self.bg.get_width() / 2 - self.btn_finish.get_width() / 2, 500)
            self.restart_nor.rect = (self.bg.get_width() / 2 - self.restart_nor.get_width() / 2, 300)
            self.bullet1.rect = (self.bg.get_width() / 2 - self.bullet1.get_width() / 2, 300)
            self.set_limte()
        self.move_hero(self.player)
self.group.update(screen) | [
"1549469775@qq.com"
] | 1549469775@qq.com |
cffdcbfbaa82b5d852467a0d0c6dc6c18e11fa76 | d2d0fb55d0d47a357b49c9807cdf52b9607fb0d3 | /clownApp/migrations/0003_appointment_clown.py | 73a68c8e061aa2b2fdee5cff7e9f6e63ac4c56e5 | [] | no_license | Pinky-alt-spec/clownApp | 21dd88731e69ca88f1d4aaf21a53234fe8440e7f | 0f5c61234630d0b6909dd536f4d822b933a127f9 | refs/heads/master | 2023-07-17T13:02:32.504283 | 2021-08-30T10:39:35 | 2021-08-30T10:39:35 | 401,120,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Generated by Django 3.2.6 on 2021-08-29 18:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (see the generator header); adds the `clown`
    # foreign key to the appointment model.

    dependencies = [
        ('clownApp', '0002_clown_clown'),
    ]

    operations = [
        migrations.AddField(
            model_name='appointment',
            name='clown',
            # NOTE(review): default=True for a ForeignKey is suspicious — FK
            # defaults are normally a primary-key value; confirm this migration
            # applies cleanly against existing rows.
            field=models.ForeignKey(default=True, on_delete=django.db.models.deletion.CASCADE, to='clownApp.clown'),
        ),
    ]
| [
"pinkym@ringier.co.za"
] | pinkym@ringier.co.za |
2a0c9c6ff1da12b801ac64b8f63b94f91370b408 | 2dae0060f4b75e3aec45940af9667b400717be4e | /MARS/MARS_Tools/subplot.py | 445b8e65c434564860ccf09968e32257d01acf59 | [] | no_license | caslano/mars-launcher | 5c9dda77565ee05503226f866a62df6100a0af79 | 4c6e8378a940ded83ad3db20c921e2bb1568aa89 | refs/heads/master | 2021-05-24T11:40:15.824467 | 2020-05-24T06:10:08 | 2020-05-24T06:10:08 | 253,542,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import io
matplotlib.pyplot.ioff()
print(f"is_interactive = {matplotlib.is_interactive()}")
x = np.linspace(0, 2, 100)
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20,10))
ax1.plot(x, x, label='linear')
ax1.plot(x, x**2, label='quadratic')
ax1.set_xlabel('x label')
ax1.set_ylabel('y label')
ax1.set_title("First Plot")
ax1.legend(loc='lower right')
ax2.plot(x, x**2, label='quadratic')
ax2.plot(x, x**3, label='cubic')
ax2.set_xlabel('x label')
ax2.set_ylabel('y label')
ax2.set_title("Second Plot")
ax2.legend(loc='lower right')
fig.savefig("linear-quedratic.png")
fig.savefig("linear-quedratic.png")
buffer = io.BytesIO()
fig.savefig(buffer, format='png')
buffer.seek(0)
print(buffer.read())
plt.show()
input("Press Enter to continue...")
print("Done")
| [
"35153274+caslano@users.noreply.github.com"
] | 35153274+caslano@users.noreply.github.com |
f0704c0206258c0a70d63a11a2d26e498e315be5 | 628a698c070e55cae4a8925ce7cd474d9a211be5 | /Web/views.py | 2af6192e243eaf3bc2a768293db2430a3c959a01 | [] | no_license | umbali/Umbali | a926ebbe2c1fc0dde63a7b0eaff70b5572ce0793 | 1a3bc435d7a9fbf41cfda2a1f991f96c8a6cef6b | refs/heads/master | 2020-05-21T07:41:04.016368 | 2019-05-10T10:49:38 | 2019-05-10T10:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,062 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.views import View
from django.template.loader import render_to_string
from django.utils import timezone
from django.http import Http404
from django.contrib.auth import login, authenticate
from Web.forms import *
from Event.models import *
from Web.models import *
from MyEvent.models import *
from paypal.standard.forms import PayPalPaymentsForm
from django.conf import settings
import thread
import json
def get_client_ip(request):
    """Return the originating client IP for *request*.

    Honours the X-Forwarded-For header (first hop) when present, falling
    back to the direct socket address in REMOTE_ADDR.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def get_or_none(classmodel, **kwargs):
    """Return the single *classmodel* instance matching **kwargs, or None.

    Fixed: the original ``except classmodel.DoesNotExist or Exception`` is a
    boolean expression that evaluates to just ``DoesNotExist``, so generic
    errors escaped even though the author clearly meant to swallow them too.
    A tuple catches both.
    """
    try:
        return classmodel.objects.get(**kwargs)
    except (classmodel.DoesNotExist, Exception):
        return None
def index(request):
    """Landing page.

    With ?event=<slug>, show that specific active, not-yet-ended live event
    (404 if absent); otherwise pick one from the active events, or None when
    there is none.  Template context comes from locals(), so the local names
    below (now, last_event) are part of the template contract.

    NOTE(review): ``.last()`` on a queryset ordered by '-created_on' yields
    the OLDEST event; if the newest was intended, use ``.first()``.
    """
    now = timezone.now()
    if 'event' in request.GET:
        last_event = get_object_or_404(EventLive, slug=request.GET.get('event', None),
                                       is_active=True, end_time__gt=timezone.now())
    else:
        # .last() returns None on an empty queryset, which removes the extra
        # count() query the original issued before fetching the row.
        last_event = EventLive.objects.filter(
            is_active=True, end_time__gt=timezone.now()
        ).order_by('-created_on').last()
    return render(request, "Web/base.html", locals())
def contact(request):
    """Render the static contact page (template context from locals())."""
    return render(request,"Web/contact.html",locals())
#---------------------------------------------------------------------------------------------------------------------
# Register views
class RegisterUserView(View):
    """Sign-up page: GET renders the form, POST creates and logs in the user.

    Both handlers pass locals() to the template, so the local variable names
    are part of the template contract and must not be renamed.
    """
    template = "Web/register.html"

    def get(self, request):
        # Already-authenticated users have nothing to register.
        if request.user.is_authenticated():
            return redirect('Web:index')
        form = SignUpForm()
        return render(request, self.template, locals())

    def post(self, request):
        if request.user.is_authenticated():
            return redirect('Web:index')
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            # Authenticate with the just-chosen credentials and open a session.
            username = form.cleaned_data.get('email')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            # Send thanks mail process (in a background thread so the HTTP
            # response is not delayed by SMTP).
            current_site = get_current_site(request)
            subject = 'Bienvenue | Umbali'
            message = render_to_string('Mail/thank_register_email.html', {
                'user': user,
                'domain': current_site.domain,
            })
            #user.email_user(subject, message,settings.EMAIL_GROUP_CONTACT)
            # Fixed: thread.start_new_thread() takes its argument tuple
            # positionally; passing it as the keyword ``args=`` raised
            # TypeError at runtime, so the welcome mail was never sent.
            thread.start_new_thread(user.email_user, (subject, message, settings.EMAIL_SENDER_GROUP))
            # NOTE(review): redirecting to a user-supplied ?next= value is an
            # open-redirect risk; consider validating it against known URLs.
            if 'next' in request.GET:
                return redirect(request.GET.get('next', 'index'))
            return redirect('/?sign_up=1')
        return render(request, self.template, locals())
#---------------------------------------------------------------------------------------------------------------------
class CheckoutView(View):
    # PayPal "Buy Now" checkout page for a single live event.
    template = "Web/pay.html"
    def get(self,request,event_slug):
        # Anonymous users are bounced to login and return here afterwards.
        if not request.user.is_authenticated():
            return redirect('/login/?next=/pay/'+str(event_slug)+'/')
        event = get_object_or_404(EventLive,slug=event_slug,is_active=True)
        # What you want the button to do.
        # Standard PayPal "Buy Now" (_xclick) parameters for django-paypal.
        paypal_dict = {
            "cmd":"_xclick",
            "business": settings.PAYPAL_BUSINESS,
            "lc":"GB",
            "amount": event.price.amount,
            "currency_code": event.price.currency,
            # IPN endpoint plus the success / cancel redirect targets.
            "notify_url": request.build_absolute_uri(reverse('paypal:paypal-ipn')),
            "return": request.build_absolute_uri(reverse('Web:checkout_success',args=[event.slug,])),
            "cancel_return": request.build_absolute_uri(reverse('Web:checkout_fail',args=[event.slug,])),
            # invoice encodes "<event_id>#<user_id>" so the IPN handler can
            # recover both (NOTE(review): confirm the IPN side parses this).
            "invoice": str(event.id)+"#"+str(request.user.id), # Custom command to correlate to some function later (optional)
        }
        # Create the instance.
        form = PayPalPaymentsForm(initial=paypal_dict)
        # Template context from locals(): event, form, paypal_dict, ...
        return render(request,self.template,locals())
class CheckoutSuccessView(View):
    # Landing page PayPal redirects to after a completed payment.
    template = "Web/pay_success.html"
    def get(self,request,event_slug):
        event = get_object_or_404(EventLive,slug=event_slug,is_active=True)
        return render(request,self.template,locals())
class CheckoutFailView(View):
    # Landing page PayPal redirects to when the payment is cancelled.
    template = "Web/pay_fail.html"
    def get(self,request,event_slug):
        event = get_object_or_404(EventLive,slug=event_slug,is_active=True)
        return render(request,self.template,locals())
class LiveView(View):
    # Player page for a purchased live event.
    template_tizen = "Web/live_on_tizen.html"  # NOTE(review): unused in this class; presumably consumed elsewhere
    template = "Web/live.html"
    def get(self,request,event_slug):
        event = get_object_or_404(EventLive, slug=event_slug,is_active=True)
        myEvent = get_object_or_404(MyEventLive,event=event,user=request.user)
        # A ticket is locked to the first IP that watches.  If another IP holds
        # the lock, expose myEvent=None to the template (which can warn the
        # user); otherwise (re)claim the lock for this client.
        if myEvent.watcher_ip != None and myEvent.watcher_ip != get_client_ip(request):
            myEvent = None
        elif myEvent.watcher_ip == None or myEvent.watcher_ip == get_client_ip(request):
            myEvent.watcher_ip = get_client_ip(request)
            myEvent.save()
        return render(request,self.template,locals())
class GetMyLive(View):
    """JSON API around a user's access to a live stream.

    All verbs 404 unless the event is active and the user owns a ticket
    (MyEventLive row).  A ticket is locked to one watcher IP: GET succeeds
    only from the locked (or not-yet-locked) IP, POST locks the ticket to
    the caller's IP, DELETE releases the lock.

    Refactored: the lookup / IP-check / JSON-response logic that was
    copy-pasted across the three verbs now lives in private helpers.
    """

    _LOCKED_MSG = "Please disconnect from other device to watch here, contact us if the problem persist"

    def _lookup(self, request, event_slug):
        # Resolve (event, ticket) or raise Http404.
        event = get_object_or_404(EventLive, slug=event_slug, is_active=True)
        myEvent = get_object_or_404(MyEventLive, event=event, user=request.user)
        return event, myEvent

    def _ip_allowed(self, request, myEvent):
        # True when the ticket is unlocked or locked to this client's IP.
        return myEvent.watcher_ip is None or myEvent.watcher_ip == get_client_ip(request)

    def _stream_response(self, event):
        # Serialize the event's HLS endpoint; 404s if no stream config exists.
        streamConf = get_object_or_404(EventLiveStreamConf, event=event)
        response_data = {'hls': streamConf.hls}
        return HttpResponse(json.dumps(response_data), content_type="application/json")

    def get(self, request, event_slug):
        event, myEvent = self._lookup(request, event_slug)
        if self._ip_allowed(request, myEvent):
            return self._stream_response(event)
        raise Http404(self._LOCKED_MSG)

    def post(self, request, event_slug):
        event, myEvent = self._lookup(request, event_slug)
        if self._ip_allowed(request, myEvent):
            myEvent.watcher_ip = get_client_ip(request)
            myEvent.save()
            return self._stream_response(event)
        raise Http404(self._LOCKED_MSG)

    def delete(self, request, event_slug):
        event, myEvent = self._lookup(request, event_slug)
        if self._ip_allowed(request, myEvent):
            myEvent.watcher_ip = None
            myEvent.save()
            return self._stream_response(event)
        raise Http404(self._LOCKED_MSG)
class PageView(View):
    # Generic flat-page renderer backed by the Page model.
    template = "Web/page.html"
    def get(self,request,page_slug):
        page = get_object_or_404(Page,slug=page_slug)
        # Template context from locals(): request, page_slug, page.
        return render(request,self.template,locals())
def handler404(request):
    # Custom 404 handler: render the project's "page not found" template.
    return render(request, "Web/404.html", status=404)
def handler500(request):
    """Custom 500 handler: render the project's error template.

    Fixed: the response previously carried status=404; a server-error page
    must return HTTP 500.
    """
    return render(request, "Web/500.html", status=500)
| [
"guillainbisimwa@hotmail.fr"
] | guillainbisimwa@hotmail.fr |
7cfc4dae89d190b3976df6346ac060f21e4e3be1 | dac9951e4f182c097194450627ffbadf2b7705b2 | /murraylab_tools/echo/echo_source_material.py | f9c723f893877d4166c0055cfbee61b53aa2ef9e | [
"MIT"
] | permissive | Kiksmahn/murraylab_tools | 733fdbd8cd0da5394b5e47afb7f880e803e680ca | c8874b5b0078021dc257e11d8204a1632dd6eb9d | refs/heads/master | 2020-05-18T21:56:37.928027 | 2019-05-03T00:49:59 | 2019-05-03T00:49:59 | 184,676,852 | 0 | 0 | null | 2019-05-03T00:22:40 | 2019-05-03T00:22:40 | null | UTF-8 | Python | false | false | 225 | py | # coding=utf-8
import numpy as np
import pandas as pd
import string
import math
import csv
import collections
import os
import warnings
from echo_functions import *
from echo_base import *
__all__ = ["EchoSourceMaterial"]
| [
"sclamons@gmail.com"
] | sclamons@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.