index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
10,200 | 358d39b8b4bbc07a64bd16edb25b5e963e9c3bd0 | from PIL import Image
# Open the sample image; the second argument is PIL's legacy mode parameter
# (only "r" is accepted).
im = Image.open("monalisa.jpg","r")
def effect_spread(self, distance):
    """
    Randomly spread pixels in an image.
    :param distance: Distance to spread pixels.
    """
    # NOTE(review): this is PIL's Image.effect_spread implementation pasted
    # inline; it depends on self.im (the C-level image object) and self._new,
    # so it only works as a method of PIL.Image.Image — it is shadowed/unused
    # as a module-level function here.
    self.load()
    return self._new(self.im.effect_spread(distance))
# Apply PIL's built-in spread effect (distance 100 px) and display the result.
im2 = im.effect_spread(100)
im2.show()
|
10,201 | 88fc3e904ba286b2d4f8852be2aeec59f85de83c | # -*- coding: utf-8 -*-
import csv
import xml.dom.minidom
import os
import time
import openpyxl
import requests
# Default PHPSESSID session cookie; replaced below when the user pastes a
# 26-character session id at the prompt.
cook='foav0m0k120tcsrq32n82pj0h6'
def getImporter(name):
    """
    Look up the importer for a product name on fsrar.gov.ru.

    Returns a (importer_name, INN, KPP) tuple, or a placeholder tuple
    telling the user to search the site manually when no importer is found.
    """
    # The site search chokes on commas and apostrophes in product names.
    name = name.replace(',', '').replace('\'', '')
    # NOTE(review): verify=False disables TLS certificate validation — confirm
    # this is intentional (the site's certificate chain may be non-standard).
    r = requests.post('https://fsrar.gov.ru/frap/frap', data={'FrapForm[name_prod]': name},
                      cookies={'PHPSESSID': cook}, verify=False)
    # print(r.text)
    Answer = r.text
    # Slice out the HTML between the "Уведомитель" (notifier) and
    # "Производители" (producers) table headers.
    Answer = Answer.split('<td ><b>Уведомитель</b></td>')[1].split('<td ><b>Производители</b></td>')[0]
    # Strip markup and whitespace from the extracted fragment.
    Answer = Answer.replace('\r', '').replace('\n', '').replace('<tr>', '').replace('</td>', '').replace('<td>',
                                                                                                         '').replace(
        ' ', '')
    Answer = Answer[1:-1].split('<br />')
    Importer = Answer[0]
    if len(Importer) < 3:
        # Nothing usable scraped — tell the caller to look it up manually.
        return 'http://www.fsrar.ru/frap/frap', 'ИЩИ!!!', 'На сайте'
    INN, KPP = Answer[1].split(',')
    # [5:]/[6:] drop the leading "ИНН:"/"КПП:" label text — presumably; the
    # exact offsets depend on the site's markup, verify against a live response.
    return Importer, INN[5:], KPP[6:]
def xmlParser(file):
    """
    Parse an EGAIS stock/shop XML file into a list of rows:
    [alc_code, full_name, capacity, product_v_code, importer, INN, KPP, quantity].
    """
    dom = xml.dom.minidom.parse(file)
    dom.normalize()
    xml_StockPosition = dom.getElementsByTagName('rst:StockPosition')
    # Fall back to shop positions when the file holds no stock positions.
    if len(xml_StockPosition) == 0:
        xml_StockPosition = dom.getElementsByTagName('rst:ShopPosition')
    AlcoList = []
    for line in xml_StockPosition:
        tList = []
        for V in ['pref:AlcCode', 'pref:FullName', 'pref:Capacity', 'pref:ProductVCode', 'oref:UL', 'rst:Quantity']:
            if V == 'oref:UL' or V == 'pref:Capacity':
                # try/except used as an existence probe for the optional tags.
                try:
                    line.getElementsByTagName(V)[0].childNodes[0].nodeValue
                except:
                    if V != 'pref:Capacity':
                        # No legal-entity block — fall back to the web lookup.
                        ask = getImporter(line.getElementsByTagName('pref:FullName')[0].childNodes[0].nodeValue)
                        Importer, INN, KPP = ask[0], ask[1], ask[2]
                        tList.append(Importer)
                        tList.append(INN)
                        tList.append(KPP)
                    else:
                        # Missing capacity — record the "no container" marker.
                        tList.append('Нет Тары')
                else:
                    if V != 'pref:Capacity':
                        # Legal-entity block present — read importer data inline.
                        INN = line.getElementsByTagName('oref:INN')[0].childNodes[0].nodeValue
                        KPP = line.getElementsByTagName('oref:KPP')[0].childNodes[0].nodeValue
                        Importer = line.getElementsByTagName('oref:FullName')[0].childNodes[0].nodeValue
                        tList.append(Importer)
                        tList.append(INN)
                        tList.append(KPP)
                    else:
                        node = line.getElementsByTagName(V)[0].childNodes[0].nodeValue
                        tList.append(node)
            else:
                node = line.getElementsByTagName(V)[0].childNodes[0].nodeValue
                if V == 'rst:Quantity':
                    # Quantities are summed later, so convert to float here.
                    node = float(node)
                tList.append(node)
        AlcoList.append(tList)
    return AlcoList
# --- interactive entry point: parse all *.xml in cwd, write CSVs + XLSX ------
a = str(input('Enter PHPSESSID from cookies: '))
# A PHPSESSID is 26 characters long; anything else keeps the default above.
if len(a) == 26:
    cook = str(a)
print('Start to find xml files.... \n')
files = os.listdir('./')
xml_f = []
for x in files:
    if x[-3:] == 'xml':
        xml_f.append(x)
        print('Find xml file: ' + x)
# Accumulator for the Excel summary; row 0 is the column-header row.
a = [['Алкокод', 'Наименование', 'Объём', 'Код вида', 'Импортер/производитель', 'ИНН', 'КПП', 'Остатки']]
print('\n'*2)
for fn in xml_f:
    print('Start xml parsing....: ' + fn)
    temp = xmlParser(fn)
    # NOTE(review): tf is never closed or flushed — the last CSV buffer may be
    # lost; confirm whether a `with` block / tf.close() is needed.
    tf = open(fn + '.csv', "w", newline='')
    csv.writer(tf, delimiter=';').writerow(['Алкокод', 'Наименование', 'Объём', 'Код вида', 'Импортер/производитель', 'ИНН', 'КПП', 'Остатки'])
    for i in temp:
        csv.writer(tf, delimiter=';').writerow(i)
        # NOTE(review): this merge loop breaks after comparing against the
        # FIRST non-matching row (usually the header), so duplicate products
        # are appended instead of summed — verify the intended aggregation.
        for y in a:
            if i[1] == y[1]:
                y[7] += i[7]
            else:
                a.append(i)
                break
    print('End xml parsing....: ' + fn)
    print('---\n')
print('Save to Exel....')
xls_file = openpyxl.Workbook()
sheet = xls_file.active
for i in a:
    sheet.append(i)
# Timestamped output name, e.g. 20240101123000_result.xlsx.
xls_file.save(time.strftime("%Y%m%d%H%M%S", time.localtime()) + '_result.xlsx')
print('Save file to ' + time.strftime("%Y%m%d%H%M%S", time.localtime()) + '_result.xlsx')
print('-------------------------------------\n')
input('All Done!.... Press any key')
|
10,202 | d4a1e7f0043eb35305b63689130e09501c1ce57d | from app.core import Forca
def test_animais():
    """A Forca game built on the 'animais' theme must yield a plain-str word."""
    jogo = Forca('animais')
    palavra = jogo.palavra()
    assert isinstance(palavra, str)
def test_paises():
    """A Forca game built on the 'paises' theme must yield a plain-str word."""
    jogo = Forca('paises')
    assert isinstance(jogo.palavra(), str)
10,203 | 2127dc0db40f6f76a95cabdc1bcf4372b14b87f3 | # -*- coding: utf-8 -*-
"""
Created on Sat May 19 21:19:56 2018
@author: 王磊
"""
import os
def loadintxt(fileName):
    """Read a UTF-8 text file and return its lines (newline characters kept)."""
    with open(fileName, 'r', encoding='utf-8') as handle:
        return handle.readlines()
def addtxt(textcomments, fileName='D:\\Python\\Spider\\allcommenttxt.txt'):
    """Append the given lines to *fileName* (UTF-8), creating it if absent."""
    with open(fileName, 'a', encoding='utf-8') as handle:
        handle.writelines(textcomments)
# Walk every subdirectory of the spider output folder and concatenate all
# comment text files into one big allcommenttxt.txt.
os.chdir('D:\\Python\\Spider\\AMAZONcom')
files = os.listdir()
for each in files:
    # Enter the product folder, append each of its files, then pop back up.
    os.chdir(each)
    eachtxt = os.listdir()
    for eachtwo in eachtxt:
        comment = loadintxt(eachtwo)
        addtxt(comment)
    os.chdir('..')
|
10,204 | 50218e8f7eb43cbc010748ea3215ad9134a7ad53 | import discord
from discord.ext import commands
import asyncio
import os
class Others(commands.Cog):
    """Miscellaneous bot commands: latency check and invite link."""
    def __init__(self, client):
        # The running discord Bot/Client instance, used to read latency.
        self.client = client
    @commands.command(brief='Shows you my ping', help="Using this command i'll tell you my latency")
    async def ping(self, ctx):
        # client.latency is in seconds; report milliseconds with 2 decimals.
        await ctx.send(f'Pong! {round(self.client.latency * 1000, 2)} ms')
    @commands.command(brief='A link so you can invite me to other servers')
    async def invite(self, ctx):
        # OAuth2 invite URL; permissions=8 requests Administrator.
        await ctx.send('https://discord.com/oauth2/authorize?client_id=767504106633035796&scope=bot&permissions=8')
def setup(client):
    # Extension entry point: discord.py calls this when loading the module.
    client.add_cog(Others(client))
10,205 | 027e69f64c3a06db55de882c1499177345fe0784 | #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    #n = int( input().strip() )
    # Hard-coded sample inputs instead of reading n from stdin.
    for n in [ 1, 4, 6, 8, 21, 24 ]:
        res = ""
        # Even n in [2,5] or > 20 is "not weird"; everything else is "weird".
        if n%2 == 0 and ( n in range( 2, 6 ) or n > 20 ):
            res += "Not"
        res += "Weird"
        # NOTE(review): prints "NotWeird" with no space; the classic HackerRank
        # task expects "Not Weird" — confirm which output is intended.
        print( str( n ) + ": " + res )
10,206 | db0a963a8de1b3db7b73fdff09bdb87895fde7f6 | from pyvmmonitor_core.compat import * |
10,207 | 1f06bfd8f5226e1c8bdd3824da1dfab299b6c115 | def myfunc(*args):
evens = []
for item in args:
if item%2 == 0:
evens.append(item)
return evens
# Demo: collect the even numbers from 1..6 and show them.
nums = myfunc(1,2,3,4,5,6)
print(nums)
def myfunc(string):
    """
    Alternate the case of *string*: 1st character lowercase, 2nd uppercase, ...

    Prints the result (preserving the original behavior) and ALSO returns it —
    the original only printed, so `word = myfunc('Robert')` was always None
    and the caller's print(word) emitted "None".
    """
    s = ''
    # enumerate replaces the manual counter; even index == odd 1-based position.
    for index, letter in enumerate(string):
        if index % 2 == 0:
            s += letter.lower()
        else:
            s += letter.upper()
    print(s)
    return s
# Demo: alternate-case 'Robert' and show the result.
word = myfunc('Robert')
print(word)
10,208 | fc1c6d6b8cd08d3c35c3057e60206bb9aeff0d38 | from django.core.management.base import BaseCommand, CommandError
from django.core.serializers.json import DjangoJSONEncoder
from urllib.request import urlopen
import json
from shows.models import Episode
from shows.models import Show
from datetime import date
from datetime import datetime
from datetime import timedelta
from bs4 import BeautifulSoup
# Cron job that fetches episodes from wikipedia.
class Command(BaseCommand):
    """Management command: rebuild the Episode table by scraping Wikipedia."""
    def handle(self, *args, **options):
        # first remove all episodes
        allEps = Episode.objects.all()
        for ep in allEps:
            ep.delete()
        # now add episodes
        shows = Show.objects.all();
        for show in shows:
            # handle current show
            url = show.wiki_url
            try:
                self.update_episodes_for_show(url, show.show_name)
            except:
                # NOTE(review): bare except hides scraping/parse failures for a
                # show; consider logging before skipping.
                continue;
        # check for new seasons
        self.check_new_seasons();
    # Checks for any new seasons of the show and if so adds them to the database
    # and removes much older seasons
    def check_new_seasons(self):
        shows = Show.objects.all()
        for show in shows:
            resp = urlopen(show.wiki_url)
            html = BeautifulSoup(resp.read())
            try:
                # The "Next" link in the infobox points at the next season page.
                nextSeason = html.findAll(text="Next")[0].parent.find_next_sibling("a").get('href');
                nsurl = 'http://en.wikipedia.org'
                # hrefs may or may not start with '/', normalize either way.
                if nextSeason.find("/") == 0:
                    nsurl = nsurl + nextSeason;
                else:
                    nsurl = nsurl + '/' + nextSeason;
                name = html.findAll(text="Next")[0].parent.find_next_sibling("a").contents[0];
                season_exists = False
                for s in shows:
                    if s.wiki_url == nsurl:
                        season_exists = True
                if season_exists:
                    # see if this season is old and can be removed
                    episodeTable = html.find("span", id="Episodes").parent.find_next_sibling("table");
                    dateSpans = episodeTable.find_all("span", attrs={"class": "published"})
                    if dateSpans and len(dateSpans) > 0:
                        episodeDate = datetime.strptime(dateSpans[0].string, '%Y-%m-%d').date()
                        # Drop seasons whose first episode aired > ~2 years ago.
                        if episodeDate < (date.today() - timedelta(days = 700)):
                            show.delete();
                    continue
                seasonName = self.form_show_season_name(show.show_name, name);
                newSeason = Show(show_name = seasonName, wiki_url = nsurl);
                newSeason.save()
            except:
                # NOTE(review): bare except also swallows DB errors — confirm.
                continue;
    # Helper to form the new show + season name based on the current name and the
    # name of the new season.
    def form_show_season_name(self, currName, seasonName):
        seasonIndex = currName.find("Season");
        rootName = currName;
        if seasonIndex > 0:
            # Strip an existing "... Season N" suffix to get the base title.
            rootName = currName[0:seasonIndex];
        else:
            # Handle compact "S<N>" suffixes near the end of the name.
            if currName.rfind("S") > len(currName) - 4:
                rootName = currName[0:currName.rfind("S")]
        return rootName + " " + seasonName;
    def update_episodes_for_show(self, url, showname):
        """Scrape the episode table at *url* and store episodes aired in the last 30 days."""
        print("updating episodes for: " + showname + " " + url + "\n")
        response = urlopen(url)
        data = response.read()
        html = BeautifulSoup(data)
        episodeTable = html.find("span", id="Episodes").parent.find_next_sibling("table");
        dateSpans = episodeTable.find_all("span", attrs={"class": "published"})
        min_date = date.today() - timedelta(days = 30)
        count = 0;
        for ds in dateSpans:
            count = count + 1
            #title = ds.parent.parent.find_previous_sibling("td", attrs={"class": "summary"}).contents[0]
            title = "Episode " + str(count)
            datestr = ds.string
            epdate = datetime.strptime(datestr, '%Y-%m-%d').date()
            # Skip episodes older than the 30-day window.
            if (epdate < min_date) :
                continue
            ep = Episode(show_name=showname, episode_name = title, date = epdate)
            ep.save()
|
10,209 | 93e3bc6c103b47aa13c79f7f60b0b6656efd2a82 | class Model1(object):
def __init__( self, root = None, expanded = None):
self.root = root or []
self.expanded = expanded or []
self.flattened_list = []
self.listeners = []
self.depths = {}
self.filters = []
self.donotexpand = []
class Model2(object):
    # Variant: bulk-assign attributes through vars(self).update(...).
    def __init__( self, root = None, expanded = None):
        vars(self).update(root = root or [],
                          expanded = expanded or [],
                          flattened_list = [],
                          listeners = [],
                          depths = {},
                          filters = [],
                          donotexpand = [])
class Model3(object):
    # Variant: bulk-assign attributes directly via self.__dict__.update(...).
    def __init__( self, root = None, expanded = None):
        self.__dict__.update(root = root or [],
                             expanded = expanded or [],
                             flattened_list = [],
                             listeners = [],
                             depths = {},
                             filters = [],
                             donotexpand = [])
class Model4(object):
    # Variant: assign attributes one by one through setattr in a loop.
    def __init__( self, root = None, expanded = None):
        for key, val in [('root', root or []),
                         ('expanded', expanded or []),
                         ('flattened_list', []),
                         ('listeners', []),
                         ('depths', {}),
                         ('filters', []),
                         ('donotexpand', [])]:
            setattr(self, key, val)
if __name__ == '__main__':
    from timeit import Timer
    # Python 2 script (print statements): times construction of the four
    # equivalent initializer styles to compare their overhead.
    print 'Model1', Timer('Model1()', 'from __main__ import Model1').timeit()
    print 'Model2', Timer('Model2()', 'from __main__ import Model2').timeit()
    print 'Model3', Timer('Model3()', 'from __main__ import Model3').timeit()
    print 'Model4', Timer('Model4()', 'from __main__ import Model4').timeit()
|
10,210 | c859908f65cda4fbc88d717f662b7259779007a6 | # An OAuth access token is needed, see: https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/creating-a-personal-access-token
# Rate limit is 500 per day or 50 if you do not meet certain requirements.
# For more informations see: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs#set-organization-membership-for-a-user
import requests
import time
import sys
import getopt
# --- CLI entry point: batch-invite a list of emails to a GitHub org ----------
examplecommandline = 'Expecting 4 arguments: github_batchadd.py -o <your_organistionname> -u <github_username> -t <github_personal_token> -f <list_of_emails_input_file>'
# argv[0] + 4 flag/value pairs = 9 entries.
if (len(sys.argv)!=9):
    print(examplecommandline)
    sys.exit(2)
org = ''
username = ''
token = ''
inputfile = ''
try:
    opts, args = getopt.getopt(sys.argv[1:],"ho:u:t:f:",["organisation=","username=","token=","listofemailsfile="])
except getopt.GetoptError:
    print(examplecommandline)
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print(examplecommandline)
        sys.exit(0)
    elif opt in ("-o", "--iorganisation"):
        org = arg
    elif opt in ("-u", "--iusername"):
        username = arg
    elif opt in ("-t", "--itoken"):
        token = arg
    elif opt in ("-f", "--ifile"):
        inputfile = arg
h = {
    'Content-type': 'application/json',
    'Accept' : 'application/vnd.github.v3+json'
}
try:
    with open(inputfile) as f:
        content = f.readlines()
except:
    # NOTE(review): bare except also catches KeyboardInterrupt etc.; prefer OSError.
    print('File could not be opened.')
    sys.exit(3)
content = [line.strip() for line in content]
invitecount = 0
for email in content:
    if (email!=""):
        # One invitation per non-blank line; basic-auth with username + token.
        r = requests.post('https://api.github.com/orgs/' + org + '/invitations', headers=h, json={"email":email}, auth = (username, token))
        # Throttle to stay under GitHub's rate limits (see header comments).
        time.sleep(1)
        print(r.status_code, r.reason)
        print(r.text)
        # 201 Created is the only success status for this endpoint.
        if (r.status_code!=201):
            print("Error occurred. " + str(invitecount) + " have been invited. See error information above.")
            sys.exit(4)
        invitecount+=1
print("Finished. " + str(invitecount) + " has been invited.")
|
10,211 | bf5653c6239e12b362f8eeebce1c0d0570c29d73 | from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ProfileSerializer,ProjectSerializer
from django.http.response import HttpResponseRedirect
from django.urls import reverse
from review.forms import ReviewForm, SignUpForm,UserProfileForm,ProjectForm
from review.models import Profile,Project, Review,User
from django.contrib.auth import authenticate, login
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from review import serializer
# Create your views here.
def homepage(request):
    """Render the landing page with all profiles, projects and reviews."""
    context = {
        'profile': Profile.objects.all(),
        'projects': Project.objects.all(),
        'title': 'Review',
        'review': Review.objects.all(),
    }
    return render(request, 'index.html', context)
def SignUp(request):
    """Register a new user account, log them in, then send them to the login page."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            # Re-authenticate with the just-created credentials so login() works.
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username,password=raw_password)
            login(request, user)
            # NOTE(review): logging in and then redirecting to 'login' is
            # unusual — confirm whether 'homepage' was intended here.
            return redirect('login')
    else:
        form = SignUpForm()
    return render(request,'auth/signup.html',{'form': form})
@login_required(login_url='/accounts/login/')
def profile(request):
    """Let the logged-in user edit their profile (form bound to request.user)."""
    if request.method == 'POST':
        # request.FILES carries the uploaded profile image, if any.
        profile_form = UserProfileForm(request.POST, request.FILES, instance=request.user)
        if profile_form.is_valid():
            profile_form.save()
            return redirect('homepage')
    else:
        profile_form = UserProfileForm(instance=request.user)
    return render(request, 'profile.html',{ "profile_form": profile_form})
@login_required(login_url='/accounts/login')
def new_project(request):
    """Create a project owned by the current user."""
    current_user = request.user
    if request.method == 'POST':
        form = ProjectForm(request.POST,request.FILES)
        if form.is_valid():
            # Defer saving so the owner can be attached before the DB insert.
            new_project = form.save(commit=False)
            new_project.user = current_user
            new_project.save()
            return redirect('homepage')
    else:
        form = ProjectForm()
    return render(request, 'new_project.html',{"form":form})
@login_required(login_url='/accounts/login')
def project(request,id):
    """
    Show one project's detail page with its reviews.

    Fix: the original fetched `Review.objects.get(id=id)` — a single review
    whose primary key happens to equal the PROJECT id (raising DoesNotExist
    when absent, and returning an unrelated review otherwise). Reviews carry
    a `project` foreign key (set in project_review), so fetch them all for
    this project instead.
    """
    project = Project.objects.get(id=id)
    reviews = Review.objects.filter(project=project)
    return render(request,'project.html',{'project':project,'reviews':reviews})
@login_required(login_url='/accounts/login/')
def project_review(request, proj_id):
    """Submit a design/usability/content review for a project."""
    prj = Project.get_project_by_id(id=proj_id)
    # Fetched twice: prj feeds the template, project is attached to the review.
    project = get_object_or_404(Project,pk=proj_id)
    current_user = request.user
    if request.method == 'POST':
        form = ReviewForm(request.POST)
        if form.is_valid():
            design = form.cleaned_data['design']
            usability = form.cleaned_data['usability']
            content = form.cleaned_data['content']
            review = Review()
            review.user = current_user
            review.project = project
            review.content = content
            review.usability = usability
            review.design = design
            # Overall score = plain mean of the three criteria.
            review.average = (review.content + review.usability + review.design)/3
            review.save()
            return HttpResponseRedirect(reverse('projectinfo', args=(project,)))
    else:
        form = ReviewForm()
    # Invalid POSTs fall through here and re-render with the bound form.
    return render(request,'review.html',{'user':current_user,'form':form,'project':prj})
class ProjectList(APIView):
    """Read-only API endpoint: every Project, serialized as JSON."""
    def get(self,request,format= None):
        # many=True serializes the whole queryset in one pass.
        projects = Project.objects.all()
        return Response(ProjectSerializer(projects, many=True).data)
class ProfileList(APIView):
    """Read-only API endpoint: every Profile, serialized as JSON."""
    def get(self, request, format=None):
        # many=True serializes the whole queryset in one pass.
        profiles = Profile.objects.all()
        return Response(ProfileSerializer(profiles, many=True).data)
|
10,212 | 7f42f7f2815ce595c5b5a061f7c54aa3d4777ed8 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from mysite.views import test,welcome,books
admin.autodiscover()
# NOTE(review): patterns() and string-prefix URL syntax are Django <= 1.7
# idioms (patterns was removed in 1.10); this file targets an old Django.
urlpatterns = patterns('',
    ('^test/',test),
    ('^welcome/',welcome),
    ('^books/',books),
    (r'^admin/',include(admin.site.urls)),
)
|
10,213 | c40e1d3c794232f6d2f7311067eba0b851c46067 | from procedures import BuildProcedure
from buildbot.steps.source import Git
from buildbot.steps.shell import Test, SetProperty
from buildbot.steps.slave import SetPropertiesFromEnv
from buildbot.process.properties import WithProperties
def Emacs():
    # Render-time lookup of the build's EMACS property, falling back to the
    # plain "emacs" executable when the property is unset.
    return WithProperties(
        '%(EMACS)s'
        , EMACS=lambda build: build.getProperties().getProperty('EMACS','emacs')
    )
def EmacsTest(*args, **kw):
    # Build a buildbot Test step that runs Emacs in batch-ish mode.
    # Keyword args become '--key value' CLI pairs via reduce (Python 2: reduce
    # is a builtin; under Python 3 this would need functools.reduce).
    return Test(
        command=[Emacs(), '--no-splash', '--debug-init'] + (
            list(args)
            + reduce(lambda r, kv: r+['--'+kv[0],kv[1]], kw.items(), [])),
        # Point HOME at the throwaway dir created in GitHubElisp below.
        env = { 'HOME': WithProperties('%(FakeHome)s') },
        timeout = kw.get('timeout', 40),
        logfiles = dict(testlog=dict(filename='test.log'))
    )
class GitHubElisp(BuildProcedure):
    """Build procedure: clone a GitHub elisp repo and run its test files."""
    def __init__(self, repo, *testnames):
        BuildProcedure.__init__(self, 'elisp')
        self.addSteps(
            Git(repourl='git://github.com/%s.git' % repo),
            SetPropertiesFromEnv(variables=['EMACS']),
            # Ask Emacs itself for a temp dir, stored as the FakeHome property
            # so tests run with an isolated $HOME.
            SetProperty(
                command=[Emacs(), '--batch', '--eval',
                         '(princ (make-temp-file "home" t ".bbot"))'],
                extract_fn=lambda rc, stdout, stderr: dict(FakeHome=stdout)
            ))
        # Default to the conventional test/test.el when no names are given.
        for t in testnames or ['test/test']:
            self.addStep(EmacsTest(load= t+'.el'))
|
10,214 | 2a974f2c94a6c46c3ba7a1d34c65a4acb9f4c6b0 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
This module contains the definition of a base class for
feature map. Several types of commonly used approaches.
"""
import numpy as np
from qiskit import CompositeGate, QuantumCircuit, QuantumRegister
from qiskit.extensions.standard.u1 import U1Gate
from qiskit.extensions.standard.u2 import U2Gate
from qiskit_aqua.algorithms.components.feature_maps import FeatureMap
class FirstOrderExpansion(FeatureMap):
    """
    Mapping data with the first order expansion without entangling gates.
    Refer to https://arxiv.org/pdf/1804.11326.pdf for details.
    """
    # Pluggable-component configuration consumed by qiskit_aqua's registry.
    FIRST_ORDER_EXPANSION_CONFIGURATION = {
        'name': 'FirstOrderExpansion',
        'description': 'First order expansion for feature map',
        'input_schema': {
            '$schema': 'http://json-schema.org/schema#',
            'id': 'First_Order_Expansion_schema',
            'type': 'object',
            'properties': {
                'depth': {
                    'type': 'integer',
                    'default': 2,
                    'minimum': 1
                }
            },
            'additionalProperties': False
        }
    }
    def __init__(self, configuration=None):
        super().__init__(configuration or self.FIRST_ORDER_EXPANSION_CONFIGURATION.copy())
        self._ret = {}
    def init_args(self, num_qubits, depth):
        # num_qubits must match the data dimension (enforced in construct_circuit).
        self._num_qubits = num_qubits
        self._depth = depth
    def _build_composite_gate(self, x, qr):
        # Per repetition: a U2(0, pi) (Hadamard-equivalent) followed by a
        # data-dependent U1(2*x[i]) phase on each qubit — no entanglers.
        # NOTE(review): CompositeGate/_attach are pre-Terra-0.7 internals,
        # removed in later qiskit releases.
        composite_gate = CompositeGate("first_order_expansion",
                                       [], [qr[i] for i in range(self._num_qubits)])
        for _ in range(self._depth):
            for i in range(x.shape[0]):
                composite_gate._attach(U2Gate(0, np.pi, qr[i]))
                composite_gate._attach(U1Gate(2 * x[i], qr[i]))
        return composite_gate
    def construct_circuit(self, x, qr=None, inverse=False):
        """
        Construct the first order expansion based on given data.
        Args:
            x (numpy.ndarray): 1-D to-be-transformed data.
            qr (QauntumRegister): the QuantumRegister object for the circuit, if None,
                                  generate new registers with name q.
            inverse (bool): whether or not inverse the circuit
        Returns:
            QuantumCircuit: a quantum circuit transform data x.
        """
        if not isinstance(x, np.ndarray):
            raise TypeError("x should be numpy array.")
        if x.ndim != 1:
            raise ValueError("x should be 1-D array.")
        if x.shape[0] != self._num_qubits:
            raise ValueError("number of qubits and data dimension must be the same.")
        if qr is None:
            qr = QuantumRegister(self._num_qubits, 'q')
        qc = QuantumCircuit(qr)
        composite_gate = self._build_composite_gate(x, qr)
        qc._attach(composite_gate if not inverse else composite_gate.inverse())
        return qc
|
10,215 | 99084c12239034766371f8fce3538a3a8f5736ba | def cruise(filename, outname):
infile = open(filename, "r+")
outfile = open(outname, "w+")
lines = infile.readlines()
T = int(lines[0])
line_num = 1
for i in range(T):
D = int(lines[line_num].split(" ")[0])
N = int(lines[line_num].split(" ")[1])
max_time = 0
line_num += 1
for j in range(N):
K_i = int(lines[line_num+j].split(" ")[0])
S_i = int(lines[line_num+j].split(" ")[1])
max_time = max(max_time, float(D-K_i)/S_i)
line_num += N
max_speed = float(D)/max_time
outfile.write("Case #" + str(i+1) + ": " + str(max_speed) + "\n")
infile.close()
outfile.close()
|
10,216 | 63026794355a652feb605695eec7ab379364d51b | import numpy
import tkinter
from tkinter import filedialog
from matplotlib import pyplot as plt
def LoadTexturesFromBytes(texturesBytes):
    """
    Parse a .textures blob into a dict {render mode name: HxWxC float32 array}.

    Each record is: int32 name byte-length, UTF-16 name, int32 width, height,
    channels, bytes-per-element (2 = float16, 4 = float32), then the raw
    pixel data, repeated until the buffer is exhausted.
    """
    elementDtypes = {2: 'float16', 4: 'float32'}
    textures = {}
    cursor = 0
    end = len(texturesBytes)

    def readInt32(pos):
        # One little int32 at byte offset pos.
        return numpy.frombuffer(buffer=texturesBytes, dtype='int32', count=1, offset=pos).item()

    while cursor < end:
        nameSize = readInt32(cursor)
        cursor += 4
        renderMode = texturesBytes[cursor:cursor + nameSize].decode('utf-16')
        cursor += nameSize
        width = readInt32(cursor)
        cursor += 4
        height = readInt32(cursor)
        cursor += 4
        channels = readInt32(cursor)
        cursor += 4
        elementSizeInBytes = readInt32(cursor)
        cursor += 4
        elementCount = width * height * channels
        flat = numpy.frombuffer(buffer=texturesBytes, dtype=elementDtypes[elementSizeInBytes],
                                count=elementCount, offset=cursor)
        cursor += elementCount * elementSizeInBytes
        # Normalize every texture to float32, shaped (height, width, channels).
        textures[renderMode] = numpy.reshape(flat, (height, width, channels)).astype('float32')
    return textures
# This script loads all textures from a .textures file into numpy arrays and displays them
# A hidden Tk root is needed only to host the file-open dialog.
tk = tkinter.Tk()
tk.withdraw()
texturesFilename = filedialog.askopenfile(title='Choose .textures file').name
texturesBytes = open(texturesFilename, 'rb').read()
textures = LoadTexturesFromBytes(texturesBytes)
for renderMode in textures:
    texture = textures[renderMode]
    print(renderMode + ': ' + str(texture.shape))
    # One blocking matplotlib window per render mode, titled by its name.
    plt.title(renderMode)
    plt.imshow(texture)
    plt.show()
|
10,217 | 7b540b0c3aacc8fe379e095c9a26d6ec724eaad1 | """
test_get_webpage.py -- Given a URI of a webpage, return a python
structure representing the attributes of the webpage
Version 0.1 MC 2013-12-27
-- Initial version
Version 0.2 MC 2014-09-21
-- Update for PEP 8, Tools 2
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
from vivofoundation import get_webpage
from datetime import datetime
# Python 2 script: fetch a fixed list of VIVO profile pages and print each
# parsed result, bracketed by start/finish timestamps.
print datetime.now(), "Start"
webpages = \
    [
        "http://vivo.ufl.edu/individual/n3549388983",
        "http://vivo.ufl.edu/individual/n5167070257",
        "http://vivo.ufl.edu/individual/n7734440333",
        "http://vivo.ufl.edu/individual/n4996654872",
        "http://vivo.ufl.edu/individual/n2167668630",
        "http://vivo.ufl.edu/individual/n4627222448",
        "http://vivo.ufl.edu/individual/n328795",
        "http://vivo.ufl.edu/individual/n2274340",
        "http://vivo.ufl.edu/individual/n7404140895",
        "http://vivo.ufl.edu/individual/n8657219888"
    ]
for webpage in webpages:
    print "\n", get_webpage(webpage)
print datetime.now(), "Finish"
|
10,218 | fadb4967afd5bd91e56243d84119169fb8c42d44 | import os
import cv2 as cv
import numpy as np
from time import sleep
def save_image(img, path):
    # Persist a frame to disk; the image format is inferred from the extension.
    cv.imwrite(path, img)
def show_image(img):
    # Display in a window named 'frame' (needs a cv.waitKey pump elsewhere).
    cv.imshow('frame', img)
def detect_face(cascade, image):
    """Return True when the Haar cascade finds at least one face in *image*."""
    # Work on a copy so the caller's frame is untouched; detection runs on gray.
    grayscale = cv.cvtColor(image.copy(), cv.COLOR_BGR2GRAY)
    detections = cascade.detectMultiScale(grayscale, scaleFactor=1.1, minNeighbors=5)
    return len(detections) > 0
class Camera:
    """Thin wrapper around the default USB camera (device index 0)."""
    def __init__(self):
        self.cam = cv.VideoCapture(0)
        # Target (width, height) every grabbed frame is resized to.
        self.frame_size = (800, 550)
        # Flag exposed to callers; not used inside this class.
        self.recording = False
    def __del__(self):
        # Release the device and all OpenCV windows when garbage-collected.
        self.cam.release()
        cv.destroyAllWindows()
    # Grabs frame from passed VideoCapture object (usb camera)
    def grab_frame(self):
        ret, frame = self.cam.read()
        # Reconnect loop: retry every 5 seconds until a frame arrives.
        while ret is False:
            print("No camera detected")
            sleep(5)
            self.cam = cv.VideoCapture(0)
            ret, frame = self.cam.read()
        frame = cv.resize(frame, self.frame_size, interpolation=cv.INTER_NEAREST)
        # Stretch intensities to the full 0-255 range before returning.
        norm = np.zeros(self.frame_size)
        norm = cv.normalize(frame, norm, 0, 255, cv.NORM_MINMAX)
        # found = detect_face(self.cascade, norm)
        return norm
|
10,219 | f7edfb23d4bc14900e1a3ea7d2496fc5b14ac52f | import unittest
import os
from org.geppetto.recording.creators import NeuronRecordingCreator
from org.geppetto.recording.creators.tests.abstest import AbstractTestCase
class NeuronRecordingCreatorTestCase(AbstractTestCase):
    """Unittests for the NeuronRecordingCreator class."""
    # All tests read fixture files relative to the working directory
    # (neuron_recordings/, neuron_models/) and write .h5 recordings.
    def test_text_recording_1(self):
        c = NeuronRecordingCreator('test_text_recording_1.h5')
        self.register_recording_creator(c)
        # GUI-exported trace: explicit units for time (ms) and voltage (mV).
        c.add_text_recording(os.path.abspath('neuron_recordings/text/graph_gui.dat'), variable_units=['ms', 'mV'])
        self.assertAlmostEquals(c.values['soma.segmentAt0_5.v'], [-65, -65.0156, -65.0244, -65.0285])
        self.assertEqual(c.units['soma.segmentAt0_5.v'], 'mV')
        self.assertAlmostEquals(c.time_points, [0, 0.025, 0.05, 0.075])
        self.assertEqual(c.time_unit, 'ms')
        c.create()
    def test_text_recording_2(self):
        c = NeuronRecordingCreator('test_text_recording_2.h5')
        self.register_recording_creator(c)
        # printf-style output: variable names are parsed from the file itself.
        c.add_text_recording(os.path.abspath('neuron_recordings/text/printf.dat'))
        self.assertAlmostEquals(c.values['ica'], [-0.000422814, -0.000422814])
        self.assertAlmostEquals(c.values['ica_nacax'], [-0.00028025, -0.00028025])
        self.assertAlmostEquals(c.values['ica_capump'], [0, 0])
        self.assertAlmostEquals(c.values['ica_cachan'], [-0.000142564, -0.000142564])
        self.assertAlmostEquals(c.values['ica_pmp_cadifpmp'], [0, 0.00083607])
        self.assertAlmostEquals(c.time_points, [0, 0.025])
        c.create()
    def test_text_recording_3(self):
        c = NeuronRecordingCreator('test_text_recording_3.h5')
        self.register_recording_creator(c)
        # Time and voltage come from two separate vector_printf files.
        c.add_text_recording(os.path.abspath('neuron_recordings/text/vector_printf_time.dat'), time_column=0)
        c.add_text_recording(os.path.abspath('neuron_recordings/text/vector_printf_voltage.dat'), variable_names=['soma.segmentAt0_5.v'], variable_units=['mV'])
        self.assertAlmostEquals(c.values['soma.segmentAt0_5.v'], [-65, -65.0156, -65.0244, -65.0285])
        self.assertEqual(c.units['soma.segmentAt0_5.v'], 'mV')
        self.assertAlmostEquals(c.time_points, [0, 0.025, 0.05, 0.075])
        c.create()
    def test_binary_recording(self):
        c = NeuronRecordingCreator('test_binary_recording.h5')
        self.register_recording_creator(c)
        c.add_binary_recording(os.path.abspath('neuron_recordings/binary/voltage.dat'), variable_name='v', variable_unit='mV')
        c.add_binary_recording(os.path.abspath('neuron_recordings/binary/time.dat'), variable_name='t', variable_unit='ms', is_time=True)
        # TODO: Make test recordings shorter and run assertEquals.
        c.create()
    def test_corrupted_binary_recording(self):
        c = NeuronRecordingCreator('test_corrupted_binary_recording.h5')
        self.register_recording_creator(c)
        # A truncated/garbled binary file must surface as an IOError.
        self.assertRaises(IOError, c.add_binary_recording, os.path.abspath('neuron_recordings/binary/corrupted.dat'), 'name')
    def test_hoc_model(self):
        c = NeuronRecordingCreator('test_hoc_model.h5')
        self.register_recording_creator(c)
        c.record_model(os.path.abspath('neuron_models/sthB.hoc'))
        # TODO: Make test model shorter and run assertEquals.
        c.create()
    def test_py_model(self):
        c = NeuronRecordingCreator('test_py_model.h5')
        self.register_recording_creator(c)
        c.record_model(os.path.abspath('neuron_models/sthB.py'))
        # TODO: Make test model shorter and run assertEquals.
        c.create()
if __name__ == '__main__':
    unittest.main()  # automatically executes all methods above that start with 'test_'
|
10,220 | a8c00f46b749a7454169cfe8c2bfa521f81cd24e | # gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
from sources import sources
import string
# numpy
import numpy
# shuffle
from random import shuffle
# logging
import logging
import os.path
import sys
import _pickle as pickle
# Route INFO-level log records from the root logger to stdout, timestamped.
log = logging.getLogger()
log.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class LabeledLineSentence(object):
    """
    Streams gensim TaggedDocument objects from the corpus files under ./data,
    tagging line i of each file as '<PREFIX>_<i>'.
    """
    def __init__(self, sources):
        # sources: {filename: tag prefix}; each prefix must be unique.
        self.sources = sources
        flipped = {}
        # make sure that keys are unique
        for key, value in sources.items():
            if value not in flipped:
                flipped[value] = [key]
            else:
                raise Exception('Non-unique prefix encountered')
    def __iter__(self):
        # Lazy variant: yields one TaggedDocument per corpus line.
        for source, prefix in self.sources.items():
            with utils.smart_open('data/' + source) as fin:
                for item_no, line in enumerate(fin):
                    yield TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no])
    def to_array(self):
        # Eager variant: materializes (and caches) every sentence so that
        # sentences_perm() can reshuffle them between epochs.
        self.sentences = []
        for source, prefix in self.sources.items():
            with utils.smart_open('data/' + source) as fin:
                for item_no, line in enumerate(fin):
                    self.sentences.append(TaggedDocument(
                        utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
        return self.sentences
    def sentences_perm(self):
        # Requires to_array() to have been called first (sets self.sentences).
        shuffle(self.sentences)
        return self.sentences
from pathlib import Path
# Train and cache the Doc2Vec model only when no saved model exists yet
# (the original "# file exists" comment stated the opposite of the check).
if not Path("./imdb.d2v").is_file():
    sentences = LabeledLineSentence(sources)
    model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=16)
    model.build_vocab(sentences.to_array())
    for epoch in range(20):
        log.info('Epoch %d' % epoch)
        # Reshuffle the sentences each epoch before training on them.
        model.train(sentences.sentences_perm(),
                    total_examples=model.corpus_count,
                    epochs=model.iter,
                    )
    model.save('./imdb.d2v')
# Always continue from the on-disk model, freshly trained or cached.
model = Doc2Vec.load('./imdb.d2v')
log.info('Sentiment')
# Build train/test matrices from the stored document vectors: the first half
# of each array holds positive examples (label 1), the second half negative
# ones (label 0). 100 columns = the Doc2Vec vector size above.
train_size = 100
half_train_size = int(train_size/ 2 )
train_arrays = numpy.zeros((train_size, 100))
train_labels = numpy.zeros(train_size)
for i in range(half_train_size):
    prefix_train_pos = 'TRAIN_POS_' + str(i)
    prefix_train_neg = 'TRAIN_NEG_' + str(i)
    train_arrays[i] = model.docvecs[prefix_train_pos]
    train_arrays[half_train_size + i] = model.docvecs[prefix_train_neg]
    train_labels[i] = 1
    train_labels[half_train_size + i] = 0
test_size = 100
half_test_size = int(test_size/ 2 )
test_arrays = numpy.zeros((test_size, 100))
test_labels = numpy.zeros(test_size)
for i in range(half_test_size):
    prefix_test_pos = 'TEST_POS_' + str(i)
    prefix_test_neg = 'TEST_NEG_' + str(i)
    test_arrays[i] = model.docvecs[prefix_test_pos]
    test_arrays[half_test_size + i] = model.docvecs[prefix_test_neg]
    test_labels[i] = 1
    test_labels[half_test_size + i] = 0
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.datasets import load_svmlight_file
from sklearn import preprocessing
import pylab as pl
from sklearn.metrics import classification_report
from knn_naive import KnnClassifier
# Compare the hand-rolled KNN (knn_naive) against scikit-learn's, both k=9.
knn = KnnClassifier(9, 2)
print ('Fitting knn...')
knn.fit(train_arrays, train_labels)
print ('Predicting...')
pred = knn.predict(test_arrays)
print ('Score:')
print (knn.score(test_arrays, test_labels))
print ('Confusion matrix:')
print (knn.confusion_matrix(test_labels, pred))
# build the scikit-learn kNN
neigh = KNeighborsClassifier(n_neighbors=9, metric='euclidean')
print ('Fitting knn...')
neigh.fit(train_arrays, train_labels)
# classifier prediction
print ('Predicting...')
y_pred = neigh.predict(test_arrays)
# show the classifier's accuracy on the test set
print ('Score:')
print (neigh.score(test_arrays, test_labels))
# build the confusion matrix
print ('Confusion matrix:')
cm = confusion_matrix(test_labels, y_pred)
print (cm)
# print ('Report:')
# print (classification_report(test_labels, y_pred))
# pl.matshow(cm)
# pl.colorbar()
# pl.show()
|
10,221 | 63433e91668d0a19a6072a881599b611b7d5be72 | from django.urls import path
from curricula.api.views import (
carrera,
anio_lectivo,
anio,
materia,
evaluacion,
)
# URL routing table for the curricula API (CRUD + list endpoints per model).
urlpatterns = [
    # Carrera (degree program)
    path("carrera/", carrera.create_carrera, name="carrera-create"),
    path(
        "carrera/<int:pk>/",
        carrera.view_edit_carrera,
        name="carrera-view-edit",
    ),
    path("carrera/list/", carrera.list_carrera, name="carrera-list"),
    # Anio (academic year)
    path("anio/", anio.create_anio, name="anio-create"),
    path("anio/<int:pk>/", anio.view_edit_anio, name="anio-view-edit"),
    # TODO: decide what to do with this list endpoint
    path(
        "carrera/<int:carrera_id>/anio/list/",
        anio.list_anio,
        # NOTE(review): duplicates the "carrera-list" name used above —
        # probably meant "anio-list"; check reverse()/{% url %} users first.
        name="carrera-list",
    ),
    # Curso (course/section)
    path("curso/", anio.create_curso, name="curso-create"),
    path("curso/<int:pk>/", anio.view_edit_curso, name="curso-view-edit"),
    path("anio/<int:anio_id>/curso/list/", anio.list_curso, name="curso-list"),
    # Materia (subject)
    path(
        "materia/<int:pk>/",
        materia.view_edit_materia,
        name="materia-view-edit",
    ),
    path(
        "anio/<int:anio_id>/materia/list/",
        materia.list_materia,
        name="materia-list",
    ),
    path("materia/", materia.create_materia, name="materia-create"),
    # Evaluacion (assessment)
    path(
        "materia/<int:materia_id>/evaluacion/list/",
        evaluacion.list_evaluacion,
        name="evaluacion-list",
    ),
    path(
        "evaluacion/<int:pk>/",
        evaluacion.view_evaluacion,
        name="evaluacion-view-edit",
    ),
    path(
        "evaluacion/", evaluacion.create_evaluacion, name="evaluacion-create",
    ),
    # Anio Lectivo (school year)
    path(
        "anio_lectivo/",
        anio_lectivo.create_anio_lectivo,
        name="anio-lectivo-create",
    ),
    path(
        "anio_lectivo/list/",
        anio_lectivo.list_anio_lectivo,
        name="anio-lectivo-list",
    ),
    path(
        "anio_lectivo/actual/",
        anio_lectivo.actual_anio_lectivo,
        name="anio-lectivo-actual",
    ),
    path(
        "anio_lectivo/<int:pk>/",
        anio_lectivo.update_anio_lectivo,
        name="anio-lectivo-update",
    ),
]
|
10,222 | fc5bd65b75cdbb48386de74da0798bf7656b7fc3 | ####################################################
# A unit fraction contains 1 in the numerator. The decimal representation of the unit fractions with denominators 2 to 10 are given:
# 1/2 = 0.5
# 1/3 = 0.(3)
# 1/4 = 0.25
# 1/5 = 0.2
# 1/6 = 0.1(6)
# 1/7 = 0.(142857)
# 1/8 = 0.125
# 1/9 = 0.(1)
# 1/10 = 0.1
# Where 0.1(6) means 0.166666..., and has a 1-digit recurring cycle. It can be seen that 1/7 has a 6-digit recurring cycle.
# Find the value of d < 1000 for which 1/d contains the longest recurring cycle in its decimal fraction part.
####################################################
# There are two parts to this problem:
# 1) When does a fraction have a recurring part?
# 2) If it has one, how long will that recurring part be?
# Part 1 is easy: a fraction written in simplest form as m/n has a recurring part when n has a prime divisor that the base doesn't
# We're in base 10, so that means that n has a prime factor that isn't 2 or 5
# Part 2 is harder, but we can solve it using modular arithmetic
# To do this, let's think about what a digit in a decimal representation really means
# If a number has i as the nth digit after the decimal point, we're saying that we get a contribution of i/10^n
# Since a decimal representation is really a way of writing a number as ... + a * 10^2 + b * 10 + c + d * 10^(-1) + e * 10^(-2) + ...
# We're only considering values of the form 1/n for n > 1, so 1/n < 1
# Suppose we have 1/n = a1 * 10^(-1) + a2 * 10^(-2) + ... + an * 10^(-n) + ...
# Note that ai >= 0 for each i
# We can extract the ith digit ai using modular arithmetic as ai = floor(10^i / n) mod 10
# For example, if i = 2, 100/n = a1 * 10 + a2 + a3, so floor(100/n) = a1 * 10 + a2, which is equal to a2 mod 10
# Using this, we can determine the length of the recurring part of the fraction, which is equal to the order of 10 modulo n
def modulo_power(x, b):
    """Return the cycle length of the sequence x, x^2, x^3, ... modulo b.

    Successive residues are recorded with the step at which they first
    appeared; the first repeated residue closes the cycle, and the cycle
    length is the distance between its two occurrences.
    """
    seen = {}
    residue = x % b
    step = 0
    while residue not in seen:
        seen[residue] = step
        residue = (x * residue) % b
        step += 1
    return step - seen[residue]
# Scan all denominators d < 1000 and report the one whose decimal expansion
# of 1/d has the longest repeating cycle. max() keeps the first maximum,
# matching the original strict-greater-than update.
den = max(range(2, 1000), key=lambda n: modulo_power(10, n))
print(den)
10,223 | 2f3238eeb45a1684a218ee6f8ac401f31b005c2d | # 按题目说明解法
class Solution(object):
def lastRemaining(self, n):
nums = [i+1 for i in range(n)]
res = []
while len(nums) > 1:
for i in range(1, len(nums), 2):
res.append(nums[i])
nums, res = res[::-1], []
return nums[0]
# Pattern: if input a yields answer b, then input 2a yields 2 * (a - b + 1).
class Solution(object):
    def lastRemaining(self, n):
        """Closed-form recursion based on the doubling relation above.

        Uses floor division (//) so the recursion stays on integers: the
        original ``n / 2`` produces floats under Python 3, the base case
        ``n == 1`` is never reached, and the recursion never terminates.
        """
        if n == 1:
            return 1
        half = n // 2
        return 2 * (half - self.lastRemaining(half) + 1)
10,224 | 7cdd60a42d19d37584d268be06322fce5b011e84 | # -*- coding: utf-8 -*-
import os
import re
import csv
import unicodedata
csv_path = r"C:\Users\glago\YandexDisk\Fests\AtomCosCon 2022\AtomCosCon 22 - Заявки.csv"
id_row = '#'
folder_path = r"C:\Users\glago\YandexDisk\Fests\AtomCosCon 2022\Tracks"
id_regex_filename = r"^(?P<id>\d{3})"
def make_name(d):
    """Build the canonical, filesystem-safe track name from a CSV row dict."""
    raw = "{0}. {1}. {2}. {3}".format(
        d['#'], d['Начало'], d['Категория'], d['Название номера'])
    return to_filename(raw)
def to_filename(string):
    """Sanitise *string* into a cp1251-safe file name.

    'й' is temporarily swapped for a placeholder so NFD normalisation does
    not split it into 'и' + combining breve; characters that cp1251 cannot
    represent become '?' and are stripped by the final filter.
    """
    name = string.replace('й', "<икраткое>")
    name = unicodedata.normalize('NFD', name).encode('cp1251', 'replace').decode('cp1251')
    name = name.replace("<икраткое>", 'й')
    # Same substitutions, in the same order, as chained .replace() calls.
    for old, new in ((':', ""), ('|', "-"), ('/', "-"), ('\\', "-"),
                     ('"', "'"), ('’', "'"), (' ,', ", "), ('  ', " ")):
        name = name.replace(old, new)
    return ''.join(ch for ch in name if ch not in "*?<>")
# Load the applications CSV into {id: {column: value}} keyed by the '#' column.
with open(csv_path, 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    head = reader.__next__()
    csv_data = {int(row[head.index(id_row)]):
                {head[i]: row[i].strip() for i in range(len(head))} for row in reader if row[head.index(id_row)]}
# Index the track files on disk by the 3-digit id prefix of their names.
# NOTE(review): re.search(...) returns None for files without a 3-digit
# prefix, which raises AttributeError here — confirm the folder only
# contains conforming names.
dir_data = dict()
for file_name in os.listdir(folder_path):
    dir_data[int(re.search(id_regex_filename, file_name).group("id"))] = file_name
# Report CSV rows that have no matching track file.
for num, d in csv_data.items():
    if num not in dir_data.keys():
        # NOTE(review): d['№'] does not match the '#' column used everywhere
        # else (id_row, make_name) — likely a KeyError; verify the CSV header.
        print(f"[NO FILE for № {d['№']} ] {make_name(d)}")
10,225 | a0349cfa08a5095d7b20d9e26953d614655b415f | # Generated by Django 3.2.7 on 2021-09-02 23:58
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the logistics app: City, Product, Supplier, Client."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city_name', models.CharField(max_length=20, verbose_name='Город')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250, unique=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=8)),
                ('availability', models.PositiveIntegerField(verbose_name='Наличие')),
            ],
        ),
        migrations.CreateModel(
            name='Supplier',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='Поставщик')),
                ('legal_form', models.CharField(choices=[('TOV', 'TOV'), ('FOP', 'FOP'), ('PAT', 'PAT')],
                                                max_length=20, verbose_name='Орг.форма')),
                # OneToOne: each city can host at most one supplier.
                ('city', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='logistics.city')),
            ],
        ),
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(default='Client', max_length=20, verbose_name='Имя Клиента')),
                ('last_name', models.CharField(max_length=20, verbose_name='Фамилия Клиента')),
                ('phone', models.CharField(max_length=10, unique=True)),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='logistics.city')),
                ('product', models.ManyToManyField(to='logistics.Product')),
            ],
        ),
    ]
|
10,226 | 5d0d2d9c5c32f9da54462c15fd48d0862f4cdb4c | # Copyright (c) 2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
See how fast deferreds are.
This is mainly useful to compare cdefer.Deferred to defer.Deferred
"""
from twisted.internet import defer
from timer import timeit
# NOTE: this module is Python 2 code (xrange, print statements); run it with
# a Python 2 interpreter.
# Registry of (function, args, iteration_count) tuples run by benchmark().
benchmarkFuncs = []
def benchmarkFunc(iter, args=()):
    """
    A decorator for benchmark functions that measure a single iteration
    count. Registers the function with the given iteration count to the global
    benchmarkFuncs list
    """
    # NOTE: the parameter name `iter` shadows the builtin; kept for
    # backward compatibility with existing call sites.
    def decorator(func):
        benchmarkFuncs.append((func, args, iter))
        return func
    return decorator
def benchmarkNFunc(iter, ns):
    """
    A decorator for benchmark functions that measure multiple iteration
    counts. Registers the function with the given iteration count to the global
    benchmarkFuncs list.
    """
    def decorator(func):
        # One registry entry per problem size n.
        for n in ns:
            benchmarkFuncs.append((func, (n,), iter))
        return func
    return decorator
def instantiate():
    """
    Only create a deferred
    """
    # The unused local is deliberate: we are timing allocation only.
    d = defer.Deferred()
instantiate = benchmarkFunc(100000)(instantiate)
def instantiateShootCallback():
    """
    Create a deferred and give it a normal result
    """
    d = defer.Deferred()
    d.callback(1)
instantiateShootCallback = benchmarkFunc(100000)(instantiateShootCallback)
def instantiateShootErrback():
    """
    Create a deferred and give it an exception result. To avoid Unhandled
    Errors, also register an errback that eats the error
    """
    d = defer.Deferred()
    try:
        1/0
    except:
        # Bare except is intentional: errback() captures the active exception.
        d.errback()
    d.addErrback(lambda x: None)
instantiateShootErrback = benchmarkFunc(200)(instantiateShootErrback)
# Problem sizes shared by the multi-size benchmarks below.
ns = [10, 1000, 10000]
def instantiateAddCallbacksNoResult(n):
    """
    Creates a deferred and adds a trivial callback/errback/both to it the given
    number of times.
    """
    d = defer.Deferred()
    def f(result):
        return result
    for i in xrange(n):
        d.addCallback(f)
        d.addErrback(f)
        d.addBoth(f)
        d.addCallbacks(f, f)
instantiateAddCallbacksNoResult = benchmarkNFunc(20, ns)(instantiateAddCallbacksNoResult)
def instantiateAddCallbacksBeforeResult(n):
    """
    Create a deferred and adds a trivial callback/errback/both to it the given
    number of times, and then shoots a result through all of the callbacks.
    """
    d = defer.Deferred()
    def f(result):
        return result
    for i in xrange(n):
        d.addCallback(f)
        d.addErrback(f)
        d.addBoth(f)
        d.addCallbacks(f)
    d.callback(1)
instantiateAddCallbacksBeforeResult = benchmarkNFunc(20, ns)(instantiateAddCallbacksBeforeResult)
def instantiateAddCallbacksAfterResult(n):
    """
    Create a deferred, shoots it and then adds a trivial callback/errback/both
    to it the given number of times. The result is processed through the
    callbacks as they are added.
    """
    d = defer.Deferred()
    def f(result):
        return result
    d.callback(1)
    for i in xrange(n):
        d.addCallback(f)
        d.addErrback(f)
        d.addBoth(f)
        d.addCallbacks(f)
instantiateAddCallbacksAfterResult = benchmarkNFunc(20, ns)(instantiateAddCallbacksAfterResult)
def pauseUnpause(n):
    """
    Adds the given number of callbacks/errbacks/both to a deferred while it is
    paused, and unpauses it, trigerring the processing of the value through the
    callbacks.
    """
    d = defer.Deferred()
    def f(result):
        return result
    d.callback(1)
    d.pause()
    for i in xrange(n):
        d.addCallback(f)
        d.addErrback(f)
        d.addBoth(f)
        d.addCallbacks(f)
    d.unpause()
pauseUnpause = benchmarkNFunc(20, ns)(pauseUnpause)
def benchmark():
    """
    Run all of the benchmarks registered in the benchmarkFuncs list
    """
    # Print the module first so runs of defer vs cdefer are distinguishable.
    print defer.Deferred.__module__
    for func, args, iter in benchmarkFuncs:
        print func.__name__, args, timeit(func, iter, *args)
if __name__ == '__main__':
    benchmark()
|
10,227 | 32cffe48918261c0094c8ca59d6f72d01884ac2b | from sdoc.application.SDocApplication import SDocApplication
def main():
    """
    Entry point of the sdoc program: create the application object and run it.
    """
    sdoc_application = SDocApplication()
    sdoc_application.run()
# ----------------------------------------------------------------------------------------------------------------------
|
10,228 | 519119ceb5a3bd526ffb5af741eb28969298863d | # -*- coding: utf-8 -*-
# !/usr/bin/env python3
# Function decorator: prints when a function is called along
# with its parameters
def debug(func):
    """Decorator that logs every call to *func* with its arguments.

    Prints the function name plus positional and keyword arguments, then
    delegates to the wrapped function and returns its result.
    """
    import functools  # local import keeps the decorator self-contained

    # functools.wraps preserves __name__/__doc__ of the wrapped function so
    # the printed name (and introspection) stays correct after decoration.
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        print('Function: {} called with args: {} and kwargs: {}'.format(
            func.__name__,
            args,
            kwargs))
        return func(*args, **kwargs)
    return decorated
# Class decorator: wrap every callable attribute of the class with @debug.
def debug_all_functions(cls_obj):
    """Return *cls_obj* with all of its callable attributes debug-wrapped."""
    for attr_name, attr_value in vars(cls_obj).items():
        if callable(attr_value):
            setattr(cls_obj, attr_name, debug(attr_value))
    return cls_obj
# Metaclass: generate a class having all methods debuggable
class DebugMetaclass(type):
    def __new__(mcs, cls_name, bases, cls_dict):
        # Build the class normally, then wrap its callables with @debug.
        cls_obj = super().__new__(mcs, cls_name, bases, cls_dict)
        cls_obj = debug_all_functions(cls_obj)
        return cls_obj
# Finally, our class: every method (including __init__) gets call logging
# via the metaclass.
class MyClass(metaclass=DebugMetaclass):
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def foo(self):
        return self.a
    def bar(self):
        return self.b
if __name__ == '__main__':
    # Demo: each call below prints a debug line before executing.
    instance = MyClass('hello', 'world')
    instance.foo()
    instance.bar()
10,229 | 866930e9038c3f7fc528ef470c4b3e5d3c4fce1f | import monitors
# Connect to a PR650 photometer and take a single spectrum reading.
myPR650 = monitors.Photometer(1)  # device on port 1
myPR650.measure()  # trigger a measurement
# NOTE(review): presumably returns the spectrum of the last measurement —
# confirm the return shape against the monitors.Photometer documentation.
spec = myPR650.getLastSpectrum()
10,230 | 61ab2006f29d1fb7b040b1f2f63317d1a81c1990 | from abc import ABC, abstractmethod
class IMove(ABC):
    """Interface for objects that can move."""

    @abstractmethod
    def move(self):
        """Perform one movement step; subclasses must override."""
|
10,231 | aecab19cb45a60895ccbc91df2f45bcb3221f3c3 | # import the necessary packages
from tracker.centroidtracker import CentroidTracker
from tracker.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
# import pretrained SSD Model - set arguments
prototxt="mobilenet_ssd/MobileNetSSD_deploy.prototxt" #path to Caffe 'deploy' prototxt file (unused: YOLO is loaded below)
model="mobilenet_ssd/MobileNetSSD_deploy.caffemodel" #path to Caffe pre-trained model (unused: YOLO is loaded below)
# NOTE(review): `input` shadows the builtin input(); a non-empty path means
# "read from file", empty string means "use the webcam".
input="videos/test.mp4" #path to optional input video file
output="output/output_01.avi" #path to optional output video file
model_confidence=0.4 #minimum probability to filter weak detections  # NOTE(review): unused — the loop hard-codes 0.98
skip_frames=30 #number of skip frames between detections
classestxt="mobilenet_ssd/yolov3.txt"
# list of class names, one per line
classes = None
with open(classestxt, 'r') as f:
    classes = [line.strip() for line in f.readlines()]
# load model from disk
#net = cv2.dnn.readNetFromCaffe(prototxt, model)
weights="mobilenet_ssd/yolov3.weights"
config="mobilenet_ssd/yolov3.cfg"
net = cv2.dnn.readNet(weights, config)
def get_output_layers(net):
    """Return the names of the network's unconnected (output) layers.

    OpenCV's getUnconnectedOutLayers() yields 1-based indices, but the
    container shape differs across versions: older builds return 1-element
    arrays (so ``i[0]`` worked), while OpenCV >= 4.5.4 returns plain
    scalars (where ``i[0]`` raises). Flattening through NumPy handles both.
    """
    layer_names = net.getLayerNames()
    out_idxs = np.array(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[int(i) - 1] for i in out_idxs]
# if an input file is not specified, open the video stream
if not input:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(input)
# initialize the video writer, initialize the frame dimensions
writer = None
W = None
H = None
# instantiate our centroid tracker, then initialize a list to store each of our dlib correlation trackers, followed by a dictionary to map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
# initialize the total number of frames processed thus far, along with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0
# start the frames per second throughput estimator
fps = FPS().start()
# loop over frames from the video stream
while True:
    # grab the next frame (VideoCapture.read() returns (grabbed, frame))
    frame = vs.read()
    frame = frame[1] if input else frame
    # end of the video
    if input is not None and frame is None:
        break
    # resize the frame and convert the frame from BGR to RGB for dlib
    frame = imutils.resize(frame, width=700)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # set dimension of frame (once, from the first resized frame)
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    # if we are supposed to be writing a video to disk, initialize the writer
    if output is not None and writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(output, fourcc, 30,
                                 (W, H), True)
    # initialize the current status along with our list of bounding box rectangles
    status = "Waiting"
    rects = []
    # check to see if we should run a detection (every skip_frames frames)
    if totalFrames % skip_frames == 0:
        # initialize new set of object trackers
        status = "Detecting"
        trackers = []
        # convert the frame to a blob and pass the blob through the network and obtain the detections
        blob = cv2.dnn.blobFromImage(frame, 0.00392, (800,800), (0,0,0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(get_output_layers(net))
        # NOTE(review): boxes/conf_threshold/nms_threshold are assigned but
        # never used — no non-max suppression is applied to the detections.
        boxes = []
        conf_threshold = 0.5
        nms_threshold = 0.4
        # for each detetion from each output layer
        # get the confidence, class id, bounding box params
        # and ignore weak detections (confidence < 0.5)
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                #print(class_id)
                # NOTE(review): 0.98 is hard-coded here, ignoring the
                # model_confidence setting defined above; class_id 0 is
                # the first entry of the classes file.
                if confidence > 0.98 and class_id == 0:
                    center_x = int(detection[0] * W)
                    center_y = int(detection[1] * H)
                    w = int(detection[2] * W)
                    h = int(detection[3] * H)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    # compute the (x, y)-coordinates of the bounding box
                    # NOTE(review): the end coordinates use the frame size
                    # (W, H) rather than x + w, y + h, so every box extends
                    # to the bottom-right corner of the frame — likely a bug.
                    box = np.array([x, y, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    # construct a dlib rectangle object from the bounding box coordinates and then start the dlib correlation tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    # add the tracker to list of trackers
                    trackers.append(tracker)
    # otherwise, run tracking algorithm
    else:
        # loop over the trackers
        for tracker in trackers:
            # set the status
            status = "Tracking"
            # update the tracker and grab the updated position
            tracker.update(rgb)
            pos = tracker.get_position()
            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            # add the bounding box coordinates to the rectangles list
            rects.append((startX, startY, endX, endY))
    # draw a horizontal line in the center of the frame
    #cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
    # use the centroid tracker to associate boxes with object IDs
    objects = ct.update(rects)
    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists
        to = trackableObjects.get(objectID, None)
        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)
        else:
            # the difference between the y-coordinate of the *current* centroid and the mean of *previous* centroids
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)
            # check to see if the object has been counted or not
            if not to.counted:
                # if the direction is negative and the centroid is above the centerline, count the object
                if direction < 0 and centroid[1] < H // 2:
                    totalUp += 1
                    to.counted = True
                # if the direction is positive and the centroid is below the center line, count the object
                elif direction > 0 and centroid[1] > H // 2:
                    totalDown += 1
                    to.counted = True
        # store the trackable object in dictionary
        trackableObjects[objectID] = to
        # draw both the ID of the object and the centroid of the object on the output frame
        text = "ID {}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
    # construct a tuple of information we will be displaying on the frame
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    # check to see if we should write the frame to disk
    if writer is not None:
        writer.write(frame)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    # increment the total number of frames processed thus far and then update the FPS counter
    totalFrames += 1
    fps.update()
# stop the timer and display FPS information
fps.stop()
print("Time: {:.2f}".format(fps.elapsed()))
print("FPS: {:.2f}".format(fps.fps()))
print("Down: ", totalDown)
print("Up: ", totalUp)
# check to see if we need to release the video writer pointer
if writer is not None:
    writer.release()
# if we are not using a video file, stop the camera video stream
if not input:
    vs.stop()
# otherwise, release the video file pointer
else:
    vs.release()
# close any open windows
cv2.destroyAllWindows()
10,232 | c72e625760e2a94320146539cabfd247be029298 | import itertools
import os
import re
import jieba
from wordcloud import WordCloud
def get_rhyme(f_name):
    """Extract the last word of every lyric line in ./lyrics/<f_name>
    and append it to rhyme_word.txt (comma-separated)."""
    f = open('./lyrics/' + f_name, 'r', encoding='UTF-8')
    text = f.readlines()
    f.close()
    '''处理开头'''
    # Drop everything up to and including the credits marker line.
    # NOTE(review): if neither marker occurs, lyric_drop_head is never
    # assigned and the loop below raises NameError — confirm all inputs
    # contain one of the markers.
    for m, n in enumerate(text):
        if '编曲:' in n:
            lyric_drop_head = text[m + 1:]
        elif '评论数' in n:
            lyric_drop_head = text[m + 1:]
    '''处理结尾'''
    # Cut the tail at the first trailing-credits marker, if any.
    for o, p in enumerate(lyric_drop_head):
        if '制作人:陈令韬/欧智\n' in p:
            lyric_text_tail = lyric_drop_head[:o]
            break
        elif '音乐监制:' in p:
            lyric_text_tail = lyric_drop_head[:o]
            break
        elif '混音:' in p:
            lyric_text_tail = lyric_drop_head[:o]
            break
        elif '收起' in p:
            lyric_text_tail = lyric_drop_head[:o]
            break
        else:
            lyric_text_tail = lyric_drop_head
    '''处理中间段'''
    # Remove blank lines.
    if '\n' in lyric_text_tail:
        while '\n' in lyric_text_tail:
            lyric_text_tail.remove('\n')
    # Remove performer lines (anything containing a colon, half- or full-width).
    # NOTE(review): the set difference below also discards duplicate lyric
    # lines and loses the original line order — confirm this is acceptable.
    del_list = []
    for a in lyric_text_tail:
        if ':' in a:
            del_list.append(a)
        elif ':' in a:
            del_list.append(a)
    lyric_text_tail = list(set(lyric_text_tail) - set(del_list))
    # Strip newlines and bracketed/special characters, one cleaned line each.
    lyric_text = []
    re_text = r'([\u4E00-\u9FA5\w\s]+)|\([\u4E00-\u9FA5\w\s]+\)|([\u4E00-\u9FA5\w\s]+\)|\([\u4E00-\u9FA5\w\s]+)'
    re_brackets = re.compile(re_text)
    for i in lyric_text_tail:
        i = i.replace('\n', '')
        j = re.sub(re_brackets, '《', i)
        # while '' in j:
        #     j.remove('')
        j = ''.join(itertools.chain(j))
        if '《' in j:
            j = j.replace('《', '').replace('》', '')
        if '”' in j:
            j = j.replace('“', '').replace('”', '')
        lyric_text.append(j)
    # Register custom words so jieba keeps them whole during segmentation.
    cut_dict = ('飙翻', 'A等货')
    for cut in cut_dict:
        jieba.add_word(cut, freq=100)
    # Segment each line and append its final word (the rhyme word) to the file.
    for words_cut in lyric_text:
        words = list(jieba.cut(words_cut, cut_all=False))
        if words != []:
            with open('rhyme_word.txt', 'a', encoding='UTF-8') as f:
                f.write(words[-1] + ',')
    print(f_name + '写入完成')
def word_cloud():
    """Render rhyme_word.txt as a word cloud and save it to rhyme_word.png."""
    f = open('rhyme_word.txt', 'r', encoding='UTF-8').read()
    # f = [i for i in f if i != ' ']
    # simsun.ttc is needed so CJK glyphs render instead of boxes.
    wordcloud = WordCloud(background_color='white',
                          width=800,
                          height=600,
                          margin=2,
                          font_path='simsun.ttc')
    wordcloud.generate(f)
    wordcloud.to_file('rhyme_word.png')
# Extraction pass is commented out; only the rendering step runs.
# for f_name in os.listdir('./lyrics'):
#     get_rhyme(f_name)
word_cloud()
10,233 | 449a58836d1fffaaa465707d2f7e5caf5678a255 | #deltoid curve
#x = 2cos(theta) + cos(2theta)
#y = 2sin(theta) + sin(2theta)
#0 <= theta < 2pi
#polar plot r = f(theta)
#x = rcos(theta), y = rsin(theta)
#Galilean spiral = r=(theta)^2 for 0 <= theta < 10pi
# Fey's Function
#r = e^(cos(theta)) - 2 cos(4theta) + sin^5(theta/12)
from numpy import pi, cos, sin, linspace, e
from pylab import plot, show
#Deltoid Curve
theta = linspace(0, 2*pi, 100)
x = 2*cos(theta) + cos(2*theta)
y = 2*sin(theta) + sin(2*theta)
# NOTE(review): this plots x(theta) and y(theta) as separate traces; the
# deltoid shape itself would be plot(x, y), as done for the curves below —
# confirm which was intended.
plot(theta, x)
plot(theta, y)
show()
#Polar Plot to Galilean Spiral: r = theta^2, converted to Cartesian
theta = linspace(0, 10*pi, 1000)
r = theta**2
x = r*cos(theta)
y = r*sin(theta)
plot(x, y)
show()
#Fey's Function: r = e^cos(theta) - 2cos(4theta) + sin^5(theta/12)
theta = linspace(0, 24*pi, 1000)
r = e**(cos(theta)) - 2*cos((4*theta)) + (sin((theta/12)))**5
x = r*cos(theta)
y = r*sin(theta)
plot(x, y)
show()
10,234 | 4f83c902cb8ac4afd6d1a83eb26c74f1567302f1 |
from .discriminator import Discriminator
|
10,235 | c83e84a08e6668409441cc3ec89e0352c6ed1aee | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019, Adam Miller (admiller@redhat.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: rule
short_description: Manage state of QRadar Rules, with filter options
description:
- Manage state of QRadar Rules, with filter options
version_added: "1.0.0"
options:
id:
description:
- Manage state of a QRadar Rule by ID
required: false
type: int
name:
description:
- Manage state of a QRadar Rule by name
required: false
type: str
state:
description:
- Manage state of a QRadar Rule
required: True
choices: [ "enabled", "disabled", "absent" ]
type: str
owner:
description:
- Manage ownership of a QRadar Rule
required: false
type: str
author: Ansible Security Automation Team (@maxamillion) <https://github.com/ansible-security>
"""
# FIXME - provide correct example here
RETURN = """
"""
EXAMPLES = """
- name: Enable Rule 'Ansible Example DDoS Rule'
qradar_rule:
name: 'Ansible Example DDOS Rule'
state: enabled
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import Request
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible_collections.ibm.qradar.plugins.module_utils.qradar import (
QRadarRequest,
find_dict_in_list,
set_offense_values,
)
import copy
import json
def main():
    """Ansible entry point: enable, disable, or delete a QRadar rule.

    The rule is selected by ``id`` or ``name`` (mutually exclusive, one
    required). Exits via module.exit_json/fail_json in every branch.
    """
    argspec = dict(
        id=dict(required=False, type="int"),
        name=dict(required=False, type="str"),
        state=dict(
            required=True, choices=["enabled", "disabled", "absent"], type="str"
        ),
        owner=dict(required=False, type="str"),
    )
    module = AnsibleModule(
        argument_spec=argspec,
        supports_check_mode=True,
        required_one_of=[("name", "id")],
        mutually_exclusive=[("name", "id")],
    )
    qradar_request = QRadarRequest(
        module, not_rest_data_keys=["id", "name", "state", "owner"],
    )
    # if module.params['name']:
    #     # FIXME - QUERY HERE BY NAME NATIVELY VIA REST API (DOESN'T EXIST YET)
    #     found_offense = qradar_request.get('/api/analytics/rules?filter={0}'.format(module.params['name']))
    # Resolve the rule either directly by id or via a name filter query.
    module.params["rule"] = {}
    if module.params["id"]:
        module.params["rule"] = qradar_request.get(
            "/api/analytics/rules/{0}".format(module.params["id"])
        )
    elif module.params["name"]:
        rules = qradar_request.get(
            "/api/analytics/rules?filter={0}".format(
                quote('"{0}"'.format(module.params["name"]))
            )
        )
        if rules:
            module.params["rule"] = rules[0]
            module.params["id"] = rules[0]["id"]
    if module.params["state"] == "enabled":
        if module.params["rule"]:
            if module.params["rule"]["enabled"] is True:
                # Already enabled: report no change.
                if module.params["id"]:
                    module.exit_json(
                        msg="No change needed for rule ID: {0}".format(
                            module.params["id"]
                        ),
                        qradar_return_data={},
                        changed=False,
                    )
                if module.params["name"]:
                    # Fixed message: previously claimed "Successfully enabled"
                    # even though nothing was changed.
                    module.exit_json(
                        msg="No change needed for rule named: {0}".format(
                            module.params["name"]
                        ),
                        qradar_return_data={},
                        changed=False,
                    )
            else:
                # Not enabled, enable it via POST of the updated rule body.
                module.params["rule"]["enabled"] = True
                qradar_return_data = qradar_request.post_by_path(
                    "api/analytics/rules/{0}".format(module.params["rule"]["id"]),
                    data=json.dumps(module.params["rule"]),
                )
                if module.params["id"]:
                    module.exit_json(
                        msg="Successfully enabled rule ID: {0}".format(
                            module.params["id"]
                        ),
                        qradar_return_data=qradar_return_data,
                        changed=True,
                    )
                if module.params["name"]:
                    module.exit_json(
                        msg="Successfully enabled rule named: {0}".format(
                            module.params["name"]
                        ),
                        qradar_return_data=qradar_return_data,
                        changed=True,
                    )
        else:
            if module.params["id"]:
                module.fail_json(
                    msg="Unable to find rule ID: {0}".format(module.params["id"])
                )
            if module.params["name"]:
                module.fail_json(
                    msg='Unable to find rule named: "{0}"'.format(module.params["name"])
                )
    elif module.params["state"] == "disabled":
        if module.params["rule"]:
            if module.params["rule"]["enabled"] is False:
                # Already disabled: report no change.
                if module.params["id"]:
                    module.exit_json(
                        msg="No change needed for rule ID: {0}".format(
                            module.params["id"]
                        ),
                        qradar_return_data={},
                        changed=False,
                    )
                if module.params["name"]:
                    # Fixed message: previously said "Successfully enabled rule
                    # named" in the disabled/no-change branch (copy-paste bug).
                    module.exit_json(
                        msg="No change needed for rule named: {0}".format(
                            module.params["name"]
                        ),
                        qradar_return_data={},
                        changed=False,
                    )
            else:
                # Not disabled, disable it via POST of the updated rule body.
                module.params["rule"]["enabled"] = False
                qradar_return_data = qradar_request.post_by_path(
                    "api/analytics/rules/{0}".format(module.params["rule"]["id"]),
                    data=json.dumps(module.params["rule"]),
                )
                if module.params["id"]:
                    module.exit_json(
                        msg="Successfully disabled rule ID: {0}".format(
                            module.params["id"]
                        ),
                        qradar_return_data=qradar_return_data,
                        changed=True,
                    )
                if module.params["name"]:
                    module.exit_json(
                        msg="Successfully disabled rule named: {0}".format(
                            module.params["name"]
                        ),
                        qradar_return_data=qradar_return_data,
                        changed=True,
                    )
        else:
            if module.params["id"]:
                module.fail_json(
                    msg="Unable to find rule ID: {0}".format(module.params["id"])
                )
            if module.params["name"]:
                module.fail_json(
                    msg='Unable to find rule named: "{0}"'.format(module.params["name"])
                )
    elif module.params["state"] == "absent":
        if module.params["rule"]:
            qradar_return_data = qradar_request.delete(
                "/api/analytics/rules/{0}".format(module.params["rule"]["id"])
            )
            if module.params["id"]:
                module.exit_json(
                    msg="Successfully deleted rule ID: {0}".format(module.params["id"]),
                    qradar_return_data=qradar_return_data,
                    changed=True,
                )
            if module.params["name"]:
                module.exit_json(
                    msg="Successfully deleted rule named: {0}".format(
                        module.params["name"]
                    ),
                    qradar_return_data=qradar_return_data,
                    changed=True,
                )
        else:
            module.exit_json(msg="Nothing to do, rule not found.")
    # Defensive fallback; the argspec choices make this unreachable. The
    # previous version referenced `rules`, which is undefined on the id path.
    module.exit_json(changed=False)
if __name__ == "__main__":
    main()
10,236 | 0b59b3e8721b8d251c1c79b73db8d2caa5155e63 | """ DataFiles
"""
from autodir import factory
import autofile
import autoinf
# Each factory below builds a factory.DataFile wiring a conventional file
# name (autofile.name.*) to the matching serializer/deserializer pair
# (autofile.write.* / autofile.read.*).
def information(ddir, file_prefix, function=None):
    """ generate information DataFile

    If *function* is given, the information object is validated against that
    function's signature on both write and read.
    """
    def writer_(inf_obj):
        if function is not None:
            assert autoinf.matches_function_signature(inf_obj, function)
        inf_str = autofile.write.information(inf_obj)
        return inf_str
    def reader_(inf_str):
        inf_obj = autofile.read.information(inf_str)
        if function is not None:
            assert autoinf.matches_function_signature(inf_obj, function)
        return inf_obj
    name = autofile.name.information(file_prefix)
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def input_file(ddir, file_prefix):
    """ generate input file DataFile (raw text: no writer/reader conversion)
    """
    name = autofile.name.input_file(file_prefix)
    return factory.DataFile(ddir=ddir, name=name)
def output_file(ddir, file_prefix):
    """ generate output file DataFile (raw text: no writer/reader conversion)
    """
    name = autofile.name.output_file(file_prefix)
    return factory.DataFile(ddir=ddir, name=name)
def energy(ddir, file_prefix):
    """ generate energy DataFile
    """
    name = autofile.name.energy(file_prefix)
    writer_ = autofile.write.energy
    reader_ = autofile.read.energy
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def geometry(ddir, file_prefix):
    """ generate geometry DataFile
    """
    name = autofile.name.geometry(file_prefix)
    writer_ = autofile.write.geometry
    reader_ = autofile.read.geometry
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def gradient(ddir, file_prefix):
    """ generate gradient DataFile
    """
    name = autofile.name.gradient(file_prefix)
    writer_ = autofile.write.gradient
    reader_ = autofile.read.gradient
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def hessian(ddir, file_prefix):
    """ generate hessian DataFile
    """
    name = autofile.name.hessian(file_prefix)
    writer_ = autofile.write.hessian
    reader_ = autofile.read.hessian
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def zmatrix(ddir, file_prefix):
    """ generate zmatrix DataFile
    """
    name = autofile.name.zmatrix(file_prefix)
    writer_ = autofile.write.zmatrix
    reader_ = autofile.read.zmatrix
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def vmatrix(ddir, file_prefix):
    """ generate vmatrix DataFile
    """
    name = autofile.name.vmatrix(file_prefix)
    writer_ = autofile.write.vmatrix
    reader_ = autofile.read.vmatrix
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def trajectory(ddir, file_prefix):
    """ generate trajectory DataFile (write-only: reading is not implemented)
    """
    name = autofile.name.trajectory(file_prefix)
    writer_ = autofile.write.trajectory
    reader_ = _not_implemented
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def lennard_jones_epsilon(ddir, file_prefix):
    """ generate lennard_jones_epsilon DataFile
    """
    name = autofile.name.lennard_jones_epsilon(file_prefix)
    writer_ = autofile.write.lennard_jones_epsilon
    reader_ = autofile.read.lennard_jones_epsilon
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
def lennard_jones_sigma(ddir, file_prefix):
    """ generate lennard_jones_sigma DataFile
    """
    name = autofile.name.lennard_jones_sigma(file_prefix)
    writer_ = autofile.write.lennard_jones_sigma
    reader_ = autofile.read.lennard_jones_sigma
    return factory.DataFile(ddir=ddir, name=name,
                            writer_=writer_, reader_=reader_)
# helpers
def _not_implemented(*_args, **_kwargs):
    # Placeholder reader for file types that can be written but not parsed.
    raise NotImplementedError
|
10,237 | 4788c86a3f78d5877fb07b415209fe9b5d8ddd34 | import numpy as np
def unpickle(file):
import cPickle
with open(file, 'rb') as fo:
dict = cPickle.load(fo)
return dict
#Exctracts 8-bit RGB image from arr of size nxm starting at i0
def extract_img(arr, n,m, i0):
im = np.zeros((n,m,3),dtype=np.uint8)
for i in range(3):
for j in range(n):
im[j,:,i] = arr[i0,i*n*m + j*m : i*n*m + (j+1)*m]
return im
def create_target_array(labels_in, n_classes):
target_len = len(labels_in)
labels_out = np.zeros((target_len, n_classes), dtype=int)
for i in range(target_len):
class_i = labels_in[i]
labels_out[i][class_i] = 1
return labels_out
#get normalized percentage-based distribution of each of the classes associated with
#labels_in from 0 to n_classes
def get_norm_dist(labels_in, n_classes):
h = np.histogram(labels_in, range(0,n_classes+1)) #data histogram
dist = list(h[0])
# return list(dist / np.linalg.norm(dist))
nv = 1.0 * sum(dist)
return dist / nv |
10,238 | c9baccb09e5ac57daef9000707807c94034c59e4 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 28 2020
@author: Cassio (chmendonca)
Description: This class was created as a container to the game characteristics
and configurations
"""
from random import randint
class Settings():
"""A class that have all configurations of the game"""
def __init__(self):
"""Initialize the game configs"""
#Screen configuration
self.screen_width = 1200
self.screen_height = 680
self.bg_color = (0,20,50)
#Hero configuration
#Increase of ship speed to 1.5 pixels instead of 1
#self.hero_speed_factor = 1.5
self.hero_limit = 3
#Syringes (bullets) configuration
#self.bullet_speed_factor = 1
self.bullets_allowed = 5
#Covids configuration
self.covid_vertical_speed_factor = 1
#The value of the movement is negative because it is increasing
# from the right to the left
#self.covid_horizontal_speed_factor = -10
#The pandemy direction equals 1 means to the bottom; -1 means to the top
# The randint ensures an randomly direction when starting the game
#if randint(0,1) == 1:
# self.pandemy_direction = 1
#else:
# self.pandemy_direction = -1
#The rate that increases the game speed
self.speedup_scale = 1.1
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
"""Initializes the configurations that increase during the game"""
self.hero_speed_factor = 1.5
self.bullet_speed_factor = 1
self.covid_horizontal_speed_factor = -10
self.alien_points = 50
#The pandemy direction equals 1 means to the bottom; -1 means to the top
# The randint ensures an randomly direction when starting the game
if randint(0,1) == 1:
self.pandemy_direction = 1
else:
self.pandemy_direction = -1
def increase_speed(self):
"""Increase the speed configurations"""
self.covid_horizontal_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.hero_speed_factor *= self.speedup_scale |
10,239 | 1c9626b5654166f6c5022d38fd8f15fbd1b46b0f | # coding=utf8
import matplotlib.pyplot as plt
import numpy as np
open,close=np.loadtxt('G:\\PythonCode\\DataBase\\000001.csv',delimiter=',',skiprows=1,usecols=(1,4),unpack=True)
change=close-open
print(change)
yesterday=change[:-1]
today=change[1:]
plt.scatter(yesterday,today,10,'r','<',alpha=0.5)
plt.show() |
10,240 | 1c099dc8e0c13102164b7368b0ed091d1ee0fbe1 | import rospy
import ros_numpy
import cv2
import darknet
import math
import numpy as np
from sensor_msgs.msg import Image, PointCloud2, PointField
from std_msgs.msg import Header, String
from cv_bridge import CvBridge, CvBridgeError
from pose_detector import PoseDetector
import sensor_msgs.point_cloud2 as pc2
class BlitzDetection:
def __init__(self, cfg, weight, meta, W, H, camera_offset, tcp):
self.net = darknet.load_net(cfg, weight, 0)
self.meta = darknet.load_meta(meta)
self.bridge = CvBridge()
# receiving image's resolution
self.W = W
self.H = H
# camera's position referencing base_link
self.CAMERA_OFFSET = camera_offset
# tcp
self.TCP = tcp
# Openpose Pose Detector
self.pose_detector = PoseDetector(thresh=0.5, visualize=True)
print('Detector loaded')
def get_image(self, camera='/camera/color/image_raw'):
img = rospy.wait_for_message(camera, Image)
try:
cv2_img = self.bridge.imgmsg_to_cv2(img, 'bgr8')
except CvBridgeError as e:
print e
print "Image Received"
return cv2_img
'''
def get_depth_image(self):
depth_img = rospy.wait_for_message('/xtion/depth/image_raw', Image)
try:
depth_img = self.bridge.imgmsg_to_cv2(depth_img) # depth in mm unit
except CvBridgeError as e:
print e
return depth_img/1000.0 # change to m unit
'''
def detection_all(self, thresh=0.7):
cv_img = self.get_image()
r = darknet.darknet(self.net, self.meta, cv_img)
if len(r) == 0:
print 'Could not detect anything'
return None
detection_list = []
for item in r:
name, prob, box_info = item
if prob >= thresh:
print '{} detected!'.format(name)
detection_list.append(item)
return detection_list
def detection(self, target='teddy bear'):
cv2_img = self.get_image()
r = darknet.detect(self.net, self.meta, cv2_img)
if len(r) == 0:
print "Could not detect anything"
return None
for item in r:
if target in item:
print "Found teddy bear in the image"
return r
else:
pass
print "No teddy bear in this image"
return None
def detection_image_input(self, cv_img, target='teddy bear'):
r = darknet.detect(self.net, self.meta, cv_img)
if len(r) == 0:
print "Could not detect anything"
return None
for item in r:
if target in item:
print "Found {} in the image".format(target)
return r
else:
pass
print "No {} in this image".format(target)
return None
def target_object(self, r, target='teddy bear'):
for item in r:
name, prob, box_info = item
print(name)
return [item for item in r if target in item][0]
def detected_cloud(self, target, box_info):
cloud = ros_numpy.numpify(rospy.wait_for_message('/camera/depth_registered/points', PointCloud2))
target_cloud = []
for i in range(self.H):
for j in range(self.W):
point = cloud[i, j]
if math.isnan(point[0]) or math.isnan(point[1]) or math.isnan(point[2]):
target_cloud.append((0, 0, 0))
continue
(x, y, w, h) = box_info
if j >= x - w/2 and j <= x + w/2 and i >= y - h/2 and i <= y + h/2:
target_cloud.append((point[0], point[1], point[2]))
else:
target_cloud.append((0, 0, 0))
# visualize target's point cloud
'''
fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
]
header = Header()
header.frame_id = 'camera_depth_optical_frame'
pub = rospy.Publisher('arm_move_point', PointCloud2, queue_size=2)
pub.publish(pc2.create_cloud(header, fields, target_cloud))
'''
return target_cloud
def target_position(self, cloud):
point_list = []
for point in cloud:
if point == (0, 0, 0): continue
else:
x = self.CAMERA_OFFSET[0] + point[2]
y = self.CAMERA_OFFSET[1] - point[0]
z = self.CAMERA_OFFSET[2] - point[1]
point_list.append((x, y, z))
sorted_by_depth = sorted(point_list, key=lambda point: point[0])
object_list = sorted_by_depth[:len(sorted_by_depth)/2]
x = sum([item[0] for item in object_list])/len(object_list)
y = sum([item[1] for item in object_list])/len(object_list)
z = sum([item[2] for item in object_list])/len(object_list)
return (x, y, z)
def tcp_calc(self, x, y, z):
differ = (x-self.TCP[0], y-self.TCP[1], z-self.TCP[2])
return differ
def crop(self, cv_img, x, y, w, h):
cropped = cv_img[int(y-h/2):int(y+h/2), int(x-w/2):int(x+w/2)]
return cropped
def hand_waving_callback(self, img):
try:
rgb_img = self.bridge.imgmsg_to_cv2(img, 'bgr8')
except CvBridgeError as e:
print e
return
self.hand_wave_img = rgb_img
return
def is_hand_waving(self):
N = 10
i = 0
'''
rate = rospy.Rate(5)
rospy.Subscriber('/xtion/rgb/image_rect_color', Image, self.hand_waving_callback)
'''
while i < N:
'''
while self.hand_wave_img is None:
pass
'''
hand_wave_img = self.get_image(camera='/xtion/rgb/image_rect_color')
r = self.detection_image_input(hand_wave_img, 'person')
if r is None:
continue
name, prob, (x, y, w, h) = self.target_object(r, target='person')
person_img = self.crop(hand_wave_img, x, y, w, h)
# Directly putting briged img to Openpose causes fcuked up results
cv2.imwrite('hand_waving_frames/person_frame_{}.jpg'.format(i), person_img)
i += 1
is_waving = self.pose_detector.predict(N)
if is_waving:
print('A person is waving hand')
else:
print('A person is not waving hand')
return
######################################################################
import time
def main():
CFG_PATH = "/home/user/kji/darknet/cfg/yolov3.cfg"
WEIGHT_PATH = "/home/user/kji/darknet/backup/yolov3.weights"
META_PATH = "/home/user/kji/darknet/cfg/coco.data"
W = 640
H = 480
camera_offset = (0.5, -0.1, 0.98)
tcp = (0.72, -0.108, 0.905)
detector = BlitzDetection(CFG_PATH, WEIGHT_PATH, META_PATH, W, H, camera_offset, tcp)
rospy.init_node('blitz_navigate_and_pick_detection', anonymous=True)
# Target Detection
'''
r = detector.detection()
target, prob, box_info = detector.target_object(r)
cloud = detector.detected_cloud(target, box_info)
x, y, z = detector.target_position(cloud)
print(x)
print(y)
print(z)
x, y, z = detector.tcp_calc(x, y, z)
print(x)
print(y)
print(z)
'''
# Hand Waving
print('Get ready to wave your hands after 7 secs')
tic = time.time()
rospy.sleep(7)
detector.is_hand_waving()
toc = time.time()
print('Time Cost: {}'.format(toc-tic))
'''
depth = detector.get_depth_image()
print(depth.shape)
print(depth[240, 320])
'''
if __name__ == '__main__':
main()
|
10,241 | 1828198d2a146d96420050f5925a8456eeb66b3a | # Lendo valores inteiros e guardando em um vetor para mostrar no final o menor valor lido
num = []
maior = 0
menor = 0
print('Insira dez números e descubra qual é o menor dentre eles!')
for c in range(0,10):
num.append(int(input('Insira um número: ')))
if c == 0:
maior = menor = num[c]
else:
if num[c] > maior:
maior = num[c]
if num[c] < menor:
menor = num[c]
print('O menor valor digitado é de:',menor) |
10,242 | 2698d0e6904bdf38b0d10cfd2b630da2ad529e66 | # from .sgf import *
from dlgo.gosgf.sgf import Sgf_game
|
10,243 | 63baadbcc6d44d06d30d3d752cf93e4bc8d05a46 | string = input("Enter String: ")
word_list = str.split(string.lower())
word_dict = {}
for word in word_list:
if word in word_dict:
word_dict[word] += 1
else:
word_dict[word] = 1
list_of_words = sorted(word_dict.keys())
word_length = []
for word in word_dict:
word_length.append(len(word))
spacing = max(word_length)
for word in list_of_words:
print("{:<{}}: {}".format(word, spacing+1, word_dict[word]))
|
10,244 | d82ef65caf5ba2f4fe44ac09d4c179b1f19a17fc | #!/usr/bin/env python
#
# VMAccess extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sys
import tempfile
import time
import traceback
import Utils.handlerutil2 as handler_util
import Utils.logger as logger
import Utils.extensionutils as ext_utils
import Utils.distroutils as dist_utils
import Utils.constants as constants
import Utils.ovfutils as ovf_utils
# Define global variables
ExtensionShortName = 'VMAccess'
BeginCertificateTag = '-----BEGIN CERTIFICATE-----'
EndCertificateTag = '-----END CERTIFICATE-----'
BeginSSHTag = '---- BEGIN SSH2 PUBLIC KEY ----'
OutputSplitter = ';'
SshdConfigPath = '/etc/ssh/sshd_config'
# overwrite the default logger
logger.global_shared_context_logger = logger.Logger('/var/log/waagent.log', '/dev/stdout')
def get_os_name():
if os.path.isfile(constants.os_release):
return ext_utils.get_line_starting_with("NAME", constants.os_release)
elif os.path.isfile(constants.system_release):
return ext_utils.get_file_contents(constants.system_release)
return None
def get_linux_agent_conf_filename(os_name):
if os_name is not None:
if re.search("coreos", os_name, re.IGNORECASE) or re.search("flatcar", os_name, re.IGNORECASE):
return "/usr/share/oem/waagent.conf"
return "/etc/waagent.conf"
class ConfigurationProvider(object):
"""
Parse amd store key:values in waagent.conf
"""
def __init__(self, wala_config_file):
self.values = dict()
if not os.path.isfile(wala_config_file):
logger.warning("Missing configuration in {0}, setting default values for PasswordCryptId and PasswordCryptSaltLength".format(wala_config_file))
self.values["Provisioning.PasswordCryptId"] = "6"
self.values["Provisioning.PasswordCryptSaltLength"] = 10
return
try:
for line in ext_utils.get_file_contents(wala_config_file).split('\n'):
if not line.startswith("#") and "=" in line:
parts = line.split()[0].split('=')
value = parts[1].strip("\" ")
if value != "None":
self.values[parts[0]] = value
else:
self.values[parts[0]] = None
# when get_file_contents returns none
except AttributeError:
logger.error("Unable to parse {0}".format(wala_config_file))
raise
return
def get(self, key):
return self.values.get(key)
def yes(self, key):
config_value = self.get(key)
if config_value is not None and config_value.lower().startswith("y"):
return True
else:
return False
def no(self, key):
config_value = self.get(key)
if config_value is not None and config_value.lower().startswith("n"):
return True
else:
return False
OSName = get_os_name()
Configuration = ConfigurationProvider(get_linux_agent_conf_filename(OSName))
MyDistro = dist_utils.get_my_distro(Configuration, OSName)
def main():
logger.log("%s started to handle." % ExtensionShortName)
try:
for a in sys.argv[1:]:
if re.match("^([-/]*)(disable)", a):
disable()
elif re.match("^([-/]*)(uninstall)", a):
uninstall()
elif re.match("^([-/]*)(install)", a):
install()
elif re.match("^([-/]*)(enable)", a):
enable()
elif re.match("^([-/]*)(update)", a):
update()
except Exception as e:
err_msg = "Failed with error: {0}, {1}".format(e, traceback.format_exc())
logger.error(err_msg)
def install():
hutil = handler_util.HandlerUtility()
hutil.do_parse_context('Install')
hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded')
def enable():
hutil = handler_util.HandlerUtility()
hutil.do_parse_context('Enable')
try:
hutil.exit_if_enabled(remove_protected_settings=True) # If no new seqNum received, exit.
reset_ssh = None
remove_user = None
protect_settings = hutil.get_protected_settings()
if protect_settings:
reset_ssh = protect_settings.get('reset_ssh')
remove_user = protect_settings.get('remove_user')
if remove_user and _is_sshd_config_modified(protect_settings):
ext_utils.add_extension_event(name=hutil.get_name(),
op=constants.WALAEventOperation.Enable,
is_success=False,
message="(03002)Argument error, conflicting operations")
raise Exception("Cannot reset sshd_config and remove a user in one operation.")
_forcibly_reset_chap(hutil)
if _is_sshd_config_modified(protect_settings):
_backup_sshd_config(SshdConfigPath)
if reset_ssh:
_open_ssh_port()
hutil.log("Succeeded in check and open ssh port.")
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True, message="reset-ssh")
_reset_sshd_config(SshdConfigPath)
hutil.log("Succeeded in reset sshd_config.")
if remove_user:
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True, message="remove-user")
_remove_user_account(remove_user, hutil)
_set_user_account_pub_key(protect_settings, hutil)
if _is_sshd_config_modified(protect_settings):
MyDistro.restart_ssh_service()
check_and_repair_disk(hutil)
hutil.do_exit(0, 'Enable', 'success', '0', 'Enable succeeded.')
except Exception as e:
hutil.error(("Failed to enable the extension with error: {0}, "
"stack trace: {1}").format(str(e), traceback.format_exc()))
hutil.do_exit(1, 'Enable', 'error', '0', "Enable failed: {0}".format(str(e)))
def _forcibly_reset_chap(hutil):
name = "ChallengeResponseAuthentication"
config = ext_utils.get_file_contents(SshdConfigPath).split("\n")
for i in range(0, len(config)):
if config[i].startswith(name) and "no" in config[i].lower():
ext_utils.add_extension_event(name=hutil.get_name(), op="sshd", is_success=True,
message="ChallengeResponseAuthentication no")
return
ext_utils.add_extension_event(name=hutil.get_name(), op="sshd", is_success=True,
message="ChallengeResponseAuthentication yes")
_backup_sshd_config(SshdConfigPath)
_set_sshd_config(config, name, "no")
ext_utils.replace_file_with_contents_atomic(SshdConfigPath, "\n".join(config))
MyDistro.restart_ssh_service()
def _is_sshd_config_modified(protected_settings):
result = protected_settings.get('reset_ssh') or protected_settings.get('password')
return result is not None
def uninstall():
hutil = handler_util.HandlerUtility()
hutil.do_parse_context('Uninstall')
hutil.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall succeeded')
def disable():
hutil = handler_util.HandlerUtility()
hutil.do_parse_context('Disable')
hutil.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded')
def update():
hutil = handler_util.HandlerUtility()
hutil.do_parse_context('Update')
hutil.do_exit(0, 'Update', 'success', '0', 'Update Succeeded')
def _remove_user_account(user_name, hutil):
hutil.log("Removing user account")
try:
sudoers = _get_other_sudoers(user_name)
MyDistro.delete_account(user_name)
_save_other_sudoers(sudoers)
except Exception as e:
ext_utils.add_extension_event(name=hutil.get_name(),
op=constants.WALAEventOperation.Enable,
is_success=False,
message="(02102)Failed to remove user.")
raise Exception("Failed to remove user {0}".format(e))
ext_utils.add_extension_event(name=hutil.get_name(),
op=constants.WALAEventOperation.Enable,
is_success=True,
message="Successfully removed user")
def _set_user_account_pub_key(protect_settings, hutil):
ovf_env = None
try:
ovf_xml = ext_utils.get_file_contents('/var/lib/waagent/ovf-env.xml')
if ovf_xml is not None:
ovf_env = ovf_utils.OvfEnv.parse(ovf_xml, Configuration, False, False)
except (EnvironmentError, ValueError, KeyError, AttributeError, TypeError):
pass
if ovf_env is None:
# default ovf_env with empty data
ovf_env = ovf_utils.OvfEnv()
logger.log("could not load ovf-env.xml")
# user name must be provided if set ssh key or password
if not protect_settings or 'username' not in protect_settings:
return
user_name = protect_settings['username']
user_pass = protect_settings.get('password')
cert_txt = protect_settings.get('ssh_key')
expiration = protect_settings.get('expiration')
remove_prior_keys = protect_settings.get('remove_prior_keys')
no_convert = False
if not user_pass and not cert_txt and not ovf_env.SshPublicKeys:
raise Exception("No password or ssh_key is specified.")
if user_pass is not None and len(user_pass) == 0:
user_pass = None
hutil.log("empty passwords are not allowed, ignoring password reset")
# Reset user account and password, password could be empty
sudoers = _get_other_sudoers(user_name)
error_string = MyDistro.create_account(
user_name, user_pass, expiration, None)
_save_other_sudoers(sudoers)
if error_string is not None:
err_msg = "Failed to create the account or set the password"
ext_utils.add_extension_event(name=hutil.get_name(),
op=constants.WALAEventOperation.Enable,
is_success=False,
message="(02101)" + err_msg)
raise Exception(err_msg + " with " + error_string)
hutil.log("Succeeded in creating the account or setting the password.")
# Allow password authentication if user_pass is provided
if user_pass is not None:
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True,
message="create-user-with-password")
_allow_password_auth()
# Reset ssh key with the new public key passed in or reuse old public key.
if cert_txt:
# support for SSH2-compatible format for public keys in addition to OpenSSH-compatible format
if cert_txt.strip().startswith(BeginSSHTag):
ext_utils.set_file_contents("temp.pub", cert_txt.strip())
retcode, output = ext_utils.run_command_get_output(['ssh-keygen', '-i', '-f', 'temp.pub'])
if retcode > 0:
raise Exception("Failed to convert SSH2 key to OpenSSH key.")
hutil.log("Succeeded in converting SSH2 key to OpenSSH key.")
cert_txt = output
os.remove("temp.pub")
if cert_txt.strip().lower().startswith("ssh-rsa") or cert_txt.strip().lower().startswith("ssh-ed25519"):
no_convert = True
try:
pub_path = os.path.join('/home/', user_name, '.ssh',
'authorized_keys')
ovf_env.UserName = user_name
if no_convert:
if cert_txt:
pub_path = ovf_env.prepare_dir(pub_path, MyDistro)
final_cert_txt = cert_txt
if not cert_txt.endswith("\n"):
final_cert_txt = final_cert_txt + "\n"
if remove_prior_keys == True:
ext_utils.set_file_contents(pub_path, final_cert_txt)
hutil.log("Removed prior ssh keys and added new key for user %s" % user_name)
else:
ext_utils.append_file_contents(pub_path, final_cert_txt)
MyDistro.set_se_linux_context(
pub_path, 'unconfined_u:object_r:ssh_home_t:s0')
ext_utils.change_owner(pub_path, user_name)
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True,
message="create-user")
hutil.log("Succeeded in resetting ssh_key.")
else:
err_msg = "Failed to reset ssh key because the cert content is empty."
ext_utils.add_extension_event(name=hutil.get_name(),
op=constants.WALAEventOperation.Enable,
is_success=False,
message="(02100)" + err_msg)
else:
# do the certificate conversion
# we support PKCS8 certificates besides ssh-rsa public keys
_save_cert_str_as_file(cert_txt, 'temp.crt')
pub_path = ovf_env.prepare_dir(pub_path, MyDistro)
retcode = ext_utils.run_command_and_write_stdout_to_file(
[constants.Openssl, 'x509', '-in', 'temp.crt', '-noout', '-pubkey'], "temp.pub")
if retcode > 0:
raise Exception("Failed to generate public key file.")
MyDistro.ssh_deploy_public_key('temp.pub', pub_path)
os.remove('temp.pub')
os.remove('temp.crt')
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True,
message="create-user")
hutil.log("Succeeded in resetting ssh_key.")
except Exception as e:
hutil.log(str(e))
ext_utils.add_extension_event(name=hutil.get_name(),
op=constants.WALAEventOperation.Enable,
is_success=False,
message="(02100)Failed to reset ssh key.")
raise e
def _get_other_sudoers(user_name):
sudoers_file = '/etc/sudoers.d/waagent'
if not os.path.isfile(sudoers_file):
return None
sudoers = ext_utils.get_file_contents(sudoers_file).split("\n")
pattern = '^{0}\s'.format(user_name)
sudoers = list(filter(lambda x: re.match(pattern, x) is None, sudoers))
return sudoers
def _save_other_sudoers(sudoers):
sudoers_file = '/etc/sudoers.d/waagent'
if sudoers is None:
return
ext_utils.append_file_contents(sudoers_file, "\n".join(sudoers))
os.chmod("/etc/sudoers.d/waagent", 0o440)
def _allow_password_auth():
config = ext_utils.get_file_contents(SshdConfigPath).split("\n")
_set_sshd_config(config, "PasswordAuthentication", "yes")
ext_utils.replace_file_with_contents_atomic(SshdConfigPath, "\n".join(config))
if isinstance(MyDistro, dist_utils.UbuntuDistro): #handle ubuntu 22.04 (sshd_config.d directory)
cloudInitConfigPath = "/etc/ssh/sshd_config.d/50-cloud-init.conf"
config = ext_utils.get_file_contents(cloudInitConfigPath)
if config is not None: #other versions of ubuntu don't contain this file
config = config.split("\n")
_set_sshd_config(config, "PasswordAuthentication", "yes")
ext_utils.replace_file_with_contents_atomic(cloudInitConfigPath, "\n".join(config))
def _set_sshd_config(config, name, val):
notfound = True
i = None
for i in range(0, len(config)):
if config[i].startswith(name):
config[i] = "{0} {1}".format(name, val)
notfound = False
elif config[i].startswith("Match"):
# Match block must be put in the end of sshd config
break
if notfound:
if i is None:
i = 0
config.insert(i, "{0} {1}".format(name, val))
return config
def _get_default_ssh_config_filename():
if OSName is not None:
# the default ssh config files are present in
# /var/lib/waagent/Microsoft.OSTCExtensions.VMAccessForLinux-<version>/resources/
if re.search("centos", OSName, re.IGNORECASE):
return "centos_default"
if re.search("debian", OSName, re.IGNORECASE):
return "debian_default"
if re.search("fedora", OSName, re.IGNORECASE):
return "fedora_default"
if re.search("red\s?hat", OSName, re.IGNORECASE):
return "redhat_default"
if re.search("suse", OSName, re.IGNORECASE):
return "SuSE_default"
if re.search("ubuntu", OSName, re.IGNORECASE):
return "ubuntu_default"
return "default"
def _reset_sshd_config(sshd_file_path):
ssh_default_config_filename = _get_default_ssh_config_filename()
ssh_default_config_file_path = os.path.join(os.getcwd(), 'resources', ssh_default_config_filename)
if not (os.path.exists(ssh_default_config_file_path)):
ssh_default_config_file_path = os.path.join(os.getcwd(), 'resources', 'default')
# handle CoreOS differently
if isinstance(MyDistro, dist_utils.CoreOSDistro):
# Parse sshd port from ssh_default_config_file_path
sshd_port = 22
regex = re.compile(r"^Port\s+(\d+)", re.VERBOSE)
with open(ssh_default_config_file_path) as f:
for line in f:
match = regex.match(line)
if match:
sshd_port = match.group(1)
break
# Prepare cloud init config for coreos-cloudinit
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
cfg_tempfile = f.name
cfg_content = "#cloud-config\n\n"
# Overwrite /etc/ssh/sshd_config
cfg_content += "write_files:\n"
cfg_content += " - path: {0}\n".format(sshd_file_path)
cfg_content += " permissions: 0600\n"
cfg_content += " owner: root:root\n"
cfg_content += " content: |\n"
for line in ext_utils.get_file_contents(ssh_default_config_file_path).split('\n'):
cfg_content += " {0}\n".format(line)
# Change the sshd port in /etc/systemd/system/sshd.socket
cfg_content += "\ncoreos:\n"
cfg_content += " units:\n"
cfg_content += " - name: sshd.socket\n"
cfg_content += " command: restart\n"
cfg_content += " content: |\n"
cfg_content += " [Socket]\n"
cfg_content += " ListenStream={0}\n".format(sshd_port)
cfg_content += " Accept=yes\n"
ext_utils.set_file_contents(cfg_tempfile, cfg_content)
ext_utils.run(['coreos-cloudinit', '-from-file', cfg_tempfile], chk_err=False)
os.remove(cfg_tempfile)
else:
shutil.copyfile(ssh_default_config_file_path, sshd_file_path)
MyDistro.restart_ssh_service()
def _backup_sshd_config(sshd_file_path):
if os.path.exists(sshd_file_path):
backup_file_name = '%s_%s' % (
sshd_file_path, time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()))
# When copying, make sure to preserve permissions and ownership.
ownership = os.stat(sshd_file_path)
shutil.copy2(sshd_file_path, backup_file_name)
os.chown(backup_file_name, ownership.st_uid, ownership.st_gid)
def _save_cert_str_as_file(cert_txt, file_name):
cert_start = cert_txt.find(BeginCertificateTag)
if cert_start >= 0:
cert_txt = cert_txt[cert_start + len(BeginCertificateTag):]
cert_end = cert_txt.find(EndCertificateTag)
if cert_end >= 0:
cert_txt = cert_txt[:cert_end]
cert_txt = cert_txt.strip()
cert_txt = "{0}\n{1}\n{2}\n".format(BeginCertificateTag, cert_txt, EndCertificateTag)
ext_utils.set_file_contents(file_name, cert_txt)
def _open_ssh_port():
_del_rule_if_exists(['INPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'DROP'])
_del_rule_if_exists(['INPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'REJECT'])
_del_rule_if_exists(['INPUT', '-p', '-j', 'DROP'])
_del_rule_if_exists(['INPUT', '-p', '-j', 'REJECT'])
_insert_rule_if_not_exists(['INPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'ACCEPT'])
_del_rule_if_exists(['OUTPUT', '-p', 'tcp', '-m', 'tcp', '--sport', '22', '-j', 'DROP'])
_del_rule_if_exists(['OUTPUT', '-p', 'tcp', '-m', 'tcp', '--sport', '22', '-j', 'REJECT'])
_del_rule_if_exists(['OUTPUT', '-p', '-j', 'DROP'])
_del_rule_if_exists(['OUTPUT', '-p', '-j', 'REJECT'])
_insert_rule_if_not_exists(['OUTPUT', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j', 'ACCEPT'])
def _del_rule_if_exists(rule_string):
rule_string_for_cmp = " ".join(rule_string)
cmd_result = ext_utils.run_command_get_output(['iptables-save'])
while cmd_result[0] == 0 and (rule_string_for_cmp in cmd_result[1]):
ext_utils.run(['iptables', '-D'] + rule_string)
cmd_result = ext_utils.run_command_get_output(['iptables-save'])
def _insert_rule_if_not_exists(rule_string):
rule_string_for_cmp = " ".join(rule_string)
cmd_result = ext_utils.run_command_get_output(['iptables-save'])
if cmd_result[0] == 0 and (rule_string_for_cmp not in cmd_result[1]):
ext_utils.run_command_get_output(['iptables', '-I'] + rule_string)
def check_and_repair_disk(hutil):
public_settings = hutil.get_public_settings()
if public_settings:
check_disk = public_settings.get('check_disk')
repair_disk = public_settings.get('repair_disk')
disk_name = public_settings.get('disk_name')
if check_disk and repair_disk:
err_msg = ("check_disk and repair_disk was both specified."
"Only one of them can be specified")
hutil.error(err_msg)
hutil.do_exit(1, 'Enable', 'error', '0', 'Enable failed.')
if check_disk:
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True, message="check_disk")
outretcode = _fsck_check(hutil)
hutil.log("Successfully checked disk")
return outretcode
if repair_disk:
ext_utils.add_extension_event(name=hutil.get_name(), op="scenario", is_success=True, message="repair_disk")
outdata = _fsck_repair(hutil, disk_name)
hutil.log("Repaired and remounted disk")
return outdata
def _fsck_check(hutil):
try:
retcode = ext_utils.run(['fsck', '-As', '-y'])
if retcode > 0:
hutil.log(retcode)
raise Exception("Disk check was not successful")
else:
return retcode
except Exception as e:
hutil.error("Failed to run disk check with error: {0}, {1}".format(
str(e), traceback.format_exc()))
hutil.do_exit(1, 'Check', 'error', '0', 'Check failed.')
def _fsck_repair(hutil, disk_name):
# first unmount disks and loop devices lazy + forced
try:
cmd_result = ext_utils.run(['umount', '-f', '/' + disk_name])
if cmd_result != 0:
# Fail fast
hutil.log("Failed to unmount disk: %s" % disk_name)
# run repair
retcode = ext_utils.run(['fsck', '-AR', '-y'])
hutil.log("Ran fsck with return code: %d" % retcode)
if retcode == 0:
retcode, output = ext_utils.run_command_get_output(["mount"])
hutil.log(output)
return output
else:
raise Exception("Failed to mount disks")
except Exception as e:
hutil.error("{0}, {1}".format(str(e), traceback.format_exc()))
hutil.do_exit(1, 'Repair', 'error', '0', 'Repair failed.')
# Script entry point: dispatch the extension operation passed on the command line.
if __name__ == '__main__':
    main()
|
10,245 | a89649153fa3127dc789ec79efd8c7e2e862e09e | #!/usr/bin/env python
import numpy
def estimate_norm(X):
return X.mean(axis=0), X.std(axis=0, ddof=1)
def normalize(X, norm):
    """Z-score X with precomputed statistics.

    :param X: array-like of shape (n_samples, n_features)
    :param norm: tuple (mean, std) as returned by estimate_norm
    :return: numpy.ndarray of normalized samples

    Vectorized: numpy broadcasting replaces the original per-row Python
    loop (identical result, far less Python overhead).
    """
    return (numpy.asarray(X) - norm[0]) / norm[1]
|
10,246 | 812c892a78f9c399b48033c1c36f8780d4a2bcfa | from django import template
register = template.Library()
@register.filter()
def sweat_change(value):
    """Render a 'room-seat' code (e.g. '3-12') as '第3试室12号'.

    Returns '' for empty/None or malformed input.
    """
    try:
        if not value:
            return ''
        room, seat = value.split('-', 1)
        return '第%s试室%s号' % (room, seat)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        return ''
@register.filter()
def zkzh_create(value):
    """Build an admission number by removing the first '-' separator.

    '12-345' -> '12345'; returns '' for empty/None or malformed input.
    """
    try:
        if not value:
            return ''
        head, tail = value.split('-', 1)
        return head + tail
    except Exception:
        # Narrowed from a bare `except:` so system-exit signals propagate.
        return ''
@register.filter()
def get_class(value):
    """Extract the room part of a 'room-seat' code, rendered as '第N试室'."""
    try:
        return '第%s试室' % (value.split('-', 1)[0],)
    except Exception:
        # Narrowed from a bare `except:` so system-exit signals propagate.
        return ''
@register.filter()
def get_seat(value):
    """Extract the seat part of a 'room-seat' code; '' when missing."""
    try:
        return value.split('-', 1)[1]
    except Exception:
        # Narrowed from a bare `except:` so system-exit signals propagate.
        return ''
@register.filter()
def zkz_replace(value):
    """Shorten a 13+ character code by dropping characters 4-5 (0-based).

    Values shorter than 13 characters are returned unchanged.
    """
    if len(value) < 13:
        return value
    try:
        return value[0:4] + value[6:]
    except Exception:
        # Narrowed from a bare `except:` so system-exit signals propagate.
        return value
|
10,247 | 6ba7c55a3c0b71d4991595aab2601ee559b347bb | import sys
import requests
def getem(key):
url = ('http://beta.content.guardianapis.com/search?'
'section=film&api-key=%s&page-size=50&page=%i')
with open('results.json', 'w') as fout:
page = 1
total = 0
while True:
r = requests.get(
url % (key, page))
js = r.json()
fout.write('\n'.join(res['webTitle'].encode('utf-8', 'ignore')
for res in js['response']['results']))
page += 1
if page > js['response']['pages']:
break
total += js['response']['pageSize']
print "DONE: %i" % total
# Usage: python <script>.py <guardian-api-key>
if __name__ == '__main__':
    getem(sys.argv[1])
|
10,248 | 1dbcbf97893eb6f6096be082f74ac14d4f7ced8e | import image
# Demo for the `image` teaching library: load a picture, report its
# dimensions, then sample one pixel's RGB channels.
img =image.Image("img.jpg")
print(img.getWidth())
print(img.getHeight())
# NOTE(review): (45, 55) is passed positionally -- presumably (x, y);
# confirm against the image library's getPixel convention.
p = img.getPixel(45, 55)
print(p.getRed(), p.getGreen(), p.getBlue())
|
10,249 | 3d10ffaa55daab465e84eef0e313371af7c269f7 | import torch
class Memory_OnPolicy():
    """Rollout buffer for on-policy RL.

    Stores transitions as parallel lists; index i across all lists
    describes the i-th stored transition.
    """

    def __init__(self):
        self.actions = []
        self.states = []
        self.next_states = []
        self.logprobs = []
        self.rewards = []
        self.dones = []

    def push(self, state, action, reward, next_state, done, logprob):
        """Append one transition to the buffer."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.next_states.append(next_state)
        self.dones.append(done)
        self.logprobs.append(logprob)

    def clear_memory(self):
        """Empty every field list in place (keeps the same list objects)."""
        for buf in (self.actions, self.states, self.logprobs,
                    self.rewards, self.dones, self.next_states):
            del buf[:]
10,250 | 9948cbc5f8bfbb4516e7d5effebdd0224d24e0f3 | #!/usr/bin/python
'''
Calculate the overall sentiment score for a review.
'''
import sys
import hashlib
def sentiment_score(text, pos_list, neg_list):
    """Return (#positive - #negative) word occurrences in `text`.

    Tokens are produced by splitting on single spaces only.
    """
    balance = 0
    for token in text.split(' '):
        if token in pos_list:
            balance += 1
        if token in neg_list:
            balance -= 1
    return balance
# Word lists: one word per line (the trailing newline yields one '' entry).
positive_words=open('./positive-words.txt').read().split('\n')
negative_words=open('./negative-words.txt').read().split('\n')
# Hadoop-streaming style mapper: emit "md5(review)\tscore" per stdin line.
for l in sys.stdin:
    l=l.strip()
    l=l.lower()
    score=sentiment_score(l,positive_words,negative_words)
    # Python 2: hashlib.md5 accepts the (byte) string directly.
    hash_object=hashlib.md5(l)
    print '%s\t%s' % (hash_object.hexdigest(), score)
|
10,251 | 581d1b9e6cd9df5fb1fdae1bcc26818938f5906d | import os
from datetime import datetime
from nest_py.core.db.sqla_resources import JobsSqlaResources
from nest_py.nest_envs import ProjectEnv, RunLevel
from nest_py.ops.nest_sites import NestSite
def generate_db_config(project_env=None, runlevel=None):
    """Build a postgres connection-config dict from environment variables.

    NOTE(review): `project_env` and `runlevel` are defaulted and resolved
    but never consulted below -- presumably a hook for per-environment
    configs; confirm before removing.
    """
    if project_env is None:
        project_env = ProjectEnv.hello_world_instance()
    if runlevel is None:
        runlevel = RunLevel.development_instance()
    config = {
        "user":os.getenv('POSTGRES_USERNAME', "nest"),
        # NOTE(review): default is the int 5432, but a set env var yields a
        # str -- consumers must tolerate both types.
        "port": os.getenv('POSTGRES_PORT', 5432), #exported in docker startup
        "password":os.getenv('POSTGRES_PASSWORD', "GARBAGESECRET"),
        "db_name":os.getenv('POSTGRES_DATABASE', "nest"),
        #"verbose_logging":True
        "verbose_logging":False
    }
    host = os.getenv('POSTGRES_HOST', NestSite.localhost_instance().get_server_ip_address())
    config['host'] = host
    return config
#At import time, we default to a plain JobsSqlaResources in order
#to create a declarative_base that the ORM classes can use.
#This is a bit complicated because once the configs are actually
#processed, a job or nest_ops will need to assign database connection
#information to this object. In the case of flask, this object will be
#overwritten completely and this default instance will be ignored
#thereafter and flask_sqlalchemy package will be responsible for
#binding the ORM classes to the Metadata
GLOBAL_SQLA_RESOURCES = JobsSqlaResources(generate_db_config())
def set_global_sqla_resources(sqla_resources):
    """Install the process-wide SQLAlchemy resources object.

    sqla_resources (either JobsSqlaResources or FlaskSqlaResources)

    Bug fix: the original assigned to a *local* name, so the module-level
    GLOBAL_SQLA_RESOURCES read by the get_global_* accessors was never
    actually replaced.  The `global` declaration makes the rebind visible.
    """
    global GLOBAL_SQLA_RESOURCES
    md = sqla_resources.get_metadata()
    #_bind_tables_to_metadata(md, nest_project)
    GLOBAL_SQLA_RESOURCES = sqla_resources
    return
def get_global_sqlalchemy_base():
    """Return the declarative base from the installed global resources.

    Raises Exception when the resources have not been initialized.
    """
    if GLOBAL_SQLA_RESOURCES is None:
        raise Exception('SQLA resources not initialized')
    return GLOBAL_SQLA_RESOURCES.get_declarative_base()
def get_global_sqlalchemy_metadata():
    """Return the MetaData from the installed global resources.

    Raises Exception when the resources have not been initialized.
    """
    if GLOBAL_SQLA_RESOURCES is None:
        raise Exception('SQLA resources not initialized')
    return GLOBAL_SQLA_RESOURCES.get_metadata()
def get_global_sqlalchemy_session():
    """Return a session from the installed global resources.

    Raises Exception when the resources have not been initialized.
    """
    if GLOBAL_SQLA_RESOURCES is None:
        raise Exception('SQLA resources not initialized')
    return GLOBAL_SQLA_RESOURCES.get_session()
def get_global_sqlalchemy_engine():
    """Return the Engine from the installed global resources.

    Raises Exception when the resources have not been initialized.
    """
    if GLOBAL_SQLA_RESOURCES is None:
        raise Exception('SQLA resources not initialized')
    return GLOBAL_SQLA_RESOURCES.get_engine()
|
10,252 | 4771fc205e78947925bfa7bcbf45e44114836226 | from flask import Flask, render_template, flash, request, redirect, url_for, session, send_file
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
import sendgrid
import os
users = 0
class ContactForm(Form):
    """WTForms subscription form: every field is required; labels are
    shown as input placeholders instead of separate label elements."""
    name = TextField('Full Name', validators=[validators.DataRequired()], render_kw={"placeholder": "Name"})
    email = TextField('Email ID', validators=[validators.DataRequired()], render_kw={"placeholder": "Email"} )
    phone = TextField('Phone Number', validators=[validators.DataRequired()], render_kw={"placeholder": "Phone Number"})
    foi = TextField('Field Of Interest', validators=[validators.DataRequired()], render_kw={"placeholder": "Field Of Interest"})
def send_mail(users, name, email, ph_num, field):
    """Email the subscriber's details to a fixed recipient via SendGrid.

    Returns the SendGrid API response object.
    NOTE(review): `users` (the running subscriber count) is accepted but
    never used in the message -- confirm whether it should appear in the
    subject or body.
    """
    sg = sendgrid.SendGridAPIClient(apikey = os.environ.get("SG_API_KEY"))
    from_email = sendgrid.helpers.mail.Email("widhya.org@gmail.com", name="Widhya Org")
    #print(subject_given.split("-")[0])
    to_email = sendgrid.helpers.mail.Email("rahuldravid313@gmail.com")
    #print(to_email)
    subject = "Subscribers List "
    mail_content = "Name : <b>%s</b> <br>Email ID : <b>%s</b> <br>Number : <b>%s</b> <br>Field Of Interest : <b>%s</b> <br>"%(name, email, ph_num, field)
    content = sendgrid.helpers.mail.Content("text/html", "<html><body><p>Thanks for actually using this particular thingy. I hope you're doing good! Thank those who actually agreed to use this particular website.</p> <br> <pre>%s</pre></body></html>"%(mail_content))
    # NOTE(review): argument order (from, subject, to, content) matches the
    # legacy sendgrid-python v3 Mail helper -- verify against the pinned
    # library version.
    mail = sendgrid.helpers.mail.Mail(from_email, subject, to_email, content)
    response = sg.client.mail.send.post(request_body=mail.get())
    return response
DEBUG = True

app = Flask(__name__) #initialising flask
app.config.from_object(__name__) #configuring flask
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
# Bug fix: a second `app = Flask(__name__)` used to follow here, which
# discarded the configured instance above (including SECRET_KEY), so
# sessions/CSRF would have run on an unconfigured app.  Only one Flask
# instance is created now.
@app.route("/", methods=['GET', 'POST'])
def index():
form = ContactForm(request.form)
if(request.method == 'POST'):
if(form.validate()):
global users
users += 1
response = send_mail(users, request.form['name'],request.form['email'],request.form['phone'],request.form['foi'])
return redirect(url_for("index"))
return render_template("index.html", form=form)
@app.route("/rahul", methods=['GET', 'POST'])
def Rahul_Widhya_VisitingCard():
return send_file("docs/Rahul_VisitingCard.pdf")
@app.route("/rishabh", methods=['GET', 'POST'])
def Rishabh_Widhya_VisitingCard():
return send_file("docs/Rishabh_VisitingCard.pdf")
'''@app.errorhandler(404)
def not_found(e):
return render_template("404.html")
'''
@app.errorhandler(500)
def application_error(e):
    """Render internal server errors as a plain-text 500 response."""
    message = 'Sorry, unexpected error: {}'.format(e)
    return message, 500
if(__name__ == "__main__"):
app.run(debug=True)
|
10,253 | 2a6abf28c23a925b8cc02621b5210a579cfe65de | # Generated by Django 2.0.4 on 2018-06-25 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a `slug` field to Author, Hashtag and Post.

    Each field carries a one-off default ('test') so existing rows can be
    populated; preserve_default=False drops that default afterwards.
    """

    dependencies = [
        ('postcontent', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='author',
            name='slug',
            field=models.SlugField(default='test', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='hashtag',
            name='slug',
            field=models.SlugField(default='test'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.SlugField(default='test'),
            preserve_default=False,
        ),
    ]
|
10,254 | a07ef98502948a5cbb1a306bf65d80b256eaf28c |
import dynet as dy
import Saxe
import numpy as np
class MLP:
    """ MLP with 1 hidden Layer

    DyNet feed-forward block: input -> tanh hidden layer (dropout during
    training) -> linear output (optionally softmaxed).

    :param model: dy.ParameterCollection the weights are registered in
    :param input_dim: size of the input vector
    :param hidden_dim: size of the hidden layer
    :param output_dim: size of the output layer
    :param dropout: dropout rate applied to the hidden layer when training
    :param softmax: if True, apply softmax to the output expression
    """
    def __init__(self, model, input_dim, hidden_dim, output_dim, dropout = 0, softmax=False):
        self.input = input_dim
        self.hidden = hidden_dim
        self.output = output_dim
        self.dropout = dropout
        self.softmax = softmax
        self.WI2H = model.add_parameters((self.hidden,self.input))
        self.bI2H = model.add_parameters((self.hidden), init = dy.ConstInitializer(0))
        self.WH2O = model.add_parameters((self.output,self.hidden))
        self.bH20 = model.add_parameters((self.output), init = dy.ConstInitializer(0))
    def __call__(self, inputs, predict=False):
        # dy.parameter(...) loads each parameter into the current
        # computation graph; must be redone on every call/graph.
        WI2H = dy.parameter(self.WI2H)
        bI2H = dy.parameter(self.bI2H)
        WH2O = dy.parameter(self.WH2O)
        bH20 = dy.parameter(self.bH20)
        if (predict):
            # Inference path: no dropout.
            hidden = dy.tanh(dy.affine_transform([bI2H,WI2H,inputs]))
        else:
            hidden = dy.dropout(dy.tanh(dy.affine_transform([bI2H,WI2H,inputs])),self.dropout)
        output = dy.affine_transform([bH20,WH2O,hidden])
        if (self.softmax):
            return dy.softmax(output)
        else:
            return output
class Lin_Projection:
    """ Linear projection Layer

    DyNet affine layer W*x + b with Saxe orthogonal initialization of W.
    """
    def __init__(self, model, input_dim,output_dim):
        self.input = input_dim
        self.output = output_dim
        Saxe_initializer = Saxe.Orthogonal()
        self.W = model.add_parameters((self.output,self.input), init=dy.NumpyInitializer(Saxe_initializer(((self.output,self.input)))))
        self.b = model.add_parameters((self.output), init = dy.ConstInitializer(0))
    def __call__(self, inputs):
        W = dy.parameter(self.W)
        b = dy.parameter(self.b)
        output = dy.affine_transform([b,W,inputs])
        return output
    def L2_req_term(self):
        # Regularizer ||W W^T - I||^2 / 2: pushes the rows of W towards
        # orthonormality (consistent with the Saxe initialization above).
        W = dy.parameter(self.W)
        WW = W *dy.transpose(W)
        loss = dy.squared_norm(WW - dy.inputTensor(np.eye(self.output))) / 2
        return loss
|
10,255 | 0181084a016f075cd0626db2522e0cd12accecdd | import numpy as np
file_data = np.loadtxt('./data/final1.csv', delimiter=',')


def _min_max(col):
    # Min-Max normalisation per feature column: (x - min) / (max - min).
    return (col - col.min()) / (col.max() - col.min())


# Columns 0-5 are features to normalise; column 6 is the label, written
# through unchanged.
normed = [_min_max(file_data[:, j]) for j in range(6)]

# Open the output once (the original reopened it in append mode for every
# row) and let the context manager close it even on error.  Output bytes
# are identical to the original's.
with open('./data/norm_feature.csv', 'a') as save_file:
    for i in range(file_data.shape[0]):
        fields = [str(normed[j][i]) for j in range(6)] + [str(file_data[i][6])]
        save_file.write(','.join(fields) + '\n')
10,256 | e9100720fc706803ca5208c335a4a3b2ef5044c2 | from typing import List
from django.urls import (path, URLPattern)
from . import views
# URL routes for the staff area; all views live in ./views.
urlpatterns: List[URLPattern] = [
    path(route='', view=views.StaffView.as_view(), name='staff'),
    path(route='products/', view=views.ProductListView.as_view(), name='product-list'),
    path(route='create/', view=views.ProductCreateView.as_view(), name='product-create'),
    path(
        route='products/<pk>/update/',
        view=views.ProductUpdateView.as_view(),
        name='product-update',
    ),
    path(
        route='products/<pk>/delete/',
        view=views.ProductDeleteView.as_view(),
        name='product-delete',
    ),
]
# Namespace used for reversing, e.g. 'staff:product-list'.
app_name: str = 'staff'
__all__ = ('app_name', 'urlpatterns',)
|
10,257 | 46e3803cdc972f8411c14ac338e5ff0eb84e8023 | __author__ = 'wangqiushi'
|
10,258 | 555063bb8c1fa7a0c857f88f1a034a7fda00d56d | import pandas as pd
import numpy as np
import os
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, load_model
from azure_stt import speech_to_text
from google_tts import text_to_audio
from text_analytics import keyword_extraction
from image_captioning import image_captioning
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
#tokenizer = None
#model = None
def question_analytics(txt):
    """Route a user question to an answer string.

    'what do you see?' (case-insensitive) triggers the camera/captioning
    path; any other text is classified into one of six question types by
    the loaded Keras model, keywords are extracted, and a canned answer is
    composed via answer_pool.
    """
    global tokenizer
    global model
    if txt.lower()=='what do you see?':
        ########### Take_A_Picture()############
        return image_captioning()
    else:
        txt=[txt]
        # Class order must match the model's training labels.
        question_type = ['abbreviation', 'description', 'entity', 'human', 'location', 'number']
        token = tokenizer.texts_to_sequences(txt)
        # Pad to the training-time sequence length of 30 tokens.
        token = pad_sequences(token,maxlen=30)
        result = model.predict(token,verbose=2)
        question = question_type[np.argmax(result)]
        keywords = keyword_extraction(txt)[0]
        return answer_pool(keywords,question)
def answer_pool(keywords, question_type):
    """Compose a canned answer from extracted keywords and a question type.

    :param keywords: list of keyword strings.  They are joined with ','
        and matched by *substring*, so e.g. 'time' also matches
        'timetable' -- preserved from the original behavior.
    :param question_type: one of 'abbreviation', 'description', 'entity',
        'human', 'location', 'number'.
    :return: the answer string.

    Fix: an unrecognized question_type used to raise NameError (no branch
    ever bound `answer`); it now falls back to a generic reply.
    """
    keywords = ','.join(keywords)
    if question_type == 'abbreviation':
        answer = 'What? We parrots never use abbreviations.'
    elif question_type == 'description':
        if 'weather' in keywords:
            answer = "Every day is sunny in Singapore."
        elif 'time' in keywords:
            answer = "Parrots have no ideas how humans measure time."
        elif ('breakfast' in keywords) or ('lunch' in keywords) or ('dinner' in keywords):
            answer = "I'm a robot parrot. I only consume electricity."
        elif 'money' in keywords:
            answer = 'Money? Useless!'
        elif ('girlfriend' in keywords) or ('boyfriend' in keywords):
            answer = 'You will always be single, I promise.'
        elif 'friend' in keywords:
            answer = 'Sadly. My master has never created any friend for me.'
        elif 'hobby' in keywords:
            answer = "My hobby is to generate bugs so that my masters will get anxious, haha"
        else:
            answer = "I've never heard of that. Why not check it on your mobile phone ?"
    elif question_type == 'entity':
        if 'food' in keywords:
            answer = "I'm a robot parrot. I only consume electricity."
        elif 'hobby' in keywords:
            answer = "My hobby is to generate bugs so that my masters will get anxious, haha."
        else:
            # Re-split and join as 'a and b and c' (an empty keyword list
            # yields the singular phrasing, as before).
            parts = keywords.split(',')
            answer = ' and '.join(parts)
            if len(parts) > 1:
                answer += " are not included in a parrot's vocabulary."
            else:
                answer += " is not included in a parrot's vocabulary."
    elif question_type == 'human':
        if len(keywords) == 0:
            answer = "My master never gives me a name. But sometimes they call me artificial idiot parrot."
        elif 'master' in keywords:
            answer = "I have 4 masters. They are the most handsome guys in the world."
        elif ('mother' in keywords) or ('father' in keywords):
            answer = "My masters are my mothers and fathers."
        else:
            answer = "Don't ask me about humans. I don't know anyone of you."
    elif question_type == 'location':
        if len(keywords) == 0:
            answer = "I'm from the magical world of ones and zeros."
        else:
            answer = "I have no sense of direction. Why not check your google map ?"
    elif question_type == 'number':
        if len(keywords) == 0 or ('age' in keywords):
            answer = "I'm 10 days old. My master created me just 10 days ago."
        else:
            answer = "I've never learned math. I can't even count from 1 to 100. So please don't ask me about numbers again."
    else:
        # Fallback for unknown question types (previously a NameError).
        answer = "I've never heard of that. Why not check it on your mobile phone ?"
    return answer
def confirm():
    """Prompt until the user types y or n (any case); True means continue."""
    reply = ""
    while reply not in ("y", "n"):
        reply = input("continue [Y/N]? ").lower()
    return reply == "y"
if __name__=='__main__':
    # NOTE(review): `global` at module level is a no-op (and a
    # SyntaxWarning on newer Pythons); the assignments below are already
    # module globals.
    global tokenizer
    global model
    # NOTE(review): pickle.load executes arbitrary code -- only safe for
    # this locally produced artifact.
    with open('tokenizer.pickle','rb') as handle:
        tokenizer = pickle.load(handle)
    model = load_model('model.h5')
    mode = input("Please select the parrot's mode: 1. Text-based 2. Audio-based: (1 or 2) ")
    if mode=='1':
        # Text loop: read questions from stdin until the user declines.
        while True:
            question = input("Please input your question: ").lower()
            answer = question_analytics(question)
            print(answer)
            if not confirm():
                print('Bye bye')
                break
    elif mode=='2':
        # Audio loop: STT -> answer -> TTS; on STT failure just show the error.
        while True:
            question,success = speech_to_text()
            if success==False:
                print(question)
            else:
                print(question)
                answer = question_analytics(question)
                print(answer)
                text_to_audio(answer)
                if not confirm():
                    print('Bye bye')
                    text_to_audio('Bye bye')
                    break
    else:
        print('Error input. Please run the script again.')
|
10,259 | 82fa4a87b4d8cfc45577bc519f62d06b7369b242 | # Generated by Django 3.0.2 on 2020-08-04 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add optional About_me, Class and Mobile fields to
    the exam app's Profile model."""

    dependencies = [
        ('exam', '0002_profile'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='About_me',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='Class',
            field=models.CharField(blank=True, max_length=300, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='Mobile',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
10,260 | af39fa086691c48f157863c791e3f07b96152fc6 |
# def TwoSum(nums:list,target):
# nums.sort()
# begin = 0
# end = len(nums) - 1
# while begin < end:
# sum = nums[begin] + nums[end]
# if sum== target:
# print(begin,end)
# begin += 1
# end -= 1
# else:
# if sum < target:
# begin += 1
# else:
# begin -= 1
# def TwoSum(nums,target):
# for i in range(len(nums)):
# for j in range(i + 1,len(nums)):
# if nums[i] + nums[j] == target:
# return i,j
def TwoSum(nums: list, target):
    """Return indices (i, j), i < j, of the first pair summing to target.

    Single pass with a value->index map: O(n) time, O(n) space.  Returns
    None when no pair exists (same as the original's implicit return).
    """
    seen = {}
    for i, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return seen[complement], i
        seen[value] = i
    return None
# Demo: nums[0] + nums[3] == 6, so this prints (0, 3).
nums = [1,4,3,5,2]
print(TwoSum(nums,6))
10,261 | 3413c8d24d8d411f98a9d1148b47d3f8dab32ffc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import configparser
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
import wiki2txt
import web_browser
import holiday
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
# Bot credentials: config.cnf must define a [bot] section with TOKEN.
config = configparser.RawConfigParser()
config.read('config.cnf')
updater = Updater(token=config.get("bot", "TOKEN"))
dispatcher = updater.dispatcher
def start(bot, update):
    """Handler for /start: greet the chat that issued the command."""
    chat = update.message.chat_id
    bot.sendMessage(chat_id=chat, text="I'm a bot, please talk to me!")
def wiki_parse(args):
    """Fetch and plain-text a Russian Wikipedia article named by `args`.

    Returns the article text, or a Russian user-facing message when the
    query is empty, the article is missing, or the article body is empty.
    """
    if len(args):
        n = wiki2txt.CorrectWord(args)
        url = wiki2txt.RuWiki(n)
        s = wiki2txt.GetWiki(url)
        if not len(s):
            return u'Статья не найдена. Попробуйте поискать другую'  # article not found
        txt = wiki2txt.Wiki2Text(s)
        if len(txt):
            return txt
        else:
            return u'Статья пустая. Попробуйте почитать другую.'  # article is empty
    else:
        return u'Укажите, пожалуйста, название статьи после команды'  # no title given
def wiki(bot, update, args):
    # /wiki <title>: reply with the first 340 characters of the article text.
    bot.sendMessage(chat_id=update.message.chat_id, text=wiki_parse(" ".join(args))[:340])
def echo(bot, update):
    """Echo any incoming text message back to its chat."""
    incoming = update.message
    bot.sendMessage(chat_id=incoming.chat_id, text=incoming.text)
# Wire command handlers; /wiki, /web and /holiday receive their arguments
# via pass_args=True.
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('wiki', wiki, pass_args=True))
dispatcher.add_handler(CommandHandler('web', web_browser.web_handler, pass_args=True))
dispatcher.add_handler(CommandHandler('holiday', holiday.holiday_handler, pass_args=True))
# dispatcher.add_handler(MessageHandler(Filters.text, echo))
updater.start_polling()
|
10,262 | a15c714afa5ecd3bb7424db44f82a620602c6963 | import requests
# Show this machine's apparent IP when routed through the SOCKS proxy.
url = 'https://ipinfo.io'
# NOTE(review): credentials are blank -- fill in before use, and avoid
# committing real credentials to source control.
username = ''
password = ''
# socks5h resolves DNS on the proxy side (vs. socks5, which resolves locally).
proxy = f'socks5h://{username}:{password}@gate.smartproxy.com:7000'
response = requests.get(url, proxies= {'http': proxy, 'https': proxy})
print(response.text)
|
10,263 | fcaf05a0f83ee78a37ca9726e1c111597fce6dfc | from models.usuario import UsuarioModel
import bcrypt
class Usuario(object):
    """Lightweight authenticated-user wrapper (used by the JWT callbacks)."""

    def __init__(self, id, username):
        self.id = id
        self.username = username

    def __str__(self):
        return "Usuario(id='%s')" % self.id
# return "Usuario(id='{}')".format(self.id)
def autenticacion(username, password):
    """Authenticate a user by email and password.

    Returns a Usuario on success, otherwise None (printing a Spanish
    diagnostic for each failure mode).
    """
    if username and password:
        resultado = UsuarioModel.query.filter_by(correo=username).first()
        if resultado:
            # Password check: re-hash the supplied password with the stored
            # salt and compare against the stored hash.
            pass_convertida = bytes(password,'utf-8')
            salt = bytes(resultado.salt,'utf-8')
            hashed = bcrypt.hashpw(pass_convertida,salt).decode('utf-8')
            if hashed == resultado.hashe:
                return Usuario(resultado.id, resultado.correo)
            else:
                print('Contraseña incorrecta')
                return None
        else:
            print('Usuario no encontrado')
            return None
    else:
        print('Falta el username o la password')
        return None
def identificacion(payload):
    """JWT identity callback.

    The payload holds the token's expiry date, any extra fields stored in
    the token, and its creation date.  Returns (id, correo) for the user
    referenced by payload['identity'], or None when absent/unknown.
    """
    if (payload['identity']):
        # 'identity' is the logged-in user's id; look up the full record.
        resultado = UsuarioModel.query.filter_by(id=payload['identity']).first()
        if resultado:
            return (resultado.id, resultado.correo)
        else:
            return None
    else:
        return None
|
10,264 | d47d487cd3213f98980041ebb22d33dc2b58baed | #!/usr/bin/python3
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
import pyautogui
#import random
from random import randint
import cv2
import h5py
import numpy as np
from matplotlib import pyplot as plt
from pylab import *
from tkinter import messagebox
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.backends.backend_tkagg as tkagg
import numpy as np
from TaskerOrbitPlotter2 import TaskerOrbitPlotter
from datetime import datetime, timedelta
trace = 0
plotButton = False
raster = "default"
class TaskerCanvas(ttk.Frame):
    """
    Displays the map and plot
    :ivar TaskerGui parent: The parent application
    :ivar int canvas_width: The width of the canvas
    :ivar int canvas_height: The height of the canvas
    :ivar TaskerOrbitPlotter plotter: Manages orbit data and plotting for the canvas
    :ivar FigureCanvasTkAgg canvas: The canvas
    :param int width: The desired width of the canvas
    :param int height: The desired height of the canvas
    """
    def __init__(self, mainframe, width=500, height=500):
        ttk.Frame.__init__(self, master=mainframe)
        self.parent = mainframe
        # Vertical and horizontal scrollbars for canvas
        # NOTE(review): vbar/hbar are created but never gridded/packed or
        # wired to the canvas -- confirm whether they are still needed.
        vbar = tk.Scrollbar(self, orient='vertical')
        hbar = tk.Scrollbar(self, orient='horizontal')
        # Create canvas and put map on it
        self.canvas_width = width
        self.canvas_height = height
        self.plotter = TaskerOrbitPlotter(self)
        fig = self.plotter.show()
        # NOTE(review): `t` is computed but never used.
        t = np.arange(0, 3, .01)
        # Embed the matplotlib figure in the parent Tk widget.
        self.canvas = FigureCanvasTkAgg(fig, master = mainframe)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side="top",fill=tk.BOTH,expand=True)
    def enableZoomIn(self):
        """
        Enables zooming in when clicking on the map and changes the cursor.
        """
        self.zoomInID = self.canvas.mpl_connect('button_press_event', self.onZoomIn)
        self.master.config(cursor = "cross")
    def disableZoomIn(self):
        """
        Disables zooming in. Changes cursor back to normal.
        """
        self.canvas.mpl_disconnect(self.zoomInID)
        self.master.config(cursor = "arrow")
    def enableZoomOut(self):
        """
        Enables zooming out when clicking on the map and changes the cursor.
        """
        self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)
        self.master.config(cursor = "cross")
    def disableZoomOut(self):
        """
        Disables zooming out. Changes cursor back to normal.
        """
        self.canvas.mpl_disconnect(self.zoomOutID)
        self.master.config(cursor = "arrow")
    def onZoomIn(self, event):
        """
        Called when the map is clicked. Zooms in on the quadrant clicked on.
        """
        # Clicks outside the axes have xdata/ydata == None; the %f format
        # then raises, and the handler bails out silently.
        try:
            print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
                  ('double' if event.dblclick else 'single', event.button,
                   event.x, event.y, event.xdata, event.ydata))
        except:
            return
        self.plotter.zoomIn(event)
    def onZoomOut(self, event):
        """
        Called when the map is clicked. Zooms out by one zoom level.
        """
        self.plotter.zoomOut(event)
    def on_resize_parent(self,event):
        """
        Called when app is resized
        """
        #print("parent event size="+str(event.width)+" X "+str(event.height))
        self.canvas_width = event.width
        self.canvas_height = event.height
        self.canvas.get_tk_widget().config(width=self.canvas_width, height=self.canvas_height)
        # NOTE(review): show_image is not defined on this class --
        # presumably provided by a subclass or mixin; confirm.
        self.show_image()
    def on_resize_parentx(self,event):
        """
        Called only by Panedwindow to resize in x-dir only.
        """
        ##print("parent event size="+str(event.width)+" X "+str(event.height))
        self.canvas_width = event.width
        self.canvas.get_tk_widget().config(width=self.canvas_width)
        self.show_image()
    def event_subscribe(self, obj_ref):
        """
        Subscribes obj_ref to the TaskerGui.
        :param obj_ref: object to be subscribed to TaskerGui
        """
        # NOTE(review): self.subscribers is never initialized in __init__,
        # so the first call raises AttributeError -- confirm where the list
        # is expected to be created.
        self.subscribers.append(obj_ref)
    def event_publish(self, cmd):
        """
        Publishes an event to all subscribers
        :param str cmd: Command to be published
        """
        for sub in self.subscribers:
            sub.event_receive(cmd)
    def event_receive(self,event):
        """
        Receives an event from a subscription
        :param event: The event received from a subscription
        """
        pass
10,265 | b30cc79dd8f7db6001158bef66aeee89e1d60558 | # -*- coding: utf-8 -*-
"""Plant disease detection
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1p_O39hjZ9J9CzX2lDWWqH_rNa8cp-mg1
"""
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model, load_model
from keras.layers import Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint, EarlyStopping
import tensorflow as tf
import numpy as np
from sklearn import metrics
# Colab fine-tuning script: binary plant-disease classifier on top of VGG16.
tf.logging.set_verbosity(tf.logging.ERROR)
import os
img_width, img_height = 224, 224
nb_train_samples = 135
nb_validation_samples = 35
batch_size = 5
epochs = 1
from google.colab import drive
drive.mount('/content/drive')
# !pwd
import shutil
# Copy the dataset archive out of Drive, extract it locally, then put the
# archive back.
shutil.move("drive/My Drive/plant-leaf.rar", "plant-leaf.rar")
get_ipython().system_raw("unrar x plant-leaf.rar")
shutil.move("plant-leaf.rar", "drive/My Drive/plant-leaf.rar")
# get_ipython().system_raw("unrar x plant-leaf")
train_data_dir = 'plant-leaf/train'
test_data_dir = 'plant-leaf/test'
# NOTE(review): moving train_aug to Drive and immediately back is a net
# no-op unless the two cells were run at different times -- confirm intent.
shutil.move("train_aug", "drive/My Drive/train_aug")
shutil.move("drive/My Drive/train_aug", "train_aug")
"""# VGG16"""
vgg16 = applications.VGG16(weights = "imagenet", include_top = False, input_shape = (img_width, img_height, 3))
vgg16.layers
# !rm -rf test_aug
# os.mkdir('saved')
train_aug = 'train_aug'
test_aug = 'test_aug'
# NOTE(review): os.mkdir raises FileExistsError on re-runs.
os.mkdir(train_aug)
os.mkdir(test_aug)
# Freeze the layers which you don't want to train. Here I am freezing the all the layers.
# (Actually only the first 11 layers are frozen; the rest stay trainable.)
for layer in vgg16.layers[:11]:
    layer.trainable = False
# Adding custom Layers
x = vgg16.output
x = Flatten()(x)
x = Dense(1024, activation = "relu")(x)
x = Dropout(0.5)(x)
x = Dense(512, activation = "relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(2, activation = "softmax")(x)
# creating the final model
# NOTE(review): `input=`/`output=` are legacy kwargs; newer Keras uses
# inputs=/outputs= -- keep pinned to the old version or update together.
vgg16_final = Model(input = vgg16.input, output = predictions)
# compile the model
sgd = optimizers.SGD(lr=0.001, momentum=0.0, decay=0.0, nesterov=False)
# rmsprop = optimizers.RMSprop(lr=0.0001, rho=0.9, epsilon=None, decay=0.0)
# adagrad = optimizers.Adagrad(lr=0.0001, epsilon=None, decay=0.0)
# adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
vgg16_final.compile(loss = "binary_crossentropy", optimizer = sgd, metrics = ["accuracy"])
# Initiate the train and test generators with data Augumentation
train_datagen = ImageDataGenerator(
    rescale = 1./255,
    horizontal_flip = True,
    fill_mode = "nearest",
    zoom_range = 0.3,
    width_shift_range = 0.3,
    height_shift_range = 0.3,
    rotation_range = 30)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size = (img_height, img_width),
    batch_size = batch_size,
    class_mode = "categorical", shuffle = False,
    save_to_dir=train_aug, save_prefix='image', save_format='png')
test_datagen = ImageDataGenerator(
    rescale = 1./255,
    horizontal_flip = True,
    fill_mode = "nearest",
    zoom_range = 0.3,
    width_shift_range = 0.3,
    height_shift_range = 0.3,
    rotation_range = 30
    )
test_generator = test_datagen.flow_from_directory(test_data_dir,
    target_size = (img_height, img_width),
    class_mode = "categorical", shuffle = False,
    save_to_dir=test_aug, save_prefix='image', save_format='png')
# Save the model according to the conditions
checkpoint = ModelCheckpoint('saved/weights.{epoch:02d}-{val_loss:.2f}.h5', monitor = 'val_loss', verbose = 1, save_best_only = True, save_weights_only = False, mode = 'auto', period = 1)
# early = EarlyStopping(monitor = 'val_loss', min_delta = 0, patience = 10, verbose = 1, mode = 'auto')
# Train the model
# vgg16_final.fit_generator(train_generator,
# samples_per_epoch = nb_train_samples,
# epochs = epochs,
# validation_data = test_generator,
# nb_val_samples = nb_validation_samples,
# callbacks = [checkpoint, early])
vgg16_final.fit_generator(train_generator,
    samples_per_epoch = nb_train_samples,
    epochs = epochs,
    validation_data = test_generator,
    nb_val_samples = nb_validation_samples,
    callbacks = [checkpoint])
# Loading the trained model
# NOTE(review): hard-coded checkpoint filename from a prior run -- will
# not exist on a fresh execution.
vgg16_final = load_model('saved/weights.08-1.57.h5')
score, acc = vgg16_final.evaluate_generator(test_generator, verbose = 0)
score
print('Test Accuracy: {}%' .format(acc * 100))
"""# Confusion Matrix"""
# Confusion Matrix and Classification Report
Y_pred = vgg16_final.predict_generator(test_generator)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
cm = metrics.confusion_matrix(test_generator.classes, y_pred)
print(cm)
# Accuracy from the 2x2 confusion matrix: (TP + TN) / total.
acc = 100 * (cm[0][0] + cm[1][1]) / (cm[0][0] + cm[1][1] + cm[0][1] + cm[1][0])
print('\nAccuracy: ', acc, '%')
test_generator.classes
y_pred
|
10,266 | 4273e7dcdb038ff83a89e785b34b217509b3a4ce | #20微巴斯卡=0分貝,定義為v0;此式代表多少微巴斯卡=多少分貝
import numpy as np
# dB SPL from sound pressure: dB = 20*log10(P_uPa / 20), with 20 uPa = 0 dB.
print(20*np.log10(20000/20))
# Pressure ratio between a 30 dB and a 50 dB sound (inverting the formula above).
print((10**(30/20+np.log10(20)))/(10**(50/20+np.log10(20))))
|
10,267 | 092bb2ec1e09147f69e251597c8b141471429784 | import sys
import numpy as np
import numpy.linalg as la
import pandas as pd
import patsy
import statsmodels.api as sm
from feature_selection_utils import select_features_by_variation
from sklearn.feature_selection import mutual_info_regression
from sklearn.preprocessing import StandardScaler
# Auxiliary functions of COXEN start here ####################
def calculate_concordance_correlation_coefficient(u, v):
    """Concordance correlation coefficient (Lin's CCC) of two 1-D arrays.

    CCC = 2*cov(u, v) / (var(u) + var(v) + (mean(u) - mean(v))^2), using
    population (biased) moments, exactly as before.

    Parameters:
    -----------
    u: 1-D numpy array of a variable
    v: 1-D numpy array of a variable

    Returns:
    --------
    A numeric concordance correlation coefficient between the variables.
    """
    mu_u = np.mean(u)
    mu_v = np.mean(v)
    covariance = np.mean((u - mu_u) * (v - mu_v))
    denominator = (np.mean(np.square(u - mu_u))
                   + np.mean(np.square(v - mu_v))
                   + np.square(mu_u - mu_v))
    return 2 * covariance / denominator
def generalization_feature_selection(data1, data2, measure, cutoff):
    """
    This function uses a correlation measure to select the features that are generalizable
    between data1 and data2.

    Parameters:
    -----------
    data1: 2D numpy array of the first dataset with a size of (n_samples_1, n_features)
    data2: 2D numpy array of the second dataset with a size of (n_samples_2, n_features)
    measure: string. 'pearson' indicates the Pearson correlation coefficient;
        'ccc' indicates the concordance correlation coefficient.
    cutoff: a positive number for selecting generalizable features. If cutoff < 1, this function selects
        the features with a correlation coefficient >= cutoff. If cutoff >= 1, it must be an
        integer indicating the number of features to be selected based on correlation coefficient.

    Returns:
    --------
    fid: 1-D numpy array containing the indices of selected features, sorted by
        decreasing cross-dataset correlation.
    """
    # Feature-feature correlation matrices within each dataset.
    cor1 = np.corrcoef(np.transpose(data1))
    cor2 = np.corrcoef(np.transpose(data2))
    num = data1.shape[1]
    cor = []
    for i in range(num):
        # Co-expression profile of feature i against all other features,
        # with the diagonal (self-correlation) entry removed.
        profile1 = np.concatenate((cor1[:i, i], cor1[(i + 1):, i]))
        profile2 = np.concatenate((cor2[:i, i], cor2[(i + 1):, i]))
        if measure == "pearson":
            cor.append(np.corrcoef(np.vstack((profile1, profile2)))[0, 1])
        elif measure == "ccc":
            cor.append(
                calculate_concordance_correlation_coefficient(profile1, profile2)
            )
    cor = np.array(cor)
    if cutoff < 1:
        # Threshold mode, as documented: keep every feature whose
        # cross-dataset correlation reaches the cutoff. (Previously
        # int(cutoff) truncated to 0 and silently selected nothing.)
        fid = np.argsort(-cor)
        fid = fid[cor[fid] >= cutoff]
    else:
        # Top-k mode: the `cutoff` best-correlated features.
        fid = np.argsort(-cor)[: int(cutoff)]
    return fid
# Auxiliary functions of COXEN end here ####################
def coxen_single_drug_gene_selection(
    source_data,
    target_data,
    drug_response_data,
    drug_response_col,
    tumor_col,
    prediction_power_measure="pearson",
    num_predictive_gene=100,
    generalization_power_measure="ccc",
    num_generalizable_gene=50,
    multi_drug_mode=False,
):
    """
    This function selects genes for drug response prediction using the COXEN approach. The COXEN approach is
    designed for selecting genes to predict the response of tumor cells to a specific drug. This function
    assumes no missing data exist.

    Parameters:
    -----------
    source_data: pandas data frame of gene expressions of tumors, for which drug response is known. Its size is
        [n_source_samples, n_features].
    target_data: pandas data frame of gene expressions of tumors, for which drug response needs to be predicted.
        Its size is [n_target_samples, n_features]. source_data and target_data have the same set
        of features and the orders of features must match.
    drug_response_data: pandas data frame of drug response values for a drug. It must include a column of drug
        response values and a column of tumor IDs.
    drug_response_col: non-negative integer or string. If integer, it is the column index of drug response in
        drug_response_data. If string, it is the column name of drug response.
    tumor_col: non-negative integer or string. If integer, it is the column index of tumor IDs in drug_response_data.
        If string, it is the column name of tumor IDs.
    prediction_power_measure: string. 'pearson' uses the absolute value of Pearson correlation coefficient to
        measure prediction power of gene; 'mutual_info' uses the mutual information to measure prediction power
        of gene. Default is 'pearson'.
    num_predictive_gene: positive integer indicating the number of predictive genes to be selected.
    generalization_power_measure: string. 'pearson' indicates the Pearson correlation coefficient;
        'ccc' indicates the concordance correlation coefficient. Default is 'ccc'.
    num_generalizable_gene: positive integer indicating the number of generalizable genes to be selected.
    multi_drug_mode: boolean, indicating whether the function runs as an auxiliary function of COXEN
        gene selection for multiple drugs. Default is False.

    Returns:
    --------
    indices: 1-D numpy array containing the indices of selected genes, if multi_drug_mode is False;
        1-D numpy array of indices of sorting all genes according to their prediction power, if multi_drug_mode is True.
    """
    # Resolve column names to positional indices so .iloc can be used below.
    if isinstance(drug_response_col, str):
        drug_response_col = np.where(drug_response_data.columns == drug_response_col)[
            0
        ][0]
    if isinstance(tumor_col, str):
        tumor_col = np.where(drug_response_data.columns == tumor_col)[0][0]
    # Keep only tumors present in both the response table and source_data
    # (intersection in both directions).
    drug_response_data = drug_response_data.copy()
    drug_response_data = drug_response_data.iloc[
        np.where(np.isin(drug_response_data.iloc[:, tumor_col], source_data.index))[0],
        :,
    ]
    source_data = source_data.copy()
    source_data = source_data.iloc[
        np.where(np.isin(source_data.index, drug_response_data.iloc[:, tumor_col]))[0],
        :,
    ]
    # Drop (near-)constant features in either dataset; keep the common,
    # sorted set of varying feature indices.
    source_std_id = select_features_by_variation(
        source_data, variation_measure="std", threshold=0.00000001
    )
    target_std_id = select_features_by_variation(
        target_data, variation_measure="std", threshold=0.00000001
    )
    std_id = np.sort(np.intersect1d(source_std_id, target_std_id))
    source_data = source_data.iloc[:, std_id]
    target_data = target_data.copy()
    target_data = target_data.iloc[:, std_id]
    # Perform the first step of COXEN approach to select predictive genes. To avoid exceeding the memory limit,
    # the prediction power of genes is calculated in batches.
    batchSize = 1000
    numBatch = int(np.ceil(source_data.shape[1] / batchSize))
    prediction_power = np.empty((source_data.shape[1], 1))
    prediction_power.fill(np.nan)
    for i in range(numBatch):
        startIndex = i * batchSize
        endIndex = min((i + 1) * batchSize, source_data.shape[1])
        if prediction_power_measure == "pearson":
            # Correlate each gene in the batch against the drug response;
            # the response vector is stacked as the last row, so the last
            # column of cor_i holds gene-vs-response correlations.
            cor_i = np.corrcoef(
                np.vstack(
                    (
                        np.transpose(
                            source_data.iloc[:, startIndex:endIndex]
                            .loc[drug_response_data.iloc[:, tumor_col], :]
                            .values
                        ),
                        np.reshape(
                            drug_response_data.iloc[:, drug_response_col].values,
                            (1, drug_response_data.shape[0]),
                        ),
                    )
                )
            )
            prediction_power[startIndex:endIndex, 0] = abs(cor_i[:-1, -1])
        if prediction_power_measure == "mutual_info":
            mi = mutual_info_regression(
                X=source_data.iloc[:, startIndex:endIndex]
                .loc[drug_response_data.iloc[:, tumor_col], :]
                .values,
                y=drug_response_data.iloc[:, drug_response_col].values,
            )
            prediction_power[startIndex:endIndex, 0] = mi
    if multi_drug_mode:
        # Auxiliary mode: return ALL genes ranked by decreasing prediction
        # power, mapped back to original feature indices.
        indices = np.argsort(-prediction_power[:, 0])
        return std_id[indices]
    num_predictive_gene = int(min(num_predictive_gene, source_data.shape[1]))
    gid1 = np.argsort(-prediction_power[:, 0])[:num_predictive_gene]
    # keep only predictive genes for source and target data
    source_data = source_data.iloc[:, gid1]
    target_data = target_data.iloc[:, gid1]
    num_generalizable_gene = int(min(num_generalizable_gene, len(gid1)))
    # perform the second step of COXEN approach to select generalizable genes among the predictive genes
    gid2 = generalization_feature_selection(
        source_data.values,
        target_data.values,
        generalization_power_measure,
        num_generalizable_gene,
    )
    # Map the selected positions back through both selection stages to the
    # original feature indices.
    indices = std_id[gid1[gid2]]
    return np.sort(indices)
def coxen_multi_drug_gene_selection(
    source_data,
    target_data,
    drug_response_data,
    drug_response_col,
    tumor_col,
    drug_col,
    prediction_power_measure="lm",
    num_predictive_gene=100,
    generalization_power_measure="ccc",
    num_generalizable_gene=50,
    union_of_single_drug_selection=False,
):
    """
    This function uses the COXEN approach to select genes for predicting the response of multiple drugs.
    It assumes no missing data exist. It works in three modes.
    (1) If union_of_single_drug_selection is True, prediction_power_measure must be either 'pearson' or 'mutual_info'.
    This functions runs coxen_single_drug_gene_selection for every drug with the parameter setting and takes the
    union of the selected genes of every drug as the output. The size of the selected gene set may be larger than
    num_generalizable_gene.
    (2) If union_of_single_drug_selection is False and prediction_power_measure is 'lm', this function uses a
    linear model to fit the response of multiple drugs using the expression of a gene, while the drugs are
    one-hot encoded. The p-value associated with the coefficient of gene expression is used as the prediction
    power measure, according to which num_predictive_gene genes will be selected. Then, among the predictive
    genes, num_generalizable_gene generalizable genes will be selected.
    (3) If union_of_single_drug_selection is False and prediction_power_measure is 'pearson' or 'mutual_info',
    for each drug this functions ranks the genes according to their power of predicting the
    response of the drug. The union of an equal number of predictive genes for every drug will be generated,
    and its size must be at least num_predictive_gene. Then, num_generalizable_gene generalizable genes
    will be selected.

    Parameters:
    -----------
    source_data: pandas data frame of gene expressions of tumors, for which drug response is known. Its size is
        [n_source_samples, n_features].
    target_data: pandas data frame of gene expressions of tumors, for which drug response needs to be predicted.
        Its size is [n_target_samples, n_features]. source_data and target_data have the same set
        of features and the orders of features must match.
    drug_response_data: pandas data frame of drug response that must include a column of drug response values,
        a column of tumor IDs, and a column of drug IDs.
    drug_response_col: non-negative integer or string. If integer, it is the column index of drug response in
        drug_response_data. If string, it is the column name of drug response.
    tumor_col: non-negative integer or string. If integer, it is the column index of tumor IDs in drug_response_data.
        If string, it is the column name of tumor IDs.
    drug_col: non-negative integer or string. If integer, it is the column index of drugs in drug_response_data.
        If string, it is the column name of drugs.
    prediction_power_measure: string. 'pearson' uses the absolute value of Pearson correlation coefficient to
        measure prediction power of a gene; 'mutual_info' uses the mutual information to measure prediction power
        of a gene; 'lm' uses the linear regression model to select predictive genes for multiple drugs. Default is 'lm'.
    num_predictive_gene: positive integer indicating the number of predictive genes to be selected.
    generalization_power_measure: string. 'pearson' indicates the Pearson correlation coefficient;
        'ccc' indicates the concordance correlation coefficient. Default is 'ccc'.
    num_generalizable_gene: positive integer indicating the number of generalizable genes to be selected.
    union_of_single_drug_selection: boolean, indicating whether the final gene set should be the union of genes
        selected for every drug.

    Returns:
    --------
    indices: 1-D numpy array containing the indices of selected genes.
    """
    # Resolve column names to positional indices so .iloc can be used below.
    if isinstance(drug_response_col, str):
        drug_response_col = np.where(drug_response_data.columns == drug_response_col)[
            0
        ][0]
    if isinstance(tumor_col, str):
        tumor_col = np.where(drug_response_data.columns == tumor_col)[0][0]
    if isinstance(drug_col, str):
        drug_col = np.where(drug_response_data.columns == drug_col)[0][0]
    # Keep only response rows whose tumor exists in source_data, and only
    # source tumors that have at least one response row.
    drug_response_data = drug_response_data.copy()
    drug_response_data = drug_response_data.iloc[
        np.where(np.isin(drug_response_data.iloc[:, tumor_col], source_data.index))[0],
        :,
    ]
    drugs = np.unique(drug_response_data.iloc[:, drug_col])
    source_data = source_data.copy()
    source_data = source_data.iloc[
        np.where(np.isin(source_data.index, drug_response_data.iloc[:, tumor_col]))[0],
        :,
    ]
    # Drop (near-)constant features in either dataset; keep the common,
    # sorted set of varying feature indices.
    source_std_id = select_features_by_variation(
        source_data, variation_measure="std", threshold=0.00000001
    )
    target_std_id = select_features_by_variation(
        target_data, variation_measure="std", threshold=0.00000001
    )
    std_id = np.sort(np.intersect1d(source_std_id, target_std_id))
    source_data = source_data.iloc[:, std_id]
    target_data = target_data.copy()
    target_data = target_data.iloc[:, std_id]
    num_predictive_gene = int(min(num_predictive_gene, source_data.shape[1]))
    # Mode (1): per-drug COXEN selection, output is the union over drugs.
    if union_of_single_drug_selection:
        if (
            prediction_power_measure != "pearson"
            and prediction_power_measure != "mutual_info"
        ):
            print(
                "pearson or mutual_info must be used as prediction_power_measure for taking the union of selected genes of every drugs"
            )
            sys.exit(1)
        gid1 = np.array([]).astype(np.int64)
        for d in drugs:
            idd = np.where(drug_response_data.iloc[:, drug_col] == d)[0]
            response_d = drug_response_data.iloc[idd, :]
            gid2 = coxen_single_drug_gene_selection(
                source_data,
                target_data,
                response_d,
                drug_response_col,
                tumor_col,
                prediction_power_measure,
                num_predictive_gene,
                generalization_power_measure,
                num_generalizable_gene,
            )
            gid1 = np.union1d(gid1, gid2)
        return np.sort(std_id[gid1])
    # Mode (2): one linear model per gene; response ~ expression + one-hot
    # drug indicators. The gene's coefficient p-value measures prediction power.
    if prediction_power_measure == "lm":
        pvalue = np.empty((source_data.shape[1], 1))
        pvalue.fill(np.nan)
        drug_m = np.identity(len(drugs))
        drug_m = pd.DataFrame(drug_m, index=drugs)
        drug_sample = drug_m.loc[drug_response_data.iloc[:, drug_col], :].values
        for i in range(source_data.shape[1]):
            ge_sample = (
                source_data.iloc[:, i].loc[drug_response_data.iloc[:, tumor_col]].values
            )
            sample = np.hstack(
                (np.reshape(ge_sample, (len(ge_sample), 1)), drug_sample)
            )
            sample = sm.add_constant(sample)
            mod = sm.OLS(drug_response_data.iloc[:, drug_response_col].values, sample)
            try:
                res = mod.fit()
                # pvalues[1] is the gene-expression coefficient (index 0 is
                # the constant added above).
                pvalue[i, 0] = res.pvalues[1]
            except ValueError:
                # Degenerate fit: treat the gene as non-predictive.
                pvalue[i, 0] = 1
        gid1 = np.argsort(pvalue[:, 0])[:num_predictive_gene]
    # Mode (3): rank genes per drug, then grow an equal-sized prefix of each
    # drug's ranking until the union reaches num_predictive_gene genes.
    elif (
        prediction_power_measure == "pearson"
        or prediction_power_measure == "mutual_info"
    ):
        gene_rank = np.empty((len(drugs), source_data.shape[1]))
        gene_rank.fill(np.nan)
        gene_rank = pd.DataFrame(gene_rank, index=drugs)
        for d in range(len(drugs)):
            idd = np.where(drug_response_data.iloc[:, drug_col] == drugs[d])[0]
            response_d = drug_response_data.iloc[idd, :]
            temp_rank = coxen_single_drug_gene_selection(
                source_data,
                target_data,
                response_d,
                drug_response_col,
                tumor_col,
                prediction_power_measure,
                num_predictive_gene=None,
                generalization_power_measure=None,
                num_generalizable_gene=None,
                multi_drug_mode=True,
            )
            gene_rank.iloc[d, : len(temp_rank)] = temp_rank
        for i in range(
            int(np.ceil(num_predictive_gene / len(drugs))), source_data.shape[1] + 1
        ):
            gid1 = np.unique(
                np.reshape(gene_rank.iloc[:, :i].values, (1, gene_rank.shape[0] * i))[
                    0, :
                ]
            )
            # Drop the NaN padding before counting unique genes.
            gid1 = gid1[np.where(np.invert(np.isnan(gid1)))[0]]
            if len(gid1) >= num_predictive_gene:
                break
        gid1 = gid1.astype(np.int64)
    # keep only predictive genes for source and target data
    source_data = source_data.iloc[:, gid1]
    target_data = target_data.iloc[:, gid1]
    num_generalizable_gene = int(min(num_generalizable_gene, len(gid1)))
    # perform the second step of COXEN approach to select generalizable genes among the predictive genes
    gid2 = generalization_feature_selection(
        source_data.values,
        target_data.values,
        generalization_power_measure,
        num_generalizable_gene,
    )
    # Map the selected positions back through both stages to the original
    # feature indices.
    indices = std_id[gid1[gid2]]
    return np.sort(indices)
def generate_gene_set_data(
    data,
    genes,
    gene_name_type="entrez",
    gene_set_category="c6.all",
    metric="mean",
    standardize=False,
    data_dir="../../Data/examples/Gene_Sets/MSigDB.v7.0/",
):
    """
    This function generates genomic data summarized at the gene set level.

    Parameters:
    -----------
    data: numpy array or pandas data frame of numeric values, with a shape of [n_samples, n_features].
    genes: 1-D array or list of gene names with a length of n_features. It indicates which gene a genomic
        feature belongs to.
    gene_name_type: string, indicating the type of gene name used in genes. 'entrez' indicates Entrez gene ID and
        'symbols' indicates HGNC gene symbol. Default is 'entrez'.
    gene_set_category: string, indicating the gene sets for which data will be calculated. 'c2.cgp' indicates gene sets
        affected by chemical and genetic perturbations; 'c2.cp.biocarta' indicates BioCarta gene sets; 'c2.cp.kegg'
        indicates KEGG gene sets; 'c2.cp.pid' indicates PID gene sets; 'c2.cp.reactome' indicates Reactome gene sets;
        'c5.bp' indicates GO biological processes; 'c5.cc' indicates GO cellular components; 'c5.mf' indicates
        GO molecular functions; 'c6.all' indicates oncogenic signatures. Default is 'c6.all'.
    metric: string, indicating the way to calculate gene-set-level data. 'mean' calculates the mean of gene
        features belonging to the same gene set. 'sum' calculates the summation of gene features belonging
        to the same gene set. 'max' calculates the maximum of gene features. 'min' calculates the minimum
        of gene features. 'abs_mean' calculates the mean of absolute values. 'abs_maximum' calculates
        the maximum of absolute values. Default is 'mean'.
    standardize: boolean, indicating whether to standardize features before calculation. Standardization transforms
        each feature to have a zero mean and a unit standard deviation.
    data_dir: string, directory containing the MSigDB .gmt gene set files.

    Returns:
    --------
    gene_set_data: a data frame of calculated gene-set-level data. Column names are the gene set names.
    """
    sample_name = None
    if isinstance(data, pd.DataFrame):
        sample_name = data.index
        data = data.values
    elif not isinstance(data, np.ndarray):
        print("Input data must be a numpy array or pandas data frame")
        sys.exit(1)

    if standardize:
        scaler = StandardScaler()
        data = scaler.fit_transform(data)

    genes = [str(i) for i in genes]

    if gene_name_type == "entrez":
        gene_set_category = gene_set_category + ".v7.0.entrez.gmt"
    if gene_name_type == "symbols":
        gene_set_category = gene_set_category + ".v7.0.symbols.gmt"

    # Parse the .gmt file: each line is
    # "set_name<TAB>description<TAB>gene1<TAB>gene2...". Use a context
    # manager so the file handle is always closed (it previously leaked).
    gene_sets = {}
    with open(data_dir + gene_set_category, "r") as f:
        for line in f:
            temp = line.split("\n")[0].split("\t")
            gene_sets[temp[0]] = temp[2:]

    # One column per gene set; sets with no matching gene stay NaN and are
    # dropped at the end.
    gene_set_data = np.empty((data.shape[0], len(gene_sets)))
    gene_set_data.fill(np.nan)
    gene_set_names = np.array(list(gene_sets.keys()))
    for i in range(len(gene_set_names)):
        # Feature columns whose gene belongs to the i-th gene set.
        idi = np.where(np.isin(genes, gene_sets[gene_set_names[i]]))[0]
        if len(idi) > 0:
            if metric == "sum":
                gene_set_data[:, i] = np.nansum(data[:, idi], axis=1)
            elif metric == "max":
                gene_set_data[:, i] = np.nanmax(data[:, idi], axis=1)
            elif metric == "min":
                gene_set_data[:, i] = np.nanmin(data[:, idi], axis=1)
            elif metric == "abs_mean":
                gene_set_data[:, i] = np.nanmean(np.absolute(data[:, idi]), axis=1)
            elif metric == "abs_maximum":
                gene_set_data[:, i] = np.nanmax(np.absolute(data[:, idi]), axis=1)
            else:  # 'mean'
                gene_set_data[:, i] = np.nanmean(data[:, idi], axis=1)

    if sample_name is None:
        gene_set_data = pd.DataFrame(gene_set_data, columns=gene_set_names)
    else:
        gene_set_data = pd.DataFrame(
            gene_set_data, columns=gene_set_names, index=sample_name
        )
    # Drop gene sets for which no feature matched (all-NaN columns).
    keep_id = np.where(np.sum(np.invert(pd.isna(gene_set_data)), axis=0) > 0)[0]
    gene_set_data = gene_set_data.iloc[:, keep_id]

    return gene_set_data
# Auxiliary functions of ComBat start here ####################
def design_mat(mod, numerical_covariates, batch_levels):
    """
    Build the ComBat design matrix: one-hot batch indicator columns first,
    then categorical covariate columns, then numerical covariate columns.
    """
    # Pass the batch levels explicitly so the indicator columns come out in
    # the same order the rest of the script uses.
    design = patsy.dmatrix(
        "~ 0 + C(batch, levels=%s)" % str(batch_levels), mod, return_type="dataframe"
    )
    covariates = mod.drop(["batch"], axis=1)
    numerical_covariates = list(numerical_covariates)
    sys.stdout.write("found %i batches\n" % design.shape[1])
    # Any covariate column not flagged as numerical is treated as a factor.
    other_cols = []
    for idx, col in enumerate(covariates.columns):
        if idx not in numerical_covariates:
            other_cols.append(col)
    design = pd.concat((design, covariates[other_cols]), axis=1)
    if numerical_covariates is not None:
        sys.stdout.write(
            "found %i numerical covariates...\n" % len(numerical_covariates)
        )
        for i, nC in enumerate(numerical_covariates):
            cname = covariates.columns[nC]
            sys.stdout.write("\t{0}\n".format(cname))
            design[cname] = covariates[covariates.columns[nC]]
    sys.stdout.write("found %i categorical variables:" % len(other_cols))
    sys.stdout.write("\t" + ", ".join(other_cols) + "\n")
    return design
def it_sol(sdat, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001):
    """
    Iteratively solve for one batch's empirical-Bayes adjustment parameters,
    alternating between the posterior mean of the location effect (postmean)
    and the posterior variance of the scale effect (postvar) until the
    largest relative change falls below `conv`.

    Parameters (standard ComBat notation):
    sdat: standardized data of the batch (features x samples); may contain NaN.
    g_hat, d_hat: per-feature initial estimates of the location/scale effects.
    g_bar, t2: mean and variance hyperparameters of the location prior.
    a, b: hyperparameters of the scale prior (from aprior/bprior).
    conv: relative-change convergence threshold.

    Returns:
    adjust: tuple (g_new, d_new) with the converged per-feature adjustments.
    """
    # Number of non-missing samples per feature.
    n = (1 - np.isnan(sdat)).sum(axis=1)
    g_old = g_hat.copy()
    d_old = d_hat.copy()
    change = 1
    count = 0
    while change > conv:
        # Location update given the current scale estimate.
        g_new = postmean(g_hat, g_bar, n, d_old, t2)
        # Per-feature sum of squared residuals around the new location.
        sum2 = (
            (
                sdat
                - np.dot(
                    g_new.values.reshape((g_new.shape[0], 1)),
                    np.ones((1, sdat.shape[1])),
                )
            )
            ** 2
        ).sum(axis=1)
        d_new = postvar(sum2, n, a, b)
        # Largest relative change across both parameter vectors.
        change = max(
            (abs(g_new - g_old) / g_old).max(), (abs(d_new - d_old) / d_old).max()
        )
        g_old = g_new  # .copy()
        d_old = d_new  # .copy()
        count = count + 1
    adjust = (g_new, d_new)
    return adjust
def aprior(gamma_hat):
    """Method-of-moments estimate of the shape hyperparameter of the scale prior."""
    mean = gamma_hat.mean()
    variance = gamma_hat.var()
    return (2 * variance + mean**2) / variance
def bprior(gamma_hat):
    """Method-of-moments estimate of the scale hyperparameter of the scale prior."""
    mean = gamma_hat.mean()
    variance = gamma_hat.var()
    return (mean * variance + mean**3) / variance
def postmean(g_hat, g_bar, n, d_star, t2):
    """Posterior mean of the location effect: precision-weighted blend of the
    observed estimate g_hat and the prior mean g_bar."""
    numerator = t2 * n * g_hat + d_star * g_bar
    denominator = t2 * n + d_star
    return numerator / denominator
def postvar(sum2, n, a, b):
    """Posterior variance of the scale effect under the inverse-gamma prior."""
    return (sum2 / 2.0 + b) / (n / 2.0 + a - 1.0)
# Auxiliary functions of ComBat end here ####################
def combat_batch_effect_removal(
    data, batch_labels, model=None, numerical_covariates=None
):
    """
    This function corrects for batch effect in data using the ComBat
    empirical-Bayes procedure.

    Parameters:
    -----------
    data: pandas data frame of numeric values, with a size of (n_features, n_samples)
    batch_labels: pandas series, with a length of n_samples. It should provide the batch labels of samples.
        Its indices are the same as the column names (sample names) in "data".
    model: an object of patsy.design_info.DesignMatrix. It is a design matrix describing the covariate
        information on the samples that could cause batch effects. If not provided, this function
        will attempt to coarsely correct just based on the information provided in "batch".
    numerical_covariates: a list of the names of covariates in "model" that are numerical rather than
        categorical.

    Returns:
    --------
    corrected : pandas data frame of numeric values, with a size of (n_features, n_samples). It is
        the data with batch effects corrected.
    """
    # Normalize the covariate inputs into a model DataFrame with a "batch" column.
    if isinstance(numerical_covariates, str):
        numerical_covariates = [numerical_covariates]
    if numerical_covariates is None:
        numerical_covariates = []

    if model is not None and isinstance(model, pd.DataFrame):
        model["batch"] = list(batch_labels)
    else:
        model = pd.DataFrame({"batch": batch_labels})

    batch_items = model.groupby("batch").groups.items()
    batch_levels = [k for k, v in batch_items]
    batch_info = [v for k, v in batch_items]
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))

    # drop intercept
    # (Series.iteritems() was removed in pandas 2.0; .items() is the
    # equivalent available in all supported versions.)
    drop_cols = [
        cname for cname, inter in ((model == 1).all()).items() if inter
    ]
    drop_idxs = [list(model.columns).index(cdrop) for cdrop in drop_cols]
    model = model[[c for c in model.columns if c not in drop_cols]]
    numerical_covariates = [
        list(model.columns).index(c) if isinstance(c, str) else c
        for c in numerical_covariates
        if c not in drop_cols
    ]

    design = design_mat(model, numerical_covariates, batch_levels)

    sys.stdout.write("Standardizing Data across genes.\n")
    # Ordinary least squares fit of the full design; the first n_batch rows
    # of B_hat are the per-batch means.
    B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
    grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
    var_pooled = np.dot(
        ((data - np.dot(design, B_hat).T) ** 2),
        np.ones((int(n_array), 1)) / int(n_array),
    )

    # Standardized data: remove the grand mean and covariate effects, then
    # scale by the pooled standard deviation.
    stand_mean = np.dot(
        grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array)))
    )
    tmp = np.array(design.copy())
    tmp[:, :n_batch] = 0
    stand_mean += np.dot(tmp, B_hat).T
    s_data = (data - stand_mean) / np.dot(
        np.sqrt(var_pooled), np.ones((1, int(n_array)))
    )

    sys.stdout.write("Fitting L/S model and finding priors\n")
    batch_design = design[design.columns[:n_batch]]
    gamma_hat = np.dot(
        np.dot(la.inv(np.dot(batch_design.T, batch_design)), batch_design.T), s_data.T
    )
    delta_hat = []
    for i, batch_idxs in enumerate(batch_info):
        delta_hat.append(s_data[batch_idxs].var(axis=1))
    # Hyperparameters of the empirical-Bayes priors.
    gamma_bar = gamma_hat.mean(axis=1)
    t2 = gamma_hat.var(axis=1)
    a_prior = list(map(aprior, delta_hat))
    b_prior = list(map(bprior, delta_hat))

    sys.stdout.write("Finding parametric adjustments\n")
    # Shrink each batch's location/scale estimates toward the prior.
    gamma_star, delta_star = [], []
    for i, batch_idxs in enumerate(batch_info):
        temp = it_sol(
            s_data[batch_idxs],
            gamma_hat[i],
            delta_hat[i],
            gamma_bar[i],
            t2[i],
            a_prior[i],
            b_prior[i],
        )
        gamma_star.append(temp[0])
        delta_star.append(temp[1])

    sys.stdout.write("Adjusting data\n")
    bayesdata = s_data
    gamma_star = np.array(gamma_star)
    delta_star = np.array(delta_star)
    for j, batch_idxs in enumerate(batch_info):
        # Remove the shrunken batch effects, then rescale.
        dsq = np.sqrt(delta_star[j, :])
        dsq = dsq.reshape((len(dsq), 1))
        denom = np.dot(dsq, np.ones((1, n_batches[j])))
        numer = np.array(
            bayesdata[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star).T
        )
        bayesdata[batch_idxs] = numer / denom

    # Undo the standardization to return data on the original scale.
    vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
    bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean

    return bayesdata
|
10,268 | 0b1b8da0467298471c3f9487b753c801a3c6f514 | import pymssql #引入pymssql模块
import xlwt
##==============Dump the table list of the first database, 10.42.90.92========================
connect = pymssql.connect('10.42.90.92', 'fis', 'fis', 'cab')  # server, user, password, database
print("连接10.42.90.92成功!")
crsr = connect.cursor()
# select name from sysobjects where xtype='u'
# select * from sys.tables
# Query all table names of the database.
# cursor = connect.cursor()  # create a cursor; SQL statements are executed through it
sql = "select * from sys.tables"
crsr.execute(sql)  # execute the SQL statement
row = crsr.fetchone()  # read the first result row
alldata = crsr.fetchall()
while row:  # consume the remaining result rows one by one
## print("Name=%s, Sex=%s" % (row[0],row[1]))  # print a row
    row = crsr.fetchone()
# Write the result set into an Excel workbook.
book = xlwt.Workbook()
sheet1 = book.add_sheet('10.42.90.92')
fields = [field[0] for field in crsr.description]  # all column names of the result set
print(fields)
for col,field in enumerate(fields):
    print(col,field)
    sheet1.write(0,col,field)
print ("========完成写表头=========")
row = 1
for data in alldata:
    #print(data)
##    print ("d%",row)
    for col,field in enumerate(data):
        sheet1.write(row,col,field)
    #print(type(row))
    #print(row)
    #print(col)
    #print(field)
    row += 1
crsr.close()
print("======完成=10.42.90.92========")
##==============Dump the table list of the second database========================
# NOTE(review): this second block reconnects to the SAME server (10.42.90.92)
# and recreates `book`, discarding the workbook filled above — only this
# second pass ends up in the saved .xls file. Presumably a different server
# was intended here; verify before relying on the output.
connect = pymssql.connect('10.42.90.92', 'fis', 'fis', 'cab')  # server, user, password, database
print("连接成功!")
crsr = connect.cursor()
# select name from sysobjects where xtype='u'
# select * from sys.tables
# Query all table names of the database.
# cursor = connect.cursor()  # create a cursor; SQL statements are executed through it
sql = "select * from sys.tables"
crsr.execute(sql)  # execute the SQL statement
row = crsr.fetchone()  # read the first result row
alldata = crsr.fetchall()
while row:  # consume the remaining result rows one by one
## print("Name=%s, Sex=%s" % (row[0],row[1]))  # print a row
    row = crsr.fetchone()
# Write the result set into an Excel workbook.
book = xlwt.Workbook()
sheet1 = book.add_sheet('10.42.90.92')
fields = [field[0] for field in crsr.description]  # all column names of the result set
print(fields)
for col,field in enumerate(fields):
    print(col,field)
    sheet1.write(0,col,field)
print ("========完成写表头=========")
row = 1
for data in alldata:
    #print(data)
##    print ("d%",row)
    for col,field in enumerate(data):
        sheet1.write(row,col,field)
    #print(type(row))
    #print(row)
    #print(col)
    #print(field)
    row += 1
crsr.close()
print("======完成=10.42.90.92========")
book.save("database_sqlseve_table_list.xls")
print("========完成写入xls=========")
connect.close()
|
10,269 | d64480d370113edf14f7fda9f9551604af779439 | from django.conf.urls import url
from django.conf import settings
from .views import fill_clients, fill_accounts, fill_account_analytics, fill_products, fill_product_analytics, fill_product_track_record_evolution, fill_account_track_record_composition, fill_account_track_record_evolution
# Each route loads one CSV fixture from fill/datas/ through its dedicated view.
_CSV_ROUTES = [
    (r'^clients/$', fill_clients, 'ClientsTable.csv'),
    (r'^accounts/$', fill_accounts, 'AccountsTable.csv'),
    (r'^aa/$', fill_account_analytics, 'AccountAnalytics.csv'),
    (r'^products/$', fill_products, 'ProductsTable.csv'),
    (r'^pa/$', fill_product_analytics, 'ProductAnalytics.csv'),
    (r'^ptre/$', fill_product_track_record_evolution, 'ProductTrackRecordEvolution.csv'),
    (r'^atrc/$', fill_account_track_record_composition, 'AccountTrackRecordCompositionAmounts.csv'),
    (r'^atre/$', fill_account_track_record_evolution, 'AccountTrackRecordEvolution.csv'),
]

urlpatterns = [
    url(pattern, view, {'file': settings.BASE_DIR + '/fill/datas/' + filename})
    for pattern, view, filename in _CSV_ROUTES
]
|
10,270 | f5c41d4c9a974da27a39e1f6936f2895c7a9f447 | n=int(input())
# Read the n space-separated values (n itself is read on the preceding line)
# and count the number of sweeps needed to consume them all.
ss=input()
a=[int(i) for i in ss.split(' ')]
h=[0 for i in range(100)]  # h[v] = number of occurrences of value v (values assumed < 100)
t=-1
for i in range(n):
    t=max(a[i],t)  # track the maximum value seen
    h[a[i]]+=1
ans=0
# Each sweep walks values 0..t in increasing order; a copy of value i may be
# taken only while at least i items were already taken in this sweep (i >= cnt).
# Count how many sweeps it takes to exhaust all n items.
while n:
    cnt=0
    for i in range(t+1):
        while h[i] and i>=cnt:
            h[i]-=1
            cnt+=1
            n-=1
    ans+=1
print(ans)
10,271 | 259b25eee48c1670e3c28d70b663a5123574d66f | # coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlSlemProductfamilie(KeuzelijstField):
    """The possible product families ("De mogelijke productfamiles")."""
    naam = 'KlSlemProductfamilie'
    label = 'Productfamilies'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlSlemProductfamilie'
    definition = 'De mogelijke productfamiles.'
    status = 'ingebruik'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlSlemProductfamilie'
    # All four options share the same structure, so the option map is built
    # from the family codes instead of spelling out each entry.
    options = {
        code: KeuzelijstWaarde(invulwaarde=code,
                               label=code,
                               status='ingebruik',
                               definitie='Productfamilie ' + code,
                               objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSlemProductfamilie/' + code)
        for code in ('1', '2', '5', '6')
    }

    @classmethod
    def create_dummy_data(cls):
        """Return a random option value whose status is 'ingebruik' (in use)."""
        in_use_values = [option.invulwaarde
                         for option in cls.options.values()
                         if option.status == 'ingebruik']
        return random.choice(in_use_values)
|
10,272 | 73d2b2cba0c76020cbd22b2783b7eeeec9f0123a | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-11 15:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``importe_aprob`` column (presumably "approved amount") to the rvgl model."""

    dependencies = [
        ('originacion', '0004_rvgl'),
    ]

    operations = [
        # Monetary-style field: up to 10 digits, 2 decimal places, default 0.
        migrations.AddField(
            model_name='rvgl',
            name='importe_aprob',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
        ),
    ]
|
10,273 | 9481b4b2f24b440fb01e863d8bd44e1c760dcaed | d = "Привет!".upper()
print(d)  # the greeting defined above, upper-cased
# Replace every 'a' with '@' in the German greeting.
e = "Hallo!".replace("a", "@")
print(e)  # -> "H@llo!"
|
10,274 | e999d8d23215c6b2bece36753f107be96af9e855 | ################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import fields, collections, bcrypt
from twisted.internet.defer import inlineCallbacks, returnValue
from fields import PrimaryKeyField
from query import SelectQuery, \
InsertQuery, \
AddQuery, \
RemoveQuery, \
UpdateQuery, \
DeleteQuery
"""
Metaclass enables to have a set of variable for each class Model.
This set of variable is represented by the class ModelOptions
"""
_METACLASS_ = '_metaclass_helper_'
def with_metaclass(meta, base=object):
    """Return an anonymous intermediate class built by metaclass `meta`.

    Inheriting from the returned class applies `meta` to the subclass without
    using version-specific metaclass syntax; the helper name `_METACLASS_`
    lets the metaclass recognize (and skip) this placeholder class.
    """
    return meta(_METACLASS_, (base,), {})
class ModelOptions(object):
    """
    Represents all the options associated to a model.

    They are accessible through the `_meta` attribute of a Model object.
    """
    def __init__(self, cls,
                 table_name=None,
                 database=None,
                 primary_key=True,
                 on_conflict=None,
                 unique=None,
                 many_to_many=False,
                 order=None,
                 propagate=False,
                 hypertable=None):
        # NOTE: the list parameters previously defaulted to shared mutable []
        # objects; they now default to None and are materialized per instance
        # so one model's options can never leak into another's.

        # Model class
        self.model_class = cls
        # Model name
        self.name = cls.__name__.lower()
        # Table name. Either set by the user or derived from the model name
        self.table_name = table_name.lower() if table_name else self.name
        # Database to use
        self.database = database
        # Does the model have a primary key. If so it will be set by Kameleon
        self.primary_key = primary_key
        # Conflict-resolution behavior
        self.on_conflict = [] if on_conflict is None else on_conflict
        # List of fields whose association should be unique.
        # XXX #3 Today it receives a string.
        # It should be receiving a list of fields
        self.unique = [] if unique is None else unique
        # Is this model a middle table for a many-to-many link
        self.many_to_many = many_to_many
        # Map of links represented by this table. Filled by the class
        self.links = {}
        # Order to respect. Useful if table not created by the ORM
        self.order = [] if order is None else order
        # Should any change on a model be propagated
        self.propagate = propagate
        # Should the table change to hyper table
        self.hypertable = [] if hypertable is None else hypertable

        # Map of fields
        self.fields = {}
        # Map of reverse relation fields
        self.reverse_fields = {}
        # List of fields sorted in order
        self.sorted_fields = []
        # Fields name sorted in order
        self.sorted_fields_names = []
        # Map of direct relation
        self.rel = {}
        # Map of reverse relation
        self.reverse_rel = {}
        # Map of related classes and the field associated
        self.rel_class = {}

    def add_field(self, field):
        """
        Register `field` on this model, keeping the field map and the sorted
        field lists in sync. A duplicate field name is ignored with a warning.
        """
        if field.name in self.fields:
            print("WARNING: Field {0} already in model {1}"
                  .format(field.name, self.table_name))
            return

        self.fields[field.name] = field
        self.sorted_fields.append(field)
        self.sorted_fields_names.append(field.name)
class BaseModel(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
if name == _METACLASS_ or bases[0].__name__ == _METACLASS_:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
# Get all variable defined in the meta class of each model.
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
# Create Model class and its options
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
# If many to many initialize the links between the two tables.
if cls._meta.many_to_many:
links = []
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
links.append((attr, attrs[attr]))
else:
for key, value in attrs.items():
if not key.startswith('_'):
links.append((key, value))
links[0][1].related_name = links[1][0]
links[0][1].add_to_model(cls, links[0][0])
links[1][1].related_name = links[0][0]
links[1][1].add_to_model(cls, links[1][0])
# Else it is a basic model.
else:
# If primary key
if cls._meta.primary_key:
# Create primary key field
cls.id = fields.PrimaryKeyField()
# Add field to the model
cls.id.add_to_model(cls, PrimaryKeyField.name)
# Add each field to the model
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
attrs[attr].add_to_model(cls, attr)
else:
for key, value in attrs.items():
if not key.startswith('_'):
value.add_to_model(cls, key)
return cls
class Model(with_metaclass(BaseModel)):
"""
Represents a model in the database with all its fields and current values
"""
def __init__(self, **kwargs):
# Map of all fields and associated values
self.dictValues = {}
# Initialize each field. If no value set it to None
for k, v in self._meta.fields.items():
if k in kwargs:
self.dictValues[k] = kwargs[k]
setattr(self, k, kwargs[k])
else:
self.dictValues[k] = None
setattr(self, k, None)
# Set primary key to None if no value provided
if self._meta.primary_key and not "id" in self.dictValues:
self.dictValues["id"] = None
object.__setattr__(self, "id", None)
# Initialize reverse relation as empty list.
for field in self._meta.reverse_rel:
object.__setattr__(self, field, [])
if self._meta.propagate and self._meta.database.subscribe:
self._subscribe()
def __setattr__(self, name, value):
"""
Overide __setattr__ to update dict value and field value at once
"""
object.__setattr__(self, name, value)
if name in self.dictValues: # If updating a field value
if self._meta.fields[name].salt: # field is salt
# If field is already salt do nothing.
# XXX Could create a security issue. What happend is value
# starts with $2b$ but it's not encrypted. Not critical for now
if not ("$2b$" in value and value[:4] == "$2b$"):
value = bcrypt.hashpw(value.encode('utf8'), bcrypt.gensalt())
object.__setattr__(self, name, value)
# If value is an instance of model class and has a relation.
# Append it to the corresponding field list
if hasattr(value, "_meta") and self.isForeignKey(self._meta.fields[name]):
self.dictValues[name] = getattr(value, self._meta.fields[name].reference.name)
return
self.dictValues[name] = value
@classmethod
def isForeignKey(cls, _field):
"""
Is the field an instance of ForeignKeyField
"""
return isinstance(_field, fields.ForeignKeyField)
@classmethod
def isReferenceField(cls, _field):
"""
Is the field an instance of ReferenceField
"""
return isinstance(_field, fields.ReferenceField)
@classmethod
@inlineCallbacks
def create_table(cls, *args, **kwargs):
"""
Creates a table in the database.
"""
init = cls._meta.database.create_table_title(cls._meta.table_name)
i = 1
fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)
for field in fields:
field_string = field[1].create_field(field[0])
if i == len(fields):
if cls._meta.unique:
init = cls._meta.database.create_unique(init, cls._meta.unique)
init = cls._meta.database.create_table_field_end(init, field_string)
if cls._meta.hypertable:
init = cls._meta.database.create_hypertable(init,
cls._meta)
else:
init = cls._meta.database.create_table_field(init, field_string)
i+=1
yield cls._meta.database.runOperation(init)
@classmethod
@inlineCallbacks
def delete_table(cls, *args, **kwargs):
"""
Deletes table from database
"""
operation = cls._meta.database.delete_table(cls._meta.table_name)
yield cls._meta.database.runOperation(operation)
@classmethod
@inlineCallbacks
def insert(cls, values):
"""
Insert a row to the table with the given values
"""
result = yield InsertQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def update(cls, values):
"""
Update values in row
"""
result = yield UpdateQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def create(cls, **kwargs):
"""
Instanciates a model class object and save it into the database.
"""
inst = cls(**kwargs)
yield inst.save()
returnValue(inst)
@classmethod
def all(cls):
"""
Get all rows from a table
"""
return SelectQuery(cls)
@classmethod
@inlineCallbacks
def add(cls, obj1, obj2):
"""
Add a link between two model
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Add called on non many to many model")
query = AddQuery(cls, obj1, obj2)
yield query.execute()
if not getattr(obj1, obj2._meta.name):
setattr(obj1, obj2._meta.name, [obj2])
else:
getattr(obj1, obj2._meta.name).append(obj2)
if not getattr(obj2, obj1._meta.name):
setattr(obj2, obj1._meta.name, [obj1])
else:
getattr(obj2, obj1._meta.name).append(obj1)
@classmethod
@inlineCallbacks
def remove(cls, obj1, obj2):
"""
Remove a link between two model
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Remove called on non many to many model")
query = RemoveQuery(cls, obj1, obj2)
yield query.execute()
if obj2 in getattr(obj1, obj2._meta.name):
getattr(obj1, obj2._meta.name).remove(obj2)
if obj1 in getattr(obj2, obj1._meta.name):
getattr(obj2, obj1._meta.name).remove(obj1)
@classmethod
def delete(cls):
"""
Delete a row in the database
"""
query_instance = DeleteQuery(cls)
return query_instance
@inlineCallbacks
def save(self):
"""
Save a row
"""
# For each field get the value to insert
values = {key : self._meta.fields[key].insert_format(value) for key, value in self.dictValues.items()}
if self._meta.primary_key:
# If an id exist then we should update
if self.id:
pk = yield self.update(values)
if self._meta.propagate:
self._meta.database.propagate(self)
# Else it means we should create the row
else:
# XXX To Do: What happen if insert failed. What should we return
del values["id"]
pk = yield self.insert(values)
# Update id value
self.id = pk
else:
yield self.insert(values)
def _subscribe(self):
self._meta.database.connection.subscribe(self.propagate_update, u"wamp.postgresql.propagadate.{0}".format(self._meta.name))
def propagate_update(self, dictValues):
if dictValues["id"] == self.id:
for field, value in dictValues.iteritems():
self.__setattr__(field, value)
|
10,275 | 4aea5ce6c195f5b9a72299166118bb96c984b8a5 | #importing serial module(need pyserial)
import serial
try:
arduino = serial.Serial(timeout = 1, baudrate = 9600)
except:
print('Check port')
rawData = []
def clean(L):
newl = []
for i in range(len(L)):
temp = L[i][2:]
newl.append(temp[:-5])
return newl
cleanData = clean(rawData)
#write to file function
def write(L):
file = open("data.txt", mode = 'w')
for i in range(len(L)):
file.write(L[i] + '\n')
file.close()
#currently receives data indefinitely
while True:
rawData.append(str(arduino.readline()))
write(cleanData)
|
10,276 | 623037c96b2a2f97fc218432c5621c311986dfd1 | from PIL import Image
import re
import os
class Product:
def __init__(self, name, price):
self.name = name
self.price = price
def add_thumbnail(self, image_path, size):
image = Image.open(os.getcwd() + "/" + image_path)
name = re.search('(?<=\/)\w+', image_path).group(0)
image.thumbnail(size)
thumb_name = name + str(size[0]) + "x" + str(size[1]) + "." + image.format
image.save(os.getcwd() + "/thumbnails/" + thumb_name)
|
10,277 | dcdfe6937f33fb444aab8dce19cad7cfe91bb210 | import openpyxl,os
wb = openpyxl.Workbook()
print(wb.sheetnames)
sheet = wb['Sheet']
sheet['A1'] = 32
sheet['A2'] = 'hello'
wb.save('example2.xlsx')
sheet2= wb.create_sheet()
print(wb.sheetnames) |
10,278 | b12cd6667c8de6dde35f1f00442b1cba0e965caf | #!flask/bin/python3.7
from flask import Flask, jsonify, abort, make_response
app = Flask(__name__)
devices = [
{
'id': 1,
'description': u'Keith\'s Desktop',
'ip': u'192.168.1.182'
},
{
'id': 2,
'description': u'Keith\'s Macbook Air',
'ip': u'192.168.1.15'
}
]
@app.route('/all_devices', methods=['GET'])
def get_all_devices():
return jsonify({'all_devices': devices})
@app.route('/device/<int:device_id>', methods=['GET'])
def get_device(device_id):
device = [device for device in devices if device['id'] == device_id]
if len(device) == 0:
abort(404)
return jsonify({'device': device[0]})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run(debug=True)
|
10,279 | 8b9f86094b652776ede67f32117440ed8f456b47 | import torch
from pyro.distributions import (
Independent, RelaxedBernoulliStraightThrough
)
from pyro.distributions.torch import RelaxedOneHotCategorical # noqa: F401
from torch import nn
from torch.distributions.utils import clamp_probs, broadcast_all
from counterfactualms.distributions.deep import DeepConditional
class DeepRelaxedBernoulli(DeepConditional):
def __init__(self, backbone:nn.Module, temperature:float=2./3.):
super().__init__()
self.backbone = backbone
self.temperature = temperature
def forward(self, z):
logits = self.backbone(z)
return logits
def predict(self, z) -> Independent:
logits = self(z)
temperature = torch.tensor(self.temperature, device=z.device, requires_grad=False)
event_ndim = len(logits.shape[1:]) # keep only batch dimension
return RelaxedBernoulliStraightThrough(temperature, logits=logits).to_event(event_ndim)
class DeepRelaxedOneHotCategoricalStraightThrough2D(DeepConditional):
def __init__(self, backbone: nn.Module, temperature:float=2./3.):
super().__init__()
self.backbone = backbone
self.temperature = temperature
def forward(self, z):
logits = self.backbone(z)
return logits
def predict(self, z) -> Independent:
logits = self(z)
temperature = torch.tensor(self.temperature, device=z.device, requires_grad=False)
# keep only batch dimension; have to subtract 1 b/c way relaxedonehotcategorical setup
event_ndim = len(logits.shape[1:]) - 1
return RelaxedOneHotCategoricalStraightThrough2D(temperature, logits=logits).to_event(event_ndim-1)
class RelaxedOneHotCategorical2D(RelaxedOneHotCategorical):
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
gumbels = -((-(uniforms.log())).log())
scores = (self.logits + gumbels) / self.temperature
return scores - scores.logsumexp(dim=1, keepdim=True)
def log_prob(self, value):
K = self._categorical._num_events
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
self.temperature.log().mul(-(K - 1)))
score = logits - value.mul(self.temperature)
score = (score - score.logsumexp(dim=1, keepdim=True)).sum((1,2,3))
return score + log_scale
class RelaxedOneHotCategoricalStraightThrough2D(RelaxedOneHotCategorical2D):
event_dim = 3
def rsample(self, sample_shape=torch.Size()):
soft_sample = super().rsample(sample_shape)
soft_sample = clamp_probs(soft_sample)
hard_sample = QuantizeCategorical2D.apply(soft_sample)
return hard_sample
def log_prob(self, value):
value = getattr(value, '_unquantize', value)
return super().log_prob(value)
class QuantizeCategorical2D(torch.autograd.Function):
@staticmethod
def forward(ctx, soft_value):
argmax = soft_value.max(1)[1]
hard_value = torch.zeros_like(soft_value)
hard_value._unquantize = soft_value
if argmax.dim() < hard_value.dim():
argmax = argmax.unsqueeze(1)
return hard_value.scatter_(1, argmax, 1)
@staticmethod
def backward(ctx, grad):
return grad
if __name__ == "__main__":
net = DeepRelaxedBernoulli(nn.Conv2d(2,2,1), 1)
x = torch.randn(5, 2, 28, 28)
out = net.predict(x)
samp = out.rsample()
print('Bernoulli')
print(samp.shape)
print(out.batch_shape, out.event_shape)
print(out.event_dim)
net = DeepRelaxedOneHotCategoricalStraightThrough2D(nn.Conv2d(2,2,1), 1)
out = net.predict(x)
samp = out.rsample()
print('OneHot2D')
print(samp.shape)
print(out.batch_shape, out.event_shape)
print(out.event_dim)
|
10,280 | c05e4d33ed802cdc74d3a432417e3b66ed042dad | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
class V1RunKind(polyaxon_sdk.V1RunKind):
eager_values = {
polyaxon_sdk.V1RunKind.MATRIX,
}
default_runtime_values = {
polyaxon_sdk.V1RunKind.JOB,
polyaxon_sdk.V1RunKind.SERVICE,
polyaxon_sdk.V1RunKind.MPIJOB,
polyaxon_sdk.V1RunKind.TFJOB,
polyaxon_sdk.V1RunKind.PYTORCHJOB,
polyaxon_sdk.V1RunKind.MXJOB,
polyaxon_sdk.V1RunKind.XGBJOB,
polyaxon_sdk.V1RunKind.NOTIFIER,
polyaxon_sdk.V1RunKind.WATCHDOG,
polyaxon_sdk.V1RunKind.TUNER,
polyaxon_sdk.V1RunKind.CLEANER,
polyaxon_sdk.V1RunKind.BUILDER,
}
class V1CloningKind(polyaxon_sdk.V1CloningKind):
pass
class V1PipelineKind(polyaxon_sdk.V1PipelineKind):
pass
class V1RunEdgeKind(polyaxon_sdk.V1RunEdgeKind):
pass
|
10,281 | c0aaacb3f6961f5b6d12b6aacc2eb9a4bf2f6827 | import pytest
from twindb_backup.destination.gcs import GCS
@pytest.fixture
def gs():
return GCS(
bucket='test-bucket',
gc_credentials_file='foo'
)
|
10,282 | 5be342f5a24437ec1570b44ce54b473e38229646 | def dupfiles_count(input_dict):
count = 0
for val in input_dict.values():
count += len(val)
return count
|
10,283 | 945db3ea014f4828af2a1a58fbb1db491cbe30a7 | lat = 51
lon = 4
startyear = 2015
endyear = 2015
angle = 0
aspect = 0
optimalangles = 0
outputformat = "json" |
10,284 | e621363a0bb29ba95b102bf0409b4afe27b35c1d | import functions
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import colors as mcolors
def legendre(a,p):
return functions.fast_power(a, (p-1)//2, p)
def jacobi(a,n):
# print(a, n)
if n <= 0 or n % 2 == 0:
return -2 # Undefined
res = 1
while True:
# print('cycle', a, n)
if a == 1 or n == 1:
return res
if functions.gcd(a, n) != 1:
return 0
if a % 2 == 0:
if n % 8 == 3 or n % 8 == 5:
res *= -1
a //= 2
res *= (-1) ** ((n-1)//2) * (-1) ** ((a-1)//2)
a, n = n % a, a
if __name__ == "__main__":
print(jacobi(13,13))
arr = np.fromfunction(np.vectorize(jacobi), (100, 100), dtype=int).T
colors = [(0, 0, 0, 1), (1, 0, 0, 1), (1, 1, 1, 1), (0, 1, 0, 1)]
values = [-2, -1, 0, 1]
colormap = mcolors.ListedColormap(['black', 'red', 'white', 'green'])
norm = mcolors.BoundaryNorm(values, colormap.N)
im = plt.imshow(arr, cmap=colormap)
# colors = [ im.cmap(im.norm(value)) for value in values]
print(colors)
patches = [ mpatches.Patch(color=colors[i], label="{l}".format(l=values[i])) for i in range(len(values))]
plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('a')
plt.ylabel('n')
plt.show()
# fig = plt.figure()
# ax = plt.subplot(111)
# chartBox = ax.get_position()
# ax.set_position([chartBox.x0, chartBox.y0, chartBox.width*0.6, chartBox.height])
# fig.colorbar(loc='upper center', bbox_to_anchor=(1.45, 0.8), shadow=True, ncol=1)
# ax.imshow(arr)
# plt.show()
|
10,285 | b7bfdfe671f8683f56f0194a730ef8da49c4452b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright 2018-2020 ARM Limited or its affiliates
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import logging, sys
from suit_tool.argparser import MainArgumentParser
from suit_tool import create, sign, parse, get_pubkey, keygen, sever #, verify, cert, init
# from suit_tool import update
import colorama
colorama.init()
LOG = logging.getLogger(__name__)
LOG_FORMAT='[%(levelname)s] %(asctime)s - %(name)s - %(message)s'
def main():
driver = CLIDriver()
return driver.main()
class CLIDriver(object):
def __init__(self):
self.options = MainArgumentParser().parse_args().options
log_level = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'exception': logging.CRITICAL
}[self.options.log_level]
logging.basicConfig(level=log_level,
format=LOG_FORMAT,
datefmt='%Y-%m-%d %H:%M:%S')
logging.addLevelName( logging.INFO, "\033[1;32m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName( logging.WARNING, "\033[1;93m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName( logging.CRITICAL, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.CRITICAL))
LOG.debug('CLIDriver created. Arguments parsed and logging setup.')
def main(self):
rc = {
"create": create.main,
"parse": parse.main,
# "verify": verify.main,
# "cert": cert.main,
# "init": init.main,
# "update" : update.main,
"pubkey": get_pubkey.main,
"sign": sign.main,
"keygen": keygen.main,
"sever" : sever.main,
}[self.options.action](self.options) or 0
sys.exit(rc)
|
10,286 | 53a0d0204591653bd13a6b392e1f2b8d138df8ab | '''
Python wrapper for libgtpnl
'''
from ctypes import CDLL,c_int,c_uint16,c_char_p,c_void_p
from ctypes import pointer,byref
from socket import socket,inet_aton,AF_INET,SOCK_DGRAM,AF_NETLINK # IPv4
from struct import unpack
from .gtpsock import GtpSocket
from .structures import *
import logging
from time import sleep
logger = logging.getLogger(__name__)
try:
lgnl = CDLL("libgtpnl.so")
except OSError:
logger.error("no libgtpnl.so in search path, check LD_LIBRARY_PATH variable")
exit(1)
# 2 socks needed, although GTPv0 is not used, use ascii devnames
def dev_create(devname, fd0, fd1):
bstring = devname.encode('ascii')
# call libgtpnl to create, mnl dep
creator = lgnl.gtp_dev_create
creator.argtypes = [c_int, c_char_p, c_int, c_int]
try:
logger.debug("creating device: {} {} {} {}".format(-1, bstring, fd0, fd1))
creator(-1 , bstring, fd0, fd1)
# cant catch C errors
except Exception as e:
logger.error("{}".format(e))
exit(1)
#Open communications
sock = GtpSocket()
sock.discovery()
return sock
# destroy a gtp dev, kill all, no errors ever, TODO: maybe propagate from C, not trivial
def dev_stop(name):
dev_destroy = lgnl.gtp_dev_destroy
bstring = name.encode('ascii')
dev_destroy.argtypes = [c_char_p]
dev_destroy(bstring)
'''tunnel_add()
the tunnel creator requires nlsock address as arg to preserve abstraction level it seems
Sock is a pyroute2 NetlinkSocket object
'''
def tunnel_add(ns, ue_ip, enb_ip, i_tei, o_tei, devname, sock, ebi=0):
logger.info("adding tunnel ue:{}, enb:{}, i:{}, o:{}, ebi:{}".format(ue_ip,
enb_ip,
i_tei,
o_tei,
ebi))
ifindex = lgnl.if_nametoindex
ifindex.argtypes = [c_char_p]
idx = ifindex(devname.encode('ascii'))
logger.debug("if_index: {}".format(idx))
zero = V0(0)
one = V1(i_tei, o_tei)
versions = VERSIONS(zero, one)
ue_bytes = IN_ADDR(unpack("<I", inet_aton(ue_ip))[0])
enb_bytes = IN_ADDR(unpack("<I", inet_aton(enb_ip))[0])
# 1 is gtp version
tunnel = GTPTUNNEL(ns, idx, ue_bytes, enb_bytes, ebi, 1, versions)
sockaddr = SOCKADDR_NL(sock.family, 0, sock.getsockname()[0], sock.groups)
logger.debug("sock.pid: {}".format(sock.getsockname()[0]))
c_sock = MNL_SOCK(sock.fileno(), sockaddr)
logger.debug("c_sock done")
logger.debug("c_sock: {}".format(c_sock))
p_tun = pointer(tunnel)
p_sock = pointer(c_sock)
#TODO: pythonize
if_mnlsock_id = lgnl.genl_lookup_family
if_mnlsock_id.argtypes = [c_void_p, c_char_p]
mnlsock_id = if_mnlsock_id(byref(c_sock), devname.encode('ascii'))
tadd = lgnl.gtp_add_tunnel
tadd.argtypes = [c_uint16, c_void_p, c_void_p]
try:
ret=tadd(mnlsock_id, byref(c_sock), byref(tunnel))
logger.debug("creating tunnel: {} {} {}".format(mnlsock_id, p_sock.contents, p_tun.contents))
except Exception as e:
logger.error("{}".format(e))
def tunnel_del(ns, i_tei, o_tei, devname, sock, ebi=0):
logger.info("deleting tunnel i:{}, o:{}, ebi:{}".format(i_tei, o_tei, ebi))
ifindex = lgnl.if_nametoindex
ifindex.argtypes = [c_char_p]
idx = ifindex(devname.encode('ascii'))
zero = V0(0)
one = V1(i_tei, o_tei)
versions = VERSIONS(zero, one)
ue_bytes = IN_ADDR(0)
enb_bytes = IN_ADDR(0)
# 1 is gtp version
tunnel = GTPTUNNEL(ns, idx, ue_bytes, enb_bytes, ebi, 1, versions)
sockaddr = SOCKADDR_NL(sock.family, 0, sock.getsockname()[0], sock.groups)
logger.debug("sock.pid: {}".format(sock.pid))
c_sock = MNL_SOCK(sock.fileno(), sockaddr)
logger.debug("c_sock done")
logger.debug("c_sock: {}".format(c_sock))
#TODO: pythonize
if_mnlsock_id = lgnl.genl_lookup_family
if_mnlsock_id.argtypes = [c_void_p, c_char_p]
mnlsock_id = if_mnlsock_id(byref(c_sock), devname.encode('ascii'))
logger.debug("mnlsock_id: {}".format(mnlsock_id))
tdel = lgnl.gtp_del_tunnel
tdel.argtypes = [c_int, c_void_p, c_void_p]
try:
tdel(mnlsock_id, byref(c_sock), byref(tunnel))
except Exception as e:
logger.error("{}".format(e))
#uses C to print tunnel list of device, maybe pythonification?
def tunnel_list(devname, sock):
tlist = lgnl.gtp_list_tunnel
tlist.argtypes = [c_int, c_void_p]
if_mnlsock_id = lgnl.genl_lookup_family
if_mnlsock_id.argtypes = [c_void_p, c_char_p]
family_id = if_mnlsock_id(sock, devname.encode('ascii'))
tlist(family_id, sock)
# what is this? TODO: research why mod == del add.
def tunnel_mod(ns, ue_ip, enb_ip, i_tei, o_tei, devname, sock):
tunnel_del(ns, i_tei, o_tei, devname, sock)
tunnel_add(ns, ue_ip, enb_ip, i_tei, o_tei, devname, sock)
|
10,287 | cdb78a8996cd517f5f49d5a6e5faca73b5d94033 | from django.core.management.base import NoArgsCommand
import pdb
class Command(NoArgsCommand):
def handle_noargs(self, **options):
'''
deletes all keys from given keyring
'''
from onboarding.interactive_brokers import encryption as encr
from onboarding.interactive_brokers import onboarding as onboard
import gnupg
gpg = gnupg.GPG(gnupghome=onboard.PATH_TO_FILES + onboard.KEYS[:-1], verbose=True)
private = False # 'True' to delete private keys, 'False' for public keys
keys = encr.list_keys(gpg, private)
print('BEFORE ---------------------------------------')
print(str(keys))
keys_before=len(keys)
print('# of keys:' + str(keys_before))
print('----------------------------------------------')
for key in keys:
encr.delete_key(gpg, key, private)
keys = encr.list_keys(gpg, private)
print('AFTER ---------------------------------------')
print(str(keys))
keys_after=len(keys)
print('# of keys:' + str(keys_after))
print(str(keys_before - keys_after) + ' keys deleted')
print('----------------------------------------------')
|
10,288 | 44e86828ff8acb96a1d1c2dd4c2cef5d5eff25ac | """
You can call the function find_largest_water_body by passing in a 2D matrix of 0s and 1s as an argument. The function
will return the size of the largest water body in the matrix.
Here's a Python solution that uses a recursive approach to find the largest water body in a 2D matrix of 0s and 1s:
"""
def _wbs(i, j, rs, cs, grid):
# Base Check
if i < 0 or i >= rs or j < 0 or j >= cs or grid[i][j] != 0:
return 0
# Mark the call as visited
grid[i][j] = -1
return 1 + \
_wbs(i - 1, j, rs, cs, grid) + \
_wbs(i + 1, j, rs, cs, grid) + \
_wbs(i, j - 1, rs, cs, grid) + \
_wbs(i, j + 1, rs, cs, grid)
def find_largest_water_body(grid):
max_size = 0
rs = len(grid)
cs = len(grid[0])
for i in range(rs):
for j in range(cs):
if grid[i][j] == 0:
max_size = max(max_size, _wbs(i, j, rs, cs, grid))
return max_size
"""
Dp Solution
"""
# def find_largest_water_body_2(matrix):
# max_size = 0
# rows = len(matrix)
# cols = len(matrix[0])
# visited = [[False for j in range(cols)] for i in range(rows)]
#
# def dfs(i, j):
# if 0 <= i < rows and 0 <= j < cols and not visited[i][j] and matrix[i][j] == 0:
# visited[i][j] = True
# size = 1
# size += dfs(i + 1, j)
# size += dfs(i - 1, j)
# size += dfs(i, j + 1)
# size += dfs(i, j - 1)
# return size
# return 0
#
# for i in range(rows):
# for j in range(cols):
# if matrix[i][j] == 0 and not visited[i][j]:
# size = dfs(i, j)
# max_size = max(max_size, size)
# return max_size
|
10,289 | 93c34b54593993816f83802353ce8a334a546b45 | from django.db import models
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
class Manufacturer(models.Model):
ManID = models.IntegerField(primary_key=True, serialize=True)
Name = models.CharField(max_length=255)
def __str__(self):
return self.Name
class ManufacturerLogo(models.Model):
ManID = models.OneToOneField(Manufacturer, on_delete=models.CASCADE, unique=True)
Image = models.CharField(max_length=9999)
class ItemCategory(models.Model):
CategoryID = models.IntegerField(primary_key=True, serialize=True)
CategoryTag = models.CharField(max_length=255)
def __str__(self):
return self.CategoryTag
class Items(models.Model):
ItemID = models.IntegerField(primary_key=True, serialize=True)
ManID = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
Quantity_available = models.IntegerField()
Price = models.IntegerField()
Name = models.CharField(max_length=255)
Description = models.CharField(max_length=9999, blank=True)
Image = models.CharField(max_length=255, null=True)
Image_extra = models.CharField(max_length=255, null=True)
Tags = models.ManyToManyField(ItemCategory)
def __str__(self):
return self.Name
class Country(models.Model):
CountryName = models.CharField(max_length=255)
def __str__(self):
return f'{self.CountryName}'
class UserInfo(models.Model):
AccountConnected = models.ForeignKey(User, on_delete=models.CASCADE, null=True, default=None)
FirstName = models.CharField(max_length=255)
LastName = models.CharField(max_length=255)
City = models.CharField(max_length=255)
PostalCode = models.CharField(max_length=15)
Address = models.CharField(max_length=255)
HouseNum = models.IntegerField()
MobilePhone = models.CharField(max_length=63)
Email = models.CharField(max_length=255)
SSN = models.CharField(max_length=255)
Country = models.ForeignKey(Country, on_delete=models.CASCADE)
class ShippingInfo(models.Model):
FirstName = models.CharField(max_length=255)
LastName = models.CharField(max_length=255)
City = models.CharField(max_length=255)
PostalCode = models.CharField(max_length=15)
Address = models.CharField(max_length=255)
HouseNum = models.IntegerField()
MobilePhone = models.CharField(max_length=63)
Email = models.CharField(max_length=255)
SSN = models.CharField(max_length=255)
Country = models.ForeignKey(Country, on_delete=models.CASCADE)
class PromoCodes(models.Model): # id
Name = models.CharField(max_length=63)
Discount = models.FloatField()
class CartContains(models.Model):
ItemID = models.ForeignKey(Items, on_delete=models.CASCADE, null=False)
Quantity = models.IntegerField(null=False)
class ShoppingCart(models.Model):
SessionID = models.ForeignKey(Session, on_delete=models.CASCADE, null=False)
ItemsInCart = models.ManyToManyField(CartContains)
Promo = models.ForeignKey(PromoCodes, null=True, default=None, on_delete=models.CASCADE)
class OrderContains(models.Model):
ItemID = models.ForeignKey(Items, on_delete=models.PROTECT)
Quantity = models.IntegerField(null=False)
Price = models.FloatField(null=False, default=0)
class Order(models.Model): #has id
ShippingInfoID = models.ForeignKey(UserInfo, on_delete=models.CASCADE, null=False)
ItemsInOrder = models.ManyToManyField(OrderContains)
TotalPrice = models.FloatField(null=False, default=0)
DatePurchased = models.DateField(auto_now_add=True, blank=True)
AccountConnected = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, default=None)
SessionConnected = models.ForeignKey(Session, on_delete=models.SET_NULL, null=True, default=None)
class UserImage(models.Model):
User = models.ForeignKey(User, on_delete=models.CASCADE, null=False)
Image = models.URLField(max_length=9999, null=True)
class SessionHistory(models.Model):
SessionID = models.ForeignKey(Session, on_delete=models.CASCADE, null=False)
HistoryStr = models.CharField(max_length=255)
# Create your models here.
|
10,290 | 2e96d36b19fd9aef031c0dd18853d01730ef7b12 | import csv
import numpy as np
import cv2
def run():
    """Interactively relabel rows of ``data_base.csv`` whose label is 0.

    Each flagged row is rendered as a 27x15 glyph with OpenCV, the user
    types the corrected label on the console, and the full corrected
    dataset is appended to ``data_base1.csv`` as integer rows.
    """
    # Count columns from the first line; the last column is the label.
    with open("./data_base.csv") as file:
        n_cols = len(file.readline().split(";"))
        print(n_cols)
    # X: all feature columns; Y: the final (label) column.
    X = np.loadtxt("./data_base.csv", delimiter=";", usecols=np.arange(0, n_cols - 1))
    Y = np.loadtxt("./data_base.csv", delimiter=";", usecols=n_cols - 1)
    # NOTE: Y_new aliases Y (no copy), so the corrections below mutate Y.
    Y_new = Y
    for index in range(0, len(Y_new)):
        if int(Y_new[index]) == 0:
            # Show the 27x15 character image, wait for a key press, then
            # ask the operator for the corrected label.
            char = X[index].reshape(27, 15)
            cv2.imshow('Letra', char)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            tag = int(input('Correcion:'))
            Y_new[index] = tag
    # Re-attach the (corrected) label as the last column and save.
    Data_base = np.insert(X, X.shape[1], Y_new, 1)
    Data_base = Data_base.astype(int)
    print('Tamano X: ', X.shape, 'Tamano: ', Y.shape)
    print(Data_base.shape)
    # Opened in append mode: repeated runs accumulate rows in the file.
    with open('./data_base1.csv', 'a', newline='') as file:
        writer = csv.writer(file, lineterminator='\n', delimiter=";")
        writer.writerows(Data_base)
if __name__ == '__main__':
    run()
|
# 6-11. Cities: Make a dictionary called cities. Use the names of three
# cities as keys in your dictionary. Create a dictionary of information about
# each city and include the country that the city is in, its approximate
# population, and one fact about that city. The keys for each city's
# dictionary should be something like country, population, and fact. Print
# the name of each city and all of the information you have stored about it.
cities = {
    'mcallen': {
        'country': 'united states',
        'population': 1000,
    },
    'tianjin': {
        'country': 'china',
        'population': 2000,
    },
    'beijing': {
        'country': 'china',
        'population': 3000,
    },
}
# Example output line: "Mcallen is a 1000 city in United States."
for city, info in cities.items():
    population = info['population']
    country = info['country']
    print(f"{city.title()} is a {population} city in {country.title()}.")
|
10,292 | 237b38605e007edfa0e25cc0cd68534073a15c66 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from __future__ import absolute_import
import logging
import sys
import bson
from bson import ObjectId
import pymongo
from volttron.platform.agent import utils
from volttron.platform.agent.base_aggregate_historian import AggregateHistorian
from volttron.platform.dbutils import mongoutils
# Configure VOLTTRON logging for this agent at DEBUG level and expose
# the module logger and agent version used by vip_main below.
utils.setup_logging(logging.DEBUG)
_log = logging.getLogger(__name__)
__version__ = '1.0'
class MongodbAggregateHistorian(AggregateHistorian):
    """
    Agent to aggregate data in historian based on a specific time period.
    This aggregate historian aggregates data collected by mongo historian.
    """

    def __init__(self, config_path, **kwargs):
        """
        Validate configuration, create connection to historian, create
        aggregate tables if necessary and set up a periodic call to
        aggregate data

        :param config_path: configuration file path
        :param kwargs: passed through to AggregateHistorian
        """
        # Connection and collection names are populated in configure().
        self.dbclient = None
        self._data_collection = None
        self._meta_collection = None
        self._topic_collection = None
        self._agg_meta_collection = None
        self._agg_topic_collection = None
        # topic -> id map, loaded in configure() via get_topic_map().
        self.topic_id_map = {}
        super(MongodbAggregateHistorian, self).__init__(config_path, **kwargs)

    def configure(self, config_name, action, config):
        """
        Connect to Mongo, resolve the historian's collection names (with
        optional table prefix) from its table-definitions collection,
        ensure the unique index on aggregate topics, and load the topic
        map, then delegate to the base class.

        :param config_name: name of the stored configuration
        :param action: config store action
        :param config: configuration dict; must contain a 'connection' key
        :raises ValueError: if config is missing or not a dict
        """
        if not config or not isinstance(config, dict):
            raise ValueError("Configuration should be a valid json")
        connection = config.get('connection')
        self.dbclient = mongoutils.get_mongo_client(connection['params'])
        # Why are we not letting users configure data and topic collection
        # names in mongo similar to sqlhistorian
        # tables_def = sqlutils.get_table_def(self.config)
        db = self.dbclient.get_default_database()
        cursor = db[self.volttron_table_defs].find()
        table_map = {}
        prefix = ""
        for document in cursor:
            table_map[document['table_id'].lower()] = document[
                'table_name']
            prefix = document.get('table_prefix') + "_" if document.get(
                'table_prefix') else ''
        # Fall back to the default collection names when the historian
        # did not record explicit ones.
        self._data_collection = prefix + table_map.get('data_table', 'data')
        self._meta_collection = prefix + table_map.get('meta_table', 'meta')
        self._topic_collection = prefix + table_map.get('topics_table',
                                                        'topics')
        self._agg_meta_collection = prefix + 'aggregate_' \
            + table_map.get('meta_table', 'meta')
        self._agg_topic_collection = prefix + 'aggregate_' \
            + table_map.get('topics_table', 'topics')
        # An aggregate topic is unique per (name, type, time period).
        db[self._agg_topic_collection].create_index(
            [('agg_topic_name', pymongo.DESCENDING),
             ('agg_type', pymongo.DESCENDING),
             ('agg_time_period', pymongo.DESCENDING)],
            unique=True, background=True)
        # 2. load topic name and topic id. (name_map is unused here.)
        self.topic_id_map, name_map = self.get_topic_map()
        super(MongodbAggregateHistorian, self).configure(config_name,
                                                         action, config)

    def get_topic_map(self):
        """Return a 2-tuple of topic maps from the topics collection."""
        return mongoutils.get_topic_map(self.dbclient, self._topic_collection)

    def get_agg_topic_map(self):
        """Return the map of already-registered aggregate topics."""
        return mongoutils.get_agg_topic_map(self.dbclient,
                                            self._agg_topic_collection)

    def get_aggregation_list(self):
        """Return the aggregation operations this historian supports."""
        return ['SUM', 'COUNT', 'AVG', 'MIN', 'MAX', 'STDDEVPOP',
                'STDDEVSAMP']

    def initialize_aggregate_store(self, aggregation_topic_name, agg_type,
                                   agg_time_period, topics_meta):
        """
        Create the per-(type, period) aggregate collection and register
        the new aggregate topic and its metadata.

        :returns: ObjectId of the inserted aggregate-topic document
        """
        db = self.dbclient.get_default_database()
        # One collection per aggregation type + period pair; rows are
        # unique per (topic_id, ts).
        agg_collection = agg_type + '''_''' + agg_time_period
        db[agg_collection].create_index(
            [('topic_id', pymongo.DESCENDING),
             ('ts', pymongo.DESCENDING)],
            unique=True, background=True)
        row = db[self._agg_topic_collection].insert_one(
            {'agg_topic_name': aggregation_topic_name,
             'agg_type': agg_type,
             'agg_time_period': agg_time_period})
        agg_id = row.inserted_id
        _log.debug("Inserted aggregate topic in {} agg id is{}".format(
            self._agg_topic_collection, agg_id))
        db[self._agg_meta_collection].insert_one({'agg_topic_id': agg_id,
                                                  'meta': topics_meta})
        return agg_id

    def update_aggregate_metadata(self, agg_id, aggregation_topic_name,
                                  topic_meta):
        """
        Rename an existing aggregate topic and replace its metadata.

        :param agg_id: aggregate topic id (ObjectId or its string form)
        """
        db = self.dbclient.get_default_database()
        result = db[self._agg_topic_collection].update_one(
            {'_id': bson.objectid.ObjectId(agg_id)},
            {'$set': {'agg_topic_name': aggregation_topic_name}})
        _log.debug("Updated topic name for {} records".format(
            result.matched_count))
        result = db[self._agg_meta_collection].update_one(
            {'agg_topic_id': bson.objectid.ObjectId(agg_id)},
            {'$set': {'meta': topic_meta}})
        _log.debug("Updated meta name for {} records".format(
            result.matched_count))

    def collect_aggregate(self, topic_ids, agg_type, start_time, end_time):
        """
        Compute one aggregate of 'value' over the given topics between
        start_time (inclusive) and end_time (exclusive); None bounds are
        open-ended.

        :returns: (aggregate value, record count), or (0, 0) when no
            data rows match
        """
        db = self.dbclient.get_default_database()
        _log.debug("collect_aggregate: params {}, {}, {}, {}".format(
            topic_ids, agg_type, start_time, end_time))
        # because topic_ids might be got by making rpc call to historian
        # in which case historian would have returned object ids as strings
        # in order to be serializable
        if not isinstance(topic_ids[0], ObjectId):
            topic_ids = [ObjectId(x) for x in topic_ids]
        match_conditions = [{"topic_id": {"$in": topic_ids}}]
        if start_time is not None:
            match_conditions.append({"ts": {"$gte": start_time}})
        if end_time is not None:
            match_conditions.append({"ts": {"$lt": end_time}})
        match = {"$match": {"$and": match_conditions}}
        # Collapse all matched documents into one group carrying the
        # "$<agg_type>" accumulator over 'value' plus a document count.
        group = {"$group": {"_id": "null", "count": {"$sum": 1},
                            "aggregate": {"$" + agg_type: "$value"}}}
        pipeline = [match, group]
        _log.debug("collect_aggregate: pipeline: {}".format(pipeline))
        cursor = db[self._data_collection].aggregate(pipeline)
        try:
            row = cursor.next()
            _log.debug("collect_aggregate: got result as {}".format(row))
            return row['aggregate'], row['count']
        except StopIteration:
            # Empty cursor: nothing matched the conditions.
            return 0, 0

    def insert_aggregate(self, topic_id, agg_type, period, end_time,
                         value, topic_ids):
        """
        Upsert one aggregate value into the <agg_type>_<period>
        collection, keyed by (topic_id, ts).
        """
        db = self.dbclient.get_default_database()
        table_name = agg_type + '_' + period
        db[table_name].replace_one(
            {'ts': end_time, 'topic_id': topic_id},
            {'ts': end_time, 'topic_id': topic_id, 'value': value,
             'topics_list': topic_ids},
            upsert=True)
def main(argv=sys.argv):
    """Main method called by the eggsecutable.

    Launches the MongodbAggregateHistorian through the VOLTTRON VIP
    helper; any unhandled exception is logged with its traceback.
    """
    try:
        utils.vip_main(MongodbAggregateHistorian, version=__version__)
    except Exception as e:
        # Fix: Exception objects have no ``.message`` attribute on
        # Python 3, so the old ``'unhandled exception' + e.message``
        # raised AttributeError inside the handler and hid the real
        # error. Log the exception lazily; .exception() adds the trace.
        _log.exception('unhandled exception: %s', e)
if __name__ == '__main__':
    # Entry point for script
    sys.exit(main())
|
# Demonstrates Python "while" loops, plus the "break" and "continue"
# statements, by walking a list of fruits with a manually advanced index.
index = 0
fruits = ["apple", "mango", "strawberry", "grapes", "pear", "kiwi", "orange", "banana"]
print(fruits)
print(len(fruits))
while True:
    if index >= len(fruits):
        break  # list exhausted -- leave the loop explicitly
    fruit_new = fruits[index]
    print(index)
    print(fruit_new)
    index = index + 1
# while True: # Uncomment this, it is an infinite loop
# print("a")
'''
The "while" loop can execute a set of statements as long as a condition is true.
The "break" statement can stop the loop even if the while condition is true.
The "continue" statement can stop the current iteration, and continue with the next.
'''
###########------------------BONUS------------------###########
print("\n")  # adds new lines
print('''
We take a variable i = 6. Iterate it till it reaches 24.
But then we use "break" if i reaches 17.
You'll see that it comes out of the "while" loop, even though iterations are remaining.
''')
i = 6
while i < 24:
    print(i)
    if i == 17:
        break  # bail out early even though i < 24 still holds
    i = i + 1
print("\n")  # adds new lines
print('''
We take a variable i = 6. Iterate it till it reaches 24.
But then we use "continue" if i reaches 17 and print that i = 17 here.
We then "continue" with remaining iterations.
''')
i = 6
while i < 24:
    i = i + 1
    if i == 17:
        print("i = 17 here")
        continue  # skip the normal print for this one iteration
    print(i)
|
# Group key -> list of education levels belonging to that group.
INPUT = {
    4: ['Masters', 'Doctorate', 'Prof-school'],
    6: ['HS-grad'],
    3: ['Bachelor']
}
# Invert the mapping with a single dict comprehension: each education
# level maps to its (stringified) group key. Iteration order matches the
# equivalent nested for-loops, so the printed dict is identical.
wynik = {education: str(key)
         for key, value in INPUT.items()
         for education in value}
print(wynik)
# Expected result:
# {'Masters': '4', 'Doctorate': '4', 'Prof-school': '4',
#  'HS-grad': '6', 'Bachelor': '3'}
|
# Ticket fare with age-based discounts: 30% off for seniors (60 and
# over), 20% off for children (10 and under), full price otherwise.
BASE_FARE = 14000
age = int(input('나이 입력: '))
if age >= 60:
    print('30% 요금 할인대상입니다')
    cost = BASE_FARE * 0.7
elif age <= 10:
    print('20% 요금할인 대상입니다')
    cost = BASE_FARE * 0.8
else:
    print('요금할인 대상이 아닙니다')
    cost = BASE_FARE
print('요금: ' + str(int(cost)))
|
10,296 | 28ed2f4c981db5cb41aa51dc691285b4c64086d8 | import FWCore.ParameterSet.Config as cms
# CMSSW job: generate gamma+jets Pythia events, keep only events passing
# the PythiaFilterGammaGamma selection, and write them out at GEN tier.
process = cms.Process("Gen")
process.load("FWCore.MessageService.MessageLogger_cfi")
# control point for all seeds
#
process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
# physics event generation
#
process.load("Configuration.Spring08Production.Spring08_Gamma_Jets_Pythia_cfi")
process.load("Configuration.EventContent.EventContent_cff")
# NOTE(review): maxEvents conventionally carries an 'input' parameter;
# 'output' here limits written events instead -- confirm this is intended.
process.maxEvents = cms.untracked.PSet(
    output = cms.untracked.int32(10)
)
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.2 $'),
    name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/Spring08Production/data/Spring08_Gamma_Jets_PythiaFilterGammaGamma_GEN.cfg,v $'),
    annotation = cms.untracked.string('generation of gamma+jets, CTEQ 6L1 used')
)
# Generator-level filter; the pT/eta/isolation thresholds below define
# the photon/electron candidate selection applied to generated events.
process.filter = cms.EDFilter("PythiaFilterGammaGamma",
    AcceptPrompts = cms.untracked.bool(True),
    PtSeedThr = cms.untracked.double(5.0),
    NTkConeSum = cms.untracked.int32(3),
    moduleLabel = cms.untracked.string('source'),
    EtaElThr = cms.untracked.double(2.8),
    EtaSeedThr = cms.untracked.double(2.8),
    dRNarrowCone = cms.untracked.double(0.02),
    EtaMaxCandidate = cms.untracked.double(3.0),
    dPhiSeedMax = cms.untracked.double(0.3),
    EtaGammaThr = cms.untracked.double(2.8),
    InvMassWide = cms.untracked.double(80.0),
    EtaTkThr = cms.untracked.double(2.2),
    PtElThr = cms.untracked.double(2.0),
    NTkConeMax = cms.untracked.int32(2),
    dEtaSeedMax = cms.untracked.double(0.12),
    PromptPtThreshold = cms.untracked.double(20.0),
    PtMinCandidate2 = cms.untracked.double(22.5),
    PtGammaThr = cms.untracked.double(0.0),
    PtMinCandidate1 = cms.untracked.double(37.5),
    dRSeedMax = cms.untracked.double(0.0),
    PtTkThr = cms.untracked.double(1.6),
    InvMassNarrow = cms.untracked.double(14000.0),
    dRTkMax = cms.untracked.double(0.2)
)
# Output module: events that passed path 'p1' go to the GEN-tier file.
process.GEN = cms.OutputModule("PoolOutputModule",
    process.FEVTSIMEventContent,
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN')
    ),
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('p1')
    ),
    fileName = cms.untracked.string('PythiaGammaJetsFilterGG.root')
)
# Schedule: run the filter path, then the output end-path.
process.p1 = cms.Path(process.filter)
process.outpath = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.p1,process.outpath)
|
10,297 | efc631f75aa1b4780fef1ec559d0ff439818e95a | from django.urls import path
from backend.reviews import views
# URL routes for the reviews app: list all reviews at the root and add
# a new one at "add/". The commented routes below are planned API
# endpoints that are not wired up yet.
urlpatterns = [
    path('', views.ListReviews.as_view(), name="list_review"),
    path('add/', views.AddReview.as_view(), name="add_review")
    # api
    # path('add/', views.AddReview.as_view()),
    # path('all/', views.AllReviews.as_view()),
    # path('moderated/', views.ModeratedReviews.as_view()),
    # path('not_moderated/', views.NotModeratedReviews.as_view()),
]
|
10,298 | b5623cad90b2c4d14a2a7a505665abe6c953662e | import numpy
# Read two n x n integer matrices from stdin (one row per line,
# whitespace-separated) and print their matrix product.
n = int(input())
array_a = numpy.array([list(map(int, input().split())) for _ in range(n)])
array_b = numpy.array([list(map(int, input().split())) for _ in range(n)])
print(numpy.dot(array_a, array_b))
|
async def herro(*args):
    """Send the canned greeting "herro dere" via the bot and return the result."""
    reply = await dnd_bot.say("herro dere")
    return reply
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.