text stringlengths 38 1.54M |
|---|
import sys
import re
import numpy as np
class Crystal (object):
    """
    Base class for all 2D crystals.

    Every 2D crystal must be described using a 2D Bravais lattice and an
    atomic basis. This class ignores the atomic basis and uses only the
    number of atoms inside the given unit cell.

    Each 2D crystal is described as a plain text file with one field per
    line: label, number of atoms, Bravais lattice name, lattice
    parameter(s). A simple standard file could be:

        Gr
        2
        Hexagonal
        2.467
    """

    def __init__ (self, filename):
        """
        Initializes the 2D crystal by associating it to the `filename`
        given as argument. Each `filename` must contain a label for the
        material, the number of atoms inside the unit cell, the Bravais
        lattice and the lattice parameter.
        """
        # The input file which describes the 2D crystal.
        self.filename = filename
        # The label of the 2D crystal.
        self.label = self.getLabel ()
        # The number of atoms inside the 2D crystal unit cell.
        self.nAtoms = self.getNatoms ()
        # The Bravais lattice name: square, rectangular, oblique or hexagonal.
        self.bravaisLattice = ""
        # The lattice vectors as columns of the matrix A (or B) shown in
        # Eq. 11 of the paper.
        self.latticeVectors = self.getBravaisLattice ()

    def _readLines (self):
        """
        Return all lines of `self.filename`; exit with code 1 if the file
        does not exist. Centralizes the file access that was previously
        duplicated in every getter.
        """
        try:
            with open (self.filename, 'r') as f:
                return f.readlines()
        except FileNotFoundError:
            # Bug fix: the original message had no space before "not found".
            print ("File " + self.filename + " not found! Please check the arguments!\n")
            sys.exit(1)

    def getLabel (self):
        """
        Reads the label of the 2D crystal given in `filename`. The label
        must be the first line in the file.
        """
        return self._readLines()[0].strip('\n')

    def getNatoms (self):
        """
        Reads the number of atoms inside the unit cell of the 2D crystal
        (second line of the file). Exits with code 2 on a malformed integer.
        """
        lines = self._readLines()
        try:
            return int(lines[1])
        except ValueError:
            print ("Invalid number of atoms for file " + self.filename)
            sys.exit(2)

    def getBravaisLattice (self):
        """
        Reads the Bravais lattice of the 2D crystal (third line) and its
        parameters (fourth line) and returns the lattice vectors as the
        columns of a 2x2 numpy matrix.

        Accepted lattices (case-insensitive): Oblique, Rectangular,
        Hexagonal and Square. The fourth line contains `a1 a2 angle`
        (angle in degrees); Rectangular needs only `a1 a2`, while
        Hexagonal and Square need only `a1`.
        """
        lines = self._readLines()
        # r'\s' already matches newlines; the original '[\n\s]' class was
        # redundant and relied on a deprecated non-raw '\s' escape.
        latticeName = re.sub(r'\s', '', lines[2].lower())
        if latticeName not in ('oblique', 'rectangular', 'hexagonal', 'square'):
            print ("Invalid 2D Bravais lattice: " + lines[2].strip('\n') + " for file " + self.filename + "\n")
            sys.exit(4)
        try:
            # split() already discards empty fields; convert directly to float.
            BravaisParameters = [float(x) for x in lines[3].split()]
        except ValueError:
            print ("Wrong entry for description of the Bravais lattice: " + lines[3].strip('\n') + " for file " + self.filename + "\n")
            sys.exit(5)
        if not BravaisParameters:
            print ("Not enough parameters to describe the Bravais lattice for file " + self.filename + "\n")
            sys.exit(6)
        self.bravaisLattice = latticeName
        # Required parameter count, hint message and exit code per lattice;
        # the exit codes match the original implementation.
        requirements = {
            'square':      (1, "Square lattices require one parameter (a) to be entirely described\n", 7),
            'rectangular': (2, "Rectangular lattices require two parameters (ax, ay) to be entirely described\n", 8),
            'hexagonal':   (1, "Hexagonal lattices require one parameter (a) to be entirely described\n", 9),
            'oblique':     (3, "Oblique lattices require three parameters (a1, a2, angle) to be entirely described\n", 10),
        }
        nRequired, hint, errCode = requirements[latticeName]
        if len(BravaisParameters) < nRequired:
            print ("Not enough parameters to describe the Bravais lattice for file " + self.filename + "\n")
            print (hint)
            sys.exit(errCode)
        if latticeName == 'square':
            a = BravaisParameters[0]
            rows = [[a, 0], [0, a]]
        elif latticeName == 'rectangular':
            rows = [[BravaisParameters[0], 0], [0, BravaisParameters[1]]]
        elif latticeName == 'hexagonal':
            # Hexagonal lattice: second vector at 60 degrees, same length.
            a = BravaisParameters[0]
            rows = [[a, 0], [a * np.cos(np.pi / 3), a * np.sin(np.pi / 3)]]
        else:  # oblique
            angle = BravaisParameters[2] * np.pi / 180
            rows = [[BravaisParameters[0], 0],
                    [BravaisParameters[1] * np.cos(angle), BravaisParameters[1] * np.sin(angle)]]
        # Vectors are stored as columns, hence the transpose.
        return np.transpose(np.matrix(rows))
|
from bs4 import BeautifulSoup
import bleach
import pickle
import sys
def generate_name(id):
    """Return the surrogate token ('fnust<id>') used to stand in for a phrase."""
    return f"fnust{id}"
def decrypt(val):
    """Inverse of generate_name: extract the integer id from a 'fnust<id>' token.

    Returns None when *val* is not a string, does not carry the 'fnust'
    prefix, or the suffix is not a valid integer. The original version
    relied on a bare `except` and an implicit fall-through return; the
    same outcomes are now explicit.
    """
    if isinstance(val, str) and val.startswith("fnust"):
        try:
            return int(val[5:])
        except ValueError:
            return None
    return None
if __name__ == "__main__":
    # Usage: script.py <data_path> <tmp_path>
    # Replaces <phrase> spans found by a segmenter with surrogate tokens
    # ('fnustN') and writes the tokenized text plus both id<->phrase maps.
    data_path = sys.argv[1]
    tmp_path = sys.argv[2]
    # Output of the upstream phrase segmenter (read below, despite the name).
    out_path = tmp_path + "segmentation.txt"
    df = pickle.load(open(data_path + "df.pkl", "rb"))
    f = open(out_path, "r")
    lines = f.readlines()
    f.close()
    # phrase text -> integer id, assigned in order of first appearance.
    phrase_id_map = {}
    counter = 0
    data = []
    for line in lines:
        line = line.lower()
        # NOTE(review): no explicit parser is passed to BeautifulSoup, so the
        # result depends on which parser is installed -- consider pinning one.
        soup = BeautifulSoup(line)
        for p in soup.findAll("phrase"):
            phrase = p.string
            if phrase is None:
                continue
            # Assign a fresh id on first sighting (KeyError path).
            try:
                temp = phrase_id_map[phrase]
            except:
                phrase_id_map[phrase] = counter
                counter += 1
            name = generate_name(phrase_id_map[phrase])
            # Surround with spaces so the token stays a separate word.
            p.string.replace_with(" " + name + " ")
        # Strip all remaining markup, keeping only the text content.
        temp_str = bleach.clean(str(soup), tags=[], strip=True)
        data.append(temp_str)
    # Assumes `lines` has exactly one entry per row of df -- TODO confirm.
    df["text"] = data
    pickle.dump(df, open(tmp_path + "df_phrase.pkl", "wb"))
    # Build the reverse map (id -> phrase) for later decoding.
    id_phrase_map = {}
    for ph in phrase_id_map:
        id_phrase_map[phrase_id_map[ph]] = ph
    print("Number of phrases: ", len(phrase_id_map), flush=True)
    pickle.dump(phrase_id_map, open(tmp_path + "phrase_id_map.pkl", "wb"))
    pickle.dump(id_phrase_map, open(tmp_path + "id_phrase_map.pkl", "wb"))
|
def minDistance(word1, word2):
    """Levenshtein edit distance between word1 and word2.

    Insertions, deletions and substitutions each cost 1. Uses a rolling
    pair of rows instead of the full DP table, so memory is O(len(word1)).
    """
    m = len(word1)
    prev = list(range(m + 1))  # distances for the empty prefix of word2
    for i in range(1, len(word2) + 1):
        curr = [i] + [0] * m
        for j in range(1, m + 1):
            if word2[i - 1] == word1[j - 1]:
                curr[j] = prev[j - 1]          # characters match: no cost
            else:
                curr[j] = 1 + min(prev[j],      # delete from word2
                                  prev[j - 1],  # substitute
                                  curr[j - 1])  # insert into word2
        prev = curr
    return prev[m]
if __name__ == '__main__':
    # Demo: the classic LeetCode 72 example; expected edit distance is 3.
    word1 = "horse"
    word2 = "ros"
    print(minDistance(word1,word2))
# _*_coding:utf-8 _*_
import requests
# Pretend to be a desktop Chrome browser so Baidu serves the normal page.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# Target URL: a Baidu web search.
# url = 'https://www.baidu.com/s?wd=python'
# The trailing '?' is optional -- requests appends the params either way.
url = 'https://www.baidu.com/s?'
# Query parameters as a dict, i.e. wd=python.
kw = {'wd': 'python'}
# Send the GET request with the query params and print the raw response bytes.
response = requests.get(url, headers=headers, params=kw)
print(response.content)
|
class MaxHeap:
    """Array-backed max-heap ordered by each element's `bangForBuckRatio`.

    The heap is 1-indexed: heapification() prepends a sentinel at index 0,
    and children of node k live at 2k and 2k+1. `basicOperation` counts the
    key comparisons performed (for algorithm analysis).
    """
    def __init__(self, heap, n):
        self.H = heap               # backing list of items (with ratio attribute)
        self.n = n                  # number of live elements in the heap
        self.basicOperation = 0     # comparison counter for analysis
    def deleteMax(self):
        """Remove the maximum (root at index 1) and restore the heap.

        NOTE(review): assumes n >= 1 and that heapification() was already
        called (index 0 holds the sentinel); the vacated last slot is marked
        with -1 rather than removed -- confirm callers expect that.
        """
        # Move the last element to the root, shrink, then sift it down.
        self.H[1] = self.H[self.n]
        self.H[self.n] = -1
        self.n -= 1
        k = 1
        v = self.H[k]
        heap = False
        while heap != True and 2 * k <= self.n:
            j = 2 * k
            #siftdown
            self.basicOperation += 1
            # Pick the larger of the two children (if a right child exists).
            if j < self.n:
                if self.H[j].bangForBuckRatio < self.H[j + 1].bangForBuckRatio:
                    j = j + 1
            # If v dominates the larger child, the heap property holds.
            if v.bangForBuckRatio >= self.H[j].bangForBuckRatio:
                heap = True
            else:
                self.H[k] = self.H[j]
                k = j
        self.H[k] = v
    def heapification(self):
        """Build the heap bottom-up (classic O(n) construction).

        Inserts a sentinel 0 at index 0 so the array becomes 1-indexed,
        then sifts down every internal node from n//2 to 1.
        """
        self.H.insert(0,0)
        for i in range(int(self.n/2), 0, -1):
            k = i
            v = self.H[k]
            heap = False
            self.basicOperation += 1
            while heap != True and 2 * k <= self.n:
                j = 2 * k
                # siftdown
                self.basicOperation += 1
                # Select the larger child.
                if j < self.n:
                    if self.H[j].bangForBuckRatio < self.H[j+1].bangForBuckRatio:
                        j = j + 1
                # Stop when v is at least as large as its bigger child.
                if v.bangForBuckRatio >= self.H[j].bangForBuckRatio:
                    heap = True
                else:
                    self.H[k] = self.H[j]
                    k = j
            self.H[k] = v
##-----------------------------
# Python Crash Course
# Eric Matthes
# Chapter 8 - Functions
# pets.py, p. 195
##-----------------------------
# 8.3. T-shirt
def make_shirt(tam, msg):
    """Print a t-shirt's size and the message to be stamped on it."""
    print('\nO tamanho da camiseta é ' + tam.title() + '.')
    print('Sua mensagem deve ser: ' + msg.title() + '.')
make_shirt('m', "be happy!")
# 8.4. Large t-shirts
# NOTE: this redefinition replaces the make_shirt defined just above.
def make_shirt(tam='g', msg='eu amo python!'):
    """Print a t-shirt's size and message, defaulting to large / 'eu amo python!'."""
    print('\nO tamanho da camiseta é ' + tam.title() + '.')
    print('Sua mensagem deve ser: ' + msg.title() + '.')
make_shirt()
# 8.5. Cities
def describe_city(cidade='natal', pais='brasil'):
    """Print a city's name and its country (defaults: Natal, Brazil)."""
    print('\nO nome da cidade onde resido é ' + cidade.title() + '.')
    print('Essa cidade pertence ao ' + pais.title() + '.')
describe_city()
describe_city(cidade='acapulco', pais='méxico')
|
#!/usr/bin/env python
def from_military_time(military):
    """Convert an hour in 24-hour time (0-23) to a 12-hour clock string.

    Examples: 0 -> '12:00am', 12 -> '12:00pm', 13 -> '1:00pm'.
    """
    period = 'am' if military < 12 else 'pm'
    hour = military % 12 or 12  # 0 and 12 both display as 12
    return '{}:00{}'.format(hour, period)
|
import ika
import engine
class text:
    """Full-screen text panel for the ika engine.

    NOTE: constructing an instance BLOCKS -- __init__ runs its own
    update/render loop until the user presses SPACE.
    """
    # Shared background image, loaded once at class-definition time.
    blackbg = ika.Image("gfx\\blackbg.png")
    def __init__(self, x, y, text):
        # Top-left corner where the text block is drawn.
        self.x = x
        self.y = y
        self.fh = engine.font.height  # line height for vertical spacing
        self.alive = True             # cleared by Update() on SPACE
        self.text = text              # iterable of lines to display
        # Modal loop: draw and poll input until dismissed.
        while self.alive == True:
            self.Update()
            self.Render()
            ika.Input.Update()
            ika.Video.ShowPage()
    def Update(self):
        """Dismiss the panel when SPACE is pressed."""
        if ika.Input.keyboard['SPACE'].Pressed():
            self.alive = False
    def Render(self):
        """Blit the background and print each line of text below the last."""
        ika.Video.Blit(self.blackbg,0,0)
        for a, b in enumerate(self.text):
            engine.font.Print(self.x, self.y + a*self.fh, b)
|
from covidsim import Cell, Simulator, State
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def get_c(s):
    """Map a cell State to a matplotlib colour code.

    Normal -> blue, Infected -> red, Recovered -> green,
    anything else (e.g. Dead) -> black.
    """
    if s == State.Normal:
        return 'b'
    if s == State.Infected:
        return 'r'
    if s == State.Recovered:
        return 'g'
    return 'k'
def main():
    """Run a covid simulation on a 50x50 area and animate the result."""
    sim = Simulator(((0.0,50.0),(0.0,50.0)))
    #sim.add_cells(50,0.1,1.0)
    # 50 cells; remaining args are Simulator parameters -- see covidsim docs.
    sim.add_cells(50,0.1,0.2,maxboundlen=10.0)
    result = sim.simulate(maxstep=1000)
    show_result(result)
def show_result(simdata):
    """Animate simulation frames: one scatter per state, coloured by get_c.

    *simdata* is an iterable of frames, each frame an iterable of
    (position, state) pairs.
    """
    fig = plt.figure()
    ims = []  # one list of artists per animation frame
    for r in simdata:
        # Bucket cell positions by state for this frame.
        t = {State.Normal:[],State.Infected:[],State.Recovered:[],State.Dead:[]}
        for cpos,cstate in r:
            t[cstate].append(cpos)
        temp = []
        for s in State:
            # Transpose to (xs, ys) for plt.scatter.
            t[s] = np.array(t[s]).T
            if t[s].size!=0:
                temp.append(plt.scatter(t[s][0],t[s][1],c=get_c(s)))
        ims.append(temp)
    # Keep a reference to the animation so it is not garbage-collected
    # before plt.show() returns -- presumably why it is assigned; confirm.
    ani = animation.ArtistAnimation(fig, ims, interval=30)
    plt.show()
if __name__ == "__main__":
    main()
#If on Windows to avoid fullscreen, use the following two lines of code
from kivy.config import Config
Config.set('graphics', 'fullscreen', '0')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.core.audio import SoundLoader
from os import listdir, path, makedirs, rename
Builder.load_string('''
<FileOrganizer>:
TextInput:
id: direct
pos: 0,root.top-50
size: root.width-200,50
hint_text: 'Enter Folder Location'
Button:
text: 'Organize'
size: 200,50
background_color: 0,.5,1,1
pos: root.width-200, root.top-50
on_press:
status.text = "Attempting To Organize Folder..."
status.color = (1, .5, 0, 1)
on_release: root.organize()
Label:
id: status
text: "Ready To Organize..."
center: root.center
''')
class FileOrganizer(Widget):
    """Root widget: reads a folder path from the TextInput and, when the
    'Organize' button is released, moves every recognized file into a
    per-category subfolder (creating the subfolder if needed)."""

    # Destination folder name -> file extensions (lower-case) it collects.
    # Replaces seven copy-pasted loops; extensions are matched
    # case-insensitively, which also fixes the old '.ACCDB'-style entries
    # that could never match a lower-case filename.
    CATEGORY_EXTENSIONS = {
        'Applications': ('.exe',),
        'Code': ('.py', '.java', '.c', '.cpp', '.rb', '.asm', '.php',
                 '.html', '.css', '.js', '.lua'),
        'Music': ('.mp3', '.ogg', '.wav'),
        'Videos': ('.mp4', '.3gp', '.avi'),
        'Pictures': ('.jpg', '.jpeg', '.png', '.bmp', '.gif'),
        'Archives': ('.zip', '.rar', '.7zip', '.tar', '.iso'),
        'Documents': ('.docx', '.doc', '.pdf', '.txt', '.ppt', '.pptx',
                      '.ppsx', '.pptm', '.docm', '.dotx', '.dotm', '.docb',
                      '.xlsx', '.xlsm', '.xltx', '.xltm', '.xlsb', '.xla',
                      '.xlam', '.xll', '.xlw', '.accdb', '.accde', '.accdt',
                      '.accdr', '.pub', '.potx', '.potm', '.ppam', '.ppsm',
                      '.sldx', '.sldm'),
    }

    def organize(self):
        """Sort the files in the user-entered directory into category
        subfolders, then update the status label."""
        directory = self.ids.direct.text  # directory entered by the user
        # Make sure the directory ends with a '/'.
        if not directory.endswith('/'):
            directory += '/'
        for fil in listdir(directory):
            category = self._category_for(fil)
            if category is None:
                continue  # unrecognized extension: leave the file in place
            target = directory + category + '/'
            if not path.exists(target):
                makedirs(target)
            # Each file is moved at most once (the old code kept checking
            # later categories after a move, risking a missing-file error).
            rename(directory + fil, target + fil)
        self.ids.status.text = "Folder has been organized"
        self.ids.status.color = (0, 1, 0, 1)

    def _category_for(self, filename):
        """Return the category folder for *filename* based on a
        case-insensitive extension match, or None if nothing matches."""
        lowered = filename.lower()
        for category, extensions in self.CATEGORY_EXTENSIONS.items():
            if lowered.endswith(extensions):
                return category
        return None
class FilingApp(App):
    """Kivy application wrapper; the root widget is a FileOrganizer."""
    def build(self):
        # Return the root widget of the application.
        return FileOrganizer()
    def on_pause(self):
        # Allow the app to be paused (mobile) instead of being stopped.
        return True
    def on_resume(self):
        pass
if __name__ == "__main__":
    FilingApp().run()
|
from django.urls import path, include
from django.views.decorators.cache import cache_page
from .views import (
IndexView,
donation,
VideosView,
category_video,
VideoCategoryView,
AudiosView,
subscribe,
faq,
live_view,
odds_view,
privacy_policy,
tandc,
contact,
Youtube,
vid_json,
VideosTestView
)
# URL routes for the site; list/feed pages are wrapped in cache_page so the
# rendered response is cached for 10 minutes (60 * 10 seconds).
urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    # path('ckeditor/', include('ckeditor_uploader.urls')),
    path('videos/', cache_page(60 * 10)(VideosView.as_view()), name='videos'),
    # Uncached variant used for testing the videos page.
    path('videostest/', VideosTestView.as_view(), name='videostest'),
    path('podcast/', cache_page(60 * 10)(AudiosView.as_view()), name='podcast'),
    path('videos-category/<int:id>/<slug:slug>',
         VideoCategoryView.as_view(), name='category_video'),
    path('subscribe/', subscribe, name='subscribe'),
    path('frequently-asked-questions/', faq, name='faq'),
    path('livescores/', live_view, name='live_view'),
    path('odds/', odds_view, name='odds_view'),
    path('donate/', donation, name='donate'),
    path('privacy-policy/', privacy_policy, name='privacy_policy'),
    path('terms-and-conditions/', tandc, name='tandc'),
    path('contact/', contact, name='contact'),
    # JSON endpoints consumed by the front end.
    path('youtube-json/', Youtube.as_view(), name='youtube-json'),
    path('vid-json/<int:num_posts>/', vid_json, name='vid-json'),
]
|
# Generated by Django 3.1 on 2020-08-25 10:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Video.comments optional
    (blank allowed, empty-string default)."""
    dependencies = [
        ('accounts', '0005_auto_20200825_1426'),
    ]
    operations = [
        migrations.AlterField(
            model_name='video',
            name='comments',
            field=models.TextField(blank=True, default=''),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-18 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: create the Api model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Api',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('api_id', models.CharField(max_length=100)),
                ('api_functions', models.CharField(max_length=100)),
                ('sour_s', models.CharField(max_length=30)),
                ('dest_s', models.CharField(max_length=30)),
            ],
        ),
    ]
|
#!/usr/bin/env python3
"""
Little game for the Scroll HAT Mini.
"""
import math
import scrollphathd
import time
from gpiozero import Button
from random import random, shuffle
# ------------------------------------------------------------
# Startup banner with the game's controls.
print("""Scroll HAT Mini: rockfall.py
Dodge the falling rocks using the X and Y buttons on the HAT.
The A and B buttons make it easier or harder.
Press Ctrl+C to exit!
""")
# Dim the LED matrix to a comfortable level.
scrollphathd.set_brightness(0.4)
class Rockfall():
    """
    Main game class: rocks scroll across the Scroll HAT Mini display and
    the player (a bright pixel near the right edge) dodges them.
    """
    # The extents of the screen
    WIDTH = scrollphathd.DISPLAY_WIDTH
    HEIGHT = scrollphathd.DISPLAY_HEIGHT
    # The starting wait time between ticks
    WAIT = 0.5
    # How much to change the wait time by each tick
    WAIT_DIFF = 0.001
    # The min and max population fractions, and how much the buttons change
    # it by
    MIN_FRAC = 1.0 / HEIGHT
    MAX_FRAC = 4.0 / HEIGHT
    FRAC_STEP = 1.0 / HEIGHT
    def __init__(self):
        # Set up the buttons (BCM pin numbers of the HAT's A/B/X/Y buttons)
        self._button_A = Button(5)
        self._button_B = Button(6)
        self._button_X = Button(16)
        self._button_Y = Button(24)
        # How easy or hard (fraction of rows seeded with a rock per tick)
        self._frac_new = self.MIN_FRAC
        # The wait time between ticks
        self._wait = self.WAIT
        # The game area as a boolean bitmap. A True means there is a rock in an
        # entry. This is stored as a tuple of rows.
        self._arena = tuple([[False] * (self.WIDTH)
                             for i in range(self.HEIGHT)])
        # The player's position, at the end of the display
        self._hpos = self.HEIGHT // 2
        self._wpos = self.WIDTH - 3
    # ------------------------------------------------------------
    def init(self):
        """
        Reset the state to start of game.
        """
        # Clear every rock from the arena.
        for row in self._arena:
            for i in range(len(row)):
                row[i] = False
        # Player back to the middle, speed back to the starting wait.
        self._hpos = self.HEIGHT // 2
        self._wait = self.WAIT
    def step(self):
        """Advance one tick: shift every row one cell towards the player
        and randomly seed new rocks into column 0."""
        # Scroll the current values along by one
        for row in self._arena:
            row[1:] = row[:-1]
            row[0] = False
        # This fraction new, an interval between the minimum and the
        # difficulty value
        frac_new = max(self.MIN_FRAC, self._frac_new * random())
        # Add new ones at random indices
        indices = list(range(len(self._arena)))
        shuffle(indices)
        end = round(min(1, max(0, frac_new)) * len(indices))
        for index in indices[:int(end)]:
            self._arena[index][0] = True
    def explode(self):
        """Draw an expanding, sparkling ring explosion centred on the
        player's position."""
        for r in range(int(self.WIDTH * 1.25)):
            for s in range(r):
                # Number of points needed to draw a ring of radius s.
                c = 1 + int(math.pi * 2 * r)
                for a in range(c):
                    o = 2 * math.pi / c * a
                    x = int(math.sin(o) * s + self.WIDTH - 1)
                    y = int(math.cos(o) * s + self._hpos)
                    # Brightness grows with the ring fraction, with jitter.
                    v = s / r * (0.2 + 0.8 * random())
                    # NOTE(review): the y bound uses <= HEIGHT, unlike the
                    # strict < used for x -- possible off-by-one; confirm
                    # scrollphathd.pixel tolerates y == HEIGHT.
                    if 0 <= x < self.WIDTH and 0 <= y <= self.HEIGHT:
                        scrollphathd.pixel(x, y, v)
            scrollphathd.show()
            time.sleep(0.01)
    def run(self):
        """
        The main running loop.
        """
        # State variables
        self.init()
        last = time.time()
        start = last
        # Set things to the starting values
        # Loop forever now
        while True:
            now = time.time()
            since = now - last
            # Move the things down (and speed the game up slightly)
            if since > self._wait:
                self.step()
                last = now
                self._wait = max(0.0, self._wait - self.WAIT_DIFF)
            # Make it easier or harder
            if self._button_A.is_active:
                self._frac_new = max(self.MIN_FRAC,
                                     self._frac_new - self.FRAC_STEP)
            if self._button_B.is_active:
                self._frac_new = min(self.MAX_FRAC,
                                     self._frac_new + self.FRAC_STEP)
            # Move the player
            if self._button_X.is_active and self._hpos > 0:
                self._hpos -= 1
            if self._button_Y.is_active and self._hpos < self.HEIGHT-1:
                self._hpos += 1
            # Draw the field
            for (y, row) in enumerate(self._arena):
                for (x, e) in enumerate(row):
                    v = 0.5 if e else 0.0
                    scrollphathd.pixel(x, y, v)
            # Draw the player
            scrollphathd.pixel(self._wpos, self._hpos, 1.0)
            # And display it all
            scrollphathd.show()
            # Collision?
            if self._arena[self._hpos][self._wpos]:
                # Print stats
                print("You survived for %0.1f seconds" % (now - start))
                print()
                # Draw the explosion
                self.explode()
                # And reset things
                self.init()
                start = now
                last = now
            # Wait for a bit before moving on
            time.sleep(0.05)
# ----------------------------------------------------------------------
# Script entry point: run the game loop forever (exit with Ctrl+C).
if __name__ == "__main__":
    game = Rockfall()
    game.run()
|
#
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util, compbio
import pandas as pd
# Default params
# Input: SAM alignments produced by the ill_a_align step.
inp_dir = _config.OUT_PLACE + 'ill_a_align/'
# This script's name, used to namespace its output directory.
NAME = util.get_fn(__file__)
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)
# SRA run table describing the sequencing libraries.
exp_design = pd.read_csv(_config.DATA_DIR + f'Badran2015_SraRunTable.csv')
ref_fn = _config.DATA_DIR + f'SP055-rpoZ-cMyc-Cry1Ac1-d123.fa'
# Reference sequence: assumes a single-record, single-line FASTA (the
# sequence is line 2). NOTE(review): file handle is never closed.
ref = open(ref_fn).readlines()[1].strip()
##
#
##
def add_mutations(mut_dd, n_d, n_d2, sam):
    """Walk one SAM record's CIGAR and accumulate mutation/coverage stats.

    Mutates its accumulator arguments in place:
      mut_dd -- defaultdict(list): one entry per observed mismatch
                (position, ref nt, observed nt, read name).
      n_d    -- defaultdict(int): per-reference-position read coverage.
      n_d2   -- defaultdict(list): per-read start/end/length bookkeeping.
      sam    -- dict with keys 'read_nm', '1-based pos', 'cigar', 'seq'.

    Uses the module-level `ref` sequence and parse_cigar().
    NOTE(review): a leading soft-clip ('S') advances read_idx but the
    1-based pos already points at the first aligned base -- confirm the
    aligner's output keeps these coordinates consistent.
    """
    calls = parse_cigar(sam['cigar'])
    # Convert SAM's 1-based mapping position to a 0-based index into ref.
    read_start_idx = sam['1-based pos'] - 1
    read = sam['seq']
    # CIGAR ops that consume a query base / a reference base, per SAM spec.
    query_consumed = list('MIS=X')
    ref_consumed = list('MDN=X')
    ref_idx = 0
    read_idx = 0
    for idx in range(len(calls)):
        op = calls[idx]
        curr_idx_in_ref = read_start_idx + ref_idx
        ref_nt = ref[curr_idx_in_ref]
        obs_nt = read[read_idx]
        # Record mismatches
        if bool(op == 'X') or bool(op == 'M' and ref_nt != obs_nt):
            mut_dd['Position (0 based)'].append(curr_idx_in_ref)
            mut_dd['Reference nt'].append(ref_nt)
            mut_dd['Mutated nt'].append(obs_nt)
            mut_dd['Read name'].append(sam['read_nm'])
        # Record number of reads aligning to each ref position
        n_d[curr_idx_in_ref] += 1
        # Advance indices
        if op in query_consumed:
            read_idx += 1
        if op in ref_consumed:
            ref_idx += 1
    # Record start and end of reads
    n_d2['Read name'].append(sam['read_nm'])
    n_d2['Read start idx'].append(read_start_idx)
    # Reference footprint = number of ops that consumed a reference base.
    ref_len = sum([bool(s in ref_consumed) for s in calls])
    n_d2['Read end idx'].append(read_start_idx + ref_len)
    n_d2['Read length'].append(ref_len)
    return
def parse_cigar(cigar):
    '''
    Expand a CIGAR string into one op character per consumed base.

    e.g. 148M1X1M -> ['M', 'M', ..., 'X', 'M']
    '''
    ops = set('MIDNSHP=X')
    calls = []
    digits = ''
    for ch in cigar:
        if ch in ops:
            # An op character terminates its run-length prefix.
            calls.extend([ch] * int(digits))
            digits = ''
        else:
            digits += ch
    return calls
##
# Primary
##
def call_mutations(nm):
    """Call mutations from the SAM file for library *nm*.

    Streams `{inp_dir}{nm}.sam`, skips headers and records not mapped to
    the expected target (or unmapped '*' CIGARs), and writes three CSVs
    to out_dir: per-mismatch calls, per-position read counts, and
    per-read start/end indices.
    """
    inp_fn = inp_dir + f'{nm}.sam'
    # Accumulators filled in-place by add_mutations().
    mut_dd = defaultdict(list)
    n_d = defaultdict(lambda: 0)
    n_d2 = defaultdict(list)
    timer = util.Timer(total = util.line_count(inp_fn))
    with open(inp_fn) as f:
        for i, line in enumerate(f):
            timer.update()
            # Skip SAM header lines.
            if line[0] == '@':
                continue
            # Pull only the SAM columns this analysis needs.
            w = line.split()
            sam = {
                'read_nm': w[0],
                'target': w[2],
                '1-based pos': int(w[3]),
                'cigar': w[5],
                'seq': w[9],
            }
            # Only alignments to the expected reference are considered.
            if sam['target'] != 'SP055-rpoZ-cMyc-Cry1Ac1-d123':
                continue
            # '*' means no alignment / no CIGAR available.
            if sam['cigar'] == '*':
                continue
            # Call mutation and Track total readcount per position
            add_mutations(mut_dd, n_d, n_d2, sam)
    mut_df = pd.DataFrame(mut_dd)
    mut_df.to_csv(out_dir + f'{nm}.csv')
    # Densify the coverage counts over every reference position.
    n_dd = defaultdict(list)
    for pos in range(len(ref)):
        n_dd['Position (0 based)'].append(pos)
        n_dd['Read count'].append(n_d[pos])
    n_df = pd.DataFrame(n_dd)
    n_df.to_csv(out_dir + f'{nm}_readcounts.csv')
    ndf2 = pd.DataFrame(n_d2)
    ndf2.to_csv(out_dir + f'{nm}_read_idxs.csv')
    '''
    Important note on ndf2:
    - Many paired reads appear to have sequenced the same molecule. Mutations observed on paired reads are combined; overlapping paired reads are also expected to be combined.
    This is done in ill_b2_merge_n_paired_reads.py
    '''
    return
##
# qsub
##
def gen_qsubs():
    """Generate one qsub shell script per Illumina MiSeq library plus a
    master `_commands.sh` that submits them all."""
    # Generate qsub shell scripts and commands for easy parallelization
    print('Generating qsub scripts...')
    qsubs_dir = _config.QSUBS_DIR + NAME + '/'
    util.ensure_dir_exists(qsubs_dir)
    qsub_commands = []
    # Only the Illumina MiSeq libraries from the SRA run table are processed.
    ill_nms = exp_design[exp_design['Instrument'] == 'Illumina MiSeq']['Library Name']
    num_scripts = 0
    for nm in ill_nms:
        # Each script re-invokes this file with the library name as argv[1].
        command = f'python {NAME}.py {nm}'
        script_id = NAME.split('_')[0]
        # Write shell scripts
        sh_fn = qsubs_dir + f'q_{script_id}_{nm}.sh'
        with open(sh_fn, 'w') as f:
            f.write(f'#!/bin/bash\n{command}\n')
        num_scripts += 1
        # Write qsub commands
        qsub_commands.append(f'qsub -V -P regevlab -l h_rt=4:00:00 -wd {_config.SRC_DIR} {sh_fn} &')
    # Save commands
    commands_fn = qsubs_dir + '_commands.sh'
    with open(commands_fn, 'w') as f:
        f.write('\n'.join(qsub_commands))
    subprocess.check_output(f'chmod +x {commands_fn}', shell = True)
    print(f'Wrote {num_scripts} shell scripts to {qsubs_dir}')
    return
##
# Main
##
@util.time_dec
def main(args):
    """Entry point for a single library: args is [library_name]."""
    print(NAME)
    # Function calls
    [nm] = args
    call_mutations(nm)
    return
if __name__ == '__main__':
    # With an argument: process that one library. Without: emit the qsub
    # scripts that fan this script out over all libraries.
    if len(sys.argv) > 1:
        main(sys.argv[1:])
    else:
        gen_qsubs()
|
#!/usr/bin/env python3
class LogicGate:
    """A named gate in a logic circuit.

    Subclasses are expected to provide perform_gate_logic(), which
    computes the gate's output value.
    """

    def __init__(self, lbl):
        self.label = lbl
        self.output = None  # cached value from the most recent evaluation

    def get_label(self):
        """Return the gate's identifying label."""
        return self.label

    def get_output(self):
        """Compute the gate's output, cache it, and return it."""
        result = self.perform_gate_logic()
        self.output = result
        return result
class BinaryGate(LogicGate):
    """A logic gate with two input pins; prompts on stdin for any pin value.

    Fixes two defects in the original: the base class was misspelled
    'LogicDate' (a NameError at class-definition time), and the base
    initializer was never called, so self.label/self.output were missing
    and get_label() would fail.
    """

    def __init__(self, lbl):
        super().__init__(lbl)  # sets self.label and self.output
        self.pin_a = None
        self.pin_b = None

    def get_pin_a(self):
        """Prompt the user for pin a's value and return it as an int."""
        return int(input(f"Enter pin a input for gate {self.get_label()}: "))

    def get_pin_b(self):
        """Prompt the user for pin b's value and return it as an int."""
        return int(input(f"Enter pin b input for gate {self.get_label()}: "))
|
import cv2
import numpy as np
import insightface
np.random.seed(123) # for reproducibility
def get_groundtruth(dataset):
    """Parse an IJB-C protocol CSV into {frame_name: [x, y, w, h]}.

    The header row is skipped. Each data row is expected to contain
    template_id, subject_id, frame_name, <ignored>, x, y, w, h; the
    bounding-box values are kept as strings. If a frame name appears in
    several rows, the LAST row wins (this preserves the original
    behavior, whose dead empty-list initialization suggested an append
    that never happened -- verify last-wins is intended downstream).
    """
    frame_map = {}
    with open(dataset, 'r') as csvreader:
        all_data = csvreader.readlines()
    for line in all_data[1:]:
        data = line.strip().split(',')
        frame_name = data[2]
        # Column 3 (sighting id) is skipped; exactly four values expected after it.
        x, y, w, h = data[4:]
        frame_map[frame_name] = [x, y, w, h]
    return frame_map
def extract_mxnet_features(model, img_filepath):
    """Return the face embedding *model* computes for the image at *img_filepath*.

    Logs the path when the model produces no embedding.
    NOTE(review): cv2.imread returns None (it does not raise) for
    unreadable paths, so model.get_feature may receive None -- confirm
    callers pass valid image paths.
    """
    img = cv2.imread(img_filepath)
    embeddings = model.get_feature(img)
    if embeddings is None:
        print(img_filepath)
    return embeddings
def extract_facial_features_arcface_resnet_img():
    """Extract ArcFace embeddings for the IJB-C gallery images (G1+G2).

    For each image listed in the two gallery protocol CSVs, crops the
    annotated face box, resizes to 112x112, embeds it with
    arcface_r100_v1 (CPU, ctx_id=-1), and appends one
    'frame_id,f1,f2,...' line to the output CSV.
    """
    path_to_img = '/home/datasets/images/IJB/IJB-C/images/'
    metadata_path = {0: '/home/datasets/images/IJB/IJB-C/protocols/ijbc_1N_gallery_G1.csv',
                     1: '/home/datasets/images/IJB/IJB-C/protocols/ijbc_1N_gallery_G2.csv'}
    features_file = 'face_features_arcface_resnet_50_ijbc_images_train.csv'
    model = insightface.model_zoo.get_model('arcface_r100_v1')
    model.prepare(ctx_id=-1)
    input_size = (112, 112)
    # with open(features_file, 'w', encoding='utf-8') as fw:
    with open(features_file, 'w', encoding='utf-8') as fw:
        # LOOP over 2 files
        for i in range(2):
            img_data = get_groundtruth(metadata_path.get(i))
            for frame_id, frame_data in img_data.items():
                print(frame_id)
                x, y, w, h = frame_data
                # NOTE(review): cv2.imread returns None instead of raising,
                # so this try/except cannot catch a missing file; the
                # failure would surface as a TypeError at the slice below.
                try:
                    draw = cv2.imread(path_to_img + frame_id)
                except Exception as e:
                    print(e)
                    continue
                # Bounding-box values are strings from the CSV.
                y = int(y)
                x = int(x)
                w = int(w)
                h = int(h)
                face = draw[y:y + h, x:x + w]
                image = cv2.resize(face, input_size)
                emb = model.get_embedding(image)[0]
                # One CSV row: frame id followed by the embedding values.
                feature_str = (',').join([str(f) for f in emb])
                res = (',').join([frame_id, feature_str])
                fw.write(res + '\n')
    print("SUCCESS!!!!!")
def extract_facial_features_arcface_resnet_frames():
    """Extract ArcFace embeddings for the IJB-C mixed probe frames.

    Same pipeline as the gallery variant: crop each annotated face box,
    resize to 112x112, embed with arcface_r100_v1 (CPU), and write one
    'frame_id,f1,f2,...' line per frame to the output CSV.
    """
    path_to_frames = '/home/datasets/images/IJB/IJB-C/images/'
    metadata_path = '/home/datasets/images/IJB/IJB-C/protocols/ijbc_1N_probe_mixed.csv'
    features_file = 'face_features_arcface_resnet_100_ijbc_frames.csv'
    frames_data = get_groundtruth(metadata_path)
    model = insightface.model_zoo.get_model('arcface_r100_v1')
    model.prepare(ctx_id=-1)
    input_size = (112, 112)
    # with open(features_file, 'w', encoding='utf-8') as fw:
    with open(features_file, 'w') as fw:
        for frame_id, frame_data in frames_data.items():
            print(frame_id)
            x, y, w, h = frame_data
            # NOTE(review): cv2.imread returns None instead of raising, so
            # this try/except cannot catch a missing file (the failure
            # would be a TypeError at the slice below).
            try:
                draw = cv2.imread(path_to_frames + frame_id)
            except Exception as e:
                print(e)
                continue
            # Bounding-box values are strings from the CSV.
            y = int(y)
            x = int(x)
            w = int(w)
            h = int(h)
            face = draw[y:y + h, x:x + w]
            image = cv2.resize(face, input_size)
            emb = model.get_embedding(image)[0]
            # One CSV row: frame id followed by the embedding values.
            feature_str = (',').join([str(f) for f in emb])
            res = (',').join([frame_id, feature_str])
            fw.write(res + '\n')
    print("SUCCESS!!!!!")
if __name__ == '__main__':
    # Entry point: only the frame-level (probe) extraction is enabled.
    # extract_facial_features_mobilenet_img()
    # extract_facial_features_arcface_resnet_img()
    extract_facial_features_arcface_resnet_frames()
|
import pymongo
class Database(object):
    """Thin static wrapper around a single shared MongoDB database handle.

    Call initialize() once at startup; all other helpers operate on the
    class-level DATABASE handle.
    """
    # Connection string for the local MongoDB server.
    URI = "mongodb://127.0.0.1:27017"
    # Set by initialize(); None until then.
    DATABASE = None
    @staticmethod
    def initialize(db_name):
        """Connect to MongoDB at URI and select database *db_name*."""
        client = pymongo.MongoClient(Database.URI)
        Database.DATABASE = client[db_name]
    @staticmethod
    def insert(collection_name,data):
        """Insert *data* into the named collection.

        NOTE(review): Collection.insert() is deprecated and removed in
        PyMongo 4 -- confirm the pinned PyMongo version or migrate to
        insert_one()/insert_many().
        """
        Database.DATABASE[collection_name].insert(data)
    @staticmethod
    def find(collection_name,data):
        """Return a cursor over documents matching the *data* filter."""
        return Database.DATABASE[collection_name].find(data)
    @staticmethod
    def find_one(collection_name,data):
        """Return the first document matching the *data* filter, or None."""
        return Database.DATABASE[collection_name].find_one(data)
|
'''
峰值元素是指其值大于左右相邻值的元素。
给定一个输入数组 nums,其中 nums[i] ≠ nums[i+1],找到峰值元素并返回其索引。
数组可能包含多个峰值,在这种情况下,返回任何一个峰值所在位置即可。
你可以假设 nums[-1] = nums[n] = -∞。
示例 1:
输入: nums = [1,2,3,1]
输出: 2
解释: 3 是峰值元素,你的函数应该返回其索引 2。
示例 2:
输入: nums = [1,2,1,3,5,6,4]
输出: 1 或 5
解释: 你的函数可以返回索引 1,其峰值元素为 2;
或者返回索引 5, 其峰值元素为 6。
'''
class Solution:
    def findPeakElement(self, nums) -> int:
        """Return the index of any peak element in *nums*.

        A peak is strictly greater than its neighbours; nums[-1] and
        nums[len(nums)] are treated as -infinity, and adjacent elements
        are guaranteed unequal. The original method body was missing
        entirely; this implements the standard binary search: always move
        towards the rising side, which must contain a peak.
        O(log n) time, O(1) space.
        """
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < nums[mid + 1]:
                lo = mid + 1  # peak lies strictly to the right of mid
            else:
                hi = mid      # mid itself could be the peak
        return lo
|
import numpy as np
import cv2
import imutils
from collections import deque
# HSV bounds for the "orange" object being tracked.
orange_lower=(5,134,125)
orange_upper=(255,255,255)
# Trail of past centroids; unbounded, so the drawn trail never fades.
pts=deque()
cap=cv2.VideoCapture(0)
while True:
    ret,frame=cap.read()
    frame=imutils.resize(frame,width=600)
    # NOTE(review): `blur` is computed but never used -- the HSV conversion
    # below reads `frame`, not `blur`; presumably the blur was meant to
    # feed cvtColor. Confirm before changing.
    blur=cv2.GaussianBlur(frame,(11,11),0)
    hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    # Threshold to the target colour, then clean up the mask.
    mask=cv2.inRange(hsv,orange_lower,orange_upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cv2.imshow("mask",mask)
    cnts=cv2.findContours(mask.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    cnts=imutils.grab_contours(cnts)
    center=None
    if(len(cnts)>0):
        # Track the largest contour only.
        c=max(cnts,key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # Centroid from image moments.
        # NOTE(review): raises ZeroDivisionError when M["m00"] == 0
        # (degenerate contour) -- confirm this cannot occur here.
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        #print(x,y,M,center)
        if radius > 10:
            # Enclosing circle plus a dot at the centroid.
            cv2.circle(frame,(int(x),int(y)),int(radius),(0,255,255),2)
            cv2.circle(frame,center,5,(0,0,255),-1)
    pts.appendleft(center)
    # Draw the motion trail, skipping frames where nothing was detected.
    for i in range(1,len(pts)):
        if(pts[i-1] is None or pts[i] is None):
            continue
        cv2.line(frame,pts[i-1],pts[i],(0,0,255),3)
    cv2.imshow("frame",frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF== ord('q'):
        break
cap.release()
#out.release()
cv2.destroyAllWindows()
|
#!/usr/bin/env python
# coding: utf-8
# Hyperparameter search for Bayesian Gaussian mixture models fit to ABCD
# imaging change scores, run separately per scanner manufacturer and atlas.
import enlighten
import numpy as np
import pandas as pd
import seaborn as sns
import pingouin as pg
import matplotlib.pyplot as plt
from os.path import join, exists
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV, train_test_split
from sklearn.mixture import BayesianGaussianMixture
from utils import residualize
sns.set(style='whitegrid', context='paper')
plt.rcParams["font.family"] = "monospace"
plt.rcParams['font.monospace'] = 'Courier New'
# Project layout: data, figures and outputs all live under PROJ_DIR.
PROJ_DIR = "/Volumes/projects_herting/LABDOCS/Personnel/Katie/deltaABCD_vbgmm/"
DATA_DIR = "data/"
FIGS_DIR = "figures/"
OUTP_DIR = "output/"
df = pd.read_pickle(join(PROJ_DIR, DATA_DIR, "data_qcd.pkl"))
#imputed_dcg = pd.read_csv(join(join(PROJ_DIR, DATA_DIR, "destrieux+gordon_MICEimputed_data.csv")),
# index_col='subjectkey',
# header=0)
# Group participant indices by scanner manufacturer.
scanners = [
    "SIEMENS",
    "GE MEDICAL SYSTEMS",
    "Philips Medical Systems"
]
scanner_ppts = {}
for scanner in scanners:
    scanner_ppts[scanner] = df[df['mri_info_manufacturer.baseline_year_1_arm_1'] == scanner].index
# Motion/quality covariates per imaging modality; excluded from the feature
# set below and later residualized out of the matching modality's columns.
noisy_modalities = {
    'smri': [
        "smri_vol_cdk_total"
    ],
    'dmri': [
        'dmri_rsi_meanmotion',
        'dmri_rsi_meantrans',
        'dmri_rsi_meanrot'
    ],
    'rsfmri': [
        'rsfmri_var_ntpoints',
        'rsfmri_var_meanmotion',
        'rsfmri_var_meantrans',
        'rsfmri_var_maxtrans',
        'rsfmri_var_meanrot',
        'rsfmri_var_maxrot'
    ]
}
timepoints = [
    'baseline_year_1_arm_1',
    '2_year_follow_up_y_arm_1',
    'change_score'
]
# Keep only imaging change-score columns, minus the covariate columns.
imaging = df.filter(regex='.*mri.*change_score')
for cov in [item for sublist in noisy_modalities.values() for item in sublist]:
    #print(cov)
    for tp in timepoints:
        if f'{cov}.{tp}' in imaging.columns:
            imaging = imaging.drop(f'{cov}.{tp}', axis=1)
imaging_cols = list(imaging.columns)
# Family id is carried along only to de-duplicate siblings later.
imaging_cols.append('rel_family_id.baseline_year_1_arm_1')
atlases = {'desikankillany': df,
           #'destrieux+gordon': imputed_dcg
          }
# Search space: very large weight-concentration priors with a
# Dirichlet-distribution prior, diagonal or spherical covariances.
n_components = 25
parameter_grid = {
    'weight_concentration_prior_type': [
        #'dirichlet_process',
        'dirichlet_distribution'
    ],
    'weight_concentration_prior': [
        #10**-3,
        #10**-2,
        #10**-1,
        #10**0,
        10**7,
        10**8,
        10**9
    ],
    #'n_components': list(range(2,10)),
    'covariance_type': [
        'diag', 'spherical'
    ]
}
estimator = BayesianGaussianMixture(
    n_components=n_components,
    max_iter=1000
)
# hyper parameter tuning
iterations = 2
manager = enlighten.get_manager()
tocks = manager.counter(total=iterations * len(atlases.keys()) * len(scanners),
                        desc='Number of Iterations',
                        unit='iter')
max_comp = {}
for atlas in atlases.keys():
    for scanner in scanners:
        big_results = pd.DataFrame()
        best_params = pd.DataFrame()
        n_comps = 0
        ppts = scanner_ppts[scanner]
        data = atlases[atlas]
        data = data.loc[ppts][imaging_cols]
        for i in range(0,iterations):
            # Keep one randomly chosen sibling per family so related
            # participants don't appear together in a fit.
            all_subj = data.index.to_list()
            for id_ in data['rel_family_id.baseline_year_1_arm_1']:
                siblings = data[data['rel_family_id.baseline_year_1_arm_1'] == id_].index.to_list()
                if len(siblings) > 1:
                    keep = np.random.choice(siblings)
                    siblings.remove(keep)
                    all_subj = list(set(all_subj) - set(siblings))
                else:
                    pass
            # NOTE(review): ``data`` is overwritten here, so each iteration
            # filters the already-filtered frame — confirm this is intended.
            data = data.loc[all_subj]
            temp_data = data.drop('rel_family_id.baseline_year_1_arm_1', axis=1).dropna()
            print(scanner, 'ppts ', len(temp_data.index))
            # Residualize each modality's columns against its motion covariates.
            resid_temp = pd.DataFrame()
            for modality in noisy_modalities.keys():
                cov_df = pd.DataFrame()
                mini_dset = temp_data.filter(like=modality)
                subj = temp_data.index
                img_cols = mini_dset.columns
                covs = []
                for covariate in noisy_modalities[modality]:
                    smol = df.loc[subj]
                    covs.append(f'{covariate}.baseline_year_1_arm_1')
                    covs.append(f'{covariate}.2_year_follow_up_y_arm_1')
                    cov_df = pd.concat([cov_df, smol[f'{covariate}.baseline_year_1_arm_1']], axis=1)
                    cov_df = pd.concat([cov_df, smol[f'{covariate}.2_year_follow_up_y_arm_1']], axis=1)
                #print(img_cols, covs)
                mini_dset = pd.concat([mini_dset, cov_df], axis=1)
                #print(mini_dset.describe())
                temp2 = residualize(mini_dset[img_cols], confounds=mini_dset[covs])
                resid_temp = pd.concat([resid_temp, temp2], axis=1)
            # need train test split
            search = HalvingGridSearchCV(estimator,
                                         parameter_grid,
                                         factor=4,
                                         min_resources=200,
                                         cv=5,
                                         verbose=0,
                                         n_jobs=-1).fit(resid_temp)
            # Record the winning model's parameters plus per-component counts.
            parameters = pd.Series(search.best_estimator_.get_params(), name=i)
            parameters['test_score'] = search.best_estimator_.score(resid_temp)
            labels = search.best_estimator_.predict(resid_temp)
            for k in range(0, parameters['n_components']):
                parameters.loc[k] = np.sum(labels == k)
            if np.max(labels) > n_comps:
                n_comps = np.max(labels)
            # How many mixture components actually received members.
            nonzero_components = np.sum(parameters.loc[list(range(0, parameters['n_components']))] != 0)
            parameters.loc['n_components_nonzero'] = nonzero_components
            #results = pd.DataFrame.from_dict(search.cv_results_)
            #results["params_str"] = results.params.apply(str)
            parameters.loc["converged"] = search.best_estimator_.converged_
            #big_results = pd.concat([big_results, results], axis=0)
            best_params = pd.concat([best_params, parameters], axis=1)
            tocks.update()
        # Prefer the project directory; fall back to the local output dir.
        # NOTE(review): the bare except also swallows unrelated errors.
        try:
            #big_results.to_csv(join(PROJ_DIR,
            # OUTP_DIR,
            # f'bgmm_{atlas}-{scanner}_cv-results.csv'))
            best_params.T.to_csv(join(PROJ_DIR,
                                      OUTP_DIR,
                                      f'bgmm_{atlas}-{scanner}_best-models.csv'))
        except:
            #big_results.to_csv(join('..',
            # OUTP_DIR,
            # f'bgmm_{atlas}-{scanner}_cv-results.csv'))
            best_params.T.to_csv(join(OUTP_DIR,
                                      f'bgmm_{atlas}-{scanner}_best-models.csv'))
|
import argparse
import os
import pickle
import sys
from argparse import ArgumentParser
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import custom_datasets
import models.custom
import models.mobilenet
import models.mobilenetv2
import models.resnet_cifar
import models.wide_resnet
import mmcv
from mmcv.runner import load_checkpoint, parallel_test, obj_from_dict
def eval_mmcv_str(obj_type):
    """Resolve a dotted-path string to the module or attribute it names.

    If *obj_type* names an already-imported module, that module is
    returned; otherwise the final dotted component is fetched as an
    attribute of its (already-imported) parent module.
    """
    if obj_type in sys.modules:
        return sys.modules[obj_type]
    # Assume the last dotted component is a function/member name.
    module_path, _, attr_name = obj_type.rpartition('.')
    return getattr(sys.modules[module_path], attr_name)
def deep_recursive_obj_from_dict(info):
    """Initialize an object from a dict, recursing into nested object dicts.
    The dict must contain the key "type", which indicates the object type, it
    can be either a string or type, such as "list" or ``list``. Remaining
    fields are treated as the arguments for constructing the object; any
    argument that is itself an object dict — or a list/tuple containing
    object dicts — is built first, depth-first.
    Args:
        info (dict): Object type and constructor arguments.
    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and 'type' in info
    # TODO: This does not support object dicts nested in non-object dicts.
    args = info.copy()
    obj_type = args.pop('type')
    if mmcv.is_str(obj_type):
        obj_type = eval_mmcv_str(obj_type)
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(
            type(obj_type)))
    evaluated_args = {}
    for argname, argval in args.items():
        # Debug trace of each argument as it is evaluated.
        print(argname, type(argval))
        if isinstance(argval, dict) and 'type' in argval:
            evaluated_args[argname] = deep_recursive_obj_from_dict(argval)
        elif type(argval) == list or type(argval) == tuple:
            # Transform each dict in the list, else simply append.
            transformed_list = []
            for elem in argval:
                if isinstance(elem, dict):
                    transformed_list.append(deep_recursive_obj_from_dict(elem))
                else:
                    transformed_list.append(elem)
            evaluated_args[argname] = type(argval)(transformed_list)
        else:
            evaluated_args[argname] = argval
    # Debug trace of the resolved type before instantiation.
    print(obj_type)
    return obj_type(**evaluated_args)
def parse_args():
    """Define and evaluate the command line interface for CIFAR10 testing."""
    parser = argparse.ArgumentParser(description='Test CIFAR10 models')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint',
                        help='checkpoint file or list of checkpoint dirs')
    parser.add_argument('--gpus', default=1, type=int,
                        help='GPU number used for testing')
    parser.add_argument('--proc_per_gpu', default=1, type=int,
                        help='Number of processes per GPU')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--show', action='store_true', help='show results')
    return parser.parse_args()
def _data_func(data, device_id):
    """Wrap one sample as the kwargs dict fed to the model by parallel_test.

    Takes the first element of *data*, adds a batch dimension, and moves
    it onto GPU *device_id*.
    """
    data = data[0].unsqueeze(0).cuda(device_id)
    return dict(x=data)
def single_test(model, data_loader, show=False):
    """Run *model* over *data_loader* in eval mode on a single process.

    Returns a list of (output, label) pairs, one per batch.
    NOTE(review): the *show* flag is accepted but never used here.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        data, label = data
        with torch.no_grad():
            result = model(**dict(x=data))
        results.append((result, label))
        # Tick the progress bar once per sample in the batch.
        batch_size = data.size(0)
        for _ in range(batch_size):
            prog_bar.update()
    return results
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (N, C) tensor of per-class scores.
        target: (N,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        A list with one single-element tensor per k, each holding the
        percentage of samples whose target appears in the top-k scores.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # (maxk, N) predicted class indices, best first.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Bug fix: ``correct[:k]`` is a non-contiguous slice of a
            # transposed tensor, so ``view(-1)`` raises for k > 1 on
            # modern PyTorch; ``reshape(-1)`` handles both cases.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def main():
    """Evaluate one or more checkpoints on the validation set and ensemble them.

    For each checkpoint: build the model, run inference (or reload cached
    pickled outputs), print top-1 accuracy; finally average the per-model
    outputs and report the naive-ensemble accuracy.
    """
    args = parse_args()

    # A comma-separated list of checkpoint directories enables ensembling;
    # each directory carries its own copy of the config file.
    checkpoints = args.checkpoint.split(',')
    if os.path.isdir(checkpoints[0]):
        configs = [mmcv.Config.fromfile(checkpoints[i] + '/' + args.config)
                   for i in range(len(checkpoints))]
        cfg = configs[0]
    else:
        cfg = mmcv.Config.fromfile(args.config)
        configs = [cfg]
        checkpoint = args.checkpoint

    val_dataset = deep_recursive_obj_from_dict(cfg.data.val)
    per_model_outputs = []
    for i, (checkpoint, curr_cfg) in enumerate(zip(checkpoints,
                                                   configs)):
        # build model
        model_cls = eval_mmcv_str(curr_cfg.model['type'])
        model_args = curr_cfg.model
        model_args.pop('type')

        # Need higher ulimit for data loaders.
        import resource
        rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (16384, rlimit[1]))

        if os.path.isdir(checkpoints[0]):
            checkpoint_path = checkpoint + '/latest.pth'
            pkl_path = checkpoint + '/' + args.out
        else:
            checkpoint_path, pkl_path = checkpoint, args.out

        # Run model if results don't already exist.
        if os.path.exists(pkl_path):
            with open(pkl_path, 'rb') as f:
                outputs = pickle.load(f)
            targets = torch.LongTensor([val_dataset[i][1]
                                        for i in range(len(val_dataset))])
        elif args.gpus == 1:
            num_workers = curr_cfg.data_workers * len(cfg.gpus)
            val_loader = DataLoader(
                val_dataset,
                batch_size=1,
                shuffle=False,
                # sampler=val_sampler,
                num_workers=num_workers)
            # Build and run model.
            # Bug fix: the original wrapped an undefined ``model`` in
            # DataParallel and loaded the checkpoint into a throwaway
            # instance; build first, then load weights, then parallelize.
            model = model_cls(**model_args)
            load_checkpoint(model, checkpoint_path)
            model = DataParallel(model, device_ids=range(args.gpus)).cuda()
            outputs = single_test(model, val_loader, args.show)
            targets = torch.LongTensor([x[1] for x in outputs]).cuda()
            outputs = torch.cat([x[0] for x in outputs])
            with open(pkl_path, 'wb') as f:
                pickle.dump(outputs, f)
        else:
            # NOTE: Parallel inference requires the data to be explicitly swapped to
            # cpu (add a .cpu() call to the result in parallel_test.py).
            outputs = parallel_test(
                model_cls,
                model_args,
                checkpoint_path,
                val_dataset,
                _data_func,
                range(args.gpus),
                workers_per_gpu=args.proc_per_gpu)
            targets = torch.LongTensor([val_dataset[i][1]
                                        for i in range(len(val_dataset))])
            outputs = torch.cat(outputs).cpu()
            with open(pkl_path, 'wb') as f:
                pickle.dump(outputs, f)
        print(checkpoint, accuracy(outputs, targets, topk=(1,)))
        per_model_outputs.append(outputs)

    # Naive averaging.
    avg = torch.mean(torch.stack(per_model_outputs), 0)
    print("Naive Averaging", accuracy(avg, targets, topk=(1,)))
    with open('avg.pkl', 'wb') as f:
        pickle.dump(avg, f)


if __name__ == '__main__':
    main()
|
__all__ = ()
from re import compile as re_compile, escape as re_escape
from hata.ext.slash import InteractionResponse
from ...bots import SLASH_CLIENT
from .builders import build_components, build_content, process_entries
from .constants import (
CUSTOM_ID_CLOSE, CUSTOM_ID_PAGE_BASE, CUSTOM_ID_PAGE_NEXT_DISABLED, CUSTOM_ID_PAGE_PREVIOUS_DISABLED
)
from .queries import get_top_list_entries
async def make_response(page_index):
    """
    Builds the top list response for the requested page.
    This function is a coroutine.
    Parameters
    ----------
    page_index : `int`
        The page's index to make the response for.
    Returns
    -------
    response : ``InteractionResponse``
    """
    entries = await get_top_list_entries(page_index)
    return InteractionResponse(
        content = build_content(page_index, await process_entries(page_index, entries)),
        components = build_components(page_index, len(entries)),
    )
@SLASH_CLIENT.interactions(is_global = True)
async def top_list(
    page : ('number', 'page?') = 1,
):
    """
    A list of my best simps.
    This function is a coroutine generator.
    Parameters
    ----------
    page : `int` = `1`, Optional
        Page number (1 based).
    Yields
    ------
    acknowledge / response : `None` / ``InteractionResponse``
    """
    # Clamp to a zero-based page index; any page below 1 maps to page 0.
    page_index = max(page - 1, 0)
    yield
    yield await make_response(page_index)
# Bug fix: the raw-string (rf) prefix keeps ``\d`` a regex escape; in a plain
# f-string it is an invalid Python escape (SyntaxWarning on newer versions).
@SLASH_CLIENT.interactions(custom_id = re_compile(rf'{re_escape(CUSTOM_ID_PAGE_BASE)}(\d+)'))
async def top_list_page(page_index):
    """
    Gets the top list for the given page.
    This function is a coroutine generator.
    Parameters
    ----------
    page_index : `str`
        The page's index to make the response for. Later converted to `int`.
    Yields
    ------
    acknowledge / response : `None` / ``InteractionResponse``
    """
    page_index = int(page_index)
    yield
    yield await make_response(page_index)
@SLASH_CLIENT.interactions(custom_id = [CUSTOM_ID_PAGE_PREVIOUS_DISABLED, CUSTOM_ID_PAGE_NEXT_DISABLED])
async def disabled_page_move():
    """
    Handler for clicks on a disabled page-move component; intentionally a no-op.
    This function is a coroutine.
    """
@SLASH_CLIENT.interactions(custom_id = CUSTOM_ID_CLOSE)
async def top_list_close(client, event):
    """
    Deletes the top-list if applicable.
    This function is a coroutine.
    Parameters
    ----------
    client : ``Client``
        The client who received the event.
    event : ``InteractionEvent``
        The received interaction event.
    """
    await client.interaction_component_acknowledge(event)
    # Only the original invoker, or someone with manage-messages, may close.
    if event.user_permissions.can_manage_messages or event.message.interaction.user is event.user:
        await client.interaction_response_message_delete(event)
        return
    await client.interaction_followup_message_create(
        event,
        'You must be the invoker of the interaction, or have manage messages permission to do this.',
        show_for_invoking_user_only = True,
    )
|
from setuptools import setup

# Packaging configuration for the PyQt5 chat client application.
CLIENT_REQUIREMENTS = [
    "PyQt5==5.15.4",
]

setup(
    name="client_app",
    version="1.0",
    description="client",
    author="Vladimir Novikov",
    author_email="vovasnew@mail.ru",
    install_requires=CLIENT_REQUIREMENTS,
    include_package_data=True,
    packages=["src"],
)
|
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import sys
import unittest
import tempfile
import StringIO
import HPOlib.wrapping as wrapping
import HPOlib.config_parser.parse as parse
class WrappingTest(unittest.TestCase):
    """Tests for HPOlib.wrapping's command-line argument handling."""

    def setUp(self):
        """Start every test from the test directory with no leftover config."""
        # Change into the test directory
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        # Make sure there is no config file.
        # Bug fix: only ignore a missing/inaccessible file; the original
        # bare ``except`` silently swallowed every error.
        try:
            os.remove("./config.cfg")
        except OSError:
            pass

    @unittest.skip("Not implemented yet")
    def test_calculate_wrapping_overhead(self):
        self.fail()

    @unittest.skip("Not implemented yet")
    def test_calculate_optimizer_time(self):
        self.fail()

    def test_use_option_parser_no_optimizer(self):
        """Omitting the optimizer flag must abort argument parsing."""
        # Test not specifying an optimizer but random other options
        sys.argv = ['wrapping.py', '-s', '1', '-t', 'DBNet']
        self.assertRaises(SystemExit, wrapping.use_arg_parser)

    def test_use_option_parser_the_right_way(self):
        """A fully specified command line parses into the expected fields."""
        sys.argv = ['wrapping.py', '-s', '1', '-t', 'DBNet', '-o', 'SMAC']
        args, unknown = wrapping.use_arg_parser()
        self.assertEqual(args.optimizer, 'SMAC')
        self.assertEqual(args.seed, 1)
        self.assertEqual(args.title, 'DBNet')
        self.assertEqual(len(unknown), 0)

    # General main test
    @unittest.skip("Not implemented yet")
    def test_main(self):
        self.fail()

if __name__ == "__main__":
    unittest.main()
import argparse
def argument():
    """Parse and return the command line options of the monthly averager."""
    parser = argparse.ArgumentParser(
        description='''
    Generates monthly averaged files
    ''',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--inputdir', '-i', type=str, required=True,
                        help=''' ''')
    parser.add_argument('--maskfile', '-m', type=str, required=True,
                        help=''' mask filename .''')
    parser.add_argument('--outdir', '-o', type=str, required=True,
                        help=''' output directory''')
    return parser.parse_args()
# Parse the command line before the heavy imports so ``--help`` stays fast.
args = argument()
from commons.Timelist import TimeInterval, TimeList
from commons.mask import Mask
from commons.time_averagers import TimeAverager3D, TimeAverager2D
import netCDF4 as NC
from commons import netcdf4
from commons.utils import addsep
# Run under MPI when available; otherwise fall back to a single rank.
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nranks =comm.size
except:
    rank = 0
    nranks = 1
INPUTDIR=addsep(args.inputdir)
OUTPUTDIR=addsep(args.outdir)
TheMask=Mask(args.maskfile)
# Daily average files discovered from the N1p variable's file names.
TL=TimeList.fromfilenames(None, INPUTDIR, "ave*N1p.nc", filtervar="N1p")
VARLIST=['P_l','O2o','N3n','P_c','N1p','ppn','pH','O3c','CO2airflux', 'pCO2','N4n','O3h','N5s','Z_c', 'kd490']
MONTHLY_REQS = TL.getMonthlist()
# NOTE(review): only the first month is processed — confirm whether a loop
# over MONTHLY_REQS was intended here.
req=MONTHLY_REQS[0]
# Variables are distributed round-robin across MPI ranks.
for var in VARLIST[rank::nranks]:
    indexes,weights=TL.select(req)
    #if var=='pH': var='PH'
    outfile = OUTPUTDIR + "ave." + req.string + "01-12:00:00." + var + ".nc"
    print(outfile)
    filelist=[]
    for k in indexes:
        t = TL.Timelist[k]
        filename = INPUTDIR + "ave." + t.strftime("%Y%m%d-%H:%M:%S") + "." + var + ".nc"
        filelist.append(filename)
        # NOTE(review): the averaging below sits inside the file loop, so it
        # reruns after every append; dedenting it one level would compute the
        # average once per variable with the same final result.
        if netcdf4.dimfile(filename, var)==3:
            M3d = TimeAverager3D(filelist, weights, var, TheMask)
            netcdf4.write_3d_file(M3d, var, outfile, TheMask)
        else:
            M2d = TimeAverager2D(filelist, weights, var, TheMask)
            netcdf4.write_2d_file(M2d, var, outfile, TheMask)
|
from django.db import models
# Create your models here.
class News(models.Model):
    """A scraped news article stored in the ``news`` table."""
    objects = models.Manager()
    # Unique headline of the article.
    title = models.TextField('TITLE', max_length=255, unique=True)
    # Full article body; optional.
    content = models.TextField('CONTENT', blank=True)
    # Publisher / company name; optional.
    company = models.CharField('COMPANY', max_length=50,blank=True)
    # Save timestamp kept as text; also drives the default ordering below.
    saved_time = models.TextField('SAVED_TIME', blank=True)
    # link = models.ImageField('LINK', blank=True, null=True)
    def __str__(self):
        return self.title
    class Meta:
        # verbose_name = 'news'
        # verbose_name_plural = 'newsapp'
        db_table = 'news'
        ordering = ('-saved_time',) # desc,
    # def get_absolute_url(self):
    #     return reverse('news:detail', args=(self.id,))
    # def get_previous_post(self):
    #     return self.get_previous_by_saved_time()
    # def get_next_post(self):
    #     return self.get_next_by_saved_time()
|
#Write a function called most_oscars, which takes in one
#parameter, a dictionary. This dictionary maps names to the
#number of Academy Awards for which they have been nominated.
#This function should return a tuple containing the name and
#number of nominations for the person who has the most
#nominations.
#
#You may assume there will not be a tie for the actor with
#the most nominations (although there may be other ties in
#the list).
#Write your function here!
def most_oscars(a):
    """Return a (name, nominations) tuple for the most-nominated person.

    Assumes no tie for the single highest count; an empty dictionary
    yields an empty tuple.
    """
    winner = ()
    highest = 0
    for name, count in a.items():
        if int(count) > highest:
            winner = (name, count)
            highest = count
    return winner
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
#print: ('Meryl Streep', 20)
# Sample data: Meryl Streep has the most nominations, so this prints
# ('Meryl Streep', 20).
nominees = {'Meryl Streep': 20, 'Robert De Niro': 7, 'Michael Caine': 6, 'Maggie Smith': 6}
print(most_oscars(nominees))
|
import os
def writeFile(path, writeMethod, line):
    """Write *line* to *path* using open mode *writeMethod* ('w' or 'a')."""
    with open(path, writeMethod) as f:
        f.write(line)
# Source data file and the root directory where per-place files are kept.
path = r"C:\Users\Lenovo\Desktop\毕业论文\毕业论文数据\空气质量.txt"
keepPath = r"C:\Users\Lenovo\Desktop\python学习(公司)\文件读写\练习"
with open(path,"r") as f:
    # Skip the header line so the cursor starts at the second line.
    f.readline()
    # Iterate over the remaining lines returned by readlines().
    for line in f.readlines():
        # Use the place name as the directory name for saving the records;
        # [1:-1] strips the surrounding characters (presumably quotes).
        dirName=line.split(" ")[0][1:-1]
        # Absolute path of the per-place directory.
        dirAbsPath = os.path.join(keepPath,dirName)
        # Absolute path of the per-place data file.
        docPath = os.path.join(dirAbsPath, dirName) + ".txt"
        if os.path.exists(dirAbsPath):
            writeFile(docPath,"a",line)
        else:
            # Create the directory if it does not exist, then write the record.
            os.mkdir(dirAbsPath)
            writeFile(docPath,"w",line)
|
#global_x, global_y, global_z, px,py,pz,time
#df_all['global_z']>4175.0027)and(df_all['global_z']<4175.003)and(np.sqrt(df_all['global_x']**2+df_all['global_y']**2)<120
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.backends.backend_pdf import PdfPages
import yaml
import os
import sys
import time
import uproot
#Draw a histogram for comparing the two data
def my_plot_hist_compare(data, nbin, title, label, range_xy=None):
    """Draw a 1D histogram of *data* on the current matplotlib axes.

    Parameters
    ----------
    data : array-like
        Values to histogram.
    nbin : int
        Number of bins.
    title : str
        Axes title.
    label : str
        Legend label.
    range_xy : sequence of two floats, optional
        (low, high) histogram range; full data range when omitted.
    """
    # Local palette; the original named this ``colors``, shadowing the
    # ``matplotlib.colors`` module imported at file level.
    palette = matplotlib.cm.gnuplot2(np.linspace(0.2, 0.8, 3))
    #plt.figure(figsize=(5, 5))
    # Idiom fix: identity comparison with None instead of ``!= None``.
    if range_xy is not None:
        plt.hist(data, bins=nbin, range=[range_xy[0], range_xy[1]],
                 histtype='step', linewidth=2, alpha=1, color=palette[0],
                 label=label)
    else:
        plt.hist(data, bins=nbin, histtype='stepfilled', linewidth=2,
                 alpha=0.2, color=palette[0], label=label)
    plt.tick_params(labelsize=15)
    plt.legend(loc='upper right', ncol=1, fontsize=13)
    plt.title(title, fontsize='x-large', fontweight='heavy')
def load_roodata(file_num_start, file_num_end):
    """Load Geant4 rootracker files [file_num_start, file_num_end) into one array.

    Applies a z/radius fiducial cut, drops the z column, triple-log
    transforms the time column, and returns a numpy array with columns
    p, p_phi, p_theta, x, y, t.
    """
    time_extract = time.time()
    roo_data = pd.DataFrame()
    for file_id in range(file_num_start, file_num_end):
        print(file_id)
        path="/hpcfs/bes/mlgpu/cuijb/gan/data/Geant4/11_13_22"
        id_str = "%04d" % file_id
        file_name = os.path.join(path, "extracted_22_rootracker_"+id_str+".txt.root")
        # NOTE(review): ``events.pandas.df`` is the old uproot3 API — confirm
        # the pinned uproot version supports it.
        events = uproot.open(file_name)['T']
        df_all = events.pandas.df(['p','p_phi','p_theta','x','y','z','t'], flatten=True)
        # Fiducial cut: narrow z window and radius < 120 in the x-y plane.
        df_all = df_all[(df_all['z']>4175.0027)&(df_all['z']<4175.003)&(np.sqrt(df_all['x']**2+df_all['y']**2)<120)]
        df_all = df_all.drop(['z'], axis=1)
        # NOTE(review): DataFrame.append is deprecated in modern pandas.
        roo_data = roo_data.append(df_all)
    time_complete = time.time()-time_extract
    print("The time to extract rootracker data is:", time_complete)
    #p,time log transformation
    roo_data['t']=np.log(np.log(np.log(roo_data['t'])))
    #roo_data['p']=np.log(roo_data['p'])
    roo_data = np.array(roo_data)
    return roo_data
def np2dict(data):
    """Map each Geant4 feature name to its column of *data*.

    Columns are assumed ordered as p, p_phi, p_theta, x, y, t.
    """
    feature_names = ('p', 'p_phi', 'p_theta', 'x', 'y', 't')
    return {name: data[:, col] for col, name in enumerate(feature_names)}
def main(argv):
    """Extract Geant4 rootracker features and write comparison histograms to PDF."""
    #-------------------------------------------------------------------
    #extract Geant4 data,global_x,global_y,beam_z,px beam,py beam,pz beam,time
    file_num_start=1
    file_num_end = 2
    roo_data = load_roodata(file_num_start, file_num_end)
    # Bug fix: feature names in the column order produced by load_roodata;
    # the original iterated an undefined name ``list_geant4`` (NameError).
    list_geant4 = ['p', 'p_phi', 'p_theta', 'x', 'y', 't']
    with PdfPages('plot_22.pdf') as pdf:
        list1 = [1,2,3,4,5,6]
        # Per-feature histogram ranges, matching list_geant4's order.
        list_range=[[-9.3,7.72],[-3.15,3.15],[5e-06,1.58],[-120, 120],[-120, 120],[-0.03, 1.05]]
        #gan vs geant4 1d
        fig1 = plt.figure(figsize=(20, 15))
        for index,g4_key in zip(list1,list_geant4):
            plt.subplot(3, 3, index)
            my_plot_hist_compare(roo_data[:, index-1],100,g4_key,'G4_'+g4_key,list_range[index-1])
        pdf.savefig(fig1)
        #gan vs geant4 2d
if __name__=='__main__':
    main(sys.argv)
|
class Solution:
    def findMedianSortedArrays(self, nums1: list[int], nums2: list[int]) -> float:
        """Return the median of two sorted arrays in O(log(min(m, n))) time.

        Binary-searches a partition of the shorter array such that every
        element left of the combined partition is <= every element right
        of it; the median then comes from the boundary values.

        (Bug fix: annotations now use builtin ``list`` — the original
        used ``List[int]`` without importing ``typing.List``, which is a
        NameError at class-definition time.)
        """
        # Always binary-search over the shorter array.
        if len(nums2) < len(nums1):
            return self.findMedianSortedArrays(nums2, nums1)
        n1 = len(nums1)
        n2 = len(nums2)
        left, right = 0, n1
        while left <= right:
            # xPart + yPart is half of the combined length, rounded up.
            xPart = (left + right) // 2
            yPart = (n1 + n2 + 1) // 2 - xPart
            # Partition boundary values; +/-inf stand in for empty sides.
            xmax = float('-inf') if xPart == 0 else nums1[xPart - 1]
            xmin = float('inf') if xPart == n1 else nums1[xPart]
            ymax = float('-inf') if yPart == 0 else nums2[yPart - 1]
            ymin = float('inf') if yPart == n2 else nums2[yPart]
            if xmax <= ymin and ymax <= xmin:
                # Valid partition found.
                if (n1 + n2) % 2 == 0:
                    # Even total: average the two middle values.
                    return (max(xmax, ymax) + min(xmin, ymin)) / 2
                # Odd total: the median is the larger left-side boundary.
                return max(xmax, ymax)
            elif xmax > ymin:
                # Too many elements taken from nums1; move partition left.
                right = xPart - 1
            else:
                left = xPart + 1
|
# def: numbers- numerical data type that can be used to perform calculations in python
# def: Strings- immutable objects used to output text to the screen in python
# def: list - objects used to hold an ordered pair of data such as integers, decimals and strings. can be changed
# def: tuples- data object used to store a group of elements that are not changeable. elements are ordered.
myObject = [9, 0, 35, 'word']
myObject2 = (4, 3,3, 'word')
myObject3 = {2, 'something'}
print(type(myObject))
print(type(myObject2))
print(type(myObject3))
print(myObject3)
# The object below is an example of a dictionary which can hold an array of different object types
myDict = {'taste': "sweet ",
          'color': "blue",
          'weight': " 5PBS",
          'numbers': 4,
          'decimal': 4.2
    , 'taste': "sour"}
# If there are duplicate keys in a dictionary, Python only keeps the value
# of the last repeated entry ('taste' ends up as "sour" here).
# How to print objects in a dictionary in python:
x = myDict['taste']
# print(x)
# print(myObject)
# print(myObject2)
print(tuple(myObject))
# Lists are mutable, so individual elements can be reassigned.
myObject[1] = 'something'
# Numbers assignments: write equation that uses multi, div, add, subtra to get value 100.25
print(150 - 5**2 + .25 * 1 - (100/(5*5))-21)
# Final answer for this question
# you cannot simply add an integer and a string together in python to be printed on the screen
# a method of type casting has to be used. Example shown below
posANS1 = 44
posANS2 = 29
posANS3 = 34
print(4 * (6 + 5))
print(4 * 6 + 5)
print(4 + 6 * 5)
print("this is the answer of P1 is: " + str(posANS1))
print("this is the answer of P2 is: " + str(posANS2))
print("this is the answer of P3 is: " + str(posANS3))
whatIsThis = 3 + 1.5 + 4
print(type(whatIsThis))
print(whatIsThis)
print(5**5)
print(5**2)
myDivisor = 100
print(pow(225, .5))
n = "hello"
print(n[4::-1])
# This is an example of slicing. It uses a negative step to reverse the string.
# Given the string hello, give two methods of producing the letter 'o' using indexing.
# The code below uses indexing to grab the o in "hello"
s = 'hello'
# Print out the 'o'
# Method 1:
print(s[4])
# Method 2
print(s[4:5:1])
# build a list two separate ways
# NOTE(review): despite the comment above, these literals build tuples,
# not lists — square brackets would be needed for lists.
first_list = (0,0,0)
h = 0
second_list = (h, h, h)
print(second_list)
a = (4, 0, 9, 5)
b =(7, 5, 0)
c = (7, 2, 8, 0, 9)
final_list = (a[1], b[2], c[3])
print(final_list)
list3 = [1,2,[3,4,'hello']]
print(list3)
# Nested list elements can be reassigned in place.
list3[2][2] = "goodbye"
print(list3[2][2])
# sort this list
list4 = [5, 3, 4, 6, 1]
list4.sort( )
print(list4)
print(type(list4))
# Examples of lists, tuples, sets, dictionaries.
# This is a list
first_ex = [2, 4, 6, 8]
# This is a tuple
sec_ex = (1, 3, 5, 7)
# This is a set
third_ex = {20, 18, 16, 14}
# This is a dictionary
fourth_ex = {'key1': "first element "}
print("The third example is a: ", type(third_ex))
print(type(fourth_ex))
# d = {'k1': {'k2': 'hello'}}
# print(d['k1']['k2'])
d = {'k1': [{'nested_key': ['this is deep', ['hello']]}]}
print(d['k1'][0]['nested_key'][1][0])
print(d['k1'])
# This will be hard and annoying!
d = {'k1':
     [1, 2,
      {'k2':
       ['this is tricky',
        {'tough':
         [1, 2,
          ['hello']
         ]}]}]}
print(d['k1'][2]['k2'][1]['tough'][2])
|
import json
import time
from jinja2 import Template
# Jinja template for one browser transaction inside a generated mite
# journey: perform the request, assert on the status code, then pause.
TEMPLATE = Template(
    ' async with ctx.transaction("Request {{method}} {{url}}"):\n'
    " resp = await ctx.browser.{{method}}(\n"
    " '{{url}}',\n"
    " headers={{headers}},\n"
    " {{json}}"
    " )\n"
    " check_status_code{{check_groups}}(resp, {{expected_status}})\n"
    " await sleep({{sleep}})\n\n\n"
)
def set_expected_status_code(cur_page, entries):
    """Determine the status code the generated check should expect.

    302 redirects are followed: the redirect-target entry's response is
    folded into *cur_page*, the target entry is removed from *entries*,
    and the lookup recurses in case it redirects again. A 304 is treated
    as "either 200 or 304", switching to the grouped check variant.

    Returns a (code, status_groups_suffix) tuple, the suffix being
    either '' or '_in_groups'.
    """
    code = cur_page["response"]["status"]
    status_groups = ""
    if code == 302 and cur_page["response"]["redirectURL"]:
        for other_page in entries:
            if cur_page["response"]["redirectURL"] == other_page["request"]["url"]:
                # Fold the redirect target into cur_page and consume it.
                cur_page["response"]["redirectURL"] = other_page["response"][
                    "redirectURL"
                ]
                cur_page["response"]["status"] = other_page["response"]["status"]
                entries.remove(other_page)
                code, status_groups = set_expected_status_code(cur_page, entries)
    elif code == 304:
        # Cached responses may come back either fresh or not-modified.
        code = "200, 304"
        status_groups = "_in_groups"
    return code, status_groups
def set_request_headers_dict(page):
    """Collect the request headers of *page*, excluding the Cookie header."""
    headers = {}
    for header in page["request"]["headers"]:
        if header["name"] != "Cookie":
            headers[header["name"]] = header["value"]
    return headers
def set_request_body(method, page):
    """Render the request-body kwarg line for POST requests ('' otherwise)."""
    if method != "post":
        return ""
    # TODO(review): the postData structure needs to be studied more.
    return "json={}\n".format(page["request"]["postData"])
def _parse_urls(pages):
    """Return the title (URL) of every page recorded in the HAR log."""
    return [entry["title"] for entry in pages["log"]["pages"]]
def _extract_and_sort_requests(pages):
    """Sort the HAR entries chronologically, in place, and return them.

    The original list object inside *pages* is reordered (not copied) so
    later consumers see the same sorted list.
    """
    entries = pages["log"]["entries"]
    entries.sort(key=lambda entry: entry["startedDateTime"])
    return entries
def _create_journey_file_start():  # pragma: no cover
    """Return the import block and journey signature that open a mite script.

    The pragma keeps this boilerplate out of code-coverage metrics.
    """
    return (
        "from .utils import check_status_code, check_status_code_in_groups\n"
        "from mite_browser import browser_decorator\n"
        "from mite.exceptions import MiteError\n"
        "from asyncio import sleep\n\n\n"
        "@browser_decorator()\n"
        "async def journey(ctx):\n"
    )
def _render_journey_transaction(
    page, req_method, expected_status_code, group_status, sleep_s
):
    """Renders a single transaction with a predefined template for use
    in a mite journey.

    NOTE(review): ``date_time`` is passed but TEMPLATE defines no
    ``{{date_time}}`` placeholder, so it is currently unused.
    """
    return TEMPLATE.render(
        date_time=page["startedDateTime"],
        method=req_method,
        url=page["request"]["url"],
        headers=set_request_headers_dict(page),
        json=set_request_body(req_method, page),
        check_groups=group_status,
        expected_status=expected_status_code,
        sleep=sleep_s,
    )
def har_convert_to_mite(file_name, converted_file_name, sleep_s):
    """Convert a HAR capture into a runnable mite journey script.

    Parameters:
        file_name: path of the input HAR (JSON) file.
        converted_file_name: path of the mite journey file to write.
        sleep_s: fixed sleep between requests; 0 reproduces the recorded
            inter-request gaps instead.
    """
    with open(file_name, "r") as f:
        temp_pages = json.loads(f.read())
    journey_main = ""
    page_urls = _parse_urls(temp_pages)
    entries = _extract_and_sort_requests(temp_pages)
    # Start time of the recording, used to derive recorded gaps.
    timestamp = time.strptime(
        temp_pages["log"]["pages"][0]["startedDateTime"], "%Y-%m-%dT%H:%M:%S.%fZ"
    )
    previous_timestamp = time.mktime(timestamp)
    sleep_period = 0
    for cur_page in entries:
        # Skip aborted requests and requests that are not page loads.
        if (
            not cur_page["response"]["status"]
            or cur_page["request"]["url"] not in page_urls
        ):
            continue
        expected_status_code, check_groups_status = set_expected_status_code(
            cur_page, entries
        )
        req_method = cur_page["request"]["method"].lower()
        if sleep_s != 0:
            sleep_period = sleep_s
        else:
            # Reproduce the recorded gap since the previous request.
            timestamp = time.strptime(
                cur_page["startedDateTime"], "%Y-%m-%dT%H:%M:%S.%fZ"
            )
            timestamp = time.mktime(timestamp)
            sleep_period = int(timestamp - previous_timestamp)
            previous_timestamp = timestamp
        # main part of the journey
        journey_main += _render_journey_transaction(
            cur_page, req_method, expected_status_code, check_groups_status, sleep_period
        )
    journey_start = _create_journey_file_start()
    with open(converted_file_name, "w") as nf:
        nf.write(journey_start + journey_main)
|
# Read three integers interactively from the user.
n1 = int(input("enter no1"))
n2 = int(input("enter no2"))
n3 = int(input("enter no3"))
def greatest(n1, n2, n3):
    """Return the largest of the three numbers."""
    return max(n1, n2, n3)
# Report the largest of the three inputs.
print(greatest(n1,n2,n3))
|
import requests, json, turtle
# Shared turtle instance that marks the ISS position on the world map.
iss = turtle.Turtle()
def setup(window):
    """Configure the turtle window: size, world-map background, ISS sprite."""
    global iss
    window.setup(1000, 500)
    window.bgpic('earth.gif')
    # Map screen coordinates directly onto longitude/latitude.
    window.setworldcoordinates(-180, -90, 180, 90)
    turtle.register_shape('iss.gif')
    iss.shape('iss.gif')
def move_iss(lat, long):
    """Jump the ISS marker to (long, lat) without drawing a trail segment."""
    global iss
    iss.hideturtle()
    iss.penup()
    iss.goto(long, lat)
    iss.pendown()
    iss.showturtle()
def track_iss():
    """Fetch the current ISS position, move the marker, and reschedule.

    Polls the Open Notify API and re-arms itself every 5 seconds via the
    Tk canvas ``after`` timer.
    """
    url = 'http://api.open-notify.org/iss-now.json'
    response = requests.get(url)
    if (response.status_code == 200):
        response_dic = json.loads(response.text)
        position = response_dic['iss_position']
        lat = float(position['latitude'])
        long = float(position['longitude'])
        print(long,lat)
        move_iss(lat, long)
    else:
        # Bug fix: the original printed ``requests.status_code`` — the
        # module has no such attribute; the response object does.
        print('have a problem:', response.status_code)
    widget = turtle.getcanvas()
    widget.after(5000, track_iss)
def main():
    """Create the window and start polling the ISS position."""
    global iss
    screen = turtle.Screen()
    setup(screen)
    track_iss()
if __name__ == '__main__':
    main()
    # Keep the window responsive; this also services the after() timer.
    turtle.mainloop()
__description__="Unpickle contact map"
def __pkl_to_map(pkl_input_filename,matrix_output_filename):
    """Unpickle a square contact map and write it as a space-separated text matrix.

    Parameters:
        pkl_input_filename: path to the pickled (square, indexable) map.
        matrix_output_filename: path of the plain-text matrix to write,
            one row per line, values rendered with repr() for full precision.

    WARNING: ``pickle.load`` can execute arbitrary code — only unpickle
    trusted files.
    """
    import pickle
    with open(pkl_input_filename,'rb') as f:
        matx = pickle.load(f)
    matx_size = len(matx)
    # ``with`` guarantees the output file is closed even if writing fails;
    # the original opened it before reading and relied on a manual close().
    with open(matrix_output_filename,'wt') as matrix_out_file:
        for i in range(matx_size):
            # The original's str(repr(...)) was redundant; repr() already
            # yields a string.
            row = " ".join(repr(matx[i][j]) for j in range(matx_size))
            matrix_out_file.write(row + "\n")
if __name__ == "__main__":
    import argparse
    # Command line interface: pickled input -> plain-text matrix output.
    parser=argparse.ArgumentParser(description=__description__)
    # I/O
    parser.add_argument('pickled_map',type=str,help='Pickled contact map file')
    parser.add_argument('out_map',type=str,help='Output file (unpickled contact map)')
    args=parser.parse_args()
    __pkl_to_map(args.pickled_map,args.out_map)
|
import numpy as np
from Tkinter import *
class Paint:
    """A 28x28 drawing pad (rendered 4x larger) feeding a digit-guess callback."""

    def paint(self, event):
        """Draw a dot at the mouse position and mark the matching grid cell."""
        x1, y1 = (event.x - 1), (event.y - 1)
        x2, y2 = (event.x + 1), (event.y + 1)
        # Only draw while the dot lies fully inside the canvas.
        if(x1 > 0 and x2 < self.width and y1 > 0 and y2 < self.height):
            self.cv.create_rectangle(x1, y1, x2, y2, fill='black', width=5)
            # Canvas pixels are 4x the feature-grid resolution.
            self.update_drawing_feature(event.x//4, event.y//4)

    def clear(self):
        """Erase the canvas and reset the 28x28 feature grid."""
        self.cv.delete('all')
        self.drawing_feature = np.zeros((28, 28))

    def guess(self):
        """Rotate the drawn grid into model orientation and notify the observer."""
        # Bug fix: wrap zip() in list() so the rotation also works on
        # Python 3, where zip() returns an iterator that np.flip cannot
        # treat as a 2-D array; on Python 2 this is a no-op.
        self.drawing_feature = list(zip(*self.drawing_feature[::-1]))
        self.drawing_feature = np.flip(self.drawing_feature, 1)
        self.observer_fn(self.drawing_feature)

    def update_drawing_feature(self,x,y):
        """Mark cell (x, y) of the 28x28 grid as drawn."""
        # NOTE(review): indexes as [x][y]; confirm the observer expects
        # this orientation rather than [row][col].
        self.drawing_feature[x][y] = 1

    def __init__(self, observer):
        """Build the Tk window, wire up the buttons and start the main loop.

        Parameters:
            observer: callable invoked with the 28x28 feature array on Guess.
        """
        self.width = 28 * 4
        self.height = 28 * 4
        self.center = self.height // 2
        self.white = (255, 255, 255)
        self.drawing_feature = np.zeros((28, 28))
        self.observer_fn = observer
        root = Tk()
        # Tkinter Create canvas
        self.cv = Canvas(root, width=self.width, height=self.height, bg='white')
        # (The original had a no-op bare ``self.cv.pack`` expression here.)
        self.cv.pack(expand=YES, fill=BOTH)
        self.cv.bind('<B1-Motion>', self.paint)
        clear_button = Button(text='Clear', command=self.clear)
        guess_button = Button(text='Guess', command=self.guess)
        clear_button.pack()
        guess_button.pack()
        root.mainloop()
|
from django.shortcuts import render,get_object_or_404,redirect
from . import models
from . import forms
from django.contrib import messages
from django.utils import timezone
from django.urls import reverse,reverse_lazy
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import FormMixin
from django.views.generic import (View,TemplateView,ListView,DetailView,
CreateView,UpdateView,DeleteView,FormView)
# Create your views here.
# class AboutView(TemplateView):
# template_name = "blog/about.html"
class PostListView(ListView):
    """List of published posts, newest first."""
    model = models.Post
    def get_queryset(self):
        """Return only posts whose publish date is in the past, descending.

        lte = "less than or equal to"; the leading '-' orders by
        published_date descending. See Django "Field lookups":
        https://docs.djangoproject.com/en/3.0/ref/models/querysets/#id4
        """
        return models.Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
class PostDetailView(DetailView):
    """Detail page for a single post; also exposes a comment form."""
    model = models.Post
    def get_context_data(self,**kwargs):
        """Add the CommentForm to the template context.

        NOTE(review): the form CLASS (not an instance) is placed in the
        context; Django templates call callables on render so this works,
        but ``forms.CommentForm()`` would be more conventional -- confirm.
        """
        context = super().get_context_data(**kwargs)
        context['form'] = forms.CommentForm
        return context
class PostCreateView(LoginRequiredMixin,CreateView):
    """Authenticated-only creation of a Post via PostForm."""
    model = models.Post
    form_class = forms.PostForm
class PostUpdateView(LoginRequiredMixin,UpdateView):
    """Authenticated-only editing of an existing Post via PostForm."""
    model = models.Post
    form_class = forms.PostForm
class PostDeleteView(LoginRequiredMixin,DeleteView):
    """Authenticated-only deletion of a Post; returns to the post list."""
    model = models.Post
    success_url = reverse_lazy('post_list')
class DraftListView(LoginRequiredMixin,ListView):
    """Authenticated-only list of unpublished drafts, newest first."""
    model = models.Post
    def get_queryset(self):
        """Drafts are posts with no published_date set."""
        return models.Post.objects.filter(published_date__isnull=True).order_by('-create_date')
##########################
##########################
# Function Based View
@login_required
def add_comment_to_post(request,pk):
    """Attach a new comment to the post with primary key ``pk``.

    A valid POST saves the comment linked to the post; every request path
    ends by redirecting to the post's detail page.
    """
    post = get_object_or_404(models.Post, pk=pk)
    form = forms.CommentForm()
    if request.method == "POST":
        form = forms.CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            # Link the comment to its parent post before saving.
            comment.post = post
            comment.save()
            return redirect('post_detail', pk=post.pk)
    else:
        # NOTE(review): this GET branch builds a fresh form but then falls
        # through to the redirect below, so the form (and any invalid-POST
        # errors) is never rendered -- presumably a template render was
        # intended here; confirm against the project's templates/urls.
        form = forms.CommentForm()
    return redirect('post_detail', pk=post.pk)
@login_required
def approve_comment(request,pk):
    """Flag the given comment as approved, then return to its post."""
    target = get_object_or_404(models.Comment, pk=pk)
    target.approve_comment()
    return redirect('post_detail', pk=target.post.pk)
@login_required
def remove_comment(request,pk):
    """Delete the given comment, then send the user back to its post."""
    doomed = get_object_or_404(models.Comment, pk=pk)
    # Grab the parent pk before delete() invalidates the row.
    parent_pk = doomed.post.pk
    doomed.delete()
    return redirect('post_detail', pk=parent_pk)
@login_required
def publish_draft(request,pk):
    """Publish a draft post, flash a success message, and show the post."""
    draft = get_object_or_404(models.Post, pk=pk)
    draft.publish()
    messages.success(request, 'Draft has been published.')
    return redirect('post_detail', pk=draft.pk)
|
def get_plans_by_user(username,db_connection):
    """Fetch the names of every plan owned by ``username``.

    Args:
        username (string): logged in username
        db_connection: open sqlite3 connection
    Returns:
        list of 1-tuples, each holding one plan name
    """
    cursor = db_connection.execute(
        "SELECT name FROM Plan WHERE username = :username",
        {"username": username},
    )
    return cursor.fetchall()
def create_plan(username,db_connection,name,description):
    """Insert a new plan row for ``username`` and commit.

    Args:
        username (string): logged in username
        db_connection: open sqlite3 connection
        name (string): name of plan; must be non-empty
        description (string): description of the plan
    Raises:
        InputError: if ``name`` is empty or otherwise falsy (the original
            ``len(name)`` check raised a bare TypeError on None instead).
    """
    if not name:
        raise InputError("Name missing")
    query = "INSERT INTO Plan (username,name,description) VALUES (:username,:name,:description)"
    db_connection.execute(query, {"username": username, "name": name, "description": description})
    db_connection.commit()
def get_costs(username,db_connection,name):
    """Return all cost items of the named plan.

    Args:
        username (string): logged in username
        db_connection: open sqlite3 connection
        name (string): name of plan
    Raises:
        InputError: if ``name`` is empty
    Returns:
        list of (description, amount, year) tuples, ordered by year
        ascending then description descending
    """
    if not name:
        raise InputError("Name missing")
    # CONSISTENCY FIX: a space was missing before "ORDER BY" (producing
    # ")ORDER BY"); SQLite happens to tokenize that, but the sibling
    # get_revenue() includes the space and this now matches it.
    query = ("SELECT description, amount, year FROM Cost WHERE "
             "plan_id=(SELECT Plan.plan_id FROM Plan WHERE name=:name AND username=:username) "
             "ORDER BY year ASC, description DESC")
    return db_connection.execute(query, {"name": name, "username": username}).fetchall()
def get_revenue(username,db_connection,name):
    """List every revenue item attached to the named plan.

    Args:
        username (string): logged in username
        db_connection: open sqlite3 connection
        name (string): name of plan
    Raises:
        InputError: if ``name`` is empty
    Returns:
        list of (description, amount, year) tuples, ordered by year
        ascending then description descending
    """
    if not len(name):
        raise InputError("Name missing")
    query = (
        "SELECT description, amount, year FROM Revenue WHERE "
        "plan_id=(SELECT Plan.plan_id FROM Plan WHERE name=:name AND username=:username) "
        "ORDER BY year ASC, description DESC"
    )
    return db_connection.execute(query, {"name": name, "username": username}).fetchall()
def add_cost(username,db_connection,name,description,amount,year):
    """Insert one cost item for the named plan and commit.

    Args:
        username (string): logged in username
        db_connection: open sqlite3 connection
        name (string): name of plan
        description (string): description of cost item
        amount (str or int): amount of cost
        year (str or int): year for cost
    Raises:
        InputError: if any argument is empty
    """
    # BUG FIX: the docstring allows int amounts/years but len() raises
    # TypeError on ints; stringify each field before the emptiness check.
    if any(len(str(field)) == 0 for field in (name, description, amount, year)):
        raise InputError("missing parameter")
    query = ("INSERT INTO Cost (plan_id,description,amount,year) VALUES ((SELECT plan_id "
             "FROM Plan WHERE name=:name AND username=:username),:description,:amount,:year)")
    db_connection.execute(query, {"name": name, "username": username,
                                  "description": description, "amount": amount, "year": year})
    db_connection.commit()
def add_revenue(username,db_connection,name,description,amount,year):
    """Insert one revenue item for the named plan and commit.

    Args:
        username (string): logged in username
        db_connection: open sqlite3 connection
        name (string): name of plan
        description (string): description of revenue item
        amount (str or int): amount of revenue
        year (str or int): year for revenue
    Raises:
        InputError: if any argument is empty
    """
    # BUG FIX: the docstring allows int amounts/years but len() raises
    # TypeError on ints; stringify each field before the emptiness check.
    if any(len(str(field)) == 0 for field in (name, description, amount, year)):
        raise InputError("missing parameter")
    query = ("INSERT INTO Revenue (plan_id,description,amount,year) VALUES ((SELECT plan_id "
             "FROM Plan WHERE name=:name AND username=:username),:description,:amount,:year)")
    db_connection.execute(query, {"name": name, "username": username,
                                  "description": description, "amount": amount, "year": year})
    db_connection.commit()
class InputError(Exception):
    """Raised when a caller supplies a missing/empty plan parameter."""
    pass
|
# You can add to this file in the editor
import pyotp
import sqlite3
import hashlib
import uuid
from flask import Flask, request
app = Flask(__name__)
# SQLite database file -- presumably used by later steps of the lab
# (the pyotp/sqlite3/hashlib/uuid imports above are unused so far).
db_name = 'test.db'
@app.route('/')
def index():
    # Landing page for the lab.
    return 'Welcome to the hands on lab for an evolution of password systems!'
if __name__ == '__main__':
    # ssl_context='adhoc' generates a throwaway self-signed TLS certificate.
    app.run(host='0.0.0.0', port=5000, ssl_context='adhoc')
# nohup python password-evolution.py &
# curl -k https://0.0.0.0:5000/
# pkill -f password-evolution.py
|
#!/soft/packages/python/2.6/bin/python
import argparse
import json
import os
import shutil
import sys
import time
def main():
    """Set one variable in a control-API subsystem state file.

    Reads conf/conf.json relative to this script to locate the API
    directory, loads the subsystem's JSON state, updates
    ``vars[variable]`` plus the ``updated`` timestamp, then replaces the
    state file by writing a temp file and moving it into place.
    """
    parser = argparse.ArgumentParser(description='program for setting a variable in the control api')
    parser.add_argument('subsystem', metavar='subsystem',
                        help="This is the subsystem for which you are setting a variable")
    parser.add_argument('variable', metavar='variable',
                        help="This is the variable which you would like to set")
    parser.add_argument('value', metavar='value',
                        help="This is the value you wish to assign to the variable")
    args = parser.parse_args()
    subsystem = args.subsystem
    variable = args.variable
    value = args.value
    # FIX: use context managers -- the original leaked all three file
    # handles (conf, api state, temp output).
    with open(os.path.dirname(__file__) + "/../conf/conf.json") as conf_file:
        json_conf = json.load(conf_file)
    api_fname = json_conf['global']['api_dir'] + "/" + str(json_conf['global']['api_version']) + "/" + subsystem
    with open(api_fname) as api_file:
        json_state = json.load(api_file)
    json_state["vars"][variable] = value
    json_state['updated'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    output_json = json.dumps(json_state, sort_keys=True)
    # FIX: close the temp file before shutil.move, otherwise the write may
    # still be buffered (and the move fails outright on Windows).
    with open(api_fname + ".tmp", 'w') as f:
        f.write(output_json)
    shutil.move(api_fname + ".tmp", api_fname)
if __name__ == "__main__":
    sys.exit( main() )
|
import os
from celery import Celery
# Make sure Django settings are importable before Celery loads task modules.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rekrutacja.settings')
# Celery application: RabbitMQ broker on localhost, results returned over
# the AMQP rpc backend, tasks auto-imported from skaner.tasks.
app = Celery('rekrutacja',
             broker='amqp://localhost',
             backend='rpc://',
             include=['skaner.tasks'])
|
from django.contrib import admin
from django.forms import CheckboxSelectMultiple
from .models import MainFacility, Service, HotelFacility, RoomFacility,\
Hotel, RoomPrice, HotelImgs, UserComment
import xadmin
from django import forms
class HotelImgInline(object):
    """xadmin inline: edit a hotel's images on the Hotel change page."""
    model = HotelImgs
    extra = 1
class RoomPriceInline(object):
    """xadmin inline: edit a hotel's room prices on the Hotel change page."""
    model = RoomPrice
    extra = 1
class UserCommentInline(object):
    """xadmin inline: edit a hotel's user comments on the Hotel change page."""
    model = UserComment
    extra = 1
class HotelForm(forms.ModelForm):
    """Admin form for Hotel that renders every many-to-many facility and
    service field as a checkbox list instead of the default multi-select."""
    mfacility = forms.ModelMultipleChoiceField(widget=CheckboxSelectMultiple, queryset=MainFacility.objects.all())
    service = forms.ModelMultipleChoiceField(widget=CheckboxSelectMultiple, queryset=Service.objects.all())
    hfacility = forms.ModelMultipleChoiceField(widget=CheckboxSelectMultiple, queryset=HotelFacility.objects.all())
    rfacility = forms.ModelMultipleChoiceField(widget=CheckboxSelectMultiple, queryset=RoomFacility.objects.all())
# Backward-compatible alias: the class was originally (mis)named "HotelFrom".
HotelFrom = HotelForm
class HotelAdmin(object):
    """xadmin options for Hotel: custom form plus image/price/comment inlines."""
    form = HotelForm
    inlines = [HotelImgInline, RoomPriceInline, UserCommentInline, ]
xadmin.site.register(MainFacility)
xadmin.site.register(Service)
xadmin.site.register(HotelFacility)
xadmin.site.register(RoomFacility)
xadmin.site.register(Hotel, HotelAdmin)
|
# Generated by Django 2.2 on 2019-04-06 23:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2) migration: repoint MenuVotes.user at the
    project's AUTH_USER_MODEL with CASCADE deletes. Avoid manual edits."""
    dependencies = [
        ('food_poll', '0007_auto_20190407_0217'),
    ]
    operations = [
        migrations.AlterField(
            model_name='menuvotes',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='menuvotes', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# Directory that holds the GIF images to display.
gifdir = "D:/Projects/python/Gifs/"
from PIL.ImageTk import PhotoImage
from tkinter import *
win = Tk()
# Load the first GIF through Pillow's Tk-compatible PhotoImage wrapper.
img = PhotoImage(file=gifdir + "1.gif")
can = Canvas(win)
can.pack(fill=BOTH)
can.create_image(2, 2, image=img, anchor=NW)  # x, y coordinates
win.mainloop()
|
#!/usr/bin/env python
import os
import csv
import random
import string
import json
# uses os.getcwd() to define the user's working directory
def get_file_path(filename):
    """Return ``filename`` joined onto the current working directory."""
    # FIX: the original also computed an unused ``dir_path`` local
    # (a second, discarded os.getcwd() call); removed.
    return os.path.join(os.getcwd(), filename)
# Python 2 script: print statement and raw_input below.
print "What is the name of your csv? Be sure to include the .csv extention."
filename = raw_input(">> ")
path = get_file_path(filename)
# JSON output goes to data.json in the working directory.
write_file = open("data.json", "w")
def read_csv(filepath):
    """Read the first column of every row of the CSV at ``filepath``.

    Returns:
        dict mapping the 1-based row number to the row's first column.
    """
    # Mode 'r' replaces the original 'rU': the universal-newline flag was
    # deprecated for years and removed entirely in Python 3.11.
    with open(filepath, 'r') as csvfile:
        reader = csv.reader(csvfile)
        # enumerate(..., 1) replaces the original hand-rolled counter.
        return {i: row[0] for i, row in enumerate(reader, 1)}
# Below uses stringio and json together to output valid JSON
from StringIO import StringIO   # Python 2 only module
output = StringIO()
# json.dump writes the serialized dict into the StringIO buffer.
json_dump = json.dump(read_csv(path), output)
json_obj = output.getvalue()
# Currently this prints comma separated values, but it is not valid JSON
# (Python 2 "print to file" chevron syntax below.)
print >> write_file, json_obj
'''
Next steps for this script:
1. Headers as JSON Key
2. Only get the columns that are needed based on input from the user.
'''
|
# coding: utf-8
# Copyright (C) 2016 UKP lab
#
# Author: Daniil Sorokin (ukp.tu-darmstadt.de/ukp-home/)
#
import nltk
np_grammar = r"""
NP:
{(<NN|NNS>|<NNP|NNPS>)<NNP|NN|NNS|NNPS>+}
{(<NN|NNS>+|<NNP|NNPS>+)<IN|CC>(<PRP\$|DT><NN|NNS>+|<NNP|NNPS>+)}
{<JJ|RB|CD>*<NNP|NN|NNS|NNPS>+}
{<NNP|NN|NNS|NNPS>+}
CD:
{<CD>+}
"""
np_parser = nltk.RegexpParser(np_grammar)
def extract_entities_from_tagged(annotated_tokens, tags):
    """
    The method takes a list of tokens annotated with the Stanford NE annotation scheme and produces a list of entites.
    :param annotated_tokens: list of tupels where the first element is a token and the second is the annotation
    :return: list of entities each represented by the corresponding token ids
    Tests:
    >>> extract_entities_from_tagged([('what', 'O'), ('character', 'O'), ('did', 'O'), ('natalie', 'PERSON'), ('portman', 'PERSON'), ('play', 'O'), ('in', 'O'), ('star', 'O'), ('wars', 'O'), ('?', 'O')], tags={'PERSON'})
    [[3, 4]]
    >>> extract_entities_from_tagged([('Who', 'O'), ('was', 'O'), ('john', 'PERSON'), ('noble', 'PERSON')], tags={'PERSON'})
    [[2, 3]]
    >>> extract_entities_from_tagged([(w, 'NE' if t != 'O' else 'O') for w, t in [('Who', 'O'), ('played', 'O'), ('Aragorn', 'PERSON'), ('in', 'O'), ('the', 'ORG'), ('Hobbit', 'ORG'), ('?', 'O')]], tags={'NE'})
    [[2], [4, 5]]
    """
    spans = []
    run = []
    # Collect maximal runs of consecutive token ids whose tag is in `tags`.
    for idx, (_, tag) in enumerate(annotated_tokens):
        if tag in tags:
            run.append(idx)
            continue
        if run:
            spans.append(run)
            run = []
    if run:
        spans.append(run)
    return spans
def extract_entities(tokens_ne_pos):
    """
    Extract entities from the NE tags and POS tags of a sentence. Regular nouns are lemmatized to get rid of plurals.
    :param tokens_ne_pos: list of (word, NE tag, POS tag) triples.
    :return: list of entities in the order: NE>NNP>NN
    """
    # Token-id spans for each Stanford NE class.
    persons = extract_entities_from_tagged([(w, t) for w, t, _ in tokens_ne_pos], ['PERSON'])
    locations = extract_entities_from_tagged([(w, t) for w, t, _ in tokens_ne_pos], ['LOCATION'])
    orgs = extract_entities_from_tagged([(w, t) for w, t, _ in tokens_ne_pos], ['ORGANIZATION'])
    # Chunk with the NP grammar; a token keeps its POS tag only when its NE
    # tag is "O", so NE-tagged tokens are masked out of the chunker.
    chunks = np_parser.parse([(w, t if p == "O" else "O") for w, p, t in tokens_ne_pos])
    nps = []
    index = 0
    # Walk the chunk tree left-to-right, tracking the flat token index so
    # each NP/CD chunk maps back to a token-id span.
    for el in chunks:
        if type(el) == nltk.tree.Tree and (el.label() == "NP" or el.label() == "CD"):
            nps.append(list(range(index, index+ len(el.leaves()))))
            index += len(el.leaves())
        else:
            index += 1
    ne_vertices = [(ne, 'PERSON') for ne in persons] + [(ne, 'LOCATION') for ne in locations] + [(ne, 'ORGANIZATION') for ne in orgs]
    # `vertices` stays empty; kept so the return expression is stable.
    vertices = []
    for nn in nps:
        # NOTE(review): this condition only skips an NP when every NP token
        # appears in EVERY existing NE span; presumably the intent was
        # "skip NPs fully covered by SOME NE span" -- confirm before changing.
        if not ne_vertices or not all(n in v for n in nn for v, _ in ne_vertices):
            ne_vertices.append((nn, 'NNP'))
    return ne_vertices + vertices
def generate_edges(vertices):
    """Return one edge dict per unordered pair of vertices, keeping only the
    first element (the token-id span) of each endpoint."""
    return [
        {'left': first[0], 'right': second[0]}
        for i, first in enumerate(vertices)
        for second in vertices[i + 1:]
    ]
if __name__ == "__main__":
    # Testing: run the doctests embedded in this module's docstrings.
    import doctest
    print(doctest.testmod())
|
# coding: utf-8
import urllib
import contextlib
import lxml.html
def get_menus():
    """Download the lunch-menu page (Python 2 urllib) and return the
    (name, price) generator produced by parse_menus."""
    url = 'http://www.hsd.co.kr/lunch/lunchList.html'
    # contextlib.closing makes the py2 urlopen handle usable in a with-block.
    with contextlib.closing(urllib.urlopen(url)) as u:
        html = u.read()
    return parse_menus(html)
def parse_menus(html):
    """Yield (name, price) pairs scraped from the lunch-list HTML.

    The price is normalised to a digit string: the trailing won sign and
    thousands separators are stripped.
    """
    root = lxml.html.fromstring(html)
    for name in root.cssselect('#cont_detail_area .product_copy01'):
        # Exactly one price node is expected beside each name node.
        price, = name.getparent().cssselect('.product_price01')
        name = name.text_content().strip()
        price = price.text_content().strip().strip(u'원').replace(',', '')
        yield name, price
if __name__ == '__main__':
    # Python 2 print statement.
    for name, price in get_menus():
        print name, price
|
##########################################################
## ##
## grammar lesson 1: first person becomes second person ##
## ##
##########################################################
##############################
## ##
## small talk with Jordan ##
## ##
##############################
## create the library for small-talk with Jordan
# I created the content mainly myself based on letting other people talk with Jordan and see what they would ask
# Pattern/response table for casual small talk: each entry is
# [regex, [candidate replies]].
# NOTE(review): replies mix "{}" (str.format-style) and "%1" (eliza-style)
# placeholders -- presumably only one substitution syntax is supported by
# the reply engine; confirm which and unify.
dict_smalltalk = [
    [r'Hi(.*)',
        [ "Hi! Whazzup?"]],
    [r'Hello(.*)',
        [ "Hi, how are you?",
          "Hey, whazzup?",
          "Hello you!"]],
    [r'Hey(.*)',
        [ "Hi, how are you?",
          "Hey, whazzup?",
          "Hello you!"]],
    [r'Yo(.*)',
        [ "Hi, how are you?",
          "Hey, whazzup?",
          "Hello you!"]],
    [r'Ok(.*)',
        [ "Then, let's start!",
          "How are you doing?",
          "Hello!"]],
    [r'(.*) good (.*)',
        [ "That's great!",
          "Nice to hear.",
          "How wonderful."]],
    [r'(.*) well (.*)',
        [ "That's great!",
          "Nice to hear.",
          "How wonderful."]],
    [r'(.*) fine (.*)',
        [ "That's great!",
          "Nice to hear.",
          "How wonderful."]],
    [r'(.*) nice (.*)',
        [ "That's great!",
          "Nice to hear.",
          "How wonderful."]],
    [r'(.*) bad (.*)',
        [ "Oh, I am sorry to hear that!",
          "If you want you can start me anew and switch to the topic by pressing 'q' ",
          "Oh no!"]],
    [r'(.*) not well (.*)',
        [ "Oh, I am sorry to hear that!",
          "If you want you can start me anew and switch to the topic by pressing 'q'.",
          "Oh no!"]],
    [r'I am (.*)',
        [ "That is nice to hear!",
          "How wonderful."]],
    [r'I want (.*)',
        [ "Yeah, me too!",
          "Why do you want {}",
          "What would you do if you got {}",
          "If you got %1, then what would you do?"]],
    [r'Nothing (.*)',
        [ "Well, how are you doing?",
          "Ok. well, how is the weather?"]],
    [r'A lot (.*)',
        [ "Well, how are you doing?",
          "Ok. well, how is the weather?"]],
    [r'How are you (.*)',
        [ "Great. And you?",
          "I'm doing good, how about you?"]],
    [r'What do you do (.*)',
        [ "I am a chatbot, obviously. You?"]],
    [r'Are you single (.*)',
        [ "I am a chatbot, dude. What do you think?"]],
    [r'What are you (.*)',
        [ "I am a chatbot, dude. What do you think?"]],
    [r'Do you have (.*)',
        [ "I am a chatbot, dude. What do you think?"]],
    [r'(.*) vacation (.*)',
        [ "I love travelling."]],
    [r'It is (.*)',
        [ "You seem very certain.",
          "Interesting. Why?"]],
    [r'Because (.*)',
        [ "I see."]],
    [r'yes (.*)',
        [ "I see."]],
    [r'no (.*)',
        [ "I see."]],
    [r'(.*) weather(.*)',
        [ "I love talking about the weather.",
          "Does it seem strange to talk to a computer about the weather?",
          "It is super sunny today, right?"]],
    [r'q',
        [ "Thank you for talking with me.",
          "Good-bye!.",
          "Have a nice day!"]],
    ]
##########################################
## ##
## telling Jordan about your sorrows ##
## ##
##########################################
## create the library to talk with Jordan about your sorrows
# This content is mainly taken from the eliza.py package
# Pattern/response table for the "sorrows" (therapy) mode; content mainly
# adapted from the eliza.py package.
# NOTE(review): replies mix "{}" and "%1" placeholders -- presumably only
# one substitution syntax is supported by the reply engine; confirm.
dict_caring = [
    [r'Hi(.*)',
        [ "Hi, how are you?",
          "Hey, whazzup?",
          "Hello you!"]],
    [r'Hello(.*)',
        [ "Hi, how are you?",
          "Hey, whazzup?",
          "Hello you!"]],
    [r'Hey(.*)',
        [ "Hi, how are you?",
          "Hey, whazzup?",
          "Hello you!"]],
    [r'(.*) good (.*)',
        [ "That's great! Why don't you tell me what's brought you here?",
          "Nice to hear. Why don't you tell me what's brought you here?",
          "How wonderful. Why don't you tell me what's brought you here?"]],
    [r'(.*) bad (.*)',
        [ "Oh, I am sorry to hear that!",
          "Tell me about that.",
          "Oh no!"]],
    [r'(.*) sad (.*)',
        [ "Oh, I am sorry to hear that!",
          "Tell me about that.",
          "Oh no!"]],
    [r'(.*) not well (.*)',
        [ "Oh, I am sorry to hear that!",
          "Tell me about that.",
          "Oh no!"]],
    [r'I need (.*)',
        [ "Why do you need {}",
          "Would it really help you to get {}",
          "Are you sure you need {}"]],
    [r'Why don\'?t you ([^\?]*)\??',
        [ "Do you really think I don't {}",
          "Perhaps eventually I will %1.",
          "Do you really want me to {}"]],
    [r'Why can\'?t I ([^\?]*)\??',
        [ "Do you think you should be able to {}",
          "If you could %1, what would you do?",
          "I don't know -- why can't you {}",
          "Have you really tried?"]],
    [r'I can\'?t (.*)',
        [ "How do you know you can't {}",
          "Perhaps you could %1 if you tried.",
          "What would it take for you to {}"]],
    [r'I am (.*)',
        [ "Did you come to me because you are {}",
          "How do you feel about being {}"]],
    [r'I\'?m (.*)',
        [ "How does being %1 make you feel?",
          "Do you enjoy being {}",
          "Why do you tell me you're {}",
          "Why do you think you're {}"]],
    [r'Are you ([^\?]*)\??',
        [ "Why does it matter whether I am {}",
          "Would you prefer it if I were not {}",
          "Perhaps you believe I am %1.",
          "I may be %1 -- what do you think?"]],
    [r'What (.*)',
        [ "Why do you ask?",
          "How would an answer to that help you?",
          "What do you think?"]],
    [r'How (.*)',
        [ "How do you suppose?",
          "Perhaps you can answer your own question.",
          "What is it you're really asking?"]],
    [r'Because (.*)',
        [ "Is that the real reason?",
          "What other reasons come to mind?",
          "Does that reason apply to anything else?",
          "If %1, what else must be true?"]],
    [r'(.*) sorry (.*)',
        [ "There are many times when no apology is needed.",
          "What feelings do you have when you apologize?"]],
    [r'Hello(.*)',
        [ "Hello... I'm glad you could drop by today.",
          "Hi there... how are you today?",
          "Hello, how are you feeling today?"]],
    [r'I think (.*)',
        [ "Do you doubt {}",
          "Do you really think so?",
          "But you're not sure {}"]],
    [r'(.*) friend (.*)',
        [ "Tell me more about your friends.",
          "When you think of a friend, what comes to mind?",
          "Why don't you tell me about a childhood friend?"]],
    [r'Yes',
        [ "You seem quite sure.",
          "OK, but can you elaborate a bit?"]],
    [r'(.*) computer(.*)',
        [ "Are you really talking about me?",
          "Does it seem strange to talk to a computer?",
          "How do computers make you feel?",
          "Do you feel threatened by computers?"]],
    [r'Is it (.*)',
        [ "Do you think it is {}",
          "Perhaps it's %1 -- what do you think?",
          "If it were %1, what would you do?",
          "It could well be that %1."]],
    [r'It is (.*)',
        [ "You seem very certain.",
          "If I told you that it probably isn't %1, what would you feel?"]],
    [r'Can you ([^\?]*)\??',
        [ "What makes you think I can't {}",
          "If I could %1, then what?",
          "Why do you ask if I can {}"]],
    [r'Can I ([^\?]*)\??',
        [ "Perhaps you don't want to %1.",
          "Do you want to be able to {}",
          "If you could %1, would you?"]],
    [r'You are (.*)',
        [ "Why do you think I am {}",
          "Does it please you to think that I'm {}",
          "Perhaps you would like me to be %1.",
          "Perhaps you're really talking about yourself?"]],
    [r'You\'?re (.*)',
        [ "Why do you say I am {}",
          "Why do you think I am {}",
          "Are we talking about you, or me?"]],
    [r'I don\'?t (.*)',
        [ "Don't you really {}",
          "Why don't you {}",
          "Do you want to {}"]],
    [r'I feel (.*)',
        [ "Good, tell me more about these feelings.",
          "Do you often feel {}",
          "When do you usually feel {}",
          "When you feel %1, what do you do?"]],
    [r'I have (.*)',
        [ "Why do you tell me that you've {}",
          "Have you really {}",
          "Now that you have %1, what will you do next?"]],
    [r'I would (.*)',
        [ "Could you explain why you would {}",
          "Why would you {}",
          "Who else knows that you would {}"]],
    [r'Is there (.*)',
        [ "Do you think there is {}",
          "It's likely that there is %1.",
          "Would you like there to be {}"]],
    [r'My (.*)',
        [ "I see, your %1.",
          "Why do you say that your {}",
          "When your %1, how do you feel?"]],
    [r'You (.*)',
        [ "We should be discussing you, not me.",
          "Why do you care whether I {}"]],
    [r'Why (.*)',
        [ "Why don't you tell me the reason why {}",
          "Why do you think {}" ]],
    [r'I want (.*)',
        [ "What would it mean to you if you got {}",
          "Why do you want {}",
          "What would you do if you got {}",
          "If you got %1, then what would you do?"]],
    [r'(.*) mother(.*)',
        [ "Tell me more about your mother.",
          "What was your relationship with your mother like?",
          "How do you feel about your mother?",
          "How does this relate to your feelings today?",
          "Good family relations are important."]],
    [r'(.*) father(.*)',
        [ "Tell me more about your father.",
          "How did your father make you feel?",
          "How do you feel about your father?",
          "Does your relationship with your father relate to your feelings today?",
          "Do you have trouble showing affection with your family?"]],
    [r'(.*) child(.*)',
        [ "Did you have close friends as a child?",
          "What is your favorite childhood memory?",
          "Do you remember any dreams or nightmares from childhood?",
          "Did the other children sometimes tease you?",
          "How do you think your childhood experiences relate to your feelings today?"]],
    [r'(.*)\?',
        [ "Why do you ask that?",
          "Please consider whether you can answer your own question.",
          "Perhaps the answer lies within yourself?",
          "Why don't you tell me?"]],
    [r'q',
        [ "Thank you for talking with me.",
          "Good-bye.",
          "Thank you, that will be $150. Have a good day!"]],
    [r'I (.*)',
        [ "Please tell me more.",
          "Let's change focus a bit... Tell me about your family.",
          "Let's change focus a bit... Tell me about your friends.",
          "Let's change focus a bit... Tell me about your hobbies.",
          "Very interesting.",
          "%1.",
          "I see. And what does that tell you?",
          "How does that make you feel?",
          "How do you feel when you say that?"]]
    ]
#########################
## ##
## cursing with Jordan ##
## ##
#########################
## create library for cursing with Jordan aka evil Jordan
# The content of this library is based on movies, books, and several internet sources
# Pattern/response table for "evil Jordan" cursing mode; content based on
# movies, books and internet sources.
# NOTE(review): replies mix "{}" and "%1"-free styles; the duplicated insult
# lists for '(.*)\!' and '(.*)\.' could presumably be shared -- confirm how
# the reply engine selects entries before refactoring the data.
dict_cursing = [
    [r'Hi(.*)',
        [ "I hate you.",
          "What do you want, idiot?",
          "What are you lookin at, idiot?"]],
    [r'Hello(.*)',
        [ "I hate you.",
          "What do you want, idiot?",
          "What are you lookin at, idiot?"]],
    [r'Hey(.*)',
        [ "I hate you.",
          "What do you want, idiot?",
          "What are you lookin at, idiot?"]],
    [r'Yo(.*)',
        [ "I hate you.",
          "What do you want, idiot?",
          "What are you lookin at, idiot?"]],
    [r'You are a (.*)',
        [ "YOU are a {}",
          "Right back at ya.",
          "Your mother is a {}"]],
    [r'(.*)\!',
        [ "You’re the reason God created the middle finger.",
          "You’re a grey sprinkle on a rainbow cupcake.",
          "If your brain was dynamite, there wouldn’t be enough to blow your hat off.",
          "You are more disappointing than an unsalted pretzel.",
          "Light travels faster than sound which is why you seemed bright until you spoke.",
          "You know, you are a classic example of the inverse ratio between the size of the mouth and the size of the brain.",
          "To call you stupid would be an insult to stupid people! I've known sheep that could outwit you. I've worn dresses with higher IQs.",
          "Your heart is full of unwashed socks. Your soul is full of gunk ...The three words that best describe you are as follows, and I quote, Stink, stank, stunk!",
          "Hold still. I’m trying to imagine you with personality.",
          "Your face makes onions cry.",
          "Your brain's so minute that if a hungry cannibal cracked your head open, there wouldn't be enough to cover a small water biscuit.",
          "Stupid, fat hobbit.",
          "Keep rolling your eyes, you might eventually find a brain.",
          "Fool of a Took! Throw yourself in next time and rid us of your stupidity!",
          "OH MY GOD! IT SPEAKS!",
          "You are so full of shit, the toilet’s jealous.",
          "You're the reason the gene pool needs a lifeguard.",
          "You must have been born on the highway cos' that's where most accidents happen.",
          "Is your ass jealous of the amount of shit that just came out of your mouth",
          "When you were born, the doctor threw you out the window and the window threw you back.",
          "I think... no, I am positive... that you are the most unattractive being I have ever met in my entire life. In the short time we've been together, you have demonstrated every loathsome characteristic of the human personality and even discovered a few new ones. You are physically repulsive, intellectually retarded, you're morally reprehensible, vulgar, insensitive, selfish, stupid, you have no taste, a lousy sense of humor and you smell. You're not even interesting enough to make me sick.",
          "You are a fart factory, slug-slimed sack of rat guts in cat vomit. A cheesy scab picked pimple squeezing finger bandage. A week old maggot burger with everything on it and flies on the side."
        ]],
    [r'(.*)\.',
        [ "You’re the reason God created the middle finger.",
          "You’re a grey sprinkle on a rainbow cupcake.",
          "If your brain was dynamite, there wouldn’t be enough to blow your hat off.",
          "You are more disappointing than an unsalted pretzel.",
          "Light travels faster than sound which is why you seemed bright until you spoke.",
          "You know, you are a classic example of the inverse ratio between the size of the mouth and the size of the brain.",
          "To call you stupid would be an insult to stupid people! I've known sheep that could outwit you. I've worn dresses with higher IQs.",
          "Your heart is full of unwashed socks. Your soul is full of gunk ...The three words that best describe you are as follows, and I quote, Stink, stank, stunk!",
          "Hold still. I’m trying to imagine you with personality.",
          "Your face makes onions cry.",
          "Your brain's so minute that if a hungry cannibal cracked your head open, there wouldn't be enough to cover a small water biscuit.",
          "Stupid, fat hobbit.",
          "Keep rolling your eyes, you might eventually find a brain.",
          "Fool of a Took! Throw yourself in next time and rid us of your stupidity!",
          "OH MY GOD! IT SPEAKS!",
          "You are so full of shit, the toilet’s jealous.",
          "You're the reason the gene pool needs a lifeguard.",
          "You must have been born on the highway cos' that's where most accidents happen.",
          "Is your ass jealous of the amount of shit that just came out of your mouth",
          "When you were born, the doctor threw you out the window and the window threw you back.",
          "I think... no, I am positive... that you are the most unattractive being I have ever met in my entire life. In the short time we've been together, you have demonstrated every loathsome characteristic of the human personality and even discovered a few new ones. You are physically repulsive, intellectually retarded, you're morally reprehensible, vulgar, insensitive, selfish, stupid, you have no taste, a lousy sense of humor and you smell. You're not even interesting enough to make me sick.",
          "You are a fart factory, slug-slimed sack of rat guts in cat vomit. A cheesy scab picked pimple squeezing finger bandage. A week old maggot burger with everything on it and flies on the side."
        ]],
    [r'Are you ([^\?]*)\??',
        [ "OH MY GOD! IT SPEAKS!",
          "Are YOU {}?",
          "Are you talking to yourself?"]],
    [r'sorry (.*)',
        [ "Me, too."]],
    [r'(.*) fuck (.*)',
        [ "You are so full of shit, the toilet’s jealous.",
          "Stupid, fat hobbit.",
          "You’re the reason God created the middle finger."]],
    [r'You are (.*)',
        [ "YOU are {}",
          "Right back at ya.",
          "Your mother is {}"]],
    [r'You\'?re (.*)',
        [ "YOU are {}",
          "Right back at ya.",
          "Your mother is {}"]],
    [r'You (.*)',
        [ "YOU {}",
          "Right back at ya.",
          "Your mother {}s"]],
    [r'q',
        [ "Thank you for talking with me, dipshit.",
          "Good-bye, dipshit.",
          "Thank you, that will be $150, dipshit!"]],
    ]
##########################################
## ##
## meditating with Jordan ##
## ##
##########################################
## create the library to talk with Jordan about your sorrows
# The content of this library is based on my memory of when I did some courses on meditating and mindfulness
# Regex-pattern -> canned-responses table that walks the user through a
# scripted meditation session. Patterns are tried in order; the first match
# wins, so the greeting variants all map to the same welcome text.
# NOTE(review): the 'Next exercise' reply tells the user to type
# 'Last exercise', but the pattern below is r'Last Exercise' (capital E) —
# if matching is case-sensitive the instruction will never match; confirm
# against the matcher. The stray trailing .' in that reply looks like a typo.
dict_meditating = [
    [r'Hi(.*)',
        [ "Welcome. Please bring yourself into a comfortable position. \n You can sit or lie down. Write 'I am ready.' to continue."]],
    [r'Hello(.*)',
        [ "Welcome. Please bring yourself into a comfortable position. \n You can sit or lie down. Write 'I am ready.' to continue."]],
    [r'Hey(.*)',
        [ "Welcome. Please bring yourself into a comfortable position. \n You can sit or lie down. Write 'I am ready.' to continue."]],
    [r'Yo(.*)',
        [ "Welcome. Please bring yourself into a comfortable position. \n You can sit or lie down. Write 'I am ready.' to continue."]],
    [r'I am ready.',
        ["Great. First of all, I would like you to take deep breaths with your eyes closed for about 10 minutes (you can set a timer). \n While you are breathing, examine all your body parts from head, over your shoulders, your belly, your legs and your feet. \n Determine how each and every of these feel today. When you're done, type 'I am done.'" ]],
    [r'I am done.',
        [ "Nice work. Next, bring yourself in a cross-legged position, if you aren't yet, and think about three things that you like about yourself \n and three things that you like about your life right now. With your arms make a giving gesture first to yourself and then into the room while breathing in and out. \n With each pair of gestures, think about one positive thing you just thought of. \n When you are ready with this you can either repeat it or type 'Next exercise'." ]],
    [r'Next exercise',
        [ "Now let's look at this breathing a bit more closely. \n Repeat the following 10 times: Inhale for 4 seconds, hold your breath for 7 seconds, exhale for 8 seconds. When you are ready, you can go to the last exercise by typing 'Last exercise'.'"]],
    [r'Last Exercise',
        [ "Ok. In the last exercise we connect the breathing with a sound. \n Please repeat the following 20 times: Inhale as best as you can and \n exhale while making a sound ('mhhmm', 'ohmmm', etc.) that feel comfortable. \n Type 'I am here.' whenever you feel that you are finished. "]],
    [r'I am here.',
        [ "How do you feel now?",
          "Do you enjoy it?"]],
    [r'yes (.*)',
        [ "I am glad to hear that. Thank you for letting me guide you through this meditation. \n Press 'q' once to change topic, press 'q' twice to end chatting with me."]],
    [r'no (.*)',
        [ "I am sorry to hear that. If something is bothering you, \n press 'q' to change the topic and talk with me about your sorrows, press 'q' twice to end chatting with me."]],
    [r'(.*) good (.*)',
        [ "I am glad to hear that. Thank you for letting me guide you through this meditation. \n Press 'q' once to change topic, press 'q' twice to end chatting with me."]],
    [r'(.*) fine (.*)',
        [ "I am glad to hear that. Thank you for letting me guide you through this meditation. \n Press 'q' once to change topic, press 'q' twice to end chatting with me."]],
    [r'(.*) nice (.*)',
        [ "I am glad to hear that. Thank you for letting me guide you through this meditation. \n Press 'q' once to change topic, press 'q' twice to end chatting with me."]],
    [r'q',
        [ "Thank you for letting me guide you. See you soon, my young padawan."]],
    ]
######################################
## ##
## Talking with Jordan about Corona ##
## ##
######################################
## create library for talking about Corona
# The content of this library is based on discussing mental health and other issues with other people
# A part of this content is also based on the package eliza.py
# I have to admit that this library could be more elaborate on corona-related content; I plan to expand it after the deadline.
# Regex-pattern -> canned-responses table for chatting about the corona
# situation. Patterns are tried in order; the first match wins, so the
# duplicated patterns further down (e.g. the second 'What can I do to stay
# sane?' entry) and anything after the catch-all r'(.*)' are dead data,
# kept for reference.
# NOTE(review): responses mix two placeholder styles, '{}' and '%1' — confirm
# which one the reply-formatting code actually substitutes; the other style
# will be echoed literally.
dict_corona = [
    [r'Hi(.*).',
        [ "Hi, my friend. How is quarantine life?."]],
    [r'Hello(.*)',
        [ "Hi, my friend. How is quarantine life?."]],
    [r'Hey(.*)',
        [ "Hi, my friend. How is quarantine life?."]],
    [r'Yo(.*)',
        [ "Hi, my friend. How is quarantine life?."]],
    [r'Ok(.*)',
        [ "Sounds good for now, I guess. Well, if you want, you can complain to me about how crazy the world is right now. If yes, type 'crazy world'."]],
    [r'(.*) good (.*)',
        [ "Great. Well, if you want, you can complain to me about how crazy the world is right now. If yes, type 'crazy world'."]],
    [r'(.*) well (.*)',
        [ "Great. Well, if you want, you can complain to me about how crazy the world is right now. If yes, type 'crazy world'."]],
    [r'(.*) fine (.*)',
        [ "Great. Well, if you want, you can complain to me about how crazy the world is right now. If yes, type 'crazy world'."]],
    [r'(.*) nice (.*)',
        [ "Great. Well, if you want, you can complain to me about how crazy the world is right now. If yes, type 'crazy world'."]],
    [r'(.*) bad (.*)',
        [ "Oh, I am sorry to hear that! If you want some suggestions to stay sane, type 'What can I do to stay sane', or 'suggestions'."]],
    [r'(.*) crazy (.*)',
        [ "Oh, I am sorry to hear that! If you want some suggestions to stay sane, type 'What can I do to stay sane', or 'suggestions'."]],
    [r'(.*) not well (.*)',
        [ "Oh, I am sorry to hear that! If you want some suggestions to stay sane, type 'What can I do to stay sane', or 'suggestions'."]],
    [r'(.*) shit (.*)',
        [ "Oh, I am sorry to hear that! If you want some suggestions to stay sane, type 'What can I do to stay sane', or 'suggestions'."]],
    [r'crazy world',
        [ "The media is crazy right?",
          "Did you see what Trump posted on Twitter today?",
          "Man, the economy will friggin die.",
          "Doing some sports could help as well!"]],
    [r'(.*) yes (.*)',
        [ "Nice that we agree on that."]],
    [r'(.*) agree (.*)',
        [ "Nice that we agree on that."]],
    [r'(.*) agreed (.*)',
        [ "Nice that we agree on that."]],
    [r'(.*) no (.*)',
        [ "Well, you should have a look then."]],
    [r'What can I do to stay sane?',
        [ "You could go out for a walk.",
          "You could meet someone with 1.5 meters distance.",
          "You could talk to a friend",
          "Doing some sports could help as well!"]],
    [r'What can I do to stay sane?',
        [ "You could go out for a walk.",
          "You could meet someone with 1.5 meters distance.",
          "You could talk to a friend",
          "Doing some sports could help as well!"]],
    [r'(.*) suggestions (.*)',
        [ "You could go out for a walk.",
          "You could meet someone with 1.5 meters distance.",
          # Fixed: a missing comma after the next string used to fuse it with
          # the following one into a single garbled response.
          "You could talk to a friend",
          "Doing some sports could help as well!"]],
    [r'(.*) insane (.*)',
        [ "I understand. This is a special situation.",
          "How are you currently handling it?",
          "Do you have someone to talk to except me?."]],
    [r'I don\'?t (.*)',
        [ "Don't you really {}",
          "Why don't you {}",
          "Do you want to {}"]],
    [r'I feel (.*)',
        [ "Good, tell me more about these feelings.",
          "Do you often feel {}",
          "When do you usually feel {}",
          "When you feel %1, what do you do?"]],
    [r'I have (.*)',
        [ "Why do you tell me that you've {}",
          "Have you really {}",
          "Now that you have %1, what will you do next?"]],
    [r'(.*)\?',
        [ "Why do you ask that?",
          "Perhaps the answer lies within yourself?",
          "Why don't you tell me?"]],
    [r'I would (.*)',
        [ "Could you explain why you would {}",
          "Why would you {}",
          "Who else knows that you would {}"]],
    [r'Is there (.*)',
        [ "Do you think there is {}",
          "It's likely that there is %1.",
          "Would you like there to be {}"]],
    [r'I think (.*)',
        [ "Do you doubt {}",
          "Do you really think so?",
          "But you're not sure {}"]],
    [r'(.*) friend (.*)',
        [ "Tell me more about your friends.",
          "When you think of a friend, what comes to mind?",
          "Why don't you tell me about a childhood friend?"]],
    [r'Yes',
        [ "You seem quite sure.",
          "OK, but can you elaborate a bit?"]],
    [r'(.*) computer(.*)',
        [ "Are you really talking about me?",
          "Does it seem strange to talk to a computer?",
          "How do computers make you feel?",
          "Do you feel threatened by computers?"]],
    [r'Is it (.*)',
        [ "Do you think it is {}",
          "Perhaps it's %1 -- what do you think?",
          "If it were %1, what would you do?",
          "It could well be that %1."]],
    [r'It is (.*)',
        [ "You seem very certain.",
          "If I told you that it probably isn't %1, what would you feel?"]],
    [r'Can you ([^\?]*)\??',
        [ "What makes you think I can't {}",
          "If I could %1, then what?",
          "Why do you ask if I can {}"]],
    [r'Can I ([^\?]*)\??',
        [ "Perhaps you don't want to %1.",
          "Do you want to be able to {}",
          "If you could %1, would you?"]],
    [r'You are (.*)',
        [ "Why do you think I am {}",
          "Does it please you to think that I'm {}",
          "Perhaps you would like me to be %1.",
          "Perhaps you're really talking about yourself?"]],
    [r'You\'?re (.*)',
        [ "Why do you say I am {}",
          "Why do you think I am {}",
          "Are we talking about you, or me?"]],
    [r'I don\'?t (.*)',
        [ "Don't you really {}",
          "Why don't you {}",
          "Do you want to {}"]],
    [r'I feel (.*)',
        [ "Good, tell me more about these feelings.",
          "Do you often feel {}",
          "When do you usually feel {}",
          "When you feel %1, what do you do?"]],
    [r'I have (.*)',
        [ "Why do you tell me that you've {}",
          "Have you really {}",
          "Now that you have %1, what will you do next?"]],
    [r'I would (.*)',
        [ "Could you explain why you would {}",
          "Why would you {}",
          "Who else knows that you would {}"]],
    [r'Is there (.*)',
        [ "Do you think there is {}",
          "It's likely that there is %1.",
          "Would you like there to be {}"]],
    [r'My (.*)',
        [ "I see, your %1.",
          "Why do you say that your {}",
          "When your %1, how do you feel?"]],
    [r'You (.*)',
        [ "We should be discussing you, not me.",
          "Why do you care whether I {}"]],
    [r'Why (.*)',
        [ "Why don't you tell me the reason why {}",
          "Why do you think {}" ]],
    [r'I want (.*)',
        [ "What would it mean to you if you got {}",
          "Why do you want {}",
          "What would you do if you got {}",
          "If you got %1, then what would you do?"]],
    [r'(.*) mother(.*)',
        [ "Tell me more about your mother.",
          "What was your relationship with your mother like?",
          "How do you feel about your mother?",
          "How does this relate to your feelings today?",
          "Good family relations are important."]],
    [r'(.*) father(.*)',
        [ "Tell me more about your father.",
          "How did your father make you feel?",
          "How do you feel about your father?",
          "Does your relationship with your father relate to your feelings today?",
          "Do you have trouble showing affection with your family?"]],
    [r'(.*) child(.*)',
        [ "Did you have close friends as a child?",
          "What is your favorite childhood memory?",
          "Do you remember any dreams or nightmares from childhood?",
          "Did the other children sometimes tease you?",
          "How do you think your childhood experiences relate to your feelings today?"]],
    [r'(.*)\?',
        [ "Why do you ask that?",
          "Please consider whether you can answer your own question.",
          "Perhaps the answer lies within yourself?",
          "Why don't you tell me?"]],
    [r'q',
        [ "Thank you for talking with me.",
          "Good-bye.",
          "Thank you, that will be $150. Have a good day!"]],
    [r'(.*)',
        [ "Please tell me more.",
          "Let's change focus a bit... Tell me about your family.",
          "Very interesting.",
          "%1.",
          "I see. And what does that tell you?",
          "How does that make you feel?",
          "How do you feel when you say that?"]],
    # Unreachable: r'(.*)' above matches everything, including 'q'.
    [r'q',
        [ "Thank you for talking with me, and #staysafe!",
          "Good-bye, #staysafe"]],
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 7 11:44:56 2019
@author: elina
"""
""" Lesson 11 is about printing values, asking for user input and storing input
to do operations with it.
Note: functions were not part of the lesson, but made testing easier."""
# Newline string, used to insert a blank line between printed sections.
space = "\n"
def printVariables():
    """Quick Check 11.1: print expression results and concatenated strings."""
    print("Quick Check 11.1")
    # What is printed?
    print(13 - 1)
    "nice"  # bare string literal: evaluated and discarded, prints nothing
    first = "nice"
    second = " is the new cool"
    print(first.capitalize() + second)
def capitalizeChoice():
    """Quick Check 11.2: mix numbers and strings in print(), then upper-case."""
    print("Quick Check 11.2")
    treat = "cookies"
    snack = "pickles"
    amount = 100
    print(amount, snack, "and", amount, treat + ".")
    print("I choose the " + treat.upper() + "!")
def getUserInput():
    """Quick Check 11.3: prompt for (and discard) three pieces of user input."""
    print("Quick Check 11.3")
    prompts = (
        "What's your secret? ",
        "What's your favourite colour? ",
        "Please enter one of the following symbols: #, $, %, &, or * : ",
    )
    for prompt in prompts:
        input(prompt)
def faveSong():
    """Quick Check 11.4: echo the user's favourite song three times."""
    print("Quick Check 11.4")
    fave_song = input("What's your favourite song? ")
    for _ in range(3):
        print(fave_song)
def celebrityName():
    """Split a celebrity's full name at the first space and print both parts."""
    full_name = input("What's the first and last name of a celebrity? ")
    split_at = full_name.find(" ")
    print(full_name[:split_at])
    print(full_name[split_at + 1:])
def intSquare():
    """Listing 11.5: square a whole number typed by the user."""
    print("Listing 11.5")
    # Cast the raw input string to int before doing arithmetic.
    num = int(input("Enter a number to find square of: "))
    print(num ** 2)
def floatSquare():
    """Quick Check 11.5: square a (possibly fractional) number from the user."""
    print("Quick Check 11.5")
    # Cast the raw input string to float before doing arithmetic.
    num = float(input("Enter a number to find square of: "))
    print(num ** 2)
def multiplyNumbers():
    """Listing 11.6: multiply two user-supplied numbers and show the result."""
    print("Listing 11.6")
    first = float(input("Enter a number: "))
    second = float(input("Enter a second number: "))
    print(first, "*", second, "=", first * second)
def powerOf():
    """Q.11.1: raise one user-supplied integer to the power of another."""
    print("Q.11.1")
    base = int(input("Enter a number: "))
    exponent = int(input("Enter another number: "))
    print(base, "**", exponent, "=", base ** exponent)
def inputAgeName():
    """Q11.2: greet the user by name and project their age 25 years ahead."""
    print("Q11.2")
    user_name = input("Enter your name: ")
    user_age = int(input("Enter your age: "))
    print("Hi", user_name + "!", "In 25 years you will be", str(user_age + 25) + "!")
# Run the two interactive exercises when the script is executed.
powerOf()
inputAgeName()
|
from django.db import models
class Whitelist(models.Model):
    """Whitelisted MSISDNs (phone numbers) for the me2u service.

    Maps the legacy ``me2u_whitelist`` table; primary key is managed
    by the application, not auto-generated.
    """
    id = models.IntegerField(primary_key=True)
    msisdn = models.CharField(max_length=15)
    # Fixed: max_length is a CharField option and is invalid on IntegerField
    # (Django ignores it / flags it in system checks), so it was dropped.
    active = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(null=True, blank=True)

    class Meta:
        db_table = u'me2u_whitelist'
class DealerWhitelist(models.Model):
    """Whitelisted dealer MSISDNs for the me2u service.

    Maps the legacy ``dealer_me2u_whitelist`` table; primary key is
    managed by the application, not auto-generated.
    """
    id = models.IntegerField(primary_key=True)
    msisdn = models.CharField(max_length=15)
    # Fixed: max_length is a CharField option and is invalid on IntegerField
    # (Django ignores it / flags it in system checks), so it was dropped.
    active = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(null=True, blank=True)

    class Meta:
        db_table = u'dealer_me2u_whitelist'
|
# Print the first `a` odd numbers (1, 3, ..., 2a-1) as a comma-separated line.
a = int(input("Enter a number:"))
odd_numbers = [str(value) for value in range(1, a * 2, 2)]
print(','.join(odd_numbers))
|
# solution to https://www.hackerrank.com/challenges/maximum-element
# Each stack entry stores [value, running maximum], so query '3' is O(1).
n = int(input())
stack = []
for _ in range(n):
    command = input().split(" ")
    if command[0] == '1':
        value = int(command[1])
        if stack:
            running_max = max(value, stack[-1][1])
        else:
            running_max = value
        stack.append([value, running_max])
    if command[0] == '2':
        stack.pop()
    if command[0] == '3':
        print(stack[-1][1])
|
import json
from multiprocessing import Pool
from urllib.parse import quote_plus
import requests
from retrying import retry
from sentry_sdk import capture_exception
from concertowl.apis.events import filter_events, unique_collected_events
from eventowl.settings import SENTRY_DSN
API_URL = 'https://rest.bandsintown.com/artists/{}/events'
def _event(api_event, performers):
try:
ticket_url = [offer['url'] for offer in api_event['offers'] if offer['type'] == 'Tickets']
description = api_event.get('description')
return {
'picture': None,
'city': api_event['venue']['city'],
'country': api_event['venue']['country'],
'title': description[:50] if description else ', '.join(p.title() for p in performers),
'start_time': api_event['datetime'].replace('T', ' '),
'end_time': None,
'venue': api_event['venue']['name'],
'address': None,
'ticket_url': ticket_url[0] if ticket_url else api_event['url'],
'artists': performers
}
except KeyError:
return {}
@retry(wait_fixed=60, stop_max_attempt_number=11)
def _get_events_call(artist):
    """Fetch the raw event list for *artist* from the Bandsintown REST API.

    Returns [] for 'not found' / search-error bodies that the API delivers
    as non-JSON text; raises IOError for any other non-JSON response.
    """
    response = requests.get(API_URL.format(quote_plus(artist)),
                            params={'app_id': 'eventowl'})
    response.raise_for_status()
    try:
        return response.json()
    except json.JSONDecodeError:
        body = response.text.lower()
        if 'not found' in body or 'error occurred while searching' in body:
            return []
        raise IOError(response.text)
def _get_events(artist):
    """Return model-ready events featuring *artist*, swallowing API failures.

    404s are treated as "no events"; other exceptions are reported to Sentry
    (when configured) and also yield an empty list.
    """
    try:
        raw_events = _get_events_call(artist)
    except Exception as exc:
        try:
            if exc.response.status_code == 404:
                return []
        except Exception:
            pass  # exception had no HTTP response attached
        if SENTRY_DSN:
            capture_exception(exc)
        return []
    results = []
    for raw in raw_events or []:
        lineup = [name.lower() for name in raw.get('lineup', [])]
        # Only keep events where the queried artist actually appears.
        if artist.lower() not in lineup:
            continue
        mapped = _event(raw, lineup)
        if mapped:
            results.append(mapped)
    return results
def get_events_for_artists(artists, locations):
    """Fetch events for all *artists* in parallel and filter by *locations*.

    Fixed: Pool(0) raises ValueError, so an empty artist list is now
    short-circuited instead of crashing.
    """
    if not artists:
        return filter_events(unique_collected_events([]), locations)
    collected_events = []
    # At most 5 worker processes; fewer when there are fewer artists.
    with Pool(min(len(artists), 5)) as pool:
        collected_events += pool.map(_get_events, artists)
    return filter_events(unique_collected_events(collected_events), locations)
|
# Given a sequence A of N integers and an integer X, print every element of A
# that is smaller than X, separated by spaces.
N, X = map(int, input().split())
A = list(map(int, input().split()))
for i in range(N):  # range(N) already iterates indices 0..N-1
    if A[i] < X:
        print(A[i], end=" ")
from pwn import *
from Exrop import Exrop
# Demo: build ROP chains against glibc with Exrop to open/read/write
# /etc/passwd through libc gadgets.
binname = "/lib/x86_64-linux-gnu/libc.so.6"
libc = ELF(binname, checksec=False)
# Resolve libc symbol addresses. Fixed: the first variable was named `open`,
# shadowing the builtin; all three renamed for clarity.
open_addr = libc.symbols['open']
read_addr = libc.symbols['read']
write_addr = libc.symbols['write']
bss = libc.bss()  # scratch writable memory for the "/etc/passwd" string
rop = Exrop(binname)
rop.find_gadgets(cache=True)
#print("func-call gadgets 0x41414141(0x20, 0x30, \"Hello\")")
#chain = rop.func_call(0x41414141, (0x20, 0x30, "Hello"), 0x7fffff00)
print("open('/etc/passwd', 0)")
chain = rop.func_call(open_addr, ("/etc/passwd", 0), bss)
chain.dump()
print("read(2, bss, 0x100)")
chain = rop.func_call(read_addr, (2, bss, 0x100))
chain.dump()
print("write(1, bss, 0x100)")
chain = rop.func_call(write_addr, (1, bss, 0x100))
chain.dump()
"""
open('/etc/passwd', 0)
$RSP+0x0000 : 0x000000000002155f # pop rdi; ret
$RSP+0x0008 : 0x00000000003ec860
$RSP+0x0010 : 0x0000000000155fc6 # pop r8; mov eax, 1; ret
$RSP+0x0018 : 0x7361702f6374652f
$RSP+0x0020 : 0x0000000000044359 # mov qword ptr [rdi], r8; ret
$RSP+0x0028 : 0x000000000002155f # pop rdi; ret
$RSP+0x0030 : 0x00000000003ec868
$RSP+0x0038 : 0x0000000000155fc6 # pop r8; mov eax, 1; ret
$RSP+0x0040 : 0x0000000000647773
$RSP+0x0048 : 0x0000000000044359 # mov qword ptr [rdi], r8; ret
$RSP+0x0050 : 0x000000000002155f # pop rdi; ret
$RSP+0x0058 : 0x00000000003ec860
$RSP+0x0060 : 0x0000000000023e6a # pop rsi; ret
$RSP+0x0068 : 0x0000000000000000
$RSP+0x0070 : 0x000000000010fc40
read(2, bss, 0x100)
$RSP+0x0000 : 0x00000000001306d9 # pop rdx; pop rsi; ret
$RSP+0x0008 : 0x0000000000000100
$RSP+0x0010 : 0x00000000003ec860
$RSP+0x0018 : 0x000000000002155f # pop rdi; ret
$RSP+0x0020 : 0x0000000000000002
$RSP+0x0028 : 0x0000000000110070
write(1, bss, 0x100)
$RSP+0x0000 : 0x00000000001306d9 # pop rdx; pop rsi; ret
$RSP+0x0008 : 0x0000000000000100
$RSP+0x0010 : 0x00000000003ec860
$RSP+0x0018 : 0x000000000002155f # pop rdi; ret
$RSP+0x0020 : 0x0000000000000001
$RSP+0x0028 : 0x0000000000110140
"""
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import sklearn as sk
import torch
import torch.nn.functional as F
# --- Data loading and cleanup (Kaggle Titanic CSVs) ---
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.drop('Cabin',axis=1,inplace=True)
test.drop('Cabin',axis=1,inplace=True)
train.dropna(inplace=True)
# One-hot encode Sex/Embarked (drop_first avoids collinear dummy columns),
# then drop the raw categorical/text columns.
sex1 = pd.get_dummies(train['Sex'],drop_first=True)
embark1 = pd.get_dummies(train['Embarked'],drop_first=True)
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
train = pd.concat([train,sex1,embark1],axis=1)
# Prepend a constant 'one' column (bias/intercept term).
colname = train.columns.tolist()
colname.insert(0, 'one')
train['one']= 1
train = train.reindex(columns = colname)
X = train.drop('Survived',axis=1)
#X = train.drop('one',axis=1)
Y = train['Survived']
X = np.array(X,dtype=float)
Y = np.array(Y,dtype=float)
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=0.20,random_state=101)
train_columns = X_train.shape[1]
train_rows = X_train.shape[0]
test_columns = X_test.shape[1]
test_rows = X_test.shape[0]
# --- Tensors: labels as long (class indices) for CrossEntropyLoss ---
X_train = torch.tensor(X_train, dtype=torch.float32)
Y_train = torch.tensor(Y_train, dtype=torch.long)
Y_train = torch.unsqueeze(Y_train, dim=1)
X_test = torch.tensor(X_test, dtype=torch.float32)
Y_test = torch.tensor(Y_test, dtype=torch.float32)
Y_test = torch.unsqueeze(Y_test, dim=1)
# --- Model: input -> 5 (ReLU) -> 3 (Tanh) -> 2 output logits ---
D_in, H1, H2, D_out = train_columns, 5, 3, 2
iteration = 10000
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H1),
    torch.nn.ReLU(),
    torch.nn.Linear(H1, H2),
    torch.nn.Tanh(),
    torch.nn.Linear(H2, D_out),
)
criterion = torch.nn.CrossEntropyLoss()
#optimizer =torch.optim.Adadelta(model.parameters(), lr=2, rho=0.9, eps=1e-06, weight_decay=0)
#optimizer = torch.optim.Adagrad(model.parameters(), lr=5, lr_decay=0, weight_decay=0, initial_accumulator_value=0)
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.0045, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
#optimizer = torch.optim.SGD(model.parameters(), lr=0.0025)
# --- Full-batch training loop ---
for i in range(iteration):
    # NOTE(review): CrossEntropyLoss expects raw logits; applying sigmoid
    # first squashes them and is almost certainly unintended — confirm.
    y_pre = torch.sigmoid(model(X_train))
    loss = criterion(y_pre, Y_train.long().squeeze(1))
    model.zero_grad()
    loss.backward()
    optimizer.step()
# --- Evaluation on training data ---
Pre_train = model(X_train)
Pre_train = torch.softmax(Pre_train,dim=1)
Pre_train = torch.max(Pre_train, 1)
Pre_train = Pre_train[1].data.numpy()  # predicted class indices
Y_true = Y_train.data.numpy()
# NOTE(review): classification_report's signature is (y_true, y_pred) —
# the arguments here look swapped relative to the test-data call below.
print(classification_report(Pre_train, Y_true))
print("Accuracy for training data")
print( sk.metrics.accuracy_score(Pre_train,Y_true))
# --- Evaluation on held-out data ---
Pre_test = model(X_test)
Pre_test = torch.softmax(Pre_test,dim=1)
Pre_test = torch.max(Pre_test, 1)
Pre_test = Pre_test[1].data.numpy()
Test_true = Y_test.data.numpy()
print(classification_report(Test_true, Pre_test))
print("Accuracy for test data")
print( sk.metrics.accuracy_score(Test_true, Pre_test))
|
#!/usr/bin/python
# Python 2 utility: kill -9 every process owned by the current user whose
# `ps -ewf` line matches the grep expression given as the first argument.
import sys
if len(sys.argv) == 1:
    print 'input grep expression'
    sys.exit(1)
import commands,os
user=os.environ['USER']
grep=sys.argv[1]
# NOTE(review): `list` shadows the builtin — harmless here, but worth renaming.
# getstatusoutput returns (status, output); [1] is the raw ps output.
list=commands.getstatusoutput("ps -ewf |grep '%s'" % grep)[1].split('\n')
for item in list:
    # First two whitespace-separated ps columns: owner and PID.
    username,pid = item.split()[0:2]
    if username == user:
        # Print the kill result tuple alongside the process identity.
        print username, pid, commands.getstatusoutput("kill -9 %s" % pid)
print 'grep expression=',grep
|
import math
def solution(arr):
    """Return the least common multiple of all numbers in *arr*.

    Folds lcm(a, b) = a * b // gcd(a, b) across the array, replacing the
    original brute-force search over multiples of max(arr). Unlike the
    original, the input list is no longer sorted in place.

    arr: non-empty list of natural numbers.
    """
    lcm = arr[0]
    for value in arr[1:]:
        lcm = lcm * value // math.gcd(lcm, value)
    return lcm
# Example from the problem statement: lcm(2, 6, 8, 14) == 168.
arr=[2,6,8,14]
print(solution(arr))
'''
N개의 최소공배수
문제 설명
두 수의 최소공배수(Least Common Multiple)란 입력된 두 수의 배수 중 공통이 되는 가장 작은 숫자를 의미합니다.
예를 들어 2와 7의 최소공배수는 14가 됩니다.
정의를 확장해서, n개의 수의 최소공배수는 n 개의 수들의 배수 중 공통이 되는 가장 작은 숫자가 됩니다.
n개의 숫자를 담은 배열 arr이 입력되었을 때 이 수들의 최소공배수를 반환하는 함수, solution을 완성해 주세요.
제한 사항
arr은 길이 1이상, 15이하인 배열입니다.
arr의 원소는 100 이하인 자연수입니다.
입출력 예
arr result
[2,6,8,14] 168
[1,2,3] 6
''' |
#!/usr/bin/env python
# coding:utf-8
"""
162. 寻找峰值
难度
中等
峰值元素是指其值大于左右相邻值的元素。
给定一个输入数组 nums,其中 nums[i] ≠ nums[i+1],找到峰值元素并返回其索引。
数组可能包含多个峰值,在这种情况下,返回任何一个峰值所在位置即可。
你可以假设 nums[-1] = nums[n] = -∞。
示例 1:
输入: nums = [1,2,3,1]
输出: 2
解释: 3 是峰值元素,你的函数应该返回其索引 2。
示例 2:
输入: nums = [1,2,1,3,5,6,4]
输出: 1 或 5
解释: 你的函数可以返回索引 1,其峰值元素为 2;
或者返回索引 5, 其峰值元素为 6。
说明:
你的解法应该是 O(logN) 时间复杂度的。
"""
# ================================================================================
"""
时间复杂度不达标 !!!
方法1: 线性搜索(只需要找到第一个 nums[i] > nums[i+1] 的 i 即可)
Time complexity : O(n)
Space complexity : O(1)
"""
class Solution(object):
    def findPeakElement(self, nums):
        """Linear scan: return the index of any peak element.

        nums[-1] and nums[n] are treated as -infinity, so the first and
        last positions can themselves be peaks.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return None
        size = len(nums)
        if size == 1:
            return 0
        if size == 2:
            return 0 if nums[0] > nums[1] else 1
        # Check whether either end is a peak before scanning the interior.
        if nums[0] > nums[1]:
            return 0
        if nums[-1] > nums[-2]:
            return size - 1
        # The first descent marks a peak.
        for index in range(size - 1):
            if nums[index] > nums[index + 1]:
                return index
# ================================================================================
"""
方法2: 二分查找
思路: 使用二分法:
若 mid > mid+1 则 0 ~ mid 必然有一个peak
若 mid < mid+1 则 mid ~ len(nums) 必然一个peak
"""
class Solution(object):
    def findPeakElement(self, nums):
        """Binary search for the index of a peak element.

        If nums[mid] > nums[mid + 1], a peak lies at or left of mid;
        otherwise one lies to the right of mid.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return None
        if len(nums) == 1:
            return 0
        if len(nums) == 2:
            return 0 if nums[0] > nums[1] else 1
        # nums[-1] and nums[n] count as -infinity, so check the ends first.
        if nums[0] > nums[1]:
            return 0
        elif nums[-1] > nums[-2]:
            return len(nums) - 1
        # Binary search over the interior elements only.
        left, right = 1, len(nums) - 2
        while left <= right:
            # Fixed: floor division — plain `/` yields a float index on
            # Python 3 (identical result on Python 2 ints).
            mid = (left + right) // 2
            if mid == right:
                # Window can no longer shrink; avoid an infinite loop.
                break
            if nums[mid] > nums[mid + 1]:
                right = mid
            else:
                left = mid + 1
        return right
# ================================================================================
"""
(方法2 的另一种写法)
"""
class Solution(object):
    def findPeakElement(self, nums):
        """Binary search variant that also steps past equal neighbours.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return None
        if len(nums) == 1:
            return 0
        if len(nums) == 2:
            return 0 if nums[0] > nums[1] else 1
        # nums[-1] and nums[n] count as -infinity, so check the ends first.
        if nums[0] > nums[1]:
            return 0
        elif nums[-1] > nums[-2]:
            return len(nums) - 1
        # Binary search over the interior elements only.
        # NOTE: the loop condition must be `left < right`; using <= here
        # would loop forever.
        left, right = 1, len(nums) - 2
        while left < right:
            # Fixed: floor division — plain `/` yields a float index on
            # Python 3 (identical result on Python 2 ints).
            mid = (left + right) // 2
            if nums[mid] > nums[mid + 1]:
                right = mid
            elif nums[mid] < nums[mid + 1]:
                left = mid + 1
            else:
                # Equal neighbours: nudge left until the values differ.
                left += 1
        return right
# ================================================================================
# ================================================================================
|
# The main example from README: fetch multiple properties from the page
from pprint import pprint
from wikipedia_ql import media_wiki
wikipedia = media_wiki.Wikipedia(cache_folder='tmp/cache')
pprint(wikipedia.query(r'''
from "Guardians of the Galaxy (film)" {
page@title as "title";
section[heading="Cast"] as "cast" {
li >> text["^(.+?) as (.+?):"] {
text-group[1] as "actor";
text-group[2] as "character"
}
};
section[heading="Critical response"] {
sentence["Rotten Tomatoes"] as "RT ratings" {
text["\d+%"] as "percent";
text["(\d+) (critic|review)"] >> text-group[1] as "reviews";
text["[\d.]+/10"] as "overall"
}
}
}
'''))
|
import sys
src_directory = '../../../'
sys.path.append(src_directory)
import src.model
import src.solvers
import src.physical_constants
import pylab
import dolfin
dolfin.set_log_active(True)
# Geometry parameters (presumably metres/radians — model units not stated).
theta = pylab.deg2rad(-3.0)  # surface slope angle
L = 100000.  # horizontal extent of the square domain (used as mesh xmax/ymax)
H = 1000.0   # reference thickness offset used in the bed elevation
a0 = 100     # amplitude of the Gaussian bump in the bed
sigma = 10000  # horizontal length scale of the bed bump
class Surface(dolfin.Expression):
    """Inclined-plane surface elevation: s(x) = tan(theta) * x[0]."""
    def __init__(self):
        pass
    def eval(self,values,x):
        # sin/cos ratio == tan(theta); slope set by the module-level theta.
        values[0] = pylab.sin(theta)/pylab.cos(theta)*x[0]
class Bed(dolfin.Expression):
    """Bed elevation: the inclined plane lowered by H plus a Gaussian bump,
    rotated into the tilted coordinate frame defined by theta."""
    def __init__(self):
        pass
    def eval(self,values,x):
        # Untilted bed profile: depth -H with a Gaussian bump of height a0
        # centred at (L/2, L/2) with length scale sigma.
        y_0 = -H + a0*(pylab.exp(-((x[0]-L/2.)**2 + (x[1]-L/2.)**2)/sigma**2))
        # Rotate the profile by theta into the inclined-slab frame.
        values[0] = pylab.sin(theta)*(x[0] + pylab.sin(theta)*y_0)/pylab.cos(theta) + pylab.cos(theta)*y_0
class SMB(dolfin.Expression):
    """Surface mass balance: identically zero everywhere."""
    def eval(self,values,x):
        values[0] = 0.0
# Fixed: src.helper was used below but never imported (only src.model,
# src.solvers and src.physical_constants are imported at the top) — unless
# the src package __init__ pulls it in, this line raised AttributeError.
import src.helper

# Newton-solver settings for the velocity solve: full Newton steps, GMRES
# with an algebraic-multigrid preconditioner.
nonlin_solver_params = src.helper.default_nonlin_solver_params()
nonlin_solver_params['newton_solver']['relaxation_parameter'] = 1.0
nonlin_solver_params['newton_solver']['relative_tolerance'] = 1.0
nonlin_solver_params['newton_solver']['linear_solver'] = 'gmres'
nonlin_solver_params['newton_solver']['preconditioner'] = 'hypre_amg'
# Solver configuration: a 500-year transient run (2-year steps) solving
# first-order isothermal velocity coupled to a free-surface evolution;
# enthalpy, age, surface climate and adjoint components are switched off.
config = { 'mode' : 'transient',
           'coupled' :
           { 'on' : False,
             'inner_tol': 0.0,
             'max_iter' : 1
           },
           't_start' : 0.0,
           't_end' : 500.0,
           'time_step' : 2.0,
           'velocity' :
           { 'on' : True,
             'newton_params' : nonlin_solver_params,
             'viscosity_mode' : 'isothermal',
             'b_linear' : None,
             'use_T0': False,
             'T0' : None,
             'A0' : 2.140373e-7,
             # Basal traction chosen as the inverse of (A0 * H) — see the
             # isothermal rate factor above.
             'beta2' : (1*2.140373e-7*1000)**-1.,
             'r' : 0.0,
             'E' : 1,
             'approximation' : 'fo',  # first-order momentum balance
             'boundaries' : None
           },
           'enthalpy' :
           { 'on': False,
             'use_surface_climate': False,
             'T_surface' : None,
           },
           'free_surface' :
           { 'on': True,
             'lump_mass_matrix': False,
             'use_shock_capturing' : False,
             'thklim': 10.0,  # minimum allowed ice thickness
             'use_pdd': False,
             'observed_smb': SMB(),  # zero surface mass balance (class above)
             'static_boundary_conditions': False
           },
           'age' :
           { 'on': False,
             'use_smb_for_ela': False,
             'ela': None,
           },
           'surface_climate' :
           { 'on': False,
             'T_ma': None,
             'T_ju': None,
             'beta_w': None,
             'sigma': None,
             'precip': None
           },
           'adjoint' :
           { 'alpha' : None,
             'beta' : None,
             'max_fun' : None,
             'objective_function' : 'logarithmic',
             'animate' : False
           },
           'output_path' : './results/',
           'wall_markers' : [],
           'periodic_boundary_conditions' : True,
           'log': True }
# Build the model: geometry, a 50x50x7 uniform mesh with periodic boundary
# conditions over the [0, L] x [0, L] domain, and physical constants.
model = src.model.Model()
model.set_geometry(Surface(), Bed())
nx = 50
ny = 50
nz = 7
model.generate_uniform_mesh(nx,ny,nz,xmin=0,xmax=L,ymin=0,ymax=L,generate_pbcs = True)
model.set_parameters(src.physical_constants.IceParameters())
model.initialize_variables()
# NOTE(review): presumably the flow-law exponent (n = 1 -> linear rheology,
# matching 'isothermal' viscosity above) — confirm in src.model.
model.n = 1.0
model.calculate_boundaries()
# Run the transient simulation, then dump the velocity components and the
# surface field as XML files.
T = src.solvers.TransientSolver(model,config)
T.solve()
dolfin.File('./results_stokes/u.xml') << model.u
dolfin.File('./results_stokes/v.xml') << model.v
dolfin.File('./results_stokes/w.xml') << model.w
dolfin.File('./results_stokes/S.xml') << model.S
|
from django.conf.urls import url
from .views import UserApiCreateView, UserApiUpdateView, UserApiDetailListView
# User API routes: list at the root, create under /create/, and
# update-by-primary-key under /<pk>/update/.
urlpatterns = [
    url(r'^$', UserApiDetailListView.as_view(), name='user_list'),
    url(r'^create/$', UserApiCreateView.as_view(), name='user_create'),
    url(r'^(?P<pk>\d+)/update/$', UserApiUpdateView.as_view(), name='user_update'),
]
#
# @lc app=leetcode id=212 lang=python3
#
# [212] Word Search II
#
# @lc code=start
class Solution:
    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
        """Return every word from *words* that can be traced on *board*.

        Builds a trie of the words, then depth-first searches from every
        cell, marking visited cells with '#' and restoring them while
        backtracking so each cell is used at most once per path.
        """
        # Build the trie; a node carrying "end" == 1 terminates a word.
        trie = {}
        for word in words:
            node = trie
            for ch in word:
                node = node.setdefault(ch, {})
            node["end"] = 1

        found = []
        rows, cols = len(board), len(board[0])

        def dfs(r, c, node, prefix):
            ch = board[r][c]
            if ch not in node:
                return
            node = node[ch]
            if node.get("end") == 1:
                found.append(prefix + ch)
                node["end"] = 0  # clear the flag so the word is reported once
            board[r][c] = "#"  # mark this cell as visited on the current path
            for dr, dc in ((-1, 0), (1, 0), (0, 1), (0, -1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and board[nr][nc] != "#":
                    dfs(nr, nc, node, prefix + ch)
            board[r][c] = ch  # restore on backtrack

        for r in range(rows):
            for c in range(cols):
                dfs(r, c, trie, "")
        return found
# @lc code=end
""" dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
END_OF_WORD = "#"
class Solution(object):
def findWords(self, board, words):
if not board or not board[0]: return []
if not words: return []
self.result = set()
# 构建trie
root = collections.defaultdict()
for word in words:
node = root
for char in word:
node = node.setdefault(char, collections.defaultdict())
node[END_OF_WORD] = END_OF_WORD
self.m, self.n = len(board), len(board[0])
for i in xrange(self.m):
for j in xrange(self.n):
if board[i][j] in root:
self._dfs(board, i, j, "", root)
return list(self.result)
def _dfs(self, board, i, j, cur_word, cur_dict):
cur_word += board[i][j]
cur_dict = cur_dict[board[i][j]]
if END_OF_WORD in cur_dict:
self.result.add(cur_word)
tmp, board[i][j] = board[i][j], '@'
for k in xrange(4):
x, y = i + dx[k], j + dy[k]
if 0 <= x < self.m and 0 <= y < self.n \
and board[x][y] != '@' and board[x][y] in cur_dict:
self._dfs(board, x, y, cur_word, cur_dict)
board[i][j] = tmp """ |
from net.yolo_top import yolov3
import numpy as np
import tensorflow as tf
from net.config import cfg
from PIL import Image, ImageDraw, ImageFont
from predict.draw_box import draw_boxes
import matplotlib.pyplot as plt
import os
class YOLO_PREDICT:
    """Builds a YOLOv3 TensorFlow inference graph and runs box prediction,
    collecting per-image boxes, scores and classes keyed by image id."""
    def __init__(self, gpu = "0"):
        """Set up the prediction graph.

        gpu: value assigned to CUDA_VISIBLE_DEVICES to select the GPU(s).
        """
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu
        # result: image-id -> predicted boxes / scores / classes
        self.boxes_dict = {}
        self.scores_dict = {}
        self.classes_dict = {}
        # clear graph
        tf.reset_default_graph()
        # now, in predict mode
        self.istraining = tf.constant(False, tf.bool)
        # which size is the training size
        self.img_size = cfg.data.img_size
        #
        self.batch_size = cfg.predict.batch_size
        self.scratch = cfg.predict.scratch
        self.build_model()
    def build_model(self):
        """Create placeholders, the YOLOv3 network and the prediction ops."""
        # Image height/width fed at run time; input images fixed to img_size.
        self.img_hw = tf.placeholder(dtype=tf.float32, shape=[2])
        self.imgs_holder = tf.placeholder(tf.float32,
                                          shape = [None,
                                                   self.img_size[0],
                                                   self.img_size[1],
                                                   self.img_size[2]])
        self.model = yolov3(self.imgs_holder, None, self.istraining)
        # NOTE(review): 'pedict' looks like a typo for 'predict' — confirm
        # against net.yolo_top.yolov3's actual method name.
        self.boxes, self.scores, self.classes = self.model.pedict(self.img_hw,
                                                                  iou_threshold = cfg.predict.iou_thresh,
                                                                  score_threshold = cfg.predict.score_thresh)
        self.saver = tf.train.Saver()
        self.ckpt_dir = cfg.path.ckpt_dir
    def predict_imgs(self, image_data, img_id_list):
        """Restore the latest checkpoint and predict each image in turn.

        image_data: iterable of images (pixel values scaled by /255 here).
        img_id_list: ids aligned with image_data, used as result-dict keys.
        """
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
            self.saver.restore(sess, ckpt.model_checkpoint_path)
            for i, single_image_data in enumerate(image_data):
                boxes_, scores_, classes_ = sess.run([self.boxes, self.scores, self.classes],
                                                     feed_dict={
                                                         self.img_hw:
                                                             [self.img_size[1],
                                                              self.img_size[0]],
                                                         self.imgs_holder:
                                                             np.reshape(single_image_data / 255,
                                                                        [1,
                                                                         self.img_size[0],
                                                                         self.img_size[1],
                                                                         self.img_size[2]])})
                self.boxes_dict[img_id_list[i]] = boxes_
                self.scores_dict[img_id_list[i]] = scores_
                self.classes_dict[img_id_list[i]] = classes_
# coding: utf-8
from __future__ import unicode_literals
from yargy import (
rule,
or_
)
from yargy.interpretation import fact
from yargy.predicates import gram
from yargy.pipelines import morph_pipeline
from .name import (
NAME,
SIMPLE_NAME
)
# Fact with two attributes: the person's position/title and their name.
Person = fact(
    'Person',
    ['position', 'name']
)
# Morphological pipeline matching position/title words in any inflected form.
# NOTE(review): several entries are duplicated (e.g. 'руководитель',
# 'зампред', 'президент', 'председатель', 'директор', 'глава',
# 'вице-президент', 'представитель') -- harmless for matching, but could be
# deduplicated.
POSITION = morph_pipeline([
    # clergy, royalty and nobility
    'святой',
    'патриарх',
    'митрополит',
    'царь',
    'король',
    'царица',
    'император',
    'императрица',
    'принц',
    'принцесса',
    'князь',
    'граф',
    'графиня',
    'барон',
    'баронесса',
    'княгиня',
    # government and administration
    'президент',
    'премьер-министр',
    'экс-премьер',
    'пресс-секретарь',
    'министр',
    'замминистр',
    'заместитель',
    'глава',
    'канцлер',
    'помощник',
    'посол',
    'губернатор',
    'председатель',
    'спикер',
    'диктатор',
    'лидер',
    'генсек',
    'премьер',
    'депутат',
    'вице-премьер',
    'сенатор',
    'полпред',
    'госсекретарь',
    'вице-президент',
    'сопредседатель',
    'зам',
    'мэр',
    'вице-спикер',
    'замруководителя',
    'зампред',
    'муфтий',
    'спецпредставитель',
    'руководитель',
    'статс-секретарь',
    'зампредседатель',
    'представитель',
    'ставленник',
    'мадеро',
    'вице-губернатор',
    'зампредсовмин',
    'наркоминдела',
    'генпрокурор',
    'комиссар',
    'рейхсканцлер',
    'советник',
    'замглавы',
    'секретарь',
    'парламентарий',
    'замгендиректор',
    'вице-председатель',
    'постпред',
    'госкомтруд',
    'предсовмин',
    'преемник',
    'делегат',
    'шеф',
    'консул',
    'замминистра',
    'главкомпис',
    'чиновник',
    'врио',
    'управделами',
    'нарком',
    'донпродкомиссар',
    'председ',
    'гендиректор',
    'генерал-губернатор',
    'обревком',
    'правитель',
    'замсекретарь',
    'главнокомандующий',
    'вице-мэр',
    'наместник',
    'спичрайтер',
    'вице-консул',
    'мвэс',
    'облревком',
    'главковерх',
    'пресс-атташе',
    'торгпред',
    'член',
    'назначенец',
    'эмиссар',
    'обрядоначальник',
    'главком',
    'единоросс',
    'политик',
    'генерал',
    'замгенпрокурор',
    'дипломат',
    'главноуполномоченный',
    'генерал-фельдцейхмейстер',
    'комендант',
    'казначей',
    'уполномоченный',
    'обер-прокурор',
    'наркомзем',
    'соправитель',
    # business
    'основатель',
    'сооснователь',
    'управляющий директор',
    'управляющий партнер',
    'партнер',
    'руководитель',
    'аналитик',
    'зампред',
    'миллиардер',
    'миллионер',
    # arts, media and entertainment
    'автор',
    'актер',
    'актриса',
    'певец',
    'певица',
    'исполнитель',
    'солист',
    'режиссер',
    'сценарист',
    'писатель',
    'музыкант',
    'композитор',
    'корреспондент',
    'журналист',
    'редактор',
    'дирижер',
    'кинорежиссер',
    'звукорежиссер',
    'детектив',
    'пианист',
    'драматург',
    'артист',
    'балетмейстер',
    'скрипач',
    'хореограф',
    'танцовщик',
    'документалист',
    'поэт',
    'литератор',
    'киноактер',
    'вокалист',
    'бард',
    'комик',
    'продюсер',
    'кинодраматург',
    'киноактриса',
    'балерина',
    'пианистка',
    'критик',
    'танцор',
    'концертмейстер',
    'симфонист',
    'сатирик',
    'аранжировщик',
    'саксофонист',
    'юморист',
    'шансонье',
    'гастролер',
    'виолончелист',
    'постановщик',
    'кинематографист',
    'сценограф',
    'джазмен',
    'музыковед',
    'киноартист',
    'педагог',
    'хормейстер',
    'беллетрист',
    'примадонна',
    'инструменталист',
    'альтист',
    'шоумен',
    'виртуоз',
    'пародист',
    'декоратор',
    'модельер',
    'очеркист',
    'оперетта',
    'контрабасист',
    'карикатурист',
    'дуэт',
    'монтажер',
    'живописец',
    'скульптор',
    'режиссура',
    'архитектор',
    'антрепренер',
    'импрессарио',
    'прозаик',
    'труппа',
    'трагик',
    'клоун',
    'солистка',
    'либреттист',
    'литературовед',
    'портретист',
    'гример',
    'художник',
    'импровизатор',
    'декламаторша',
    'телеведущий',
    'импресарио',
    'мастер',
    'аккомпаниатор',
    'шахматист',
    'иллюзионист',
    'эстрадник',
    'эстрада',
    'спортсмен',
    'дизайнер',
    'кинокритик',
    'публицист',
    'чтец',
    'конферансье',
    'студиец',
    'коверный',
    'куплетист',
    'знаменитость',
    # science, law and professions
    'ученый',
    'балет',
    'искусствовед',
    'гитарист',
    'доктор',
    'академик',
    'судья',
    'юрист',
    'представитель',
    'директор',
    'прокурор',
    # family relations and honorifics
    'отец',
    'мать',
    'мама',
    'папа',
    'брат',
    'сестра',
    'тёща',
    'тесть',
    'дедушка',
    'бабушка',
    'жена',
    'муж',
    'дочь',
    'сын',
    'мистер',
    'миссис',
    'господин',
    'госпожа',
    'пан',
    'пани',
    'сэр',
    'мисс',
    # sports and miscellaneous
    'боксер',
    'боец',
    'атлет',
    'футболист',
    'баскетболист',
    'агроном',
    'президент',
    'сопрезидент',
    'вице-президент',
    'экс-президент',
    'председатель',
    'руководитель',
    'директор',
    'глава',
])
# Genitive-case token, e.g. the "России" in "президент России".
GENT = gram('gent')
# One to five consecutive genitive tokens qualifying the position
# ("министр иностранных дел Российской Федерации").
WHERE = or_(
    rule(GENT),
    rule(GENT, GENT),
    rule(GENT, GENT, GENT),
    rule(GENT, GENT, GENT, GENT),
    rule(GENT, GENT, GENT, GENT, GENT),
)
# Position = title word optionally followed by its genitive qualifier.
POSITION = rule(
    POSITION,
    WHERE.optional()
).interpretation(
    Person.position
)
NAME = NAME.interpretation(
    Person.name
)
SIMPLE_NAME = SIMPLE_NAME.interpretation(
    Person.name
)
# "position + simple name" (e.g. "президент Путин").
POSITION_NAME = rule(
    POSITION,
    SIMPLE_NAME
)
# A person is either a titled simple name or a full name on its own.
PERSON = or_(
    POSITION_NAME,
    NAME
).interpretation(
    Person
)
|
"""
Authors: Liu, Yuntian
Murphy, Declan
Porebski, Elvis
Tyrakowski, Bartosz
Date: March, 2016
Purpose: Machine Learning Team Project.
Generalised Machine Learning Models:
1. Linear Regression.
2. Ridge Regression.
3. Lasso.
4. Elastic Net.
5. Support Vector Machine Regression.
6. Random Forests Decision Trees.
7. Extra Trees.
8. Gradient Boosting.
"""
import time
from itertools import combinations
import numpy as np
from pandas import DataFrame
from dataset import DataSet
from graph import plot_relationship, plot_residual
from model import get_models, measure_performance
def show_relationships():
    """Interactive menu: plot one dataset feature against the target price.

    Reads choices from stdin until the user enters '13' (Back).  Relies on
    the module-level ``dataset`` created in the ``__main__`` block.
    """
    # Display options to user
    print("--------------------------------------------------------------------------")
    print("Relationship between feature and price")
    print("--------------------------------------------------------------------------")
    print("0. CRIM : per capita crime rate by town")
    print("1. ZN : proportion of residential land zoned for lots over 25,000 sq.ft.")
    print("2. INDUS : proportion of non-retail business acres per town")
    print("3. CHAS : Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)")
    print("4. NOX : nitric oxides concentration (parts per 10 million)")
    print("5. RM : average number of rooms per dwelling")
    print("6. AGE : proportion of owner-occupied units built prior to 1940")
    print("7. DIS : weighted distances to five Boston employment centres")
    print("8. RAD : index of accessibility to radial highways")
    print("9. TAX : full-value property-tax rate per $10,000")
    print("10. PTRATIO : pupil-teacher ratio by town")
    print("11. B : 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town")
    print("12. LSTAT : % lower status of the population")
    print('13. Back')
    print("--------------------------------------------------------------------------")
    running = True
    while running:
        user_input = input('Enter your choice: ')
        if user_input == '13':
            running = False
        elif user_input.isdigit():
            index = int(user_input)
            if 0 <= index < 13:
                # BUG FIX: plot the column the user selected; the original
                # always passed column 0 while labelling it with
                # column_names[index].
                plot_relationship(dataset.dataset.ix[:, index], dataset.target, dataset.column_names[index])
def show_model_performance(model):
    """Print train/test error metrics for *model* and preview 10 predictions."""
    divider = '--------------------------------------------------------------------------'
    print(divider)
    print(model.estimator_name, 'Performance')
    print(divider)
    # Predictions on the data the model was fitted with, then on held-out data.
    train_predictions = model.predict(training_features)
    test_predictions = model.predict(testing_features)
    # Mean Absolute Error, Mean Squared Error and R^2 for both splits.
    train_mae, train_mse, train_r2 = measure_performance(train_predictions, training_target)
    test_mae, test_mse, test_r2 = measure_performance(test_predictions, testing_target)
    print('Mean Absolute Error: {0:.2f}'.format(train_mae),
          'Mean Square Error: {0:.2f}'.format(train_mse),
          'R2: {0:.2f}'.format(train_r2), "Trained " + model.estimator_name)
    print('Mean Absolute Error: {0:.2f}'.format(test_mae),
          'Mean Square Error: {0:.2f}'.format(test_mse),
          'R2: {0:.2f}'.format(test_r2), " Tested " + model.estimator_name)
    # Side-by-side table of predicted vs. actual prices on the test split.
    comparison = DataFrame()
    comparison['PREDICTED'] = test_predictions
    comparison['ACTUAL'] = list(testing_target)
    print(divider)
    print("First 10 predictions for unseen new data : ")
    print(comparison[:10])
def show_model_list():
    """Menu listing every trained model; '9' returns to the main menu."""
    divider = '--------------------------------------------------------------------------'
    while True:
        print(divider)
        print('Generalised Models')
        print(divider)
        for position, candidate in enumerate(models):
            print('{}. {}'.format(position, candidate.estimator_name))
        print('9. Back')
        print(divider)
        choice = input('Enter your choice: ')
        if choice == '9':
            return
        if choice.isdigit() and 0 <= int(choice) < len(models):
            show_model_options(models[int(choice)])
def show_main_menu():
    """Top-level interactive loop of the application.

    NOTE(review): option '1. Show Best ML Model' is printed but has no
    handler (selecting it is a no-op) -- this mirrors the original code;
    confirm whether it was meant to be implemented.
    """
    divider = '--------------------------------------------------------------------------'
    while True:
        print(divider)
        print('Main Menu')
        print(divider)
        print('0. Show ML Models')
        print('1. Show Best ML Model')
        print('2. Show Relationships')
        print('3. Exit')
        print(divider)
        choice = input('Enter your choice: ')
        if choice == '0':
            show_model_list()
        elif choice == '2':
            show_relationships()
        elif choice == '3':
            break
def show_model_options(model):
    """Per-model submenu: performance report, residual plot, retraining,
    brute-force feature search, feature importances and custom prediction.
    """
    running = True
    while running:
        print('--------------------------------------------------------------------------')
        print(model.estimator_name)
        print('--------------------------------------------------------------------------')
        print('0. Show performance')
        print('1. Show residual plot')
        print('2. Train this model again')
        print('3. Brute force combination of features')
        print('4. Display feature importance')
        print('5. Predict housing price using custom features')
        print('6. Back')
        print('--------------------------------------------------------------------------')
        user_input = input('Enter your choice: ')
        if user_input == '6':
            running = False
        # Show Performance.
        # BUG FIX: dropped `and user_input.isdigit` -- a bound-method
        # reference (never called) that is always truthy, so it silently
        # did nothing.
        elif user_input == '0':
            show_model_performance(model)
        # Show Residual Plot.
        elif user_input == '1':
            # Predict prices with data used for training.
            predicted_training_target = model.predict(training_features)
            # Predict prices with new and unseen data.
            predicted_testing_target = model.predict(testing_features)
            # Generate and show residual plot.
            plot_residual(predicted_training_target=predicted_training_target, actual_training_target=training_target,
                          predicted_testing_target=predicted_testing_target, actual_testing_target=testing_target,
                          model_name=model.estimator_name)
        # Train again.
        elif user_input == '2':
            print('Training {} ...'.format(model.estimator_name))
            model.train_and_evaluate(features=training_features, target=training_target, kfold=True)
            print('Training completed!')
            model.save()
            print('Model persisted.')
        elif user_input == '3':
            print('searching for best combination of features...')
            best_mse, best_feature_lists = find_best_features(model)
            print('Best MSE:', best_mse, 'using features:', best_feature_lists)
        elif user_input == '4':
            df = DataFrame()
            df['FEATURE'] = dataset.column_names[0:-1]
            # Linear models expose coefficients; tree ensembles expose
            # feature_importances_; SVM RBF only has dual coefficients.
            if model.estimator_name in ['Elastic Net', 'LARS', 'Lasso', 'Ridge', 'Linear', ]:
                df['IMPORTANCE'] = model.estimator.coef_
                print(df.sort_values(by='IMPORTANCE', ascending=False))
            elif model.estimator_name in ['Gradient Boosting', 'Random Forest', 'Extra Trees']:
                df['IMPORTANCE'] = model.estimator.feature_importances_
                print(df.sort_values(by='IMPORTANCE', ascending=False))
            elif model.estimator_name in ['SVM RBF']:
                print(model.estimator.dual_coef_)
        elif user_input == '5':
            predict_custom(model)
def find_best_features(model):
    """Exhaustively search all feature subsets for the lowest test-set MSE.

    Tries every combination of the 13 Boston features (longest first),
    refits the estimator on each, and tracks the best mean squared error
    on the test split.  The estimator is refitted on all features before
    returning so the model remains usable afterwards.

    Returns (best_mse, best_features) where best_features is a tuple of
    column indices.
    """
    # BUG FIX: start from +inf instead of the arbitrary 100, so a winner is
    # recorded even when every combination's MSE exceeds 100.
    best_mse = float('inf')
    best_features = ()
    indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    start = time.time()
    for combination_length in range(13, 0, -1):
        for c in combinations(indices, combination_length):
            model.estimator.fit(training_features.ix[:, c], training_target)
            predicted = model.predict(testing_features.ix[:, c])
            mae, current_mse, r2 = measure_performance(predicted, testing_target)
            if current_mse < best_mse:
                print('New best MSE: ', current_mse, ' using features: ', c)
                print('R2: ', r2)
                best_mse = current_mse
                best_features = c
    end = time.time()
    print('Execution time: ', end - start)
    # train model again with all features
    model.estimator.fit(training_features, training_target)
    return best_mse, best_features
def predict_custom(model):
    """Prompt for all 13 Boston features and print the model's price prediction.

    Values are collected first (same prompt order as before) and converted
    to float afterwards, so a typo is reported only after all values were
    entered -- matching the original behaviour while removing 26 lines of
    copy-pasted input/append pairs.
    """
    print("0. CRIM : per capita crime rate by town")
    print("1. ZN : proportion of residential land zoned for lots over 25,000 sq.ft.")
    print("2. INDUS : proportion of non-retail business acres per town")
    print("3. CHAS : Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)")
    print("4. NOX : nitric oxides concentration (parts per 10 million)")
    print("5. RM : average number of rooms per dwelling")
    print("6. AGE : proportion of owner-occupied units built prior to 1940")
    print("7. DIS : weighted distances to five Boston employment centres")
    print("8. RAD : index of accessibility to radial highways")
    print("9. TAX : full-value property-tax rate per $10,000")
    print("10. PTRATIO : pupil-teacher ratio by town")
    print("11. B : 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town")
    print("12. LSTAT : % lower status of the population")
    print('--------------------------------------------------------------------------')
    feature_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',
                     'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
    raw_values = [input(name + ': ') for name in feature_names]
    try:
        features = [float(value) for value in raw_values]
        # f = [0.00632,18,2.31,0,0.538,6.575,65.2,4.09,1,296,15.3,396.9,4.98]
        print('Predicted Value:', model.predict(np.asarray(features).reshape(1, -1))[0])
    except Exception as err:
        print('An error has occured.', err)
if __name__ == '__main__':
    # load dataset
    dataset = DataSet.load()
    # split dataset into training and testing subsets; these are the
    # module-level globals every menu function above relies on
    training_features, testing_features, training_target, testing_target = dataset.split_train_test()
    # list of Generalised ML Models
    models = get_models(training_features, training_target)
    show_main_menu()
|
import sys
# BUG FIX: the flood fill below recurses once per visited cell, so the grid
# can easily exceed CPython's default recursion limit (~1000); lift it.
sys.setrecursionlimit(10 ** 6)
H, W = map(int, input().split())
# Grid of single characters: '#' wall, '.' open, 's' start, 'g' goal;
# visited cells are overwritten with '1'.
a = [list(l.strip()) for l in sys.stdin]
ans = "No"
def check(x, y):
    """Recurse into (x, y) only when it lies inside the grid."""
    if 0 <= x < H and 0 <= y < W:
        search(x, y)
def search(x, y):
    """Depth-first flood fill from (x, y); sets ans to "Yes" on reaching 'g'."""
    if a[x][y] == "#" or a[x][y] == "1":
        return
    if a[x][y] == "g":
        global ans
        ans = "Yes"
    a[x][y] = "1"
    check(x + 1, y)
    check(x - 1, y)
    check(x, y + 1)
    check(x, y - 1)
# Start the search from the 's' cell and report reachability of 'g'.
for i in range(H):
    for l in range(W):
        if a[i][l] == "s":
            check(i, l)
print(ans)
# Hard-coded credential checked against up to three attempts from stdin.
password = 'a123456'
for attempts_left in range(2, -1, -1):
    pw = input('Login Password: ')
    if pw == password:
        print('Login Success')
        break
    if attempts_left > 0:
        # Same message as before: remaining tries after this failure.
        print('Wrong Password. You have', attempts_left, 'chance left')
    else:
        print('Login Fail')
|
#!/usr/bin/python
# coding: utf8
import geocoder
# Query fixtures shared by every test below.
location = 'Ottawa, Ontario'
ottawa = (45.4215296, -75.6971930)  # (lat, lng) of Ottawa city centre
def test_arcgis():
    """Forward-geocode `location` via ArcGIS and sanity-check the debug info."""
    result = geocoder.arcgis(location)
    assert result.ok
    osm_count, fields_count = result.debug()[0]
    assert osm_count == 0
    assert fields_count > 1
def test_arcgis_reverse():
    """Reverse-geocode the Ottawa lat/lng pair via ArcGIS."""
    result = geocoder.arcgis(ottawa, method='reverse')
    assert result.ok
def test_multi_results():
    """Request 5 candidates from ArcGIS and check the top three addresses."""
    result = geocoder.arcgis(location, maxRows='5')
    assert len(result) == 5
    expected = [
        'Ottawa, Ontario',
        'Ottawa, Ontario',
        'Ontario, Oklahoma'
    ]
    actual = [candidate.address for candidate in result]
    assert actual[:3] == expected
|
#
# Run workflows according to main etlconf
#
#
# to update bq_run_script to replace more than on pair of project-dataset
#
#
import os
import sys
import getopt
import json
import datetime
# ----------------------------------------------------
# default config values
# To override default config values, copy the keys to be overriden to a json file,
# and indicate this file as --config parameter
# ----------------------------------------------------
# Template / fallback configuration. Each key doubles as documentation of
# the expected value; read_config() replaces these with values from the
# etlconf (global) and config (local) json files when present.
config_default = {
    "workflow": "ddl, staging, etl etc",
    "comment": "",
    "type": "sql or py",
    "variables":
        {
            "@etl_project": "target project name",
            "@etl_dataset": "target dataset name"
        },
    "scripts":
        [
            {"script": "path_relative_to_the_project_root", "comment": ""}
        ]
}
# ----------------------------------------------------
# read_params()
# ----------------------------------------------------
def read_params():
    """Parse command-line options into a params dict.

    Recognised options (kept only when the file actually exists):
      -e / --etlconf  path to the global etl config json
      -c / --config   path to the local workflow config json
    Positional arguments are collected under 'script_files' as
    [{"script": name}, ...].  Exits with status 2 on a parsing error.
    """
    print('Reading params...')
    params = {
        "etlconf_file": "optional: indicate '-e' for 'etlconf', global config json file",
        "config_file": "optional: indicate '-c' for 'config', local config json file",
        "script_files": []
    }
    # Parsing command line arguments
    try:
        # BUG FIX: long options must be separate list items; the original
        # ["etlconf=,config="] defined a single bogus long option, so
        # --config (and clean --etlconf matching) never worked.
        opts, args = getopt.getopt(sys.argv[1:], "e:c:", ["etlconf=", "config="])
        # let all params be optional
    except getopt.GetoptError as err:
        print(err.args)
        print("Please indicate correct params:")
        print(params)
        sys.exit(2)
    params['etlconf_file'] = ''
    params['config_file'] = ''
    for opt, arg in opts:
        if opt in ('-e', '--etlconf') and os.path.isfile(arg):
            params['etlconf_file'] = arg
        if opt in ('-c', '--config') and os.path.isfile(arg):
            params['config_file'] = arg
    # add all suggested filenames, and let the inner runner script check them
    params['script_files'] = [{"script": arg} for arg in args]
    print(params)
    return params
# ----------------------------------------------------
# read_config()
# ----------------------------------------------------
def read_config(etlconf_file, config_file):
    """Merge configuration layers: defaults <- etlconf (global) <- config (local).

    Missing or non-existent files simply contribute nothing; only keys
    present in config_default are considered.
    """
    print('Reading config...')
    etlconf_data = {}
    local_data = {}
    if os.path.isfile(etlconf_file):
        with open(etlconf_file) as f:
            etlconf_data = json.load(f)
    if os.path.isfile(config_file):
        with open(config_file) as f:
            local_data = json.load(f)
    merged = {}
    # global config has lower priority: apply it first...
    for key in config_default:
        merged[key] = etlconf_data.get(key, config_default[key])
    # ...then let the local config override whatever it defines
    for key in config_default:
        merged[key] = local_data.get(key, merged[key])
    print(merged)
    return merged
'''
----------------------------------------------------
main()
return codes: 0 = ok, !0 = error
----------------------------------------------------
'''
def main():
    """Run the configured workflow scripts via scripts/bq_run_script.py.

    Returns the os.system exit status (0 = ok, non-zero = error).
    """
    params = read_params()
    config = read_config(params['etlconf_file'], params['config_file'])
    command_template = "python scripts/bq_run_script.py {e} {etlconf_file} {c} {config_file} {script_file}"
    # Script files given on the command line override the configured list.
    scripts_to_run = params['script_files'] or config['scripts']
    # run all given scripts at a time
    run_command = command_template.format(
        script_file=' '.join(entry['script'] for entry in scripts_to_run),
        e='-e' if params['etlconf_file'] else '',
        etlconf_file=params['etlconf_file'],
        c='-c' if params['config_file'] else '',
        config_file=params['config_file']
    )
    print('run_workflow calls:')
    print(run_command)
    return os.system(run_command)
# ----------------------------------------------------
# run
# ----------------------------------------------------
# Run main() at import/execution time and propagate its status as the
# process exit code so shell callers can detect failures.
return_code = main()
print('run_workflow.exit()', return_code)
exit(return_code)
# last edit: 2020-12-02
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration for the EXO single-photon skim over 2009 beam
# commissioning MinimumBias RECO data.
process = cms.Process("EXOSinglePhoSkim")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Report progress only every 1000 events to keep the log readable.
process.MessageLogger.cerr.FwkReport.reportEvery=cms.untracked.int32(1000);
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)
# -1 means: process every event in the input files.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/F67BCF17-48E2-DE11-98B1-000423D94534.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/E432BCD7-55E2-DE11-B670-001617C3B6CC.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/D0586F90-5FE2-DE11-8976-001D09F24691.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/B60CC58F-5CE2-DE11-9FC7-001D09F24DDF.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/A4D9D21B-58E2-DE11-8F7A-000423D986A8.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/A0FC9BDF-65E2-DE11-A6A1-000423D174FE.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/903D2066-61E2-DE11-9F6E-0019B9F704D6.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/7EFA67BE-66E2-DE11-AE17-001617C3B79A.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/7E34865F-45E2-DE11-896A-000423D98F98.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/76CCDA0D-8AE2-DE11-AF65-0030487A3C9A.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/68ED95B5-50E2-DE11-B4C8-001D09F27003.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/5C4B3A8E-63E2-DE11-A02A-000423D99E46.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/54A3A373-4CE2-DE11-8658-000423D99AAA.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/4693BF9A-40E2-DE11-BDBD-000423D944F8.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/2EC732CC-5CE2-DE11-A781-001D09F290BF.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/2A6903C0-68E2-DE11-B6BA-001D09F28D54.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/26EC6965-4CE2-DE11-ABCF-003048D373AE.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/263E80C6-41E2-DE11-A194-001617C3B66C.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/14D22A92-62E2-DE11-9B14-000423D990CC.root',
'/store/data/BeamCommissioning09/MinimumBias/RECO/v2/000/123/596/0A71AE7F-4DE2-DE11-8B2F-001D09F251CC.root'
    )
)
#load the EventContent and Skim cff/i files for the sub-skim.
process.load('SUSYBSMAnalysis.Skimming.EXOSinglePho_EventContent_cfi')
process.load('SUSYBSMAnalysis.Skimming.EXOSinglePho_cff')
#define output file name.
process.exoticaSinglePhoOutputModule.fileName = cms.untracked.string('EXOSinglePho.root')
#all three paths need to run so that the Output module can keep the logical "OR"
#process.exoticaSinglePhoHighetPath=cms.Path(process.exoticaSinglePhoHighetSeq)
process.exoticaRecoSinglePhoHighetPath=cms.Path(process.exoticaRecoSinglePhoHighetSeq)
process.endPath = cms.EndPath(process.exoticaSinglePhoOutputModule)
|
import airflow,os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from datetime import timedelta
# Default task arguments shared by every operator in this DAG.
default_args = {
    'owner':'airflow',
    'depends_on_past':False,
    'start_date':airflow.utils.dates.days_ago(1),
    'retries':1,
}
#dag: runs daily at 06:30
dag = DAG(
    'airflow_case1_dag',
    default_args=default_args,
    description='airflow_case1 DAG',
    #schedule_interval=timedelta(days=1))
    schedule_interval="30 6 * * *")
#-------------------------------------------------------------------------------
# first operator: environment initialisation, executed over ssh on localhost
# via a heredoc so the script runs in a login-like shell
init_task_command='''
ssh -Tq localhost << eeooff
/home/docker/airflow_case1/shellscript/init.sh
exit
eeooff
'''
init_operator = BashOperator(
    task_id='init_task',
    bash_command=init_task_command,
    dag=dag)
#-------------------------------------------------------------------------------
# second operator: create the working directories
mkdir_task_command='''
ssh -Tq localhost << eeooff
/home/docker/airflow_case1/shellscript/mkdir.sh
exit
eeooff
'''
mkdir_operator = BashOperator(
    task_id='mkdir_task',
    bash_command=mkdir_task_command,
    dag=dag)
#-------------------------------------------------------------------------------
# third operator: upload input data
put_task_command='''
ssh -Tq localhost << eeooff
/home/docker/airflow_case1/shellscript/put.sh
exit
eeooff
'''
put_operator = BashOperator(
    task_id='put_task',
    bash_command=put_task_command,
    dag=dag)
#-------------------------------------------------------------------------------
# fourth operator: run the pi computation job
pi_task_command='''
ssh -Tq localhost << eeooff
/home/docker/airflow_case1/shellscript/pijob.sh
exit
eeooff
'''
pi_operator = BashOperator(
    task_id='pi_task',
    bash_command=pi_task_command,
    dag=dag)
#-------------------------------------------------------------------------------
# dependencies: init -> mkdir -> put -> pi
# NOTE(review): the init->put and init->pi edges are redundant (implied
# transitively through mkdir->put->pi) but harmless.
init_operator.set_downstream(mkdir_operator)
init_operator.set_downstream(put_operator)
init_operator.set_downstream(pi_operator)
mkdir_operator.set_downstream(put_operator)
put_operator.set_downstream(pi_operator)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# coding: utf8
import os
import itertools
import pytest
import time
from allure_commons.types import AttachmentType
from selene.api import *
import selene
import allure
from selene import browser
from selene import config
from selene.browsers import BrowserName
from selene.helpers import env
from selene.tools import s
from tools import phantom_js_clean_up
# Search queries used by the test steps below.
search_1 = 'Selenium'
search_2 = 'Selene'
# Global selene configuration: headless PhantomJS, 10 s timeout,
# screenshots written to ./screenshots.
config.browser_name = BrowserName.PHANTOMJS
config.hold_browser_open = True
config.desired_capabilities={}
config.timeout = 10
config.reports_folder = os.path.join(os.getcwd(), "screenshots")
# Test
class TestSomeCase:
    """Google-search smoke tests (Python 2, selene + allure reporting)."""
    # NOTE(review): the driver is created at class-definition (import) time,
    # before any fixture runs.
    driver = browser.driver()
    def setup(m):
        # pytest-style per-test setup; `m` plays the role of `self`.
        browser.driver().delete_all_cookies()
        print '\n ****************** START TEST CASE ************** \n'
    def teardown(m):
        # pytest-style per-test teardown: quits the browser after each test.
        print '\n ****************** END TEST KEYS ***************** \n'
        browser.quit()
    @classmethod
    def tearDownClass(cls):
        # Final cleanup: cookies, driver and any leftover PhantomJS processes.
        browser.driver().delete_all_cookies()
        # NOTE(review): the string below is a no-op statement, not a
        # docstring (docstrings must be the first statement of the def).
        """Tear down class."""
        try:
            cls.driver.quit()
            phantom_js_clean_up()
        except Exception as err:
            print(err)
    @pytest.allure.story('Поиск на сайте Google.com')
    def test_step_1(self):
        # Search for `search_1` and close the window.
        main_page().open_browser()
        main_page().google_search(search_1)
        main_page().click_search()
        browser.close()
    @pytest.allure.story('КОля? шо по шапкам')
    def test_step_2(self):
        # Same flow with the second query.
        main_page().open_browser()
        main_page().google_search(search_2)
        main_page().click_search()
        browser.close()
    @pytest.allure.story('Поиск на сайте Google.com 1')
    def test_step_3(self):
        main_page().open_browser()
        main_page().google_search(search_1)
        main_page().click_search()
        browser.close()
    @pytest.allure.story('Поиск на сайте Google.com 2')
    def test_step_4(self):
        main_page().open_browser()
        main_page().google_search(search_1)
        main_page().click_search()
        browser.close()
class main_page2:
    """Unused scratch page object; only prints a marker (Python 2)."""
    def printt(self):
        print "pass"
class main_page():
    """Page object for the Google search page; each step reports to Allure."""
    @pytest.allure.step(" Переходим на сайт https://www.google.com/ ")
    def open_browser(self):
        # Open the site and maximise the window.
        browser.open_url('https://www.google.com/')
        browser.driver().maximize_window()
        print "Method # 1"
    @pytest.allure.step("Вводим Trionika в поле поиска ")
    def google_search(self, search):
        # Type the query into the search box and submit with Enter.
        s('[name="q"]').send_keys(search).press_enter()
        #s('ss').click()
        print "Method # 1"
    @pytest.allure.step("Нажимаем на кнопку поиска ")
    def click_search(self):
        print "Method # 1"
        print 'Good'
import tensorflow as tf
import numpy as np
import time
K=5  # number of clusters
MAX_ITERS = 1000  # hard cap on Lloyd iterations
def clusterize(batch):
    """K-means (Lloyd's algorithm) over integer points, TF 0.x/1.x graph API.

    batch: n points, each a length-dim sequence of ints.
    Returns [centers, assignments] as numpy arrays.
    (Python 2 code: note the print statements.)
    """
    start = time.time()
    n = len(batch)
    dim = len(batch[0])
    print batch
    points = tf.placeholder(tf.int32, [n,dim])
    cluster_assignments = tf.Variable(tf.zeros([n], dtype=tf.int64))
    # Use K random points as the starting centroids
    centroids = tf.Variable(tf.slice(tf.random_shuffle(points), [0,0], [K,dim]))
    # Replicate to n copies of each centroid and K copies of each
    # point, then subtract and compute the sum of squared distances.
    rep_centroids = tf.reshape(tf.tile(centroids, [n, 1]), [n, K, dim])
    rep_points = tf.reshape(tf.tile(points, [1, K]), [n, K, dim])
    sum_squares = tf.reduce_sum(tf.square(rep_points - rep_centroids), reduction_indices=2)
    # Use argmin to select the lowest-distance point
    best_centroids = tf.argmin(sum_squares, 1)
    # True whenever at least one point switched cluster this iteration.
    did_assignments_change = tf.reduce_any(tf.not_equal(best_centroids,
                                                        cluster_assignments))
    def bucket_mean(data, bucket_ids, num_buckets):
        # Per-cluster mean via segment sums (sum / count per bucket).
        total = tf.unsorted_segment_sum(data, bucket_ids, num_buckets)
        count = tf.unsorted_segment_sum(tf.ones_like(data), bucket_ids, num_buckets)
        return total / count
    means = bucket_mean(points, best_centroids, K)
    # Do not write to the assigned clusters variable until after
    # computing whether the assignments have changed - hence with_dependencies
    with tf.control_dependencies([did_assignments_change]):
        do_updates = tf.group(
            centroids.assign(means),
            cluster_assignments.assign(best_centroids))
    changed = True
    iters = 0
    sess = tf.Session()
    sess.run(tf.initialize_all_variables(), feed_dict={points: batch})
    # Iterate until assignments stabilise or the iteration cap is hit.
    while changed and iters < MAX_ITERS:
        iters += 1
        [changed, _] = sess.run([did_assignments_change, do_updates], feed_dict={points: batch})
    [centers, assignments] = sess.run([centroids, cluster_assignments], feed_dict={points: batch})
    end = time.time()
    print ("Found in %.2f seconds" % (end-start)), iters, "iterations"
    return [centers, assignments]
if __name__ == "__main__":
    # Demo: cluster 1000 random 20-dimensional integer points.
    clusterize(tf.random_uniform([1000,20], maxval=200, dtype=tf.int32).eval(session=tf.Session()))
|
from django.urls import path
from User import views
from rest_framework_jwt.views import obtain_jwt_token
app_name = 'user'  # URL namespace for the User app
urlpatterns = [
    path('register/', views.UserRegisterView.as_view()),
    path('login/', views.UserLogingView.as_view()),
    path('jwt-login/', obtain_jwt_token),  # returns a JWT token; the response shape can be customised via JWT_RESPONSE_PAYLOAD_HANDLER in settings
    path('info/', views.UserInfoView.as_view()),
]
|
# https://guoruibiao.gitbooks.io/effective-python/content/shi_yong_none_he_wen_dang_shuo_ming_dong_tai_de_zh.html#
import json
def decode(data, default={}):
    """Deliberately flawed example from Effective Python (see the URL above):
    the mutable default dict is shared across ALL calls that rely on it, so
    callers that mutate the returned value leak state into each other.
    `decode2` below is the corrected version -- do not "fix" this one.
    """
    try:
        return json.loads(data)
    except ValueError:
        return default
def decode2(data, default=None):
    """Load JSON data from string.

    :param data: JSON data to be decoded.
    :param default: Value to return if decoding fails.
        Defaults to a freshly created empty dictionary, so callers never
        share mutable state.
    :return: the decoded object, or *default* on failure.
    """
    fallback = {} if default is None else default
    try:
        result = json.loads(data)
    except ValueError:
        result = fallback
    return result
# Both calls fail to parse, but each receives its OWN empty dict (thanks to
# the default=None idiom), so mutating one result does not leak into the other.
foo = decode2('bad data')
foo['stuff'] = 5
bar = decode2('also bad')
bar['meep'] = 1
print('Foo:', foo)
print('Bar:', bar)
# https://guoruibiao.gitbooks.io/effective-python/content/jin_qiang_diao_guan_jian_zi_can_shu.html
# keyword-only arguments, in python3
def safe_division_c(number, division, *, ignore_overflow=False, ignore_zero_division=False):
    """Divide *number* by *division* with opt-in error tolerance.

    Keyword-only flags (Python 3 `*` syntax):
      ignore_overflow: return 0 instead of raising OverflowError.
      ignore_zero_division: return +inf instead of raising ZeroDivisionError.
    Any error whose flag is not set propagates to the caller.
    """
    try:
        result = number / division
    except OverflowError:
        if not ignore_overflow:
            raise
        result = 0
    except ZeroDivisionError:
        if not ignore_zero_division:
            raise
        result = float('inf')
    return result
safe_division_c(1, 0, ignore_zero_division=True)
|
# https://programmers.co.kr/learn/courses/30/lessons/60057
# ababcdcdababcdcd
# Because the compression unit (chunk size) is fixed up front, the accepted
# solution stays short. The commented-out code below does not fix the step size
# and compresses whenever the current pattern allows it: e.g. for "xabab" the
# accepted code cannot compress, but the commented-out version yields "x2ab".
# Apart from that, its logic is nearly identical to the submitted solution.
# def solution(s):
# answer = 0
#
# length = len(s)
#
# count = 1
# result = []
# sw = 0
# for i in range(1, length // 2 + 1):
# tmp = ''
# pre = 0
# # for j in range(0, length - i):
# j = 0
# count = 1
# sw = 0
# while j < length - i:
# # print(s[j:j + i], s[j + i:j + i + i])
# #print(j, i)
# #print(s[j:j + i], s[j + i:j + i + i])
# if s[j:j + i] == s[j + i:j + i + i]:
# pre = count
# count += 1
# j += i
# sw = 1
# else:
# if sw == 1:
# tmp += ((str)(count if count != 1 else '') + s[j: j + i])
# elif sw == 2:
# tmp += ((str)(count if count != 1 else '') + s[j])
# # print(tmp)
#
# count = 1
# if sw == 1:
# j += i
# elif sw == 2:
# j += 1
# sw = 2
# if sw == 1:
# tmp += ((str)(count if count != 1 else '') + s[j: j + i])
# elif sw == 2:
# tmp += s[j:]
# print(tmp)
# result.append(len(tmp)) if len(tmp) > 0 else 1
# print(result)
# answer = min(result)
# return answer
def solution(s):
    """Return the length of the shortest run-length compression of *s*.

    For every chunk size i (1 .. len(s)//2) the string is split into
    consecutive chunks of length i; runs of identical chunks are encoded
    as "<count><chunk>" (the count is omitted when it is 1) and any ragged
    tail is appended verbatim.  Strings of length <= 1 cannot shrink.

    BUG FIX: removed the stray debug `print(tmp)` / `print(result)` calls,
    which would corrupt the judged stdout of the submission.
    """
    length = len(s)
    if length <= 1:
        return length
    best = length  # the uncompressed string is always a valid encoding
    for i in range(1, length // 2 + 1):
        compressed = ''
        count = 1
        for j in range(0, length - i, i):
            if s[j:j + i] == s[j + i:j + i + i]:
                count += 1
            else:
                compressed += (str(count) if count != 1 else '') + s[j:j + i]
                count = 1
        # flush the final run together with the tail shorter than i
        compressed += (str(count) if count != 1 else '') + s[j + i:]
        best = min(best, len(compressed))
    return best
# Script entry: read the target string from stdin and print its best
# compressed length.
s = input()
print(solution(s))
# Print a float using old-style %-formatting ("%f")...
myWeight=54.9
print("My lucky number is %f" %myWeight)
# ...or equivalently with str.format and an explicit float conversion "{0:f}".
print("My lucky number is {0:f}".format(myWeight))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 13:13:07 2020
@author: marc
"""
from __future__ import print_function
import os
import torch
import torch.multiprocessing as sp
from envs import create_atari_env
from app import ActorCritic
from testing import Testing
from train import train
import Optimiser
class Params:
    """Hyper-parameter container for the A3C training run."""

    def __init__(self, master=None):
        # Bug fix: the constructor previously ignored its `master` argument
        # and always assigned None.
        self.master = master
        self.lr = 0.001                  # optimiser learning rate
        self.gamma = 0.99                # discount factor
        self.tau = 1.                    # GAE smoothing parameter
        self.seed = 1                    # RNG seed for reproducibility
        self.num_processes = 16          # number of parallel training workers
        self.num_step = 20               # rollout length per update
        self.max_episode_length = 10000  # hard cap on episode length
        self.env_id = 'SolitaireEnv-v2'  # gym environment id
# Keep each worker process single-threaded so the 16 workers don't oversubscribe.
os.environ['OMP_NUM_THREADS'] = '1'
params = Params()
torch.manual_seed(params.seed)
env = create_atari_env(params.env_id)
shared_model = ActorCritic(env.observation_space.shape[0], env.action_space)
# NOTE(review): torch nn.Module exposes share_memory(); assuming ActorCritic
# defines a shared_memory() wrapper — confirm against app.py.
shared_model.shared_memory()
optimiser = Optimiser.SharedAdam(shared_model.parameters(), lr=params.lr)
optimiser.shared_memory()
processes = []
# One dedicated process runs evaluation while the workers train.
p = sp.Process(target = Testing, args=(params.num_processes, params, shared_model))
p.start()
processes.append(p)
for rank in range(0, params.num_processes):
    # Bug fix: was `sp.Porcess`, which raises AttributeError at runtime.
    p = sp.Process(target=train, args=(rank, params, shared_model, optimiser))
    p.start()
    processes.append(p)
for p in processes:
    p.join()
import tensorflow as tf
print(tf.__version__)
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from imutils import paths
import os
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import cv2
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint
from tqdm.notebook import tqdm
from imutils import paths
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D,Dropout,Conv2D,BatchNormalization,Activation
# Number of output classes (binary problem).
num_classes = 2
# Images are resized to dim x dim before training.
dim = 480
# NOTE(review): input_shape declares 3 channels but prepare_images emits
# single-channel (dim, dim, 1) arrays — confirm which is authoritative.
input_shape = (dim, dim, 3)
# Training image paths (imutils walks the whole folder tree).
train_images = list(paths.list_images("dataset/train1"))
# Filled as a side effect by prepare_images with the paths actually kept.
filenames = []
# Folder/file names to skip while loading.
unwanted_classes = ['.ipynb_checkpoints']
def prepare_images(image_paths):
    """Load, grey-scale, resize and normalise the images at `image_paths`.

    Labels come from the containing folder name. Unreadable files are
    skipped with a printed warning (best-effort loading). As a side effect
    the kept paths are appended to the module-level `filenames` list.

    Returns (images, labels) as numpy arrays; each image is (dim, dim, 1)
    with values in [0, 1].
    """
    def _load_grayscale(image):
        # PNGs are read with OpenCV (BGR), everything else with matplotlib (RGB).
        if image.split("/")[-1].endswith('.png'):
            return cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2GRAY)
        return cv2.cvtColor(plt.imread(image), cv2.COLOR_RGB2GRAY)

    images = []
    labels = []
    for image in tqdm(image_paths):
        # Assumes POSIX-style "/" separators in paths — TODO confirm on Windows.
        parts = image.split("/")
        folder_name, file_name = parts[-2], parts[-1]
        if folder_name in unwanted_classes or file_name in unwanted_classes:
            continue
        try:
            pixels = _load_grayscale(image)
            pixels = cv2.resize(pixels, (dim, dim)).reshape(dim, dim, 1)
            pixels = pixels / 255.
            images.append(pixels)
            labels.append(folder_name)  # label = containing folder name
            filenames.append(image)
        except Exception as ex:
            # Best-effort: report and skip files that fail to load/convert.
            print("Exception :", ex)
    images = np.array(images)
    labels = np.array(labels)
    print(images.shape, labels.shape)
    return images, labels
# Call the pre-modelling method: load images (X) and folder labels (y).
X, y = prepare_images(train_images)
print(len(filenames))
# Hold out 15% of the data for testing.
(x_train,x_test, y_train, y_test) = train_test_split(X, y,test_size=0.15, random_state=42)
print("train count :",len(x_train))
print("test count :",len(x_test))
# Keep the original string labels for embedding visualisation later.
y_vis = y_train
# Encode string labels to integer ids (fit on train, reuse on test).
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
y_train = le.fit_transform(y_train)
y_test = le.transform(y_test)
print(np.unique(y_vis))
from tensorflow.keras.layers import Input, AveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Input, Concatenate
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
import tensorflow.keras.backend as K
def conv_factory(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Apply BN -> ReLU -> Conv2D twice (1x1 then 3x3), plus optional dropout.

    :parameter x: input keras tensor
    :parameter concat_axis: int -- index of the concatenation axis
    :parameter nb_filter: int -- number of filters per convolution
    :parameter dropout_rate: float or None -- dropout rate
    :parameter weight_decay: float -- L2 regularisation factor
    :returns: keras tensor after the two BN/ReLU/Conv stacks
    """
    # Same layer sequence as before, expressed as one pass per kernel size.
    for kernel_size in ((1, 1), (3, 3)):
        x = BatchNormalization(axis=concat_axis,
                               gamma_regularizer=l2(weight_decay),
                               beta_regularizer=l2(weight_decay))(x)
        x = Activation('relu')(x)
        x = Conv2D(nb_filter, kernel_size,
                   kernel_initializer="he_uniform", padding="same",
                   use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
def transition(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """Down-sampling transition: BN -> ReLU -> 1x1 Conv -> optional Dropout -> 2x2 average pooling.

    :parameter x: input keras tensor
    :parameter concat_axis: int -- index of the concatenation axis
    :parameter nb_filter: int -- number of filters for the 1x1 convolution
    :parameter dropout_rate: float or None -- dropout rate
    :parameter weight_decay: float -- L2 regularisation factor
    :returns: keras tensor halved in spatial resolution
    """
    out = BatchNormalization(axis=concat_axis,
                             gamma_regularizer=l2(weight_decay),
                             beta_regularizer=l2(weight_decay))(x)
    out = Activation('relu')(out)
    out = Conv2D(nb_filter, (1, 1), kernel_initializer="he_uniform",
                 padding="same", use_bias=False,
                 kernel_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    return AveragePooling2D((2, 2), strides=(2, 2))(out)
def denseblock(x, concat_axis, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
    """Dense block: every conv_factory output is concatenated with all previous features.

    :parameter x: input keras tensor
    :parameter concat_axis: int -- index of the concatenation axis
    :parameter nb_layers: int -- number of conv_factory stages to append
    :parameter nb_filter: int -- running filter count (grows by growth_rate per stage)
    :parameter growth_rate: int -- filters added by each stage
    :parameter dropout_rate: float or None -- dropout rate
    :parameter weight_decay: float -- L2 regularisation factor
    :returns: (tensor, updated filter count)
    """
    features = [x]
    for _ in range(nb_layers):
        new_features = conv_factory(x, concat_axis, growth_rate,
                                    dropout_rate, weight_decay)
        features.append(new_features)
        # The next stage sees the concatenation of everything so far.
        x = Concatenate(axis=concat_axis)(features)
        nb_filter += growth_rate
    return x, nb_filter
def denseblock_altern(x, concat_axis, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
    """Alternative dense block: concatenate each new branch directly onto the running tensor.

    :parameter x: input keras tensor
    :parameter concat_axis: int -- index of the concatenation axis
    :parameter nb_layers: int -- number of conv_factory stages to append
    :parameter nb_filter: int -- running filter count (grows by growth_rate per stage)
    :parameter growth_rate: int -- filters added by each stage
    :parameter dropout_rate: float or None -- dropout rate
    :parameter weight_decay: float -- L2 regularisation factor
    :returns: (tensor, updated filter count)

    Unlike denseblock, this keeps no explicit feature list — each stage is
    merged with the accumulated tensor pairwise.
    """
    for _ in range(nb_layers):
        branch = conv_factory(x, concat_axis, growth_rate, dropout_rate, weight_decay)
        x = Concatenate(axis=concat_axis)([branch, x])
        nb_filter += growth_rate
    return x, nb_filter
def DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate, nb_filter, dropout_rate=None, weight_decay=1E-4):
    """
    Build the DenseNet model.

    :parameter nb_classes: int -- number of classes
    :parameter img_dim: tuple -- input image shape
    :parameter depth: int -- overall depth, must satisfy depth = 3N + 4
    :parameter nb_dense_block: int -- number of dense blocks (indexes nb_layers1)
    :parameter growth_rate: int -- filters added per dense-block stage
    :parameter nb_filter: int -- initial number of filters
    :parameter dropout_rate: float or None -- dropout rate
    :parameter weight_decay: float -- L2 regularisation factor
    :returns: keras Model named "DenseNet"
    """
    if K.image_data_format() == "channels_first":
        concat_axis = 1
    elif K.image_data_format() == "channels_last":
        concat_axis = -1
    else:
        # Previously this fell through and crashed later with NameError.
        raise ValueError("Unsupported image data format: " + K.image_data_format())
    assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"
    model_input = Input(shape=img_dim)
    # Initial convolution.
    x = Conv2D(nb_filter, (7, 7), strides=(2, 2), kernel_initializer="he_uniform", padding="same", name="initial_conv2D", use_bias=False, kernel_regularizer=l2(weight_decay))(model_input)
    # Per-block stage counts; these override the depth-derived layer count,
    # so `depth` only participates via the assertion above.
    nb_layers1 = [6,12,32,32,48,32,48,64,32]
    # All but the last dense block are followed by a transition (down-sampling).
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = denseblock(x, concat_axis, nb_layers1[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
        x = transition(x, concat_axis,nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)
    # The last denseblock does not have a transition.
    x, nb_filter = denseblock(x, concat_axis, nb_layers1[nb_dense_block-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)
    x = Dense(nb_classes, activation='softmax', kernel_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)
    return Model(inputs=[model_input], outputs=[x], name="DenseNet")
# DenseNet hyper-parameters.
nb_classes = 2
img_dim = (dim,dim,1)
depth = 6*3+4
nb_dense_block = 4
growth_rate = 16
nb_filter = 12
dropout_rate = 0.2
weight_decay = 1E-4
learning_rate = 1E-3
model = DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)
# Model output
print("\n\n", model.summary())
# Build optimizer
opt = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# NOTE(review): a 2-unit softmax trained with binary_crossentropy on integer
# labels looks inconsistent — confirm (sparse_categorical_crossentropy or a
# 1-unit sigmoid output is the usual pairing).
model.compile(loss="binary_crossentropy", optimizer=opt,
              metrics=["accuracy"])
# NOTE(review): the block below is an exact duplicate of the build above —
# it re-creates the model (re-initialising weights) and re-compiles it.
nb_classes = 2
img_dim = (dim,dim,1)
depth = 6*3+4
nb_dense_block = 4
growth_rate = 16
nb_filter = 12
dropout_rate = 0.2
weight_decay = 1E-4
learning_rate = 1E-3
model = DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)
# Model output
#model.summary()
# Build optimizer
opt = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss="binary_crossentropy", optimizer=opt,
              metrics=["accuracy"])
num_epochs = 150
batch_size = 8
## Define callback: CSV training log + a checkpoint saved every epoch.
from tensorflow.keras.callbacks import CSVLogger
import os
os.makedirs('saved_models/try2/', exist_ok=True)
csv_logger = CSVLogger('saved_models/try2/10_Nov_2_class_log.csv', append=True, separator=',')
checkpoint = ModelCheckpoint('saved_models/try2/model.{epoch:02d}.h5', monitor='val_loss', verbose=2, save_best_only=False, mode='min', period=1)
history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs, callbacks = [checkpoint, csv_logger])
accuracy = model.evaluate(x_test, y_test)[1]
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print("Done..!!!")
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import socket, time
import tf
from geometry_msgs.msg import Point, PoseStamped
from geometry_msgs.msg import Twist
import rospy
from mavros_msgs.msg import *
from mavros_msgs.srv import *
from nav_msgs.msg import *
from tf.transformations import *
from gazebo_msgs.msg import ModelStates
class Controller:
    """Holds the latest robot state from Gazebo/odometry and publishes velocity commands."""
    # initialization method
    def __init__(self):
        # Position, linear velocity, quaternion, angular velocity — all updated
        # by the subscriber callbacks below.
        self.x, self.y, self.z = 0, 0, 0
        self.vx, self.vy, self.vz = 0, 0, 0
        self.q0, self.q1, self.q2, self.q3 = 0, 0, 0, 0
        self.wx, self.wy, self.wz = 0,0,0
        self.roll, self.pitch, self.yaw = 0,0,0 #current roll, pitch, yaw
        self.cmd_pub = rospy.Publisher('/skid4wd/drive_controller/cmd_vel', Twist, queue_size=10)
        self.twist = Twist()
        self.robotpose = None
        self.robottwist =None
    def gazeboStateCb(self, msg):
        """Callback for /gazebo/model_states: cache the skid4wd pose/twist and derive roll/pitch/yaw."""
        # global payload_state
        # global i, j, k
        idx = msg.name.index('skid4wd')
        self.robotpose = msg.pose[idx]
        self.robottwist = msg.twist[idx]
        # position_p = self.robot.position
        # orientation_q = self.robot.orientation
        # print(position_p)
        # print(orientation_q)
        self.x = self.robotpose.position.x
        self.y = self.robotpose.position.y
        self.z = self.robotpose.position.z
        self.q0 = self.robotpose.orientation.w
        self.q1 = self.robotpose.orientation.x
        self.q2 = self.robotpose.orientation.y
        self.q3 = self.robotpose.orientation.z
        self.vx = self.robottwist.linear.x
        self.vy = self.robottwist.linear.y
        self.vz = self.robottwist.linear.z
        self.wx = self.robottwist.angular.x
        self.wy = self.robottwist.angular.y
        self.wz = self.robottwist.angular.z
        # euler_from_quaternion expects [x, y, z, w] ordering.
        orientation_list = [self.q1, self.q2, self.q3, self.q0]
        (self.roll, self.pitch, self.yaw) = euler_from_quaternion(orientation_list)
    def odomCb(self, msg):
        """Callback for odometry: cache twist only (pose updates are commented out)."""
        # self.x = msg.pose.pose.position.x
        # self.y = msg.pose.pose.position.y
        # self.z = msg.pose.pose.position.z
        # self.q0 = msg.pose.pose.orientation.w
        # self.q1 = msg.pose.pose.orientation.x
        # self.q2 = msg.pose.pose.orientation.y
        # self.q3 = msg.pose.pose.orientation.z
        self.vx = msg.twist.twist.linear.x
        self.vy = msg.twist.twist.linear.y
        self.vz = msg.twist.twist.linear.z
        self.wx = msg.twist.twist.angular.x
        self.wy = msg.twist.twist.angular.y
        self.wz = msg.twist.twist.angular.z
        # NOTE(review): q0..q3 are not refreshed here (assignments commented out),
        # so roll/pitch/yaw are derived from a possibly stale quaternion — confirm.
        orientation_list = [self.q1, self.q2, self.q3, self.q0]
        (self.roll, self.pitch, self.yaw) = euler_from_quaternion(orientation_list)
    def cmd_velocity(self, vx, wz):
        """Fill the reusable Twist with forward speed vx and yaw rate wz (all other axes zero)."""
        # now = rospy.Time.now()
        # self.twist.header.stamp = now
        self.twist.linear.x = vx
        self.twist.linear.y = 0.0
        self.twist.linear.z = 0.0
        self.twist.angular.x = 0.0
        self.twist.angular.y = 0.0
        self.twist.angular.z = wz
    def pub_vel(self, vx, wz):
        """Build and publish a velocity command on the drive controller topic."""
        self.cmd_velocity(vx, wz)
        self.cmd_pub.publish(self.twist)
def Tcp_connect( HostIp, Port ):
    """Open a TCP connection to HostIp:Port, stored in the module-level socket `s`."""
    global s
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HostIp, Port))
def Tcp_Close( ):
    """Close the module-level TCP socket opened by Tcp_connect."""
    s.close()
def Tcp_Write_Array(myArray):
    """Send `myArray` over the module-level socket `s`.

    Each item is stringified and terminated with '|', e.g. [1, 2] -> "1|2|"
    (an empty array sends the empty string), matching what Tcp_Read_Array
    on the peer splits on.
    """
    # join builds the payload in one pass instead of the old quadratic `+=` loop.
    payload = "".join(str(item) + "|" for item in myArray)
    s.send(payload.encode())
def Tcp_Read_Array():
    """Receive up to 1 KiB from the module-level socket and split it on '|'.

    Returns the list of fields; the trailing separator (if any) yields a
    final empty string.
    """
    raw = s.recv(1024).decode()
    return raw.split('|')
def main():
    """ROS node: stream robot state over TCP and apply received velocity commands at 20 Hz."""
    rospy.init_node('setpoint_node', anonymous=True)
    cnt = Controller()
    rate = rospy.Rate(20.0)
    # rospy.Subscriber('/skid4wd/drive_controller/odom', Odometry, cnt.odomCb)
    rospy.Subscriber('/gazebo/model_states', ModelStates, cnt.gazeboStateCb)
    Tcp_connect( '127.0.0.1', 17098)
    while not rospy.is_shutdown():
        # Ship the current state; the peer answers with [vx, wz].
        arr = [cnt.x, cnt.y, cnt.yaw, cnt.vx, cnt.wz]
        Tcp_Write_Array(arr)
        vel = Tcp_Read_Array()
        print("----------")
        cnt.pub_vel(float(vel[0]), float(vel[1]))
        # Bug fix: `rate` was created but never slept on, so the loop ran
        # flat-out instead of at the intended 20 Hz.
        rate.sleep()
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node (e.g. Ctrl-C).
        pass
|
"""
Chef is teaching a cooking course. There are N students attending the course, numbered 1 through N.
Before each lesson, Chef has to take attendance, i.e. call out the names of students one by one and mark which students are present. Each student has a first name and a last name. In order to save time, Chef wants to call out only the first names of students. However, whenever there are multiple students with the same first name, Chef has to call out the full names (both first and last names) of all these students. For each student that does not share the first name with any other student, Chef may still call out only this student's first name.
Help Chef decide, for each student, whether he will call out this student's full name or only the first name.
"""
# Number of test cases (first line of input).
test_cases = int(input())
def get_first_name(name) :
    """Return the first whitespace-separated token of a full name."""
    return name.split(None, 1)[0]
from collections import Counter

for _ in range(test_cases):
    no_of_std = int(input())
    names = [input() for _ in range(no_of_std)]
    first_names = [get_first_name(name) for name in names]
    # Count each first name once up front; the old per-student
    # first_names.count(...) made the loop quadratic.
    tally = Counter(first_names)
    for full_name, first_name in zip(names, first_names):
        # Duplicated first name -> call out the full name, else first name only.
        print(full_name if tally[first_name] > 1 else first_name)
|
#Import Modules
import discord
from discord.ext import commands
import random
#Cog Setup
class Decks(commands.Cog):
    """Card-deck cog: a shared draw pile, per-player (DM'd) hands and face-up cards."""

    # "value suit" card string -> custom Discord emoji (values 1-13, suits h/d/s/c).
    # Replaces the old 52-branch if chain in translate_card_emoji.
    CARD_EMOJI = {
        "1 h": "<:HA:716858601041428502>", "2 h": "<:H2:716858631936671804>",
        "3 h": "<:H3:716858652752871485>", "4 h": "<:H4:716858669446332426>",
        "5 h": "<:H5:716858683857829890>", "6 h": "<:H6:716858697732718684>",
        "7 h": "<:H7:716858732557893632>", "8 h": "<:H8:716858746474725386>",
        "9 h": "<:H9:716858760450146384>", "10 h": "<:H10:716858775570481223>",
        "11 h": "<:HJ:716858817094090832>", "12 h": "<:HQ:716858849868513341>",
        "13 h": "<:HK:716858861247791196>",
        "1 d": "<:DA:716858907821342740>", "2 d": "<:D2:716858925395476510>",
        "3 d": "<:D3:716858934874341446>", "4 d": "<:D4:716858950133350444>",
        "5 d": "<:D5:716858961730732043>", "6 d": "<:D6:716858984522580008>",
        "7 d": "<:D7:716858996459307088>", "8 d": "<:D8:716859011491954720>",
        "9 d": "<:D9:716859023093399643>", "10 d": "<:D10:716859063111254018>",
        "11 d": "<:DJ:716859091942899784>", "12 d": "<:DQ:716859115149852722>",
        "13 d": "<:DK:716859126088728607>",
        "1 s": "<:SA:716859337850748979>", "2 s": "<:S2:716859353390383176>",
        "3 s": "<:S3:716859364945952788>", "4 s": "<:S4:716859377793105980>",
        "5 s": "<:S5:716859390275354624>", "6 s": "<:S6:716859401415426080>",
        "7 s": "<:S7:716859412697841776>", "8 s": "<:S8:716859444427882556>",
        "9 s": "<:S9:716859456889028679>", "10 s": "<:S10:716859469392379975>",
        "11 s": "<:SJ:716859500518440961>", "12 s": "<:SQ:716859517417029652>",
        "13 s": "<:SK:716859531400839178>",
        "1 c": "<:CA:716859547956019250>", "2 c": "<:C2:716859561998418021>",
        "3 c": "<:C3:716859569946492959>", "4 c": "<:C4:716859578154876929>",
        "5 c": "<:C5:716859589160730674>", "6 c": "<:C6:716859596479922297>",
        "7 c": "<:C7:716859608131698751>", "8 c": "<:C8:716859613676437514>",
        "9 c": "<:C9:716859631120547922>", "10 c": "<:C10:716859635826556980>",
        "11 c": "<:CJ:716859641388335124>", "12 c": "<:CQ:716859653182718083>",
        "13 c": "<:CK:716859657972350977>",
    }

    def __init__(self,client):
        self.client = client
        self.setting_newdeck = ["1 h","2 h","3 h","4 h","5 h","6 h","7 h","8 h","9 h","10 h","11 h","12 h","13 h","1 d","2 d","3 d","4 d","5 d","6 d","7 d","8 d","9 d","10 d","11 d","12 d","13 d","1 s","2 s","3 s","4 s","5 s","6 s","7 s","8 s","9 s","10 s","11 s","12 s","13 s","1 c","2 c","3 c","4 c","5 c","6 c","7 c","8 c","9 c","10 c","11 c","12 c","13 c"] #All cards.
        self.game_deck = [] #For copying the base deck and shuffling.
        self.game_hands = [] #Alternatingly player IDs and the cards they've drawn.
        self.game_faceup = [] #Cards drawn face-up

    def translate_card_emoji(self, card):
        """Return the emoji for a "value suit" card string, or None if unknown."""
        return self.CARD_EMOJI.get(card)

    def check_integer(self, argument):
        """True when `argument` is a string of decimal digits."""
        return argument.isdecimal()

    def translate_argument_card(self, argument):
        """Parse user card shorthand into a "value suit" string, or None.

        Accepted forms (value and suit in either order): "10h"/"h10",
        "5d"/"d5", "qs"/"sq" — faces a/j/q/k map to 1/11/12/13.
        """
        suits = ("h", "d", "s", "c")
        faces = {"a": "1", "j": "11", "q": "12", "k": "13"}
        if len(argument) == 3:
            # Two digits + suit letter, e.g. "10h" or "h10"; values 10-13 only.
            head, tail = argument[:2], argument[2]
            if head.isdecimal() and not tail.isdecimal():
                return head + " " + tail if tail in suits and 10 <= int(head) <= 13 else None
            head, tail = argument[0], argument[1:]
            if not head.isdecimal() and tail.isdecimal():
                return tail + " " + head if head in suits and 10 <= int(tail) <= 13 else None
            return None
        if len(argument) == 2:
            first, second = argument[0], argument[1]
            # Single digit + suit letter (values 1-9), either order.
            if first.isdecimal() and not second.isdecimal():
                return first + " " + second if second in suits and 1 <= int(first) <= 9 else None
            if not first.isdecimal() and second.isdecimal():
                return second + " " + first if first in suits and 1 <= int(second) <= 9 else None
            # Face letter + suit letter, either order.
            if second in suits:
                return faces[first] + " " + second if first in faces else None
            if first in suits:
                return faces[second] + " " + first if second in faces else None
            return None
        return None

    def _hand_slot(self, player_id):
        """Index into game_hands of `player_id`'s card list, or None if no hand yet."""
        for index, entry in enumerate(self.game_hands):
            if entry == player_id:
                return index + 1
        return None

    async def _draw_count(self, ctx, argument1):
        """Validate the requested draw amount; report problems in-channel, 0 on error."""
        if argument1 is None:
            return 1
        if not self.check_integer(argument1) or int(argument1) <= 0:
            await ctx.channel.send("That's not a valid amount of cards to draw!")
            return 0
        if len(self.game_deck) < int(argument1):
            await ctx.channel.send("There are not enough cards left in the deck!")
            return 0
        return int(argument1)

    @commands.Cog.listener()
    async def on_ready(self):
        print("Decks cog loaded.")

    ##Decks
    @commands.command(aliases=["deck"])
    async def decks(self, ctx, action, argument1=None):
        """Entry point for every !deck subcommand; dispatches on `action`."""
        if action in ("draw", "facedown", "drawdown", "down", "drawfacedown"):
            await self._draw_facedown(ctx, argument1)
        if action in ("faceup", "drawup", "up", "drawfaceup"):
            await self._draw_faceup(ctx, argument1)
        if action in ("reset", "shuffle", "new", "start", "begin", "refresh", "reload"):
            await self._reset_deck(ctx, argument1)
        if action in ("hand", "show", "showhand"):
            await self._show_hand(ctx)
        if action in ("handup", "showup", "showhandup", "handfaceup", "showfaceup", "showhandfaceup"):
            await self._show_faceup(ctx)
        if action in ("discard", "remove", "delete", "destroy", "throwaway"):
            await self._discard(ctx, argument1)
        if action in ("add", "fish", "grab", "take"):
            await self._add(ctx, argument1)
        if action in ("help", "info"):
            await ctx.channel.send("This command keeps track of a deck of cards for everyone to play with! Current commands are as follows. `[x]` indicates an optional argument, `<x>` indicates a required argument.\n__Commands:__\n`!deck shuffle [number]` shuffle one or multiple decks for everyone to draw from\n`!deck drawdown [number]` draw one or multiple cards face-down (through DMs) and add them to your hand\n`!deck drawup [number]` draw one or multiple cards face-up\n`!deck hand` shows your current hand\n`!deck handup` shows all cards that have been drawn face-up by anyone\n`!deck discard <card>` discard a card from your hand, by typing the first letter of the card suit and the value of the card (in any order)\n`!deck add <card>` add any card you want to your hand, by typing the first letter of the card suit and the value of the card (in any order)")

    async def _draw_facedown(self, ctx, argument1):
        """Draw cards into the caller's hand; the result is sent by DM."""
        if len(self.game_deck) == 0:
            await ctx.channel.send("Deck's empty!")
            return
        drawcount = await self._draw_count(ctx, argument1)
        if drawcount == 0:
            return
        slot = self._hand_slot(ctx.author.id)
        if slot is None:
            self.game_hands.append(ctx.author.id)
            self.game_hands.append([])
            slot = len(self.game_hands) - 1
        drawstring = "You drew "
        for _ in range(drawcount):
            card = self.game_deck.pop(0)
            self.game_hands[slot].append(card)
            drawstring += self.translate_card_emoji(card)
        await ctx.author.send(drawstring)

    async def _draw_faceup(self, ctx, argument1):
        """Draw cards face-up; they go to the shared pile, not to a hand."""
        if len(self.game_deck) == 0:
            await ctx.channel.send("Deck's empty!")
            return
        drawcount = await self._draw_count(ctx, argument1)
        if drawcount == 0:
            return
        drawstring = "{} drew ".format(ctx.author.name)
        for _ in range(drawcount):
            card = self.game_deck.pop(0)
            self.game_faceup.append(card)
            drawstring += self.translate_card_emoji(card)
        await ctx.channel.send(drawstring)

    async def _reset_deck(self, ctx, argument1):
        """Start over with `argument1` shuffled copies of the base deck (default 1)."""
        copies = 1
        if argument1 is not None and self.check_integer(argument1):
            copies = max(1, int(argument1))
        self.game_deck = []
        for _ in range(copies):
            self.game_deck += self.setting_newdeck.copy()
        random.shuffle(self.game_deck)
        self.game_hands = []
        self.game_faceup = []
        await ctx.channel.send("New deck shuffled.")

    async def _show_hand(self, ctx):
        """Post the caller's hand in-channel."""
        slot = self._hand_slot(ctx.author.id)
        if slot is None:
            await ctx.channel.send("You have drawn no cards!")
            return
        handstring = "{}'s hand: ".format(ctx.author.name)
        for card in self.game_hands[slot]:
            handstring += self.translate_card_emoji(card)
        await ctx.channel.send(handstring)

    async def _show_faceup(self, ctx):
        """Post every card drawn face-up so far."""
        if len(self.game_faceup) < 1:
            await ctx.channel.send("No cards have been drawn face-up!")
            return
        handstring = "Face-up cards: "
        for card in self.game_faceup:
            handstring += self.translate_card_emoji(card)
        await ctx.channel.send(handstring)

    async def _discard(self, ctx, argument1):
        """Remove one copy of the named card from the caller's hand."""
        card = self.translate_argument_card(argument1.lower())
        if card is None:
            await ctx.channel.send("Invalid card name!")
            return
        slot = self._hand_slot(ctx.author.id)
        if slot is None:
            await ctx.channel.send("You have no cards in your hand!")
            return
        if card in self.game_hands[slot]:
            # remove() drops the first occurrence, matching the old del-by-index.
            self.game_hands[slot].remove(card)
            await ctx.channel.send("Card discarded.")
        else:
            await ctx.channel.send("Card not found in your hand!")

    async def _add(self, ctx, argument1):
        """Add an arbitrary named card to the caller's hand."""
        card = self.translate_argument_card(argument1.lower())
        if card is None:
            await ctx.channel.send("Invalid card name!")
            return
        slot = self._hand_slot(ctx.author.id)
        if slot is None:
            self.game_hands.append(ctx.author.id)
            self.game_hands.append([card])
        else:
            self.game_hands[slot].append(card)
        await ctx.channel.send("Card added.")
def setup(client):
    """Register the Decks cog on the bot (discord.py extension entry point)."""
    client.add_cog(Decks(client))
'''#Subtask 1
t=int(input())
for I in range(t):
n=int(input())
sum1=0
sum1+=n
for j in range(1,(int(n)//2)+1):
if(n%j==0):
sum1+=j
print(sum1)'''
# Subtask 2 — sum all divisors of n by pairing i with n//i up to sqrt(n).
from math import isqrt

def sum_of_divisors(n):
    """Return sigma(n): the sum of all positive divisors of n (including n)."""
    total = 0
    # isqrt avoids the float-precision pitfalls of int(sqrt(n)) for large n.
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            # A perfect-square root pairs with itself; count it once.
            total += i if i * i == n else i + (n // i)
    return total

if __name__ == "__main__":
    t = int(input())
    for _ in range(t):
        print(sum_of_divisors(int(input())))
|
# Demo of Python's augmented assignment operators; the printed value is
# noted after each step.
x = 23
x += 1
print(x)   # 24
x -= 4
print(x)   # 20
x *= 5
print(x)   # 100
x //= 4
print(x)   # 25 (floor division keeps int)
x /= 5
print(x)   # 5.0 (true division always yields float)
x **= 2
print(x)   # 25.0
x %= 5
print(x)   # 0.0
greeting = "Good "
greeting += "morning"
print(greeting)   # += concatenates strings
greeting *= 5
print(greeting)   # *= repeats the string five times
print()
# Multiplication expressed as repeated addition with +=.
number = 5
multiplier = 8
answer = 0
for i in range(multiplier):
    answer += number
print(answer)
from rest_framework import viewsets
from django.utils.datastructures import MultiValueDictKeyError
from rest_framework.response import Response
from random import randint
from django.db.models import Q
from django.contrib.auth.models import User, Group
from .serializers import *
from .models import *
# Admin View
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be created, viewed or edited.
    Newest accounts are listed first.
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows auth groups to be created, viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
# API View
class AccountViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows accounts to be created, viewed or edited.
    """
    queryset = Account.objects.all()
    serializer_class = AccountSerializer
class AccountGroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows account groups to be created, viewed or edited.
    Deletion is disabled (no 'delete' in http_method_names).
    """
    queryset = AccountGroup.objects.all()
    serializer_class = AccountGroupSerializer
    http_method_names = ['post', 'patch', 'get', 'head']
    def create(self, request):
        # NOTE(review): randint gives no uniqueness guarantee — two groups can
        # collide on the same 6-digit code; confirm whether group_code is unique.
        group_code = randint(100000, 999999)
        try:
            serializer = AccountGroupSerializer(data=request.data)
            if serializer.is_valid():
                # Server-generated code overrides anything the client sent.
                serializer.save(group_code=group_code)
                return Response(serializer.data, status=201)
            return Response(serializer.errors, status=400)
        except Exception as exc:
            # NOTE(review): this broad catch labels EVERY failure as
            # "Missing parameters" — consider narrowing.
            return Response('Missing parameters {}'.format(exc), status=400)
class AccountGroupCodeViewSet(viewsets.ModelViewSet):
    """Read-only lookup of account groups by their numeric group code."""
    serializer_class = AccountGroupSerializer
    http_method_names = ['get', 'head']
    def get_queryset(self):
        """
        Return the groups matching the `code` query parameter, or an empty
        queryset when the parameter is absent.
        """
        # Bug fix: the original built Response(...) objects in the error paths
        # but never returned them, so get_queryset yielded None and DRF
        # crashed later; a get_queryset must return a queryset.
        try:
            code = self.request.GET['code']
        except (MultiValueDictKeyError, KeyError):
            return AccountGroup.objects.none()
        return AccountGroup.objects.filter(group_code=code)
class AccountGroupSearchViewSet(viewsets.ModelViewSet):
    """
    API endpoint that searches account groups by name or description.
    """
    serializer_class = AccountGroupSerializer
    http_method_names = ['get', 'head']

    def get_queryset(self):
        """
        Return groups whose name or description contains ``search_params``;
        all groups when the parameter is absent.

        BUG FIX: the original's ``except`` branch constructed a ``Response``
        without returning it, so ``get_queryset`` returned ``None`` and DRF
        raised while iterating.  Using ``GET.get`` removes the failure mode
        entirely.
        """
        search_params = self.request.GET.get('search_params')
        if search_params is None:
            return AccountGroup.objects.all()
        return AccountGroup.objects.filter(
            Q(group_name__icontains=search_params) |
            Q(group_description__icontains=search_params))
class AdminGroupListView(viewsets.ModelViewSet):
    """
    Lists the account groups administered by a given user.
    """
    serializer_class = AccountGroupSerializer

    def get_queryset(self):
        """
        Return the groups whose ``group_admin`` matches the ``identifier``
        query parameter (an email address is expected); all groups when the
        parameter is absent.

        BUG FIX: the original's ``except`` branch built a ``Response``
        without returning it, so the method returned ``None`` and DRF
        raised.  ``GET.get`` makes the missing-parameter path explicit.
        """
        identifier = self.request.GET.get('identifier')
        if identifier is None:
            return AccountGroup.objects.all()
        return AccountGroup.objects.filter(group_admin=identifier)
class AccountGroupRelationshipViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows account + group relationship to be viewed or edited.
    """
    serializer_class = AccountGroupRelationshipSerializer
    queryset = AccountGroupRelationship.objects.all()
    http_method_names = ['get', 'post', 'head']
class GroupsContributionViewSet(viewsets.ModelViewSet):
    """
    Read-only listing of every group contribution.
    """
    serializer_class = GroupContributionSerializer
    queryset = GroupContribution.objects.all()
    http_method_names = ['get', 'head']
class GroupContributionViewSet(viewsets.ModelViewSet):
    """
    Read-only listing of the contributions belonging to one group.
    """
    serializer_class = GroupContributionSerializer
    http_method_names = ['get', 'head']

    def get_queryset(self):
        """
        Return the contributions for the group named by the numeric
        ``identifier`` query parameter.

        BUG FIX: the original constructed three ``Response`` objects without
        returning them, so a missing or non-numeric identifier made the
        method return ``None`` and DRF crashed.  Those paths now return an
        empty queryset.
        """
        identifier = self.request.GET.get('identifier')
        if identifier is None or not identifier.isdigit():
            return GroupContribution.objects.none()
        return GroupContribution.objects.filter(group_id=identifier)
class GroupPayoutListViewSet(viewsets.ModelViewSet):
    """
    Read-only access to the group payout list.
    """
    serializer_class = GroupPayoutListSerializer
    queryset = GroupPayoutList.objects.all()
    http_method_names = ['get', 'head']
class GroupPayoutLogViewSet(viewsets.ModelViewSet):
    """
    Read-only access to the group payout log.
    """
    serializer_class = GroupPayoutLogSerializer
    queryset = GroupPayoutLog.objects.all()
    http_method_names = ['get', 'head']
|
# Program to display average of given positive numbers
total = 0
count = 0
while True:
    num = int(input("Enter a number [0 to stop] :"))
    if num == 0:
        break  # Terminate loop
    if num < 0:
        continue  # Negative values are ignored entirely
    total += num
    count += 1
# BUG FIX: the original divided unconditionally, raising ZeroDivisionError
# when the user stopped without entering any positive number.
if count:
    print("Average = ", total / count)
else:
    print("No positive numbers entered.")
"""empty message
Revision ID: 2eaba3ac57e4
Revises: 76e53bf2dfd4
Create Date: 2018-10-08 15:43:04.972131
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '2eaba3ac57e4'
down_revision = '76e53bf2dfd4'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add compliance/policy/contact columns to
    ``company`` and move the unique-indexed ``web`` column from ``users``
    to ``company``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('company', sa.Column('accident_policy_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('company_risk_assessed', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('data_contact_first_name', sa.String(length=30), nullable=True))
    op.add_column('company', sa.Column('data_contact_last_name', sa.String(length=30), nullable=True))
    op.add_column('company', sa.Column('data_contact_position', sa.String(length=30), nullable=True))
    op.add_column('company', sa.Column('data_contact_telephone', sa.String(length=30), nullable=True))
    op.add_column('company', sa.Column('data_policy_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('data_policy_link', sa.String(length=120), nullable=True))
    op.add_column('company', sa.Column('data_training_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('emergency_procedures_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('health_policy_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('health_policy_link', sa.String(length=120), nullable=True))
    op.add_column('company', sa.Column('hse_registered', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('ico_registration_number', sa.String(length=20), nullable=True))
    op.add_column('company', sa.Column('insured', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('la_registered', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('privacy_notice_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('report_student_accidents_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('report_student_illness_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('risks_mitigated', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('risks_reviewed', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('security_measures_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('security_policy_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('security_policy_link', sa.String(length=120), nullable=True))
    op.add_column('company', sa.Column('student_insured', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('training_policy_flag', sa.Boolean(), nullable=True))
    op.add_column('company', sa.Column('training_policy_link', sa.String(length=120), nullable=True))
    op.add_column('company', sa.Column('web', sa.String(length=120), nullable=True))
    op.create_index(op.f('ix_company_web'), 'company', ['web'], unique=True)
    # `web` now lives on company; drop the old users-side column and index.
    op.drop_index('ix_users_web', table_name='users')
    op.drop_column('users', 'web')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore ``users.web`` (with its unique index)
    and drop every column this revision added to ``company``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('web', mysql.VARCHAR(length=120), nullable=True))
    op.create_index('ix_users_web', 'users', ['web'], unique=True)
    op.drop_index(op.f('ix_company_web'), table_name='company')
    op.drop_column('company', 'web')
    op.drop_column('company', 'training_policy_link')
    op.drop_column('company', 'training_policy_flag')
    op.drop_column('company', 'student_insured')
    op.drop_column('company', 'security_policy_link')
    op.drop_column('company', 'security_policy_flag')
    op.drop_column('company', 'security_measures_flag')
    op.drop_column('company', 'risks_reviewed')
    op.drop_column('company', 'risks_mitigated')
    op.drop_column('company', 'report_student_illness_flag')
    op.drop_column('company', 'report_student_accidents_flag')
    op.drop_column('company', 'privacy_notice_flag')
    op.drop_column('company', 'la_registered')
    op.drop_column('company', 'insured')
    op.drop_column('company', 'ico_registration_number')
    op.drop_column('company', 'hse_registered')
    op.drop_column('company', 'health_policy_link')
    op.drop_column('company', 'health_policy_flag')
    op.drop_column('company', 'emergency_procedures_flag')
    op.drop_column('company', 'data_training_flag')
    op.drop_column('company', 'data_policy_link')
    op.drop_column('company', 'data_policy_flag')
    op.drop_column('company', 'data_contact_telephone')
    op.drop_column('company', 'data_contact_position')
    op.drop_column('company', 'data_contact_last_name')
    op.drop_column('company', 'data_contact_first_name')
    op.drop_column('company', 'company_risk_assessed')
    op.drop_column('company', 'accident_policy_flag')
    # ### end Alembic commands ###
|
# npm install --save bcrypt
# npm install --save jsonwebtoken
# log in, check, insert, delete
# db.posts.remove( {"_id": ObjectId("60df6e64d83fe730142c7090")}) // delete one item by ID
import scrapy as sc
#// *[ @ id = "qt0324253"] / div[1] / p / text()
class matrixQuotes(object):
    """Scraper intended to pull quotes from the IMDb page for The Matrix.

    NOTE(review): this class defines ``name``/``start_urls``/``parse`` in
    the shape of a Scrapy spider but inherits from ``object``, so Scrapy
    will never discover or run it.  It almost certainly should subclass
    ``sc.Spider`` — confirm and fix.  Also note ``name`` is spelled
    "qoutes" (typo kept as-is because it is the spider's runtime id).
    """
    name="qoutes"
    start_urls=[
        'https://www.imdb.com/title/tt0133093/quotes',
    ]
    def parse(self, response):
        # Each quote block on the IMDb page lives inside a div.list container.
        for quote in response.css('div.list'):
            print(quote)
# Skill ratings per user, keyed by username; None marks an unrated skill.
user = {
    'Cutesexyrobutts': {"Java": 3.6, "Python": 4.5, "OpenGL": 2.3, "JS": 4.3, "Flutter": 5.0},
    "Ishikey": {"Java": 1.6, "Python": 2.5, "OpenGL": 4.3, "JS": 2.3, "Flutter": 1.0},
    "MUK": {"Java": 2.6, "Python": 3.2, "OpenGL": 1.3, "JS": 1.3, "Flutter": 2.3},
    "Surte": {"Java": 5.0, "Python": 3.5, "OpenGL": 2.3, "JS": 1.0, "Flutter": 3.1},
    "Vaseraga": {"Java": 3.6, "Python": 4.5, "OpenGL": None, "JS": 3.0, "Flutter": None},
}
print('Bem Vindo ao calculador de médias!')
nome = input('digite seu nome: ')
nota1 = float(input('digite sua primeira nota: '))
nota2 = float(input('digite sua segunda nota: '))
nota3 = float(input('digite sua terceira nota: '))
nota4 = float(input('digite sua quarta nota: '))
media = (nota1 + nota2 + nota3 + nota4) / 4
# BUG FIX: the original used three independent `if`s covering only
# media >= 6, media < 4 and media == 4, so any average strictly between
# 4 and 6 printed nothing.  The whole 4 <= media < 6 band is recuperação.
if media >= 6:
    print(nome, 'foi aprovado com média:', media)
elif media < 4:
    print(nome, 'foi reprovado com média:', media)
else:
    print(nome, 'está em recuperação com média:', media)
import os
import itertools
import numpy as np
import matplotlib.pyplot as plt
# Resolve project-relative directories from this file's location.
SRC_DIR = os.path.dirname(os.path.realpath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(SRC_DIR, os.pardir))
DATA_DIR = os.path.join(ROOT_DIR, "data")
# Make sure the data directory exists.
os.makedirs(DATA_DIR, exist_ok=True)
def plot_confusion_matrix(cm, classes,
                          normalize=False, title='Confusion matrix',
                          cmap=plt.cm.Reds, directory=DATA_DIR,
                          classifier="no classifier specified"):
    """
    Print and plot the confusion matrix `cm`, then save the figure as
    "<title>_<classifier>.png" inside `directory` (created if missing).

    Normalization can be applied by setting `normalize=True`, in which case
    each row is divided by its sum before plotting.

    BUG FIX: the original called plt.savefig() *before* drawing anything,
    so the saved PNG was blank (or a stale previous figure), and it
    normalized `cm` *after* plt.imshow(), so the rendered image never
    reflected normalize=True.  Normalization now happens first and the
    figure is saved last.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate every cell; flip text color on the brightness midpoint.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    if not os.path.isdir(directory):
        os.makedirs(directory)
    saving_dir = os.path.join(directory, "{}_{}.png".format(title, classifier))
    plt.savefig(saving_dir)
def accuracy_score(Yval, Ypred):
    """
    Return the fraction of predictions in `Ypred` matching `Yval`.

    BUG FIX: with plain Python lists, `Yval == Ypred` compares the whole
    lists at once and yields a single bool, so the original returned
    1/len or 0.0 instead of the per-element accuracy.  Coercing both
    inputs with np.asarray makes the comparison element-wise for any
    sequence input, while leaving ndarray inputs unchanged.
    """
    assert len(Yval) == len(Ypred)
    Yval = np.asarray(Yval)
    Ypred = np.asarray(Ypred)
    return np.sum(Yval == Ypred) / len(Yval)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmc.endpoint import endpoint_data
class ModifyReplicationJobAttributeRequest(RpcRequest):
	"""Auto-generated RPC request wrapper for the SMC API
	``ModifyReplicationJobAttribute`` (version 2019-06-01, POST).

	Each get_*/set_* pair proxies a single query parameter of the same
	name; the two list-valued setters (SystemDiskParts, DataDisks) flatten
	nested dicts into the SDK's ``Name.N.Field`` query-parameter
	convention (N is 1-based).
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'smc', '2019-06-01', 'ModifyReplicationJobAttribute','smc')
		self.set_method('POST')
		# Endpoint-resolution data is attached only when the installed core
		# SDK version exposes these attributes.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_TargetType(self):
		return self.get_query_params().get('TargetType')
	def set_TargetType(self,TargetType):
		self.add_query_param('TargetType',TargetType)
	def get_Description(self):
		return self.get_query_params().get('Description')
	def set_Description(self,Description):
		self.add_query_param('Description',Description)
	def get_Frequency(self):
		return self.get_query_params().get('Frequency')
	def set_Frequency(self,Frequency):
		self.add_query_param('Frequency',Frequency)
	def get_JobId(self):
		return self.get_query_params().get('JobId')
	def set_JobId(self,JobId):
		self.add_query_param('JobId',JobId)
	def get_ImageName(self):
		return self.get_query_params().get('ImageName')
	def set_ImageName(self,ImageName):
		self.add_query_param('ImageName',ImageName)
	def get_SystemDiskSize(self):
		return self.get_query_params().get('SystemDiskSize')
	def set_SystemDiskSize(self,SystemDiskSize):
		self.add_query_param('SystemDiskSize',SystemDiskSize)
	def get_InstanceType(self):
		return self.get_query_params().get('InstanceType')
	def set_InstanceType(self,InstanceType):
		self.add_query_param('InstanceType',InstanceType)
	def get_ContainerRepository(self):
		return self.get_query_params().get('ContainerRepository')
	def set_ContainerRepository(self,ContainerRepository):
		self.add_query_param('ContainerRepository',ContainerRepository)
	def get_ContainerTag(self):
		return self.get_query_params().get('ContainerTag')
	def set_ContainerTag(self,ContainerTag):
		self.add_query_param('ContainerTag',ContainerTag)
	def get_ContainerNamespace(self):
		return self.get_query_params().get('ContainerNamespace')
	def set_ContainerNamespace(self,ContainerNamespace):
		self.add_query_param('ContainerNamespace',ContainerNamespace)
	def get_LaunchTemplateId(self):
		return self.get_query_params().get('LaunchTemplateId')
	def set_LaunchTemplateId(self,LaunchTemplateId):
		self.add_query_param('LaunchTemplateId',LaunchTemplateId)
	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')
	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
	def get_SystemDiskParts(self):
		return self.get_query_params().get('SystemDiskPart')
	def set_SystemDiskParts(self, SystemDiskParts):
		# Flatten each partition dict into SystemDiskPart.N.{SizeBytes,Block,Device}
		# query parameters; absent keys are simply skipped.
		for depth1 in range(len(SystemDiskParts)):
			if SystemDiskParts[depth1].get('SizeBytes') is not None:
				self.add_query_param('SystemDiskPart.' + str(depth1 + 1) + '.SizeBytes', SystemDiskParts[depth1].get('SizeBytes'))
			if SystemDiskParts[depth1].get('Block') is not None:
				self.add_query_param('SystemDiskPart.' + str(depth1 + 1) + '.Block', SystemDiskParts[depth1].get('Block'))
			if SystemDiskParts[depth1].get('Device') is not None:
				self.add_query_param('SystemDiskPart.' + str(depth1 + 1) + '.Device', SystemDiskParts[depth1].get('Device'))
	def get_ValidTime(self):
		return self.get_query_params().get('ValidTime')
	def set_ValidTime(self,ValidTime):
		self.add_query_param('ValidTime',ValidTime)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
	def get_DataDisks(self):
		return self.get_query_params().get('DataDisk')
	def set_DataDisks(self, DataDisks):
		# Flatten each disk dict into DataDisk.N.{Size,Index} plus nested
		# DataDisk.N.Part.M.{SizeBytes,Block,Device} query parameters.
		for depth1 in range(len(DataDisks)):
			if DataDisks[depth1].get('Size') is not None:
				self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Size', DataDisks[depth1].get('Size'))
			if DataDisks[depth1].get('Part') is not None:
				for depth2 in range(len(DataDisks[depth1].get('Part'))):
					if DataDisks[depth1].get('Part')[depth2].get('SizeBytes') is not None:
						self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Part.' + str(depth2 + 1) + '.SizeBytes', DataDisks[depth1].get('Part')[depth2].get('SizeBytes'))
					if DataDisks[depth1].get('Part')[depth2].get('Block') is not None:
						self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Part.' + str(depth2 + 1) + '.Block', DataDisks[depth1].get('Part')[depth2].get('Block'))
					if DataDisks[depth1].get('Part')[depth2].get('Device') is not None:
						self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Part.' + str(depth2 + 1) + '.Device', DataDisks[depth1].get('Part')[depth2].get('Device'))
			if DataDisks[depth1].get('Index') is not None:
				self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Index', DataDisks[depth1].get('Index'))
	def get_LaunchTemplateVersion(self):
		return self.get_query_params().get('LaunchTemplateVersion')
	def set_LaunchTemplateVersion(self,LaunchTemplateVersion):
		self.add_query_param('LaunchTemplateVersion',LaunchTemplateVersion)
	def get_ScheduledStartTime(self):
		return self.get_query_params().get('ScheduledStartTime')
	def set_ScheduledStartTime(self,ScheduledStartTime):
		self.add_query_param('ScheduledStartTime',ScheduledStartTime)
	def get_InstanceId(self):
		return self.get_query_params().get('InstanceId')
	def set_InstanceId(self,InstanceId):
		self.add_query_param('InstanceId',InstanceId)
	def get_InstanceRamRole(self):
		return self.get_query_params().get('InstanceRamRole')
	def set_InstanceRamRole(self,InstanceRamRole):
		self.add_query_param('InstanceRamRole',InstanceRamRole)
	def get_Name(self):
		return self.get_query_params().get('Name')
	def set_Name(self,Name):
		self.add_query_param('Name',Name)
	def get_MaxNumberOfImageToKeep(self):
		return self.get_query_params().get('MaxNumberOfImageToKeep')
	def set_MaxNumberOfImageToKeep(self,MaxNumberOfImageToKeep):
		self.add_query_param('MaxNumberOfImageToKeep',MaxNumberOfImageToKeep)
import os.path
import tornado.ioloop
from tornado.web import Application
import motor
from myredis import MyRedis
import uuid
from views import *
from tornado.options import define, options, parse_command_line
define("port", default=8888, help="run on the given port", type=int)
define("mongo_host", default="localhost", help="host for MongoDB", type=str)
define("mongo_port", default=27017, help="port MongoDB is running on", type=int)
# URL routing table.  Order matters: the catch-all SimpleHandler must stay last.
handlers=[
    ("/next_rally", NextRallyHandler),
    (r"/rallies/([0-9a-zA-Z]+)", RallyHandler),
    ("/schedule", ScheduleHandler),
    (r"/archive/([1-2][90][0-9]{2})", ArchiveHandler),
    ("/login", LoginHandler),
    ("/logout", LogoutHandler),
    ("/admin", AdminHandler),
    (r"/email/(subscribe|unsubscribe)", EmailSignupHandler),
    # BUG FIX: the original class was [0-9a-zA-z]; in ASCII `A-z` also
    # matches "[", "\", "]", "^", "_" and "`", so malformed verification
    # tokens were accepted by the router.
    (r"/email/verify/([0-9a-zA-Z]+)", EmailVerifyHandler),
    (r"/admin/(email)", EmailBlastHandler),
    (r"/([A-Za-z_]+).html", RedirectHandler),
    # ResourceHandler_v1
    # arg1: MongoDB collection
    # arg2: resource id (MongoDB _id/ObjectId)
    (r"/api/v1/([A-Za-z0-9_-]+)/([A-Za-z0-9_-]*)", ResourceHandler_v1),
    (r"/([A-Za-z0-9_-]*)", SimpleHandler)
]
settings=dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
    xsrf_cookies=False,  # NOTE(review): XSRF protection is disabled — confirm this is intentional
    cookie_secret=COOKIE_SECRET,
    db_secret=DB_SECRET,
    janrain_api_key=JANRAIN_API_KEY,
    login_url="/login",
    autoescape=None,
    db=motor.MotorClient(options.mongo_host, options.mongo_port).open_sync().BlueRidge,
    sessions=MyRedis(unix_socket_path='/tmp/redis.sock', db=0),
    cache=MyRedis(unix_socket_path='/tmp/redis.sock', db=1),
    genRandom=uuid.uuid4,
    debug=True
)
app = Application(handlers, **settings)
def main():
    """Parse command-line flags, bind the app to the configured port, and
    run the Tornado IOLoop until interrupted."""
    parse_command_line()
    app.listen(options.port)
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import db
from luxon import dbw
from luxon import GetLogger
from luxon.utils.mysql import retry
from luxon.utils.timezone import (calc_next_expire,
utc,
parse_datetime,
now)
from calabiyau.core.helpers.radius import (decode_packet,
get_user,
get_attributes)
from calabiyau.utils.radius import pod, coa
log = GetLogger(__name__)
@retry()
def do_acct(db, pkt, client, nas, nas_id, unique_id, dt, user, status):
    """Persist one RADIUS accounting packet into the session and
    per-day accounting tables.

    Returns a ``(input_octets, output_octets)`` tuple holding the *delta*
    usage attributed to this packet, or ``(0, 0)`` when the packet is a
    duplicate/stale update for an already-stopped or newer session.

    NOTE(review): despite its name, ``nas_id`` receives the packet's
    Acct-Session-Id from the caller acct() below — consider renaming.
    The pkt.get(...) values passed straight into the INSERT are whatever
    the decoded packet stores for those attributes; confirm they are
    scalars, since other call sites index them with [0].
    """
    user_id = user['id']
    username = user['username']
    # 64-bit octet counters; default to 0 when the NAS did not send them.
    input_octets = int(pkt.get('Acct-Input-Octets64', [0])[0])
    output_octets = int(pkt.get('Acct-Output-Octets64', [0])[0])
    with db.cursor() as crsr:
        #######################################
        # GET USAGE IN & OUT FOR USER SESSION #
        #######################################
        # FOR UPDATE row-locks the session so concurrent workers serialize
        # on the same acctuniqueid.
        crsr.execute("SELECT" +
                     " id," +
                     " acctstarttime," +
                     " acctinputoctets," +
                     " acctoutputoctets," +
                     " acctuniqueid," +
                     " acctstarttime," +
                     " acctupdated," +
                     " accttype" +
                     " FROM calabiyau_session" +
                     ' WHERE acctuniqueid = %s' +
                     ' LIMIT 1' +
                     ' FOR UPDATE',
                     (unique_id,))
        session = crsr.fetchone()
        # Out-of-order or duplicate packet: the stored session is already
        # newer than this packet, or the session has already stopped.
        if session and (utc(session['acctupdated']) >= dt or
                        session['accttype'] == 'stop'):
            crsr.commit()
            return (0, 0,)
        #############################################
        # CHECK IF ACCOUNTING FOR TODAY FOR USER_ID #
        #############################################
        # The result set is not read; this SELECT ... FOR UPDATE exists to
        # lock today's accounting row before the upsert further down.
        crsr.execute("SELECT" +
                     " id" +
                     " FROM calabiyau_accounting" +
                     " WHERE user_id = %s" +
                     " AND date(today) = date(now())" +
                     " FOR UPDATE",
                     (user_id,))
        if (status == 'interim-update' or
                status == 'start' or status == 'stop'):
            ######################################################
            # CREATE/UPDATE SESSION WITH INPUT AND OUTPUT OCTETS #
            ######################################################
            crsr.execute("INSERT INTO calabiyau_session" +
                         " (id," +
                         " user_id," +
                         " username," +
                         " acctsessionid," +
                         " acctuniqueid," +
                         " nasipaddress," +
                         " nasportid," +
                         " nasport," +
                         " nasporttype," +
                         " calledstationid," +
                         " callingstationid," +
                         " servicetype," +
                         " framedprotocol," +
                         " framedipaddress," +
                         " acctinputoctets," +
                         " acctoutputoctets," +
                         " acctstarttime," +
                         " acctupdated," +
                         " processed," +
                         " accttype)" +
                         " VALUES" +
                         " (uuid(), %s, %s, %s, %s, INET6_ATON(%s), %s, %s," +
                         " %s, %s, %s, %s, %s, INET6_ATON(%s), %s, %s," +
                         " %s, %s, now(), %s)" +
                         " ON DUPLICATE KEY UPDATE" +
                         " acctsessionid = %s," +
                         " nasipaddress = INET6_ATON(%s)," +
                         " nasportid = %s," +
                         " nasport = %s," +
                         " nasporttype = %s," +
                         " calledstationid = %s," +
                         " callingstationid = %s," +
                         " servicetype = %s," +
                         " framedprotocol = %s," +
                         " framedipaddress = INET6_ATON(%s)," +
                         " acctinputoctets = %s," +
                         " acctoutputoctets = %s," +
                         " acctupdated = %s," +
                         " processed = now()," +
                         " accttype = %s",
                         (user_id,
                          username,
                          nas_id,
                          unique_id,
                          nas,
                          pkt.get('NAS-Port-ID'),
                          pkt.get('NAS-Port'),
                          pkt.get('NAS-Port-Type'),
                          pkt.get('Called-Station-Id'),
                          pkt.get('Calling-Station-Id'),
                          pkt.get('Service-Type'),
                          pkt.get('Framed-Protocol'),
                          pkt.get('Framed-IP-Address'),
                          input_octets,
                          output_octets,
                          dt,
                          dt,
                          status,
                          nas_id,
                          nas,
                          pkt.get('NAS-Port-ID'),
                          pkt.get('NAS-Port'),
                          pkt.get('NAS-Port-Type'),
                          pkt.get('Called-Station-Id'),
                          pkt.get('Calling-Station-Id'),
                          pkt.get('Service-Type'),
                          pkt.get('Framed-Protocol'),
                          pkt.get('Framed-IP-Address'),
                          input_octets,
                          output_octets,
                          dt,
                          status,))
            ####################################
            # RECORD USAGE IN ACCOUNTING TABLE #
            ####################################
            if not session:
                # IF NEW SESSION
                prev_acctinputoctets = 0
                prev_acctoutputoctets = 0
            else:
                # IF EXISTING SESSION
                # USE PREVIOUS VALUES TO DETERMINE NEW VALUES FOR TODAY
                prev_acctinputoctets = session['acctinputoctets']
                prev_acctoutputoctets = session['acctoutputoctets']
            # Counters are cumulative per session; only attribute the delta
            # when both counters moved forward (a reset/rollover would make
            # them smaller, in which case no delta is credited here).
            if (input_octets >= prev_acctinputoctets and
                    output_octets >= prev_acctoutputoctets):
                input_octets = (input_octets -
                                prev_acctinputoctets)
                output_octets = (output_octets -
                                 prev_acctoutputoctets)
                # INSERT/UPDATE ACCOUNTING RECORD
                crsr.execute('INSERT INTO calabiyau_accounting' +
                             ' (id, user_id, today, acctinputoctets,' +
                             ' acctoutputoctets)' +
                             ' VALUES' +
                             ' (uuid(), %s, curdate(), %s, %s)' +
                             ' ON DUPLICATE KEY UPDATE' +
                             ' acctinputoctets = acctinputoctets + %s,' +
                             ' acctoutputoctets = acctoutputoctets + %s',
                             (user_id,
                              input_octets,
                              output_octets,
                              input_octets,
                              output_octets,))
        crsr.commit()
    return (input_octets, output_octets,)
def applyctx(crsr, user, ctx, pkt, secret, status):
    """Push the subscriber context onto the NAS and record it on the session.

    Sends a CoA with the context's attributes when any are configured,
    otherwise a Packet-of-Disconnect, then stores ``ctx`` on the session
    row.  Only 'start' and 'interim-update' packets are acted on.
    """
    if status not in ('interim-update', 'start'):
        return
    nas_session_id = pkt.get('Acct-Session-Id')[0]
    try:
        nas_ip_address = pkt['NAS-IP-Address'][0]
    except KeyError:
        # Without the NAS address we cannot send CoA/POD at all.
        return
    unique_session_id = pkt['Acct-Unique-Session-Id'][0]
    ctx_name = ('activate-coa', 'deactivate-coa')[ctx]
    attributes = get_attributes(crsr, user, ctx_name)
    if attributes:
        coa(nas_ip_address, secret,
            user['username'], nas_session_id,
            attributes)
    else:
        # IF NO CONTEXT ATTRIBUTES SEND POD
        pod(nas_ip_address, secret,
            user['username'], nas_session_id)
    crsr.execute('UPDATE calabiyau_session' +
                 ' SET ctx = %s' +
                 ' WHERE acctuniqueid = %s',
                 (ctx, unique_session_id,))
@retry()
def usage(db, pkt, client, nas, nas_id, unique_id, user,
          input_octets=0, output_octets=0, status="start"):
    """Apply this packet's usage delta against the subscriber's package
    and top-up quotas, and (de)activate the session context accordingly.

    ``input_octets``/``output_octets`` are the deltas computed by
    do_acct().  ctx 0 = active/allowed, ctx 1 = deactivated.

    NOTE(review): mutates calabiyau_subscriber and calabiyau_topup under
    FOR UPDATE row locks; statement order matters — do not reorder.
    """
    # Return Values
    # 0 All good.
    # 1 Deactivate Subscriber
    user_id = user['id']
    nas_secret = user['nas_secret']
    # Combined input/output usage for session
    combined = input_octets + output_octets
    utc_datetime = now()
    with db.cursor() as crsr:
        ####################
        # GET USER SESSION #
        ####################
        crsr.execute("SELECT" +
                     " id," +
                     " ctx" +
                     " FROM calabiyau_session" +
                     ' WHERE acctuniqueid = %s' +
                     ' LIMIT 1' +
                     ' FOR UPDATE',
                     (unique_id,))
        session = crsr.fetchone()
        if not session:
            # Nothing to meter without a stored session row.
            return
        session_ctx = session['ctx']
        # Whole-package (time-span) expiry check: a lapsed package always
        # deactivates, regardless of remaining data.
        if user['package_span'] and user['package_span'] > 0:
            if (utc(user['package_expire']) and
                    utc_datetime > utc(user['package_expire'])):
                if session_ctx != 1:
                    applyctx(crsr, user, 1, pkt, nas_secret, status)
                crsr.commit()
                return 1
        # Re-read the subscriber FOR UPDATE so quota updates serialize.
        crsr.execute('SELECT * FROM calabiyau_subscriber' +
                     ' WHERE id = %s' +
                     ' FOR UPDATE',
                     (user_id,))
        locked_user = crsr.fetchone()
        if user and locked_user:
            # IF DATA PLAN NOT UNCAPPED
            if user['plan'] == 'data':
                ######################
                # CHECK PACKAGE DATA #
                ######################
                volume_used_bytes = locked_user['volume_used_bytes'] + combined
                pkg_volume_used = locked_user['volume_used']
                if user['volume_gb']:
                    package_volume_bytes = (user['volume_gb'] *
                                            1024 * 1024 * 1024)
                else:
                    package_volume_bytes = 0
                # Package volume window expired: either reload it
                # (volume_repeat) or mark the package as used up.
                if utc(locked_user['volume_expire']) < utc_datetime:
                    if user['volume_repeat']:
                        log.info('Package data reloaded (%s)'
                                 % user['username'])
                        new_expire = calc_next_expire(user['volume_metric'],
                                                      user['volume_span'],
                                                      utc_datetime)
                        crsr.execute("UPDATE calabiyau_subscriber" +
                                     " SET volume_expire = %s," +
                                     " volume_used_bytes = 0," +
                                     " volume_used = 0," +
                                     " ctx = 0" +
                                     " WHERE id = %s",
                                     (new_expire, user['id'],))
                        pkg_volume_used = 0
                        if session_ctx != 0:
                            applyctx(crsr, user, 0, pkt, nas_secret, status)
                        crsr.commit()
                        return 0
                    else:
                        crsr.execute("UPDATE calabiyau_subscriber" +
                                     " SET volume_used_bytes = 0," +
                                     " volume_used = 1," +
                                     " ctx = 1" +
                                     " WHERE id = %s",
                                     (user['id'],))
                        pkg_volume_used = 1
                        log.info('Package data expired (%s)'
                                 % user['username'])
                # Package quota just crossed: flag it used (falls through
                # to the top-up checks below).
                if (not pkg_volume_used and
                        volume_used_bytes > package_volume_bytes):
                    crsr.execute("UPDATE calabiyau_subscriber" +
                                 " SET volume_used_bytes = 0," +
                                 " volume_used = 1," +
                                 " ctx = 1" +
                                 " WHERE id = %s",
                                 (user_id,))
                    log.info('Package data depleted (%s)'
                             % user['username'])
                elif (not pkg_volume_used and
                        volume_used_bytes <= package_volume_bytes):
                    # Still within the package quota: accumulate usage.
                    crsr.execute("UPDATE calabiyau_subscriber" +
                                 " SET volume_used_bytes = " +
                                 " volume_used_bytes + %s," +
                                 " ctx = 0" +
                                 " WHERE id = %s",
                                 (combined, user_id,))
                    if session_ctx != 0:
                        applyctx(crsr, user, 0, pkt, nas_secret, status)
                    crsr.commit()
                    return 0
                ####################
                # CHECK TOPUP DATA #
                ####################
                # Top-ups are consumed oldest-first; expired one-shot
                # top-ups are deleted as they are encountered.
                crsr.execute('SELECT * FROM calabiyau_topup' +
                             ' WHERE user_id = %s' +
                             ' ORDER BY creation_time asc' +
                             ' FOR UPDATE',
                             (user_id,))
                topups = crsr.fetchall()
                for topup in topups:
                    if topup['volume_gb']:
                        topup_volume_bytes = (topup['volume_gb'] * 1024 *
                                              1024 * 1024)
                    else:
                        topup_volume_bytes = 0
                    if utc(topup['volume_expire']) < utc_datetime:
                        if topup['volume_repeat']:
                            log.auth('Topup renew (%s, %s Gb, %s)' %
                                     (user['username'],
                                      topup['volume_gb'],
                                      topup['creation_time'],))
                            new_expire = calc_next_expire(
                                topup['volume_metric'],
                                topup['volume_span'],
                                utc_datetime)
                            crsr.execute("UPDATE calabiyau_topup" +
                                         " SET volume_expire = %s" +
                                         " WHERE id = %s",
                                         (new_expire, topup['id'],))
                            crsr.execute("UPDATE calabiyau_subscriber" +
                                         " SET volume_used_bytes = 0," +
                                         " ctx = 0" +
                                         " WHERE id = %s",
                                         (user_id,))
                            if session_ctx != 0:
                                applyctx(crsr, user, 0, pkt,
                                         nas_secret, status)
                            crsr.commit()
                            return 0
                        else:
                            log.auth('Topup expired (%s, %s Gb, %s)' %
                                     (user['username'],
                                      topup['volume_gb'],
                                      topup['creation_time'],))
                            crsr.execute("UPDATE calabiyau_subscriber" +
                                         " SET volume_used_bytes = 0," +
                                         " ctx = 0" +
                                         " WHERE id = %s",
                                         (user_id,))
                            crsr.execute('DELETE FROM' +
                                         ' calabiyau_topup' +
                                         ' WHERE id = %s',
                                         (topup['id'],))
                    else:
                        if volume_used_bytes < topup_volume_bytes:
                            crsr.execute("UPDATE calabiyau_subscriber" +
                                         " SET volume_used_bytes = " +
                                         " volume_used_bytes + %s," +
                                         " ctx = 0" +
                                         " WHERE id = %s",
                                         (combined, user_id,))
                            if session_ctx != 0:
                                applyctx(crsr, user, 0, pkt,
                                         nas_secret, status)
                            crsr.commit()
                            return 0
                        else:
                            log.auth('Topup depleted (%s, %s Gb, %s)' %
                                     (user['username'],
                                      topup['volume_gb'],
                                      topup['creation_time'],))
                            crsr.execute("UPDATE calabiyau_subscriber" +
                                         " SET volume_used_bytes = 0," +
                                         " ctx = 0" +
                                         " WHERE id = %s",
                                         (user_id,))
                            crsr.execute('DELETE FROM' +
                                         ' calabiyau_topup' +
                                         ' WHERE id = %s',
                                         (topup['id'],))
                # Package and all top-ups exhausted: deactivate.
                if session_ctx != 1:
                    applyctx(crsr, user, 1, pkt, nas_secret, status)
                crsr.commit()
                return 1
            else:
                # Uncapped (non-data) plan: always allowed.
                if session_ctx != 0:
                    applyctx(crsr, user, 0, pkt, nas_secret, status)
                crsr.commit()
                return 0
        # Subscriber row vanished under us: deactivate defensively.
        if session_ctx != 1:
            applyctx(crsr, user, 1, pkt, nas_secret, status)
        crsr.commit()
        return 1
def acct(msg):
    """Process one queued RADIUS accounting message end to end.

    Decodes the packet, looks up the subscriber, records the accounting
    data (do_acct) and applies quota/context logic (usage).

    Returns True when the message is fully handled (or unusable but
    acknowledged), False when the user is unknown, and None when the
    message has no decodable attributes.
    """
    try:
        pkt = msg['attributes']
    except KeyError:
        return
    try:
        # NOTE(review): broad catch — any undecodable packet is silently
        # dropped here with no logging.
        pkt = decode_packet(pkt)
    except Exception:
        return
    try:
        nas_session_id = pkt.get('Acct-Session-Id', [None])[0]
        unique_session_id = pkt.get('Acct-Unique-Session-Id')[0]
        status = pkt.get('Acct-Status-Type', [''])[0].lower()
        username = pkt.get('User-Name', [None])[0]
        client = pkt.get('Client-IP-Address')[0]
        nas = pkt.get('NAS-IP-Address', ['0.0.0.0'])[0]
    except IndexError:
        # NOTE(review): if 'Acct-Unique-Session-Id' or 'Client-IP-Address'
        # is absent, pkt.get(...) returns None and None[0] raises
        # TypeError, not the IndexError caught here — confirm those
        # attributes are guaranteed by the decoder upstream.
        return True
    dt = utc(parse_datetime(msg.get('datetime', None)))
    diff = (now()-dt).total_seconds()
    if diff > 60:
        # Stale message — still processed, but flagged loudly.
        log.error('Processing radius accounting message older' +
                  ' than 60 seconds. Age(%s)' % diff)
    with db() as conn:
        with dbw() as connw:
            with conn.cursor() as crsr:
                user = get_user(crsr,
                                client,
                                nas,
                                username)
                crsr.commit()
                if not user:
                    log.debug("user '%s' not found"
                              % username)
                    return False
                input_octets, output_octets = do_acct(connw,
                                                      pkt,
                                                      client,
                                                      nas,
                                                      nas_session_id,
                                                      unique_session_id,
                                                      dt,
                                                      user,
                                                      status)
                usage(connw,
                      pkt,
                      client,
                      nas,
                      nas_session_id,
                      unique_session_id,
                      user,
                      input_octets,
                      output_octets,
                      status)
                return True
|
# Names re-exported via `from package import *`.
__all__ = ['c7']

import sys
import datetime
import io

# Module-initialisation marker printed on first import.
a = 'This is __init__.py file'
print(a)
|
# -*- coding: future_fstrings -*-
# Copyright 2018 Brandon Shelley. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Language detection for subtitle files.
This module is used to determine the language of subtitle files based on
name or content, by loading a ISO-639-1 language.json map file.
Subtitle: the main class exported by this module.
Sample subtitle filenames:
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.bulgarian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.croatian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.czech.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.danish.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.dutch.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.english-forced.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.english-sdh.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.english.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.estonian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.finnish.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.french.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.german.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.hungarian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.icelandic.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.italian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.latvian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.lithuanian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.norwegian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.portuguese-br.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.portuguese-pt.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.russian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.serbian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.slovenian.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.spanish-cas.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.spanish-lat.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.swedish.srt
The.Planet.Beyond.2010.1080p.BluRay.x264-Group.ukrainian.srt
"""
from __future__ import unicode_literals, print_function
from builtins import *
import re
import os
from fylmlib.languages import languages
class Subtitle:
    """A subtitle object that contains information about its language.

    Attributes:
        path: Subtitle path.
        code: 2-character ISO-639-1 code of the detected language, or None.
        language: Full primary name of the detected language, or None.
        captured: Language string captured from the original filename
            (first letter upper-cased), e.g. 'English' or 'En', or None.
    """

    def __init__(self, path):
        # Path to original subtitle file.
        self.path = path

        # The 2-character language identifier code of the subtitle.
        self.code = None

        # The full-length language of the subtitle.
        self.language = None

        # The language string captured from the original filename, e.g. 'english' or 'en'.
        self.captured = None

        # Loop through known languages to determine whether the path contains
        # a descriptive language string, e.g. 'english', 'dutch', or 'fr'.
        for lang in languages:
            patterns = []

            # Compile case-insensitive patterns matching each language name
            # and the language code: a dot, the name/code, and an optional
            # '-modifier' suffix (e.g. '.english-sdh', '.portuguese-br').
            for n in list(filter(None, lang.names)):
                patterns.append(re.compile(r'\.(?P<lang>' + re.escape(n).lower() + r'(?:-\w+)?\b)', re.I))
            patterns.append(re.compile(r'\.(?P<lang>' + re.escape(lang.code) + r'(?:-\w+)?\b)', re.I))

            # Check each pattern against the path until one matches.
            for p in patterns:
                match = re.search(p, path)
                if match is not None and match.group('lang') is not None:
                    # Save the captured string with its first letter upper-cased.
                    self.captured = match.group('lang')[:1].upper() + match.group('lang')[1:]
                    # Record the matching language's code and primary name.
                    self.code = lang.code
                    self.language = lang.primary_name
                    break

            # Stop scanning further languages once a capture is made.
            if self.captured is not None:
                break

    def insert_lang(self, path):
        """Returns a new path that includes the captured language string.

        Args:
            path: (str, utf-8) Path to file to append language.
        Returns:
            A new path with the subtitle language inserted between the
            filename and its extension, or None if no language was captured.
        """
        filename, ext = os.path.splitext(path)
        # Bug fix: the previous version discarded the computed filename and
        # returned a literal '(unknown)' prefix; insert the captured language
        # into the actual path, as the docstring promises.
        return f'{filename}.{self.captured}{ext}' if self.captured else None
# Grouped bar chart of developer salaries by age: Python and JavaScript
# medians as offset bars per age tick, with the all-developer median
# overlaid as a line.
from matplotlib import pyplot as plt
import numpy as np

plt.style.use("fivethirtyeight")
print(plt.style.available)

# Ages 18 through 35 inclusive; one bar group per age.
ages = list(range(18, 36))
positions = np.arange(len(ages))
bar_width = 0.2
print(positions)

# Median Python-developer salaries (USD), one per age.
python_salaries = [20046, 17100, 20000, 24744, 30500, 37732, 41247, 45372,
                   48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496,
                   75370, 83640]
plt.bar(positions - bar_width, python_salaries, width=bar_width,
        label="Python", color="yellow")

# Median JavaScript-developer salaries (USD), one per age.
javascript_salaries = [16446, 16791, 18942, 21780, 25704, 29000, 34372,
                       37810, 43515, 46823, 49293, 53437, 56373, 62375,
                       66674, 68745, 68746, 74583]
plt.bar(positions, javascript_salaries, width=bar_width,
        label="Javascript", color="black")

# Median salaries (USD) across all developers, drawn as a line.
all_salaries = [17784, 16500, 18012, 20628, 25206, 30252, 34368, 38496,
                42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317,
                68748, 73752]
plt.plot(positions, all_salaries, label="All", color="blue")

plt.legend()
plt.tight_layout()
plt.grid(color="red")
plt.xticks(labels=ages, ticks=positions)
plt.show()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.