index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
11,800 | 1ed36610592ce4ea2d4b797da8a87fac8419a6d8 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright 2015 LeTV Inc. All Rights Reserved.
__author__ = 'guoxiaohe@letv.com'
"""
using for content desktop spider
"""
import traceback
import re
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.spider import Spider
from scrapy import log
from le_crawler.core.items import CrawlerItem
from le_crawler.base.start_url_loads import StartUrlsLoader
from le_crawler.core.links_extractor import LinksExtractor
from le_crawler.base.html_utils import remove_tags, clear_tags
from le_crawler.base.url_normalize import UrlNormalize
class YoukuStarSpider(Spider):
    """Scrapy spider for Youku star (celebrity) profile pages.

    Seed URLs are loaded from ``../start_urls/``; each crawled page yields a
    CrawlerItem with the star's metadata plus follow-up Requests for every
    extracted link.  Python 2 / legacy scrapy API (``scrapy.spider.Spider``).
    """
    name = 'youku_star_spider'
    # Class-level singletons shared by every instance of this spider.
    start_url_loader = StartUrlsLoader.get_instance('../start_urls/')
    start_urls = start_url_loader.get_start_urls()
    url_normalize = UrlNormalize.get_instance()

    def __init__(self, *a, **kw):
        super(YoukuStarSpider, self).__init__(*a, **kw)
        self.finished_count = 0  # pages processed so far (for progress prints)
        self.start_size = len(YoukuStarSpider.start_urls)
        self.collect_nums = 0
        # Link extractor configured from the shared page-info settings module.
        self.new_links_extract = \
            LinksExtractor('le_crawler.common.page_info_settings',
                           start_url_loader = YoukuStarSpider.start_url_loader)
        self.share_cache = {}

    def parse(self, response):
        """Default callback: parse one page into an item and follow its links.

        Yields at most one CrawlerItem (when custom extraction succeeds) and
        one Request per extracted block link.  All errors are swallowed and
        logged to stdout so a single bad page does not kill the crawl.
        """
        try:
            url = response.url.strip()
            page = response.body.decode(response.encoding)
            self.finished_count += 1
            # first jugy json parser
            size = 0
            status = True
            refer_url = response.request.headers.get('Referer', None)
            # Block-level link extraction; on failure we abort this page.
            status, links_map = self.new_links_extract.extract_block_links(url,
                body = page, bd_type = LinksExtractor.HTML_EXTRA)
            if status:
                size = len(links_map)
                print 'Ok:(%5d/%d)Finished Extend: %s, %d' % (self.finished_count,
                    self.start_size, url, size)
            else:
                print 'Failed:(%5d/%d)Finished Extend: %s, %d' % (self.finished_count,
                    self.start_size, url, size)
                return
            # Custom (per-site) field extraction feeding the item parser.
            sta, links = self.new_links_extract.extract_custom_links(url, page,
                LinksExtractor.HTML_EXTRA)
            if sta:
                item = self._youku_star_blk_parse(links.extend_map)
                if item:
                    item['url'] = YoukuStarSpider.url_normalize.get_unique_url(url)
                    yield item
            else:
                self.log('Failed extract custom value', log.ERROR)
            # Follow every extracted link, propagating the referer.
            for i in links_map:
                yield Request(i.url, headers={'Referer': '%s' % (refer_url or url)},
                    callback = self.parse)
        except Exception, e:
            print 'spider try catch error:', e
            print traceback.format_exc()
            return

    def _extract_value(self, selec, path):
        """Return the first xpath hit with tabs/newlines stripped, else None."""
        exs = selec.xpath(path)
        if exs:
            exts = exs.extract()
            if exts:
                return exts[0].replace('\t', '').replace('\n', '')
        return None

    def _process_figurebase(self, html):
        """Parse the 'figurebase' HTML fragment into a {label: value} dict."""
        ret = {}
        from scrapy.selector import Selector
        if not html:
            return ret
        sel_html = Selector(text = html, type = 'html')
        for i in sel_html.xpath('//li'):
            keyl = i.xpath('./label/text()').extract()
            # Prefer the span's @title (full text) over its visible text.
            valuel = i.xpath('.//span/@title').extract() or i.xpath('.//span/text()').extract()
            if keyl and valuel:
                ret[keyl[0].replace(':', '')] = valuel[0]
        return ret

    def _youku_star_blk_parse(self, src_obj):
        """Build a CrawlerItem from the extracted field map `src_obj`.

        Each recognised key (figurebase, name, excellent, honor, introduction,
        productions, cover) is parsed into item['extend_map'] sub-structures.
        Returns None when `src_obj` is empty.
        """
        item = CrawlerItem()
        if not src_obj:
            return None
        if 'figurebase' in src_obj:
            base_info = self._process_figurebase(src_obj['figurebase'])
            if base_info: item.setdefault('extend_map', {})['base_info'] = base_info
        if 'name' in src_obj:
            item['title'] = src_obj['name']
        if 'excellent' in src_obj:
            # List of featured works (titles only).
            exe_sel = Selector(text = src_obj['excellent'], type = 'html')
            exe_list = []
            for i in exe_sel.xpath('//li[@class="p_title"]/a/text()').extract():
                exe_list.append(i)
            if exe_list: item.setdefault('extend_map', {})['excellent'] = exe_list
        if 'honor' in src_obj:
            # Awards: one dict per <li> with year/name/title/product when present.
            hor_sel = Selector(text = src_obj['honor'], type = 'html')
            hor_list = []
            for i in hor_sel.xpath('//li'):
                hl = {}
                tmps = self._extract_value(i, './span[@class="data"]/text()')
                if tmps: hl['year'] = tmps
                tmps = self._extract_value(i, './a[1]/text()')
                if tmps: hl['name'] = tmps
                tmps = self._extract_value(i, './span[2]/text()')
                if tmps: hl['title'] = tmps
                tmps = self._extract_value(i, './a[2]/text()')
                if tmps: hl['product'] = tmps
                if hl: hor_list.append(hl)
            if hor_list: item.setdefault('extend_map', {})['honor'] = hor_list
        if 'introduction' in src_obj:
            item.setdefault('extend_map', {})['introduction'] = clear_tags([''],
                src_obj['introduction']).replace('\t', '').replace('\n', '').replace('...', '')
        if 'productions' in src_obj:
            # Filmography table rows carrying a @lastyear attribute.
            pr_sel = Selector(text = src_obj['productions'], type = 'html')
            prl = []
            for i in pr_sel.xpath('//tbody/tr[@lastyear]'):
                prd_inf = {}
                tmps = self._extract_value(i, './td[@class="action"]//a/@href')
                if tmps: prd_inf['play_url'] = YoukuStarSpider.url_normalize.get_unique_url(tmps)
                tmps = self._extract_value(i, './@lastyear')
                if tmps: prd_inf['year'] = tmps
                tmps = self._extract_value(i, './td[@class="type"]/text()')
                if tmps: prd_inf['type'] = tmps
                tmps = clear_tags(['span', 'a', 'td'],
                    self._extract_value(i, './td[@class="title"]'))
                if tmps: prd_inf['title'] = tmps
                tmps = self._extract_value(i, './td[@class="role"]/text()')
                if tmps: prd_inf['role'] = tmps
                if prd_inf: prl.append(prd_inf)
            if prl: item.setdefault('extend_map', {})['productions'] = prl
        if 'cover' in src_obj:
            item.setdefault('extend_map', {})['cover'] = src_obj['cover']
        if item: return item
        return None
|
11,801 | 770bbdbd0a29ccea60efce362b7229e09dc4f437 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'openProjectWindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated UI for the 'open project' dialog.

    Layout: a project list widget above an Open/Cancel button row.
    NOTE: generated code (see file header) — regenerate from the .ui file
    instead of editing by hand.
    """
    def setupUi(self, Dialog):
        # Build the widget tree and layouts onto the given Dialog.
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 510)
        self.gridLayout = QtWidgets.QGridLayout(Dialog)
        self.gridLayout.setObjectName("gridLayout")
        self.openProjectListWidget = QtWidgets.QListWidget(Dialog)
        self.openProjectListWidget.setObjectName("openProjectListWidget")
        self.gridLayout.addWidget(self.openProjectListWidget, 0, 0, 1, 1)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Spacer pushes the buttons to the right edge of the row.
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.openPushButton = QtWidgets.QPushButton(Dialog)
        self.openPushButton.setObjectName("openPushButton")
        self.horizontalLayout.addWidget(self.openPushButton)
        self.cancelPushButton = QtWidgets.QPushButton(Dialog)
        self.cancelPushButton.setObjectName("cancelPushButton")
        self.horizontalLayout.addWidget(self.cancelPushButton)
        self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Install translatable (Russian) captions.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Мои проекты"))
        self.openPushButton.setText(_translate("Dialog", "Открыть"))
        self.cancelPushButton.setText(_translate("Dialog", "Отмена"))
|
11,802 | 61a486eb3b0856c72b03d6f34b6be4fc7b27c63e | #encoding:utf-8
from django.forms import ModelForm
from django import forms
from principal.models import Arbitro, Jugador, Pareja, Partido, Pista
from django.contrib.auth.models import User
class JugadorForm(ModelForm):
    """ModelForm exposing every field of the Jugador model."""
    class Meta:
        model = Jugador
        # Explicit opt-in to all fields: a ModelForm with neither `fields`
        # nor `exclude` raises ImproperlyConfigured since Django 1.8.
        fields = '__all__'
class ParejaForm(ModelForm):
    """ModelForm exposing every field of the Pareja model."""
    class Meta:
        model = Pareja
        # Required since Django 1.8 (no implicit "all fields" any more).
        fields = '__all__'
class PartidoForm(ModelForm):
    """ModelForm exposing every field of the Partido model."""
    class Meta:
        model = Partido
        # Required since Django 1.8 (no implicit "all fields" any more).
        fields = '__all__'
class ArbitroForm(ModelForm):
    """ModelForm exposing every field of the Arbitro model."""
    class Meta:
        model = Arbitro
        # Required since Django 1.8 (no implicit "all fields" any more).
        fields = '__all__'
class PistaForm(ModelForm):
    """ModelForm exposing every field of the Pista model."""
    class Meta:
        model = Pista
        # Required since Django 1.8 (no implicit "all fields" any more).
        fields = '__all__'
class UserForm(forms.ModelForm):
    """Registration/profile form for Django's built-in User model."""
    class Meta:
        model = User
        # Deliberately limited to public profile fields (no password here).
        fields = ('username', 'first_name', 'last_name', 'email')
|
11,803 | 81b300ddd5f55a754a8b88a6ecdda92b8accb51c | from Author import Author
from Blog import Blog
from Post import Post
from Tag import Tag
from Comment import Comment |
11,804 | 0766361b1ccad03d58c41e003b72561e1d574fee | # Generated by Django 3.1.5 on 2021-01-24 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine topic.learning / learnt / queued
    as BooleanFields defaulting to False."""

    dependencies = [
        ('api', '0005_auto_20210124_1034'),
    ]

    operations = [
        migrations.AlterField(
            model_name='topic',
            name='learning',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='topic',
            name='learnt',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='topic',
            name='queued',
            field=models.BooleanField(default=False),
        ),
    ]
|
11,805 | 3b321e3703baa0853289b0b6a31f06555fd28e72 | # 14500.py 테트로미노
def back(x, y, k, total):
    """Backtracking search for BOJ 14500 (tetromino max sum).

    (x, y): current cell; k: cells placed so far; total: sum collected.
    Uses module-level globals `board`, `visit` (mutated in place), `N`, `M`,
    and the best-so-far accumulator `MAX`.

    Plain path-extension only generates I/L/S/Z/O shapes; the T (ㅗ) shape
    is covered by the k == 2 special case, which branches over subsets of
    the current cell's free neighbours (a 2-subset places both remaining
    cells around one pivot).
    """
    if k == 4:
        # A full tetromino is placed — record its sum.
        global MAX
        MAX = max(MAX, total)
        return
    # ㅗ 모양 (T-shape handling via neighbour subsets)
    if k == 2:
        tmp = []
        # Collect all in-bounds, unvisited neighbours of the pivot cell.
        for dx, dy in (-1, 0), (1, 0), (0, -1), (0, 1):
            nx, ny = x + dx, y + dy
            if -1 < nx < N and -1 < ny < M and not visit[nx][ny]:
                tmp.append((nx, ny))
        if tmp:
            # Enumerate every subset of the free neighbours via a bitmask.
            for i in range(1 << len(tmp)):
                setLi = []
                for j in range(len(tmp)):
                    if i & 1 << j:
                        setLi.append(tmp[j])
                if len(setLi) == 1:
                    # Size-1 subset: ordinary one-cell extension.
                    nx, ny = setLi.pop()
                    visit[nx][ny] = True
                    back(nx, ny, k + 1, total + board[nx][ny])
                    visit[nx][ny] = False
                elif len(setLi) == 2:
                    # Size-2 subset: place both cells at once (forms T-like shapes).
                    nx1, ny1 = setLi.pop()
                    nx2, ny2 = setLi.pop()
                    visit[nx1][ny1] = True
                    visit[nx2][ny2] = True
                    back(nx1, ny1, k + 2, total + board[nx1][ny1] + board[nx2][ny2])
                    back(nx2, ny2, k + 2, total + board[nx1][ny1] + board[nx2][ny2])
                    visit[nx1][ny1] = False
                    visit[nx2][ny2] = False
    else:
        # k in {0, 1, 3}: extend the path by one adjacent free cell.
        for dx, dy in (-1, 0), (1, 0), (0, -1), (0, 1):
            nx, ny = x + dx, y + dy
            if -1 < nx < N and -1 < ny < M and not visit[nx][ny]:
                visit[nx][ny] = True
                back(nx, ny, k + 1, total + board[nx][ny])
                visit[nx][ny] = False
# Read the N x M board from stdin, then try every cell as a starting pivot.
N, M = map(int, input().split())
board = [list(map(int, input().split())) for _ in range(N)]
visit = [[False] * M for _ in range(N)]
MAX = 0  # best tetromino sum found so far (updated inside back())
for i in range(N):
    for j in range(M):
        back(i, j, 0, 0)
print(MAX)
11,806 | 5a854b745f9a32e83486c547159092e6a53073ca | # -*- coding: utf-8 -*-
def classificar_paridade(numero):
    """Return "PAR" if `numero` is even, "IMPAR" otherwise.

    Bug fix: the original halved the input (`numero * 0.5`) and then tested
    the remainders of that half against 2, 4, 6 and 8, printing "PAR"
    whenever any remainder was non-zero — which mislabels odd numbers
    (e.g. 3 -> half 1.5 -> 1.5 % 2 is truthy -> "PAR").  Parity is simply
    `numero % 2`.
    """
    return "PAR" if numero % 2 == 0 else "IMPAR"


if __name__ == "__main__":
    numero = int(input("digite o valor de numero="))
    print(classificar_paridade(numero))
|
11,807 | cf497f5c8c497bde159b23d7ac132c1877be1d8f | """
This short program applies the boundary recoverer operation to check
the boundary values under some analytic forms.
"""
from gusto import *
from firedrake import (as_vector, PeriodicRectangleMesh, SpatialCoordinate,
ExtrudedMesh, FunctionSpace, Function, errornorm,
VectorFunctionSpace, interval, TensorProductElement,
FiniteElement, HDiv, norm, BrokenElement)
import numpy as np
def setup_3d_recovery(dirname):
    """Recover lowest-order fields on a 3x3x3 periodic extruded mesh and
    compare against linear-in-z analytic fields.

    Returns the relative L2 errors (rho, theta, v, rho->Vt).
    NOTE(review): `dirname` is unused here — kept for call-site symmetry.
    """
    # Domain extents and a 3-cell resolution in each direction.
    L = 3.
    H = 3.
    W = 3.
    deltax = L / 3.
    deltay = W / 3.
    deltaz = H / 3.
    nlayers = int(H/deltaz)
    ncolumnsx = int(L/deltax)
    ncolumnsy = int(W/deltay)
    m = PeriodicRectangleMesh(ncolumnsx, ncolumnsy, L, W, direction='both', quadrilateral=True)
    mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)
    x, y, z = SpatialCoordinate(mesh)
    # horizontal base spaces
    cell = mesh._base_mesh.ufl_cell().cellname()
    u_hori = FiniteElement("RTCF", cell, 1)
    w_hori = FiniteElement("DG", cell, 0)
    # vertical base spaces
    u_vert = FiniteElement("DG", interval, 0)
    w_vert = FiniteElement("CG", interval, 1)
    # build elements (HDiv tensor-product velocity, theta on w x CG1)
    u_element = HDiv(TensorProductElement(u_hori, u_vert))
    w_element = HDiv(TensorProductElement(w_hori, w_vert))
    theta_element = TensorProductElement(w_hori, w_vert)
    v_element = u_element + w_element
    # spaces
    VDG0 = FunctionSpace(mesh, "DG", 0)
    VCG1 = FunctionSpace(mesh, "CG", 1)
    VDG1 = FunctionSpace(mesh, "DG", 1)
    Vt = FunctionSpace(mesh, theta_element)
    Vt_brok = FunctionSpace(mesh, BrokenElement(theta_element))
    Vu = FunctionSpace(mesh, v_element)
    VuCG1 = VectorFunctionSpace(mesh, "CG", 1)
    VuDG1 = VectorFunctionSpace(mesh, "DG", 1)
    # set up initial conditions: a random affine function of z, exactly
    # representable in CG1, so recovery should reproduce it to round-off
    np.random.seed(0)
    expr = np.random.randn() + np.random.randn()*z
    # our actual theta and rho and v
    rho_CG1_true = Function(VCG1).interpolate(expr)
    theta_CG1_true = Function(VCG1).interpolate(expr)
    v_CG1_true = Function(VuCG1).interpolate(as_vector([expr, expr, expr]))
    rho_Vt_true = Function(Vt).interpolate(expr)
    # make the initial fields by projecting expressions into the lowest order spaces
    rho_DG0 = Function(VDG0).interpolate(expr)
    rho_CG1 = Function(VCG1)
    theta_Vt = Function(Vt).interpolate(expr)
    theta_CG1 = Function(VCG1)
    v_Vu = Function(Vu).project(as_vector([expr, expr, expr]))
    v_CG1 = Function(VuCG1)
    rho_Vt = Function(Vt)
    # make the recoverers and do the recovery
    rho_recoverer = Recoverer(rho_DG0, rho_CG1, VDG=VDG1, boundary_method=Boundary_Method.dynamics)
    theta_recoverer = Recoverer(theta_Vt, theta_CG1, VDG=VDG1, boundary_method=Boundary_Method.dynamics)
    v_recoverer = Recoverer(v_Vu, v_CG1, VDG=VuDG1, boundary_method=Boundary_Method.dynamics)
    rho_Vt_recoverer = Recoverer(rho_DG0, rho_Vt, VDG=Vt_brok, boundary_method=Boundary_Method.physics)
    rho_recoverer.project()
    theta_recoverer.project()
    v_recoverer.project()
    rho_Vt_recoverer.project()
    # relative L2 errors against the analytic fields
    rho_diff = errornorm(rho_CG1, rho_CG1_true) / norm(rho_CG1_true)
    theta_diff = errornorm(theta_CG1, theta_CG1_true) / norm(theta_CG1_true)
    v_diff = errornorm(v_CG1, v_CG1_true) / norm(v_CG1_true)
    rho_Vt_diff = errornorm(rho_Vt, rho_Vt_true) / norm(rho_Vt_true)
    return (rho_diff, theta_diff, v_diff, rho_Vt_diff)
def run_3d_recovery(dirname):
    """Thin wrapper around setup_3d_recovery, kept for naming symmetry
    with the other recovery tests; returns its error tuple unchanged."""
    return setup_3d_recovery(dirname)
def test_3d_boundary_recovery(tmpdir):
    """Every recovered field must match its analytic field to round-off."""
    tolerance = 1e-7
    rho_diff, theta_diff, v_diff, rho_Vt_diff = run_3d_recovery(str(tmpdir))
    for diff in (rho_diff, theta_diff, v_diff, rho_Vt_diff):
        assert diff < tolerance
|
11,808 | d178f6e24513ac9fac633afdc7598d068db0d4ea | class Solution:
def shuffle(self, nums: List[int], n: int) -> List[int]:
mid = len(nums) // 2
x, y, arr = nums[:mid], nums[mid:], list()
lenx, leny = len(x), len(y)
for i in range(lenx+leny):
arr.append((x if i % 2 == 0 else y).pop(0))
return arr
|
11,809 | f5cc500aac2f3b3e3c4ae80f12670e7cf32082d8 | import asyncio
import os
from zuscale.providers import ALL_CLOUDS
zuscale_hosts = []
async def get_hosts():
    """Populate the module-level `zuscale_hosts` list with (ip, config)
    tuples discovered from every configured cloud provider.

    Honours the PROVIDER env var (restricts to one provider) and the
    PYINFRA_ALL env var (include servers regardless of project tag).
    """
    for provider, _cloud in ALL_CLOUDS.items():
        # Skip providers that are not the provider specified in environ.
        if "PROVIDER" in os.environ and provider != os.environ["PROVIDER"]:
            continue
        # Variables for all servers.
        ssh_user = "root"
        # ec2 specific bs
        if provider == "ec2":
            ssh_user = "ec2-user"
        cloud = _cloud()
        servers = await cloud.list_servers()
        for server in servers:
            # Ignore servers without IPs or tags.
            if not server.ip4 or not server.server_tags:
                continue
            # Add servers if they have a special tag on them.
            # XXX Find a way to make this less fixed.
            if "project_erin_archiveteam" in server.server_tags or "PYINFRA_ALL" in os.environ:
                zuscale_hosts.append(
                    (server.ip4, {
                        "ssh_user": ssh_user,
                        "provider": cloud.NAME,
                    })
                )
        # Release the provider's client/session before moving on.
        await cloud.cleanup()
def get_static_hosts():
    """Append hosts listed one-per-line in ./static_hosts.txt (if present)
    to the module-level `zuscale_hosts` list; blank lines are ignored."""
    if not os.path.exists("./static_hosts.txt"):
        return
    with open("./static_hosts.txt", "r") as handle:
        for line in handle.readlines():
            entry = line.strip()
            if not entry:
                continue
            host_config = {
                "ssh_user": "root",
                "provider": "static",
            }
            zuscale_hosts.append((entry, host_config))
# Main area — runs at import time: collect dynamic hosts, then static ones.
asyncio.run(get_hosts()) # Add dynamic hosts first.
# Add static hosts if the provider is undefined or is static.
if os.environ.get("PROVIDER", "static") == "static":
    get_static_hosts() # Add static hosts next.
print(zuscale_hosts)
|
11,810 | da40af1027a26565b5b94fcda72e3e0325617a41 | """
Author: Yijia Xu
Usage:
# Detect the child speech segments based on the manually annotated mom speech segments,
# modify mom speech segment intervals, and output both to textgrids
# export both wav segments, and transcribe them using kaldi ASPIRE model
# write transcription results to json file
$ python run.py --child_puzzle_wav=\
--mom_puzzle_wav=\
--mom_puzzle_textgrid=\
--child_outfile_textgrid=\
--child_segment_wav_outdir=\
--mom_segment_wav_outdir=\
--add_seconds_at_boundary=\
"""
from scipy.io import wavfile
import pdb
import matplotlib.pyplot as plt
from vad import VoiceActivityDetector
import numpy as np
import tgt
import praatio.tgio as tgio
import os
import json
import tensorflow as tf
from tools import child_speech_detector
from tools import export_child_audio_segments
from tools import export_mom_audio_segments
from tools import write_to_txtgrids
from tools import transcription
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('child_puzzle_wav', '/Users/yijiaxu/Desktop/child_segment/child_wav_files_textgrids_by_session/child_puzzle_1_wav/MCRP_ID#3001_G1_child.wav', \
'full path of the audio recorded by mic on child')
flags.DEFINE_string('mom_puzzle_wav', '/Users/yijiaxu/Desktop/child_segment/mom_wav_files_textgrids_by_session/mom_puzzle_1_wav/MCRP_ID#3001_G1.wav', \
'full path of the audio recorded by mic on mom')
flags.DEFINE_string('mom_puzzle_textgrid', '/Users/yijiaxu/Desktop/child_segment/mom_wav_files_textgrids_by_session/mom_puzzle_1_textgrids/MCRP_ID#3001_G1.TextGrid', \
'full path of the textgrids annotated manually for audio recorded by mic on mom')
flags.DEFINE_string('child_outfile_textgrid', 'egs.TextGrid', \
'full path of the textgrids to be created by program for both child detected speech segments and modified mom speech segments')
flags.DEFINE_string('child_segment_wav_outdir', 'child_seg_wav', \
'dir to store detected child audio segments')
flags.DEFINE_string('mom_segment_wav_outdir', 'mom_seg_wav', \
'dir to store detected mom audio segments')
flags.DEFINE_float('add_seconds_at_boundary', 0.2, \
'seconds to add at boundary of child speech detected')
# child_puzzle_wav = '/Users/yijiaxu/Desktop/child_segment/child_wav_files_textgrids_by_session/child_puzzle_1_wav/MCRP_ID#3001_G1_child.wav'
# mom_puzzle_wav = '/Users/yijiaxu/Desktop/child_segment/mom_wav_files_textgrids_by_session/mom_puzzle_1_wav/MCRP_ID#3001_G1.wav'
# mom_puzzle_textgrid = '/Users/yijiaxu/Desktop/child_segment/mom_wav_files_textgrids_by_session/mom_puzzle_1_textgrids/MCRP_ID#3001_G1.TextGrid'
# child_outfile_textgrid = 'egs.TextGrid'
# add_seconds_at_boundary = 0.2
# child_segment_wav_outdir = 'child_seg_wav/'
# mom_segment_wav_outdir = 'mom_seg_wav/'
# Bind CLI flags to locals (paths to the two mic recordings, the annotated
# mom textgrid, output locations, and the boundary padding in seconds).
child_puzzle_wav = FLAGS.child_puzzle_wav
mom_puzzle_wav = FLAGS.mom_puzzle_wav
mom_puzzle_textgrid = FLAGS.mom_puzzle_textgrid
child_outfile_textgrid = FLAGS.child_outfile_textgrid
add_seconds_at_boundary = FLAGS.add_seconds_at_boundary
child_segment_wav_outdir = FLAGS.child_segment_wav_outdir
mom_segment_wav_outdir = FLAGS.mom_segment_wav_outdir
# Ensure both output directories exist before exporting segments.
if not os.path.exists(child_segment_wav_outdir):
    os.makedirs(child_segment_wav_outdir)
if not os.path.exists(mom_segment_wav_outdir):
    os.makedirs(mom_segment_wav_outdir)
# detects child speech parts (VAD on the child-mic recording, guided by the
# manually annotated mom segments so mom speech is excluded)
v = VoiceActivityDetector(child_puzzle_wav)
data = v.data
total_time = len(data)*1.0/v.rate
total_time = float("{0:.2f}".format(total_time))  # round to 10 ms
speech_time,mom_tier = child_speech_detector(mom_puzzle_textgrid,v)
# export detected child speech segments wav (padded by add_seconds_at_boundary)
turns = export_child_audio_segments(total_time,child_puzzle_wav,add_seconds_at_boundary,child_segment_wav_outdir,speech_time)
total_turns = turns
tier = write_to_txtgrids('Machine-Label-CS',turns)
# modify manually annotated mom speech segments, and export the wav segments
mom_turns = export_mom_audio_segments(mom_puzzle_wav,mom_tier,mom_segment_wav_outdir)
total_turns+=mom_turns
mom_tier = write_to_txtgrids('Human-Label-MS(modified)', mom_turns)
# write child and mom speech segment results to the textgrids
tg = tgio.Textgrid()
tg.addTier(mom_tier)
tg.addTier(tier)
tg.save(child_outfile_textgrid)
# do transcriptions of the detected segments (kaldi ASPIRE); results to JSON
transcription(total_turns,mom_puzzle_wav,child_puzzle_wav,mom_segment_wav_outdir,child_segment_wav_outdir,'JSONData.json')
|
11,811 | c4ac94da8d4e8eddd9a0739e359ffd35d17efe94 | """
_simulations_options.py: Parses position initialization options for simulations.
Copyright (c) 2020 Charles Li // UCSB, Department of Chemical Engineering
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import
__author__ = "Charles Li"
__version__ = "1.0"
from ast import literal_eval
import os
from simtk.openmm.app import PDBReporter
from simtk.unit import angstrom
from openmmtools.testsystems import subrandom_particle_positions
import mdtraj as md
from ._options import _Options
__all__ = ['FileOptions', 'SubrandomParticlePositions', 'DodecaneAcrylatePositionOptions']
class _PositionOptions(_Options):
    """Common base for position-initialization option sections."""

    _SECTION_NAME = "_Position"

    # =========================================================================

    def __init__(self, simulations_options):
        super(_PositionOptions, self).__init__()
        self.simulations_options = simulations_options

    # =========================================================================

    def _create_filepath(self, filepath):
        """Resolve `filepath` relative to the configured input directory."""
        base_dir = self.simulations_options.input_options.directory
        return os.path.join("" if base_dir is None else base_dir, filepath)

    # =========================================================================

    def set_positions(self, simulation, *args):
        """Hook for subclasses: assign particle positions to `simulation`."""
        pass
class FileOptions(_PositionOptions):
    """Position section that loads coordinates and box vectors from a file."""

    _SECTION_NAME = "File"

    # =========================================================================

    def __init__(self, simulations_options):
        super(FileOptions, self).__init__(simulations_options)
        self.file = None   # required coordinate/trajectory file
        self.top = None    # optional topology file
        self.frame = 0     # frame index to load

    def _create_options(self):
        super(FileOptions, self)._create_options()
        for option_key, parser in (('file', self._parse_file),
                                   ('top', self._parse_top),
                                   ('frame', self._parse_frame)):
            self._OPTIONS[option_key] = parser

    # =========================================================================

    def _check_for_incomplete_input(self):
        if self.file is None:
            self._incomplete_error('file')

    # =========================================================================

    def _parse_file(self, *args):
        self.file = self._create_filepath(args[0])

    def _parse_top(self, *args):
        self.top = self._create_filepath(args[0])

    def _parse_frame(self, *args):
        self.frame = literal_eval(args[0])

    # =========================================================================

    def set_positions(self, simulation, *args):
        """Load the configured frame and push positions + box to the context."""
        load_kwargs = {'frame': self.frame}
        if self.top is not None:
            load_kwargs['top'] = self.top
        traj = md.load(self.file, **load_kwargs)
        simulation.context.setPositions(traj.xyz[0])
        simulation.context.setPeriodicBoxVectors(*traj.unitcell_vectors[0])
class SubrandomParticlePositions(_PositionOptions):
    """Position section that scatters particles on a subrandom sequence."""

    _SECTION_NAME = "SubrandomParticlePositions"

    # =========================================================================

    def __init__(self, simulations_options):
        super(SubrandomParticlePositions, self).__init__(simulations_options)
        self.method = 'sobol'  # default subrandom sequence

    def _create_options(self):
        super(SubrandomParticlePositions, self)._create_options()
        self._OPTIONS['method'] = self._parse_method

    # =========================================================================

    def _parse_method(self, *args):
        self.method = args[0]

    # =========================================================================

    def set_positions(self, simulation, *args):
        """Fill the default periodic box with subrandom particle positions."""
        n_particles = simulation.topology.getNumAtoms()
        box = simulation.system.getDefaultPeriodicBoxVectors()
        new_positions = subrandom_particle_positions(n_particles, box,
                                                     method=self.method)
        simulation.context.setPositions(new_positions)
class DodecaneAcrylatePositionOptions(_PositionOptions):
    """Position section that packs dodecane/acrylate systems with Packmol
    (via mdapackmol) and pushes the packed coordinates to the simulation."""

    _SECTION_NAME = "DodecaneAcrylatePosition"

    # =========================================================================

    def __init__(self, simulations_options):
        super(DodecaneAcrylatePositionOptions, self).__init__(simulations_options)
        self.file = None  # optional PDB path to dump the packed configuration

    def _create_options(self):
        super(DodecaneAcrylatePositionOptions, self)._create_options()
        self._OPTIONS['file'] = self._parse_file

    # =========================================================================

    def _parse_file(self, *args):
        self.file = self._create_filepath(args[0])

    # =========================================================================

    def set_positions(self, simulation, *args):
        """Pack all chains/solvent molecules into the simulation box.

        args[0] must be the topology options object describing chains,
        branched chains, and dodecane/squalane counts.  Heavy imports are
        deferred so MDAnalysis/mdapackmol are only required when used.
        """
        import MDAnalysis as mda
        import mdapackmol
        # Get topology options
        topology_options = args[0]
        # Create default instructions: pack anywhere inside the current box
        # (Packmol works in angstroms).
        box_vectors = simulation.context.getState().getPeriodicBoxVectors()
        a = box_vectors[0][0].value_in_unit(angstrom)
        b = box_vectors[1][1].value_in_unit(angstrom)
        c = box_vectors[2][2].value_in_unit(angstrom)
        default_instructions = ["inside box 0. 0. 0. {:.1f} {:.1f} {:.1f}".format(a, b, c)]
        # Create input for packmol
        mdapackmol_input = []
        # Linear chains: template PDBs ship with the package ("_aa" = all-atom).
        for chain_options in topology_options.chains:
            instructions = chain_options.instructions
            if instructions is None:
                instructions = default_instructions
            chain_filepath = "data/{}.pdb".format(chain_options.sequence_str)
            if topology_options.forceField_str == 'OPLS-AA':
                chain_filepath = "data/{}_aa.pdb".format(chain_options.sequence_str)
            molecule = mda.Universe(
                os.path.join(os.path.dirname(__file__), chain_filepath)
            )
            packmol_structure = mdapackmol.PackmolStructure(
                molecule, number=chain_options.num,
                instructions=instructions
            )
            mdapackmol_input.append(packmol_structure)
        # Branched chains supply their own PDB path.
        for branched_chain_options in topology_options.branched_chains:
            instructions = branched_chain_options.instructions
            if instructions is None:
                instructions = default_instructions
            molecule = mda.Universe(
                branched_chain_options.pdb
            )
            packmol_structure = mdapackmol.PackmolStructure(
                molecule, number=branched_chain_options.num,
                instructions=instructions
            )
            mdapackmol_input.append(packmol_structure)
        # Dodecane solvent (united-atom by default, all-atom for OPLS-AA).
        if topology_options.numDodecane > 0:
            instructions = topology_options.dodecaneInstructions
            if instructions is None:
                instructions = default_instructions
            dodecane_pdb_filepath = "data/C12.pdb"
            if topology_options.forceField_str == 'OPLS-AA':
                dodecane_pdb_filepath = "data/C12_aa.pdb"
            molecule = mda.Universe(
                os.path.join(os.path.dirname(__file__), dodecane_pdb_filepath)
            )
            packmol_structure = mdapackmol.PackmolStructure(
                molecule, number=topology_options.numDodecane,
                instructions=instructions
            )
            mdapackmol_input.append(packmol_structure)
        # Squalane solvent: only the TraPPE-UA template exists.
        if topology_options.numSqualane > 0:
            instructions = default_instructions
            if topology_options.forceField_str == 'TraPPE-UA':
                squalane_pdb_filepath = "data/squalane_ua.pdb"
            else:
                raise NotImplementedError("force field not implemented for squalane")
            molecule = mda.Universe(
                os.path.join(os.path.dirname(__file__), squalane_pdb_filepath)
            )
            packmol_structure = mdapackmol.PackmolStructure(
                molecule, number=topology_options.numSqualane,
                instructions=instructions
            )
            mdapackmol_input.append(packmol_structure)
        # Call Packmol
        system = mdapackmol.packmol(mdapackmol_input)
        # Set positions to simulation (angstrom -> nm conversion via /10)
        positions = system.coord.positions/10.0
        simulation.context.setPositions(positions)
        # Save to PDB file
        if self.file is not None:
            PDBReporter(self.file, 1).report(simulation, simulation.context.getState(getPositions=True))
|
11,812 | 3b887afa0cf1f136abb6773b5af90b02bc24b787 |
from flask_restful import Api
from .Task import Task
from .TaskBYID import TaskBYID
from app import flaskAppInstance
# Single flask-restful Api bound to the shared Flask application instance.
restServer=Api(flaskAppInstance)
# Routes: collection-level task resource and single-task lookup by id.
restServer.add_resource(Task, "/api/v1.0/task")
restServer.add_resource(TaskBYID,"/api/v1.0/task/id/<string:taskId>")
|
11,813 | f4d252815aff9139353a0d0b37e51532b0d775f7 | #This program is used to just clean up the Pronunciation Dictionary and
#remove all the consonants
def _strip_consonants(phonemes, consonant_set):
    """Return `phonemes` with all consonants removed, joined by spaces."""
    return " ".join(p for p in phonemes if p not in consonant_set)


def main():
    """Rewrite PronunciationDictionary.txt as ConsonantLess_PD.txt,
    keeping only vowel phonemes for each word.

    Fixes vs the original: both files are now closed via `with` (they were
    leaked), blank input lines no longer crash on `line.split()[0]`, and
    the module-level `main()` call is guarded so importing this file does
    no I/O.
    """
    print("testing 123")
    consonant_set = {"B", "CH", "D", "F", "G", "K", "L", "M", "N",
                     "NG", "P", "R", "S", "SH", "T", "TH", "V", "W",
                     "Y", "Z", "ZH"}
    word_dict = {}
    with open("PronunciationDictionary.txt") as infile:
        for line in infile:
            curr_list = line.split()
            if not curr_list:  # skip blank lines
                continue
            # First token is the word; the rest are its phonemes.
            word_dict[curr_list[0]] = _strip_consonants(curr_list[1:], consonant_set)
    with open("ConsonantLess_PD.txt", "w+") as outfile:
        for word in word_dict:
            outfile.write(word + " " + word_dict[word] + "\n")


if __name__ == "__main__":
    main()
#words with a hash tag are consonant examples
'''
AA odd AA D
AE at AE T
AH hut HH AH T
AO ought AO T
AW cow K AW
AY hide HH AY D
# B be B IY
# CH cheese CH IY Z
# D dee D IY
DH thee DH IY
EH Ed EH D
ER hurt HH ER T
EY ate EY T
# F fee F IY
# G green G R IY N
HH he HH IY
IH it IH T
IY eat IY T
JH gee JH IY
# K key K IY
# L lee L IY
# M me M IY
# N knee N IY
# NG ping P IH NG
OW oat OW T
OY toy T OY
# P pee P IY
# R read R IY D
# S sea S IY
# SH she SH IY
# T tea T IY
# TH theta TH EY T AH
UH hood HH UH D
UW two T UW
# V vee V IY
# W we W IY
# Y yield Y IY L D
# Z zee Z IY
# ZH
'''
|
11,814 | 9f48090fe110438d33bd79cd92b01930758ce719 | from django.apps import AppConfig
class FamedicUsersConfig(AppConfig):
    """Django AppConfig for the famedic_users application."""
    name = 'famedic_users'
|
11,815 | 199e588c33ecc1465eaf973d4a0766effcb61896 | from pack import all_packs, cust_packs
|
11,816 | 6bf1fd682a6ff9427d37383340eb9861fec9bcda | from PIL import Image
from tempfile import NamedTemporaryFile
import os
def is_image_compressable(path):
    """Return True when `path` has an extension we know how to recompress
    (.jpg or .png, case-insensitive)."""
    extension = os.path.splitext(path)[1].lower()
    return extension in [".jpg", ".png"]
def _detect_valid_format(path):
ext = os.path.splitext(path)[1].lower()
return {".jpg": "jpeg", ".png": "png"}.get(ext)
def compress(path, size=(1024, 3000)):
    """Downscale the image at `path` to fit within `size` and save it to a
    persistent temporary file.

    Returns the NamedTemporaryFile object; its handle is already closed on
    return (callers presumably use `.name` — TODO confirm).
    """
    im = Image.open(path)
    # thumbnail() shrinks in place, preserving aspect ratio; it never upscales.
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS)
    # — confirm the pinned Pillow version before upgrading.
    im.thumbnail(size, Image.ANTIALIAS)
    # delete=False keeps the file on disk after the handle closes.
    with NamedTemporaryFile(delete=False) as temp:
        im.save(temp, _detect_valid_format(path), quality=100, optimize=True)
    return temp
|
11,817 | 9e4c6530739f7f3e5a64bfdbfaa3ce0e4966d61d |
from __future__ import division
# Read two operands and a modulus from stdin (one integer per line).
a = int(input())
b = int(input())
m = int(input())
# With `from __future__ import division`, a/b is true division; int() then
# truncates toward zero (differs from a//b for negative operands).
print(int(a/b))
print(a%b)
print(divmod(a, b)) # Divmod is in built python function gives you division, and remainder
# PowerMod
print(pow(a,b))
print(pow(a,b,m))  # modular exponentiation: (a**b) % m, computed efficiently
|
11,818 | bdadf47612db4b10bf758e6d7084cd75be07b437 | import re
# "?" quantifier: the preceding character may appear once or not at all.
regex = re.compile('a?b')
print(regex.match('b'))
print(regex.match('ab'))
print(regex.match('aab'))  # None: match() anchors at the start, 'aa' cannot begin 'a?b'
# {m,n}: general quantifier that can express any of the repeaters above.
# m: at least m repetitions of the previous character; n: at most n.
regex = re.compile('a{3,5}') # matches aaa, aaaa or aaaaa
print(regex.match('a')) # None, since at least 3 repetitions of 'a' are expected
print(regex.match('aaa'))
print(regex.match('aaaaaa')) # matches only the first 5 "a"s, not all 6.
# {0,} is equivalent to *
regex = re.compile('a{0,}')
print(regex.match('a'))
print(regex.match(''))
# {1,} is equivalent to +
regex = re.compile('a{1,}')
print(regex.match(''))
print(regex.match('aaa'))
# {0,1} is equivalent to ?
regex = re.compile('a{0,1}')
print(regex.match('a'))
print(regex.match('aa'))
|
11,819 | 49a9211ba4c974a704439fd470109c99fb696756 | import math
import matplotlib.pyplot as plt
# Compare worst-case comparison counts of linear vs binary search and plot
# both curves for list sizes 1..N-1.  (Strings/labels are in Portuguese.)
N = 10
print("Com uma lista de %d elementos" %N)
print("Busca linear = %d" %N)
print("Busca binária = %d" %(math.log2(N)+1))
n = list(range(1,N))
# Binary search worst case: floor-ish log2(i) + 1 comparisons.
p = [math.log2(i)+1 for i in n]
plt.title("Performance busca linear x busca binária")
plt.xlabel("Quantidade de elementos")
plt.ylabel("Quantidade de verificações")
plt.plot(n,n,label="Busca linear")
plt.plot(n,p,label="Busca binária")
plt.legend()
plt.grid()
plt.show()
11,820 | 31631cd068fcd30f22bca973207856fcb7f4ebe1 |
# coding: utf-8
# Notebook export: trains a small VGG-style CNN on 28x28 RGB images across
# 96 classes, plots accuracy/loss curves, prints a confusion matrix and
# per-class precision/recall/F1, and saves the trained model.
# In[33]:
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization
import matplotlib.pyplot as plt
import numpy as np
# In[34]:
# Load the pre-split dataset from .npy files in the working directory.
x_train = np.load('Xtrain.npy')
y_train = np.load('Ytrain.npy')
x_test = np.load('Xtest.npy')
y_test = np.load('Ytest.npy')
# In[35]:
batch_size = 50
num_classes = 96
epochs = 20
img_rows, img_cols = 28, 28
# Reshape flat sample arrays to channels-first or channels-last layout
# depending on the backend's configured image data format.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
    input_shape = (3, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
    input_shape = (img_rows, img_cols, 3)
# convert class vectors to binary class matrices
# y_train = keras.utils.to_categorical(y_train, num_classes)
# y_test = keras.utils.to_categorical(y_test, num_classes)
# NOTE(review): to_categorical is commented out but the loss below is
# categorical_crossentropy, and y_test.argmax(axis=1) is used later -- so the
# .npy label arrays are presumably already one-hot encoded; confirm.
# In[36]:
# Two conv blocks (32 then 64 filters) with max-pooling and dropout,
# followed by a dense classifier head.
model = keras.Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
                 activation ='relu', input_shape = input_shape))
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
                 activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
                 activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
                 activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation = "softmax"))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
# summarize history for accuracy
# NOTE(review): history keys 'acc'/'val_acc' are the older Keras names; newer
# tf.keras releases use 'accuracy'/'val_accuracy' -- verify against the
# installed TensorFlow version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Final evaluation plus per-class metrics on the test set.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
predictions = model.predict(x_test)
matrix = confusion_matrix(y_test.argmax(axis=1), predictions.argmax(axis=1))
print (matrix)
print ("########################################################")
params = precision_recall_fscore_support(y_test.argmax(axis=1), predictions.argmax(axis=1))
print (params)
model.save('on_lines_adv.h5') |
11,821 | fcf9e82ce1523a9b72c9992a2c969edb51f8089d | print("%d" % 432)
# Demo of %-style (printf) string formatting.
print("%d %d" % (432, 345))  # multiple values are passed as a tuple
print("%f" %432.123)
print("%f %f" %(432.123, 10.3))
print("%f" %432.123456)    # %f shows 6 digits after the decimal point by default
print("%f" %432.12345651)  # extra digits are rounded away
print("%s" % "GeekyShows")
print("%s %s" % ("Hello", "GeekyShows"))
print("%d %s" % (432, "GeekyShows"))
#print("%s %d" % (432, "GeekyShows")) TypeError
# Mapping keys: values are looked up by name in the dict.
print("%(nm)s %(ag)d" % {'ag':432, 'nm':"GeekyShows"})
print("% d" % 432)  # ' ' flag: leave a space where the sign would go
print("% d" % 432)
print("%+d" % 432)  # '+' flag: always show the sign
print("%8d" % 432)   # minimum field width 8, right-aligned
print("%08d" % 432)  # zero-padded to width 8
print("%.3f" %432.123)  # precision: digits after the decimal point
print("%.2f" %432.123)
print("%.2f" %432.128)  # note the rounding
print("%9.2f" %432.128)   # width 9 with precision 2
print("%09.2f" %432.123)  # zero-padded width 9, precision 2
print("%9.2f" %4388453232.124)  # width is only a minimum; values are never truncated
|
11,822 | 4372c94c18afafbf0b148da02d718319c3f6c8eb | # -*- coding: utf-8 -*-
"""Gasoline signals."""
from flask.signals import Namespace
# NOTE(review): only 'event' is exported here although 'activity' and
# 'plugins_registered' are also defined below -- confirm whether their
# omission from __all__ is intentional.
__all__ = ['event']
# Private namespace so these signal names cannot collide with other libraries'.
signals = Namespace()
# used to notify various events
event = signals.signal('event')
# used to notify activity
activity = signals.signal('activity')
# triggered at application initialization when all plugins have been loaded
plugins_registered = signals.signal('plugins_registered')
|
11,823 | 148b9613dc46a6b2c68b898657b1fded522bf4e9 | #!/usr/bin/python
#-*-coding:utf8-*-
from pprint import pprint
from weibopy.auth import OAuthHandler
from weibopy.api import API
from weibopy.binder import bind_api
from weibopy.error import WeibopError
import time,os,pickle,sys
import logging.config
from multiprocessing import Process
import sqlite3 as sqlite
import math
import re
MAX_INSERT_ERROR = 5000
#from pymongo import Connection
CALL_BACK = 'http://www.littlebuster.com'
CALL_BACK=None
CALL_BACK='oob'
mongo_addr = 'localhost'
mongo_port = 27017
db_name = 'weibo'
a_consumer_key = '211160679'
a_consumer_secret = '63b64d531b98c2dbff2443816f274dd3'
a_key = '44bd489d6a128abefdd297ae8d4a494d'
a_secret = 'fb4d6d537ccc6b23d21dc888007a08d6'
someoneid = '1404376560'
davidid='3231589944'
a_ids = [davidid]
class Sina_reptile():
    """
    Crawler for Sina Weibo data (Python 2).

    Wraps the weibopy API: OAuth authentication, user profiles, statuses and
    follower/followee id lists, plus API rate-limit management.  Also holds a
    sqlite connection (con_user/cur_user) that the module-level relation
    crawling helpers below write through.

    Pattern note: API result objects are stashed on self.obj and their fields
    read via getAtt(), which returns '' for missing attributes.
    """
    def __init__(self,consumer_key,consumer_secret,userdbname):
        # OAuth application credentials.
        self.consumer_key,self.consumer_secret = consumer_key,consumer_secret
        # sqlite connection/cursor for the user-relation database.
        self.con_user = None
        self.cur_user = None
        try:
            self.con_user = sqlite.connect(userdbname,timeout = 20)
            self.cur_user = self.con_user.cursor()
        except Exception,e:
            print 'Sina_reptile init无法连接数据库!'
            print e
            return None
        #self.connection = Connection(mongo_addr,mongo_port)
        #self.db = self.connection[db_name]
        #self.collection_userprofile = self.db['userprofile']
        #self.collection_statuses = self.db['statuses']
    def getAtt(self, key):
        # Read attribute `key` from the last API object stored in self.obj;
        # returns '' instead of raising when the attribute is missing.
        try:
            return self.obj.__getattribute__(key)
        except Exception, e:
            print e
            return ''
    def getAttValue(self, obj, key):
        # Same as getAtt but for an explicitly passed object.
        try:
            return obj.__getattribute__(key)
        except Exception, e:
            print e
            return ''
    def auth(self):
        """
        Obtain a Sina Weibo access_token and access_secret interactively:
        prints the authorization URL and asks for the PIN on stdin.
        """
        if len(self.consumer_key) == 0:
            print "Please set consumer_key"
            return
        if len(self.consumer_secret) == 0:
            print "Please set consumer_secret"
            return
        self.auth = OAuthHandler(self.consumer_key, self.consumer_secret,CALL_BACK)
        auth_url = self.auth.get_authorization_url()
        print 'Please authorize: ' + auth_url
        verifier = raw_input('PIN: ').strip()
        #403error
        self.auth.get_access_token(verifier)
        self.api = API(self.auth)
        print 'authorize success'
    def setToken(self, token, tokenSecret):
        """
        Bind an existing OAuth token/secret so Weibo data can be fetched
        without the interactive authorization step.
        """
        self.auth = OAuthHandler(self.consumer_key, self.consumer_secret)
        self.auth.setToken(token, tokenSecret)
        self.api = API(self.auth)
    def get_userprofile(self,id):
        """
        Fetch a user's basic profile.  Returns a dict of profile fields, or
        None on a WeibopError from the API.
        """
        try:
            userprofile = {}
            userprofile['id'] = id
            user = self.api.get_user(id)
            self.obj = user
            userprofile['screen_name'] = self.getAtt("screen_name")
            userprofile['name'] = self.getAtt("name")
            userprofile['province'] = self.getAtt("province")
            userprofile['city'] = self.getAtt("city")
            userprofile['location'] = self.getAtt("location")
            userprofile['description'] = self.getAtt("description")
            userprofile['url'] = self.getAtt("url")
            userprofile['profile_image_url'] = self.getAtt("profile_image_url")
            userprofile['domain'] = self.getAtt("domain")
            userprofile['gender'] = self.getAtt("gender")
            userprofile['followers_count'] = self.getAtt("followers_count")
            userprofile['friends_count'] = self.getAtt("friends_count")
            userprofile['statuses_count'] = self.getAtt("statuses_count")
            userprofile['favourites_count'] = self.getAtt("favourites_count")
            userprofile['created_at'] = self.getAtt("created_at")
            userprofile['following'] = self.getAtt("following")
            userprofile['allow_all_act_msg'] = self.getAtt("allow_all_act_msg")
            userprofile['geo_enabled'] = self.getAtt("geo_enabled")
            userprofile['verified'] = self.getAtt("verified")
#            for i in userprofile:
#                print type(i),type(userprofile[i])
#                print i,userprofile[i]
#
        except WeibopError, e:  # details of the caught error are carried in e
            print "error occured when access userprofile use user_id:",id
            print "Error:",e
            #log.error("Error occured when access userprofile use user_id:{0}\nError:{1}".format(id, e),exc_info=sys.exc_info())
            return None
        return userprofile
    def get_specific_weibo(self,id):
        """
        Fetch a single status by its status id and return its fields as a
        dict (or an error string if the API call cannot be bound).
        """
        statusprofile = {}
        statusprofile['id'] = id
        try:
            # re-bind a get_status function for the show-status endpoint
            get_status = bind_api( path = '/statuses/show/{id}.json',
                                   payload_type = 'status',
                                   allowed_param = ['id'])
        except:
            return "**绑定错误**"
        status = get_status(self.api,id)
        self.obj = status
        statusprofile['created_at'] = self.getAtt("created_at")
        statusprofile['text'] = self.getAtt("text")
        statusprofile['source'] = self.getAtt("source")
        statusprofile['favorited'] = self.getAtt("favorited")
        # NOTE(review): "ntruncatedame" looks like a typo for "truncated";
        # getAtt silently returns '' for it, so this field is always empty.
        statusprofile['truncated'] = self.getAtt("ntruncatedame")
        statusprofile['in_reply_to_status_id'] = self.getAtt("in_reply_to_status_id")
        statusprofile['in_reply_to_user_id'] = self.getAtt("in_reply_to_user_id")
        statusprofile['in_reply_to_screen_name'] = self.getAtt("in_reply_to_screen_name")
        statusprofile['thumbnail_pic'] = self.getAtt("thumbnail_pic")
        statusprofile['bmiddle_pic'] = self.getAtt("bmiddle_pic")
        statusprofile['original_pic'] = self.getAtt("original_pic")
        statusprofile['geo'] = self.getAtt("geo")
        statusprofile['mid'] = self.getAtt("mid")
        statusprofile['retweeted_status'] = self.getAtt("retweeted_status")
        return statusprofile
    def get_latest_weibo(self,user_id,count):
        """
        Fetch the user's `count` most recent statuses.  Returns a list of
        status dicts, or None on an API error.
        """
        # NOTE(review): statusprofile is created ONCE and mutated inside the
        # loop, so every element of `statuses` aliases the same dict and ends
        # up holding the fields of the LAST status -- the dict should be
        # recreated per iteration.
        statuses,statusprofile = [],{}
        try: #error occur in the SDK
            timeline = self.api.user_timeline(count=count, user_id=user_id)
        except Exception as e:
            print "error occured when access status use user_id:",user_id
            print "Error:",e
            #log.error("Error occured when access status use user_id:{0}\nError:{1}".format(user_id, e),exc_info=sys.exc_info())
            return None
        for line in timeline:
            self.obj = line
            statusprofile['usr_id'] = user_id
            statusprofile['id'] = self.getAtt("id")
            statusprofile['created_at'] = self.getAtt("created_at")
            statusprofile['text'] = self.getAtt("text")
            statusprofile['source'] = self.getAtt("source")
            statusprofile['favorited'] = self.getAtt("favorited")
            # NOTE(review): same "ntruncatedame" typo as above.
            statusprofile['truncated'] = self.getAtt("ntruncatedame")
            statusprofile['in_reply_to_status_id'] = self.getAtt("in_reply_to_status_id")
            statusprofile['in_reply_to_user_id'] = self.getAtt("in_reply_to_user_id")
            statusprofile['in_reply_to_screen_name'] = self.getAtt("in_reply_to_screen_name")
            statusprofile['thumbnail_pic'] = self.getAtt("thumbnail_pic")
            statusprofile['bmiddle_pic'] = self.getAtt("bmiddle_pic")
            statusprofile['original_pic'] = self.getAtt("original_pic")
            # geo/retweeted_status are pickled then repr'd for text storage.
            statusprofile['geo'] = repr(pickle.dumps(self.getAtt("geo"),pickle.HIGHEST_PROTOCOL))
            statusprofile['mid'] = self.getAtt("mid")
            statusprofile['retweeted_status'] = repr(pickle.dumps(self.getAtt("retweeted_status"),pickle.HIGHEST_PROTOCOL))
            statuses.append(statusprofile)
#        print '*************',type(statusprofile['retweeted_status']),statusprofile['retweeted_status'],'********'
#        for j in statuses:
#            for i in j:
#                print type(i),type(j[i])
#                print i,j[i]
        return statuses
    def friends_ids(self,id):
        """
        Return the full list of ids the user follows, walking the API's
        cursor-based pagination until next_cursor is 0.
        """
        next_cursor,cursor = 1,0
        ids = []
        while(0!=next_cursor):
            fids = self.api.friends_ids(user_id=id,cursor=cursor)
            self.obj = fids
            ids.extend(self.getAtt("ids"))
            cursor = next_cursor = self.getAtt("next_cursor")
            previous_cursor = self.getAtt("previous_cursor")
        return ids
    def followers_ids(self,id):
        """
        Return the full list of the user's follower ids, walking the API's
        cursor-based pagination until next_cursor is 0.
        """
        next_cursor,cursor = 1,0
        ids = []
        while(0!=next_cursor):
            fids = self.api.followers_ids(user_id=id,cursor=cursor)
            self.obj = fids
            ids.extend(self.getAtt("ids"))
            cursor = next_cursor = self.getAtt("next_cursor")
            previous_cursor = self.getAtt("previous_cursor")
        return ids
    def manage_access(self):
        """
        Pace API usage: query the rate-limit status and sleep long enough to
        spread the remaining calls over the remaining window.
        """
        info = self.api.rate_limit_status()
        self.obj = info
        # seconds-per-remaining-call; if no calls remain, wait out the window.
        sleep_time = round( (float)(self.getAtt("reset_time_in_seconds"))/self.getAtt("remaining_hits"),2 ) if self.getAtt("remaining_hits") else self.getAtt("reset_time_in_seconds")
        print self.getAtt("remaining_hits"),self.getAtt("reset_time_in_seconds"),self.getAtt("hourly_limit"),self.getAtt("reset_time")
        print "sleep time:",sleep_time,'pid:',os.getpid()
        time.sleep(sleep_time + 1.5)
    def save_data(self,userprofile,statuses):
        # Persistence hook; the MongoDB backend is currently disabled.
        #self.collection_statuses.insert(statuses)
        #self.collection_userprofile.insert(userprofile)
        pass
def reptile(sina_reptile,userid):
    # Breadth-first crawl starting from userid: for each user in the current
    # frontier fetch the followee ids (the next frontier), the profile and
    # the latest 50 statuses, saving via sina_reptile.save_data().
    ids_num,ids,new_ids,return_ids = 1,[userid],[userid],[]
    while(ids_num <= 10000000):  # hard cap on total users crawled
        next_ids = []
        for id in new_ids:
            try:
                sina_reptile.manage_access()  # respect the API rate limit
                return_ids = sina_reptile.friends_ids(id)
                ids.extend(return_ids)
                userprofile = sina_reptile.get_userprofile(id)
                statuses = sina_reptile.get_latest_weibo(count=50, user_id=id)
                if statuses is None or userprofile is None:
                    continue
                sina_reptile.save_data(userprofile,statuses)
            except Exception as e:
                print "log Error occured in reptile"
                #log.error("Error occured in reptile,id:{0}\nError:{1}".format(id, e),exc_info=sys.exc_info())
                time.sleep(60)  # back off after an unexpected API failure
                continue
            ids_num+=1
            print ids_num
            if(ids_num >= 10000000):break
            next_ids.extend(return_ids)
        # swap frontiers: the ids collected this round become the next round.
        next_ids,new_ids = new_ids,next_ids
def run_crawler(consumer_key,consumer_secret,key,secret,userid,userdbname):
    # Entry point for one crawler process: authenticate with the given OAuth
    # credentials and breadth-first crawl starting from userid (see reptile()).
    try:
        sina_reptile = Sina_reptile(consumer_key,consumer_secret,userdbname)
        sina_reptile.setToken(key, secret)
        reptile(sina_reptile,userid)
        #sina_reptile.connection.close()
    except Exception as e:
        print e
        print 'log Error occured in run_crawler'
        #log.error("Error occured in run_crawler,pid:{1}\nError:{2}".format(os.getpid(), e),exc_info=sys.exc_info())
def run_my_crawler(consumer_key,consumer_secret,key,secret,userdbname,ids):
    # Entry point for one relation-crawler process: authenticate and download
    # the follow relations of every id in `ids` into userdbname.
    if ids:
        if len(ids)>0:
            try:
                sina_reptile = Sina_reptile(consumer_key,consumer_secret,userdbname)
                sina_reptile.setToken(key, secret)
                reptile_friends_of_uids_to_db(sina_reptile,ids,userdbname)
            except Exception as e:
                print 'Error occured in run_my_crawler,pid:%s'%str(os.getpid())
                print e
                #log.error("Error occured in run_my_crawler,pid:{1}\nError:{2}".format(os.getpid(), e),exc_info=sys.exc_info())
        else:
            print 'run_my_crawler ids[]<=0',ids
    else:
        print 'run_my_crawler ids[] is None',ids
def get_uids_in_weibodb(weibodbname):
    '''
    Task: collect the distinct user ids stored in the `weibos` table of
    database weibodbname.
    Returns: uids[] (as strings),
             None if the database cannot be opened or read.
    '''
    #init db
    try:
        con_weibo = sqlite.connect(weibodbname)
        cur_weibo = con_weibo.cursor()
    except Exception,e:
        print 'reptile_friends_of_uids_to_db无法连接数据库!'
        print e
        return None
    try:
        cur_weibo.execute("SELECT DISTINCT userid FROM weibos")
        con_weibo.commit()
    except Exception,E:
        print 'get_uids_in_weibodb:从db读取uid错误'
        print E
        return None
    list = cur_weibo.fetchall()  # NOTE(review): shadows the builtin `list`
    uids=[]
    print 'get_uids_in_weibodb共读取用户:%d个 从weibodb:%s'%(len(list),weibodbname)
    for row in list:
        uid, = row  # each row is a 1-tuple
        if uid:
            uids.append(str(uid))
    print 'get_uids_in_weibodb返回取用户:%d个'%len(uids)
    con_weibo.close()
    return uids
def get_undonwload_ids(ids):
    '''
    Task: using the gotrelation table in the user database, filter `ids`
    down to those whose relations have not been downloaded yet.
    Returns: [] of ids still awaiting download (deduplicated, as strings),
             None if the database cannot be opened.
    '''
    # NOTE(review): `userdbname` is a module-level global assigned in the
    # __main__ block, so this function only works when the module is run as
    # a script (the name is also a typo for "undownload").
    print 'get_undonwload_ids:得到%d个用户,从%s找出待下载关系的用户'%(len(ids),userdbname)
    #init db
    try:
        con_user = sqlite.connect(userdbname)
        cur_user = con_user.cursor()
    except Exception,e:
        print 'get_undonwload_ids 无法连接数据库!'
        print e
        return None
    # keep only the ids that have no gotrelation record yet
    ids_to_download = []
    for userid in ids:
        userid = str(userid)
        if not has_gotrelation_db(cur_user,con_user,userid):
            if userid not in ids_to_download:
                ids_to_download.append(userid)
    print 'get_undonwload_ids:还需要下载%d个用户'%(len(ids_to_download))
    return ids_to_download
def create_user_db_table(userdbname):
    # Create the `relation` and `gotrelation` tables in userdbname if they do
    # not already exist; CREATE errors (e.g. table exists) are printed and
    # ignored so repeated runs are harmless.
    #init db
    print 'create_user_db_table in db:%s'%userdbname
    try:
        con_user = sqlite.connect(userdbname)
        cur_user = con_user.cursor()
    except Exception,e:
        print 'create_user_db_table: error'
        print e
        return None
    #create tb
    try:
        cur_user.execute('CREATE TABLE relation(userid TEXT ,followerid TEXT,PRIMARY KEY(userid,followerid));')
        con_user.commit()
    except Exception,e:
        print e
        pass
    try:
        # NOTE(review): "INTERGER" is a typo for INTEGER; sqlite presumably
        # still accepts it as a loosely-typed column, so existing databases
        # keep working -- fix with care if the schema is ever migrated.
        cur_user.execute('CREATE TABLE gotrelation(userid TEXT PRIMARY KEY,gotfans INTERGER,gotfos INTERGER);')
        con_user.commit()
    except Exception,e:
        print e
        pass
def reptile_friends_of_uids_to_db(sina_reptile,ids_to_download,userdbname):
    '''
    Task: crawl each id's followees and fans via the API and store the pairs
    in the relation table of the userdbname database (through the crawler's
    cur_user/con_user sqlite handles), then record progress in gotrelation.
    Returns: None if the database cannot be reached,
             True when finished (the connection is closed on the way out).
    '''
    # NOTE(review): the INSERT statements below are built by string
    # interpolation; the ids are numeric so injection risk is low, but
    # parameterized queries (execute(sql, params)) would be safer and would
    # also avoid quoting issues.
    print 'reptile_friends_of_uids_to_db:得到%d个用户,待爬取关系至%s'%(len(ids_to_download),userdbname)
    for userid in ids_to_download:
        # users this id follows
        frids = reptile_friends_of_uid(sina_reptile,userid)
        # this id's fans
        foids = reptile_fos_of_uid(sina_reptile,userid)
        print 'reptile_friends_of_uids_to_db:为用户%s找到%d个关注,%d个粉丝'%(userid,len(frids),len(foids))
        count=0
        gotfans = len(foids)
        gotfos = len(frids)
        ins_fans = 0
        ins_fos = 0
        has_relation = 0  # counts failed inserts (assumed duplicates)
        sql_fri = ''
        sql_fo = ''
        if frids:  # the user's followees
            fri_ins_error = 0  # number of failed followee inserts
            for frid in frids:
                frid = str(frid)
                count+=1
                ins_fos+=1
                sql_fri = 'INSERT INTO relation(userid ,followerid) VALUES("%s","%s");'%(frid,userid)
                try:
                    sina_reptile.cur_user.execute(sql_fri)
                except Exception,e:
                    #print 'got fri relation %s fo %s'%(str(userid),str(frid))
                    has_relation+=1
                    fri_ins_error+=1
                    #print sql_fri
                    #print e
                    if fri_ins_error>MAX_INSERT_ERROR:  # too many failures: probably all duplicates, bail out
                        print '\t插入%d次错误,跳出%s关注关系插入'%(fri_ins_error,userid)
                        break
                    continue
                pass
            try:
                sina_reptile.con_user.commit()
            except Exception,e:
                print 'reptile_friends_of_uids_to_db commit插入%s的关注(%d个)有问题:'%(userid,len(frids))
                print e
                pass
        if foids:  # the user's fans
            fo_ins_error = 0  # number of failed fan inserts
            for foid in foids:
                followerid = str(foid)
                count+=1
                ins_fans+=1
                sql_fo = 'INSERT INTO relation(userid ,followerid) VALUES("%s","%s");'%(userid,followerid)
                try:
                    sina_reptile.cur_user.execute(sql_fo)
                except Exception,e:
                    #print 'got fri relation %s fo %s'%(str(foid),str(userid))
                    has_relation+=1
                    fo_ins_error+=1
                    #print sql_fo
                    print e
                    if fo_ins_error>MAX_INSERT_ERROR:  # too many failures: probably all duplicates, bail out
                        print '\t插入%d次错误,跳出%s粉丝关系插入'%(fo_ins_error,userid)
                        break
                    continue
                pass
            try:
                sina_reptile.con_user.commit()
            except Exception,e:
                print 'reptile_friends_of_uids_to_db commit插入%s的粉丝(%d个)有问题:'%(userid,len(foids))
                print e
                pass
        if has_relation!=0:
            print '\tuid:%s已经有关系记录'%str(userid),has_relation,'个'
        if count!=(len(frids)+len(foids)):
            print '\t 用户%s少添加关系%d个'%(userid, (len(frids) + len(foids) - count) )
        # update the gotrelation bookkeeping table for this user
        print '\t更新gotrelation表 uid:%s,fans/fos:'%userid,gotfans,gotfos
        update_gotrelation_db(sina_reptile.cur_user, sina_reptile.con_user,userid,gotfans,gotfos)
    sina_reptile.con_user.close()
    print 'reptile_friends_of_uids_to_db:完成%d个用户的关系爬取至%s'%(len(ids_to_download),userdbname)
    return True
def has_gotrelation_db(cur_user,con_user,uid,check_serious=True):
    '''
    Task: check whether this uid's relations have already been downloaded.
    When check_serious, cross-check the fan/followee counts recorded in
    gotrelation against actual row counts in relation (slow: ~1 uid/second).
    Otherwise, any existing gotrelation row counts as "done".
    Returns False on any database error.
    '''
    # strict check: count this uid's fans/followees in the relation table (slow)
    if check_serious:
        fans=0
        fos=0
        #get fans relation num
        try:
            cur_user.execute("""SELECT COUNT(*) FROM relation WHERE userid=='%s' ;"""%uid)
            con_user.commit()
            res = cur_user.fetchone()
            fans,=res
        except Exception,e:
            print 'has_gotrelation_db 读取relation表有问题,uid= %s'%(uid)
            print e
            return False
        #get fri relation num
        try:
            cur_user.execute("""SELECT COUNT(*) FROM relation WHERE followerid=='%s' ;"""%uid)
            con_user.commit()
            res = cur_user.fetchone()
            fos,=res
        except Exception,e:
            print 'has_gotrelation_db 读取relation表有问题,uid= %s'%(uid)
            print e
            return False
    # fetch the recorded fan/followee counts from gotrelation (fast)
    try:
        cur_user.execute("""SELECT userid,gotfans,gotfos FROM gotrelation WHERE userid=='%s' ;"""%uid)
        con_user.commit()
    except Exception,e:
        print 'has_gotrelation_db 读取gotrelation表有问题,uid= %s'%(uid)
        print e
        return False
    list = cur_user.fetchone()  # NOTE(review): shadows the builtin `list`
    if list:
        userid,gotfans,gotfos = list
        if str(userid)==str(uid):
            # the check_serious flag decides whether to verify the counts
            if check_serious:
                if gotfans<=fans and gotfos<=fos:
                    #print 'has_got(serious)....',list,fans,fos
                    return True
            else:  # lenient mode: any matching row counts as downloaded
                #print 'not_got',list,fans,fos
                return True
    #print 'final_not_got',uid,fans,fos
    return False
# unused (manual debugging helper, kept for reference)
def test_load_gotrelation_db(userids):
    # NOTE(review): the userids argument is immediately overwritten with a
    # hard-coded test list.
    userids=['1937245577','1402787970','1234567890']
    con_user = sqlite.connect('../users.db')
    cur_user = con_user.cursor()
    sql = '''SELECT userid FROM gotrelation WHERE userid=='%s' '''
    for userid in userids:
        try:
            cur_user.execute( sql%str(userid) )
            tup= cur_user.fetchone()
            if tup is not None:  # the user has a gotrelation row
                print sql,userid
                print tup
        except Exception,e:
            print 'test_load_gotrelation_db 读取gotrelation表有问题,uid= %s'%(userid)
            print e
    con_user.close()
# unused (kept for reference)
def load_gotrelation_db(cur,con,userids):
    '''
    Given userids, look each one up in users.db -> gotrelation; ids without
    a row are collected into wait_userids[].
    Returns: wait_userids, the ids that still need to be downloaded.
    '''
    #userids.sort()
    sql = '''SELECT userid FROM gotrelation WHERE userid=='%s' '''
    #sql = '''SELECT count(*) FROM gotrelation '''
    wait_userids = []
    # NOTE(review): the cur/con parameters are ignored; a fresh connection
    # to ../users.db is opened instead.
    con_user = sqlite.connect('../users.db')
    cur_user = con_user.cursor()
    for userid in userids:
        # ??? no rows returned ??? step through in a debugger to check
        try:
            cur_user.execute( sql% str(userid) )
            tup= cur_user.fetchone()
            if tup is not None:  # the user already has a gotrelation row
                print '\t已有用户:%s'%str(userid)
                print sql,userid
                print tup
            else:
                #print '\t没有用户:%s'%str(userid)
                wait_userids.append(userid)
        except Exception,e:
            print 'test_load_gotrelation_db 读取gotrelation表有问题,uid= %s'%(userid)
            print e
    print 'load_gotrelation_db 复查:需要下载%d个用户'%len(wait_userids)
    con_user.close()
    return wait_userids
def update_gotrelation_db(cur_user,con_user,userid,gotfans,gotfos):
    # Record (or overwrite, via REPLACE) how many fans/followees were
    # downloaded for userid in the gotrelation bookkeeping table.
    try:
        cur_user.execute("""REPLACE INTO gotrelation(userid,gotfans,gotfos) VALUES('%s',%d,%d)"""%(userid,gotfans,gotfos))
        con_user.commit()
    except Exception,e:
        print 'update_gotrelation_db 更新gotrelation表有问题,uid= %s'%(userid)
        print e
def reptile_fos_of_uid(sina_reptile,id):
'''
返回:ids[] id的粉丝
'''
try:
sina_reptile.manage_access()
#ids = [int,int,...]
return_ids = []
return_ids.extend(sina_reptile.followers_ids(id))
#print '获取id:%s的fos:'%id
#print return_ids
except Exception as e:
#log.error("Error occured in reptile,id:{0}\nError:{1}".format(id, e),exc_info=sys.exc_info())
print 'logerror("Error occured in reptile_fans_fos_of_uid,id:{0}\nError:{1}".format(id, e),exc_info=sys.exc_info()'
time.sleep(60)
return return_ids
def reptile_friends_of_uid(sina_reptile,id):
    '''
    Returns: ids[] -- the ids of the users `id` follows; empty on API failure.
    '''
    try:
        return_ids = []  # bound before any call so the except path can return it
        sina_reptile.manage_access()  # respect the API rate limit
        #ids = [int,int,...]
        return_ids.extend( sina_reptile.friends_ids(id))
        #print '获取id:%s的fos:'%id
        #print return_ids
    except Exception as e:
        #log.error("Error occured in reptile,id:{0}\nError:{1}".format(id, e),exc_info=sys.exc_info())
        print 'logerror("Error occured in reptile_friends_of_uid,id:{0}\nError:{1}".format(id, e),exc_info=sys.exc_info()'
        time.sleep(60)  # back off before the caller moves on
    return return_ids
# Split arr into consecutive pieces, e.g. [1,2,3,4,5] with m=2 -> [[1,2,3],[4,5]].
def chunks(arr, m):
    """Split ``arr`` into consecutive chunks of ceil(len(arr)/m) elements."""
    size = int(math.ceil(len(arr) / float(m)))
    pieces = []
    for start in range(0, len(arr), size):
        pieces.append(arr[start:start + size])
    return pieces
# Alternative splitter: distribute arr round-robin over (at most) m chunks so
# the sizes differ by at most one, e.g. [1,2,3,4,5] with m=2 -> [[1,3,5],[2,4]].
def chunks_avg(arr, m):
    """Split ``arr`` into at most ``m`` round-robin chunks.

    Element i goes into chunk i % m, e.g. chunks_avg([1,2,3,4,5], 2)
    returns [[1, 3, 5], [2, 4]].  When m exceeds len(arr), only len(arr)
    singleton chunks are produced.  An empty ``arr`` yields [].
    """
    # Fix: the original first computed a contiguous split (as in chunks())
    # and threw it away; that dead work also made empty input crash with
    # ValueError (range() with step 0).  Removed.
    if m < len(arr):
        maxsplit = m
    else:
        maxsplit = len(arr)
    newres = [[] for _ in range(maxsplit)]
    for i in range(len(arr)):
        newres[i % m].append(arr[i])
    return newres
def test_chunks():
    # Manual smoke test: print how chunks_avg() and chunks() split 49
    # elements when asked for (up to) 100 pieces.
    arr = []
    m = 100
    for i in range(1,50):
        arr.append(i)
    res = chunks_avg(arr,m)
    print 'chunks_avg:'
    for i in res:
        print i
    res = chunks(arr,m)
    print 'chunks:'
    for i in res:
        print i
if __name__ == "__main__":
    # Read weiqun ids from weiqun2download.txt, collect user ids from each
    # weiqun's sqlite db, then download the users' follow relations via the
    # API with one crawler process per API-key line.
    '''
    读取weiqun2download.txt的weiqunid,从weiqunid.db获取用户id,用api下载用户关系
    '''
    # read the weiqun ids; each line looks like "<weiqunid>#<endpage>"
    print '从weiqun2download.txt读取准备下载的weiqunIDs:'
    weiqunlist = 'weiqun2download.txt'
    weiqunIDs=[]
    weiqunparas=[]
    with open(weiqunlist) as f:
        for i in f.readlines():
            res = re.sub('#',' ',i).split(' ')
            weiqunid = res[0].strip()
            endpage = int(res[1].strip())
            startpage = 1
            print 'weiqunid:',weiqunid
            print 'page:',startpage,'~',endpage
            weiqunparas.append( (weiqunid,startpage,endpage) )
            weiqunIDs.append(weiqunid)
    logging.config.fileConfig("logging.conf")
    log = logging.getLogger('logger_sina_reptile')
    #consumer_key= 'the application key'
    #consumer_secret ='the application App Secret'
    #token = "the user's Access token key"
    #tokenSecret = "the user's Access token secret"
    userdbname = '../users.db'
    weiqunids = weiqunIDs
    weibodbnames=[]
    ids_to_download = []
    # my test
    #sina_reptile = Sina_reptile(a_consumer_key,a_consumer_secret,userdbname)
    #sina_reptile.setToken(a_key, a_secret)
    # create users.db (holds the download bookkeeping and the user relations)
    create_user_db_table(userdbname)
    # gather all not-yet-downloaded ids from every weiqun db
    for weiqunid in weiqunids:
        weibodbnames.append('../weiqun/%s.db'%weiqunid)
    for weibodbname in weibodbnames:
        ids = get_uids_in_weibodb(weibodbname)
        if ids:
            ids_to_download.extend( get_undonwload_ids(ids) )
    # single-crawler run (disabled)
    #reptile_friends_of_uids_to_db(sina_reptile,ids_to_download,userdbname)
    # multi-crawler run: count the crawlers (one per API-key line)
    crawler_count = 0
    crawlerids = 'clawer.txt'  # 20 workers
    crawlerids = 'crawlertest.txt'  # 2 workers (overrides the line above)
    with open(crawlerids) as f:
        for i in f.readlines():
            crawler_count+=1
    print '有%d个sina API sectret key'%crawler_count
    # split ids_to_download into one contiguous task chunk per crawler
    if len(ids_to_download):
        ids_list = chunks(ids_to_download,crawler_count)
        print '切分成任务块:',crawler_count
    else:  # nothing to do -> exit
        print '没有任务,退出'
        sys.exit(0)
    i=0
    for ids in ids_list:
        i+=len(ids)
    print '\t把%d个ID分成%d个任务.\n开始爬行!!!!!!!!'%(i,len(ids_list))
    # start crawling: spawn one process per API-key line
    # (each line: consumer_key consumer_secret token token_secret)
    print 'API secret:'
    with open(crawlerids) as f:
        index=0
        for i in f.readlines():
            print i
            j = i.strip().split(' ')
            p = Process(target=run_my_crawler, args=(j[0],j[1],j[2],j[3],userdbname,ids_list[index]))
            index+=1
            print '爬虫%d启动!!'%index
            p.start()
    #time.sleep(10000)
    #friendids = reptile_friends_of_uid(sina_reptile,ids)
    #print friendids
    #userprofile = sina_reptile.get_userprofile(davidid)
    #weibo = sina_reptile.get_specific_weibo("3408234545293850")
    #print userprofile
    #sina_reptile.manage_access()
    #print weibo
    #'''
    # origins:
    #sina_reptile = Sina_reptile('2173594644','fc76ecb30a3734ec6e493e472c5797f8')
    #sina_reptile.auth()
    #sina_reptile.setToken("e42c9ac01abbb0ccf498689f70ecce56", "dee15395b02e87eedc56e380807528a8")
    #sina_reptile.get_userprofile("1735950160")
#    sina_reptile.get_specific_weibo("3408234545293850")
##    sina_reptile.get_latest_weibo(count=50, user_id="1735950160")
##    sina_reptile.friends_ids("1404376560")
#    reptile(sina_reptile)
#    sina_reptile.manage_access()
|
class employee:
    """A simple employee record with name, salary and designation."""

    # Class-level defaults; __init__ shadows them with per-instance values.
    _employee_name = ''
    _employee_salary = 0.0
    _employee_designation = ''

    def __init__(self, employee_name, employee_salary, employee_designation):
        """Store the employee's name, salary and designation."""
        self._employee_name = employee_name
        self._employee_salary = employee_salary
        self._employee_designation = employee_designation

    def get_employee_name(self):
        """Return the employee's name."""
        return self._employee_name

    def get_employee_salary(self):
        """Return the employee's salary."""
        return self._employee_salary

    def get_employee_designation(self):
        """Return the employee's designation (job title)."""
        return self._employee_designation

    def to_string(self):
        """Return a one-line, human-readable description of this employee."""
        fields = (
            'Employee Name: ' + self._employee_name,
            'Employee Designation: ' + self._employee_designation,
            'Employee Salary: ' + str(self._employee_salary),
        )
        return ', '.join(fields)
|
11,825 | 9b8fde63a99d8626218022892048061a91ea0691 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from classificacaof1_app import facade
from routes.classificacaof1s import admin
@login_not_required
@no_csrf
def index():
    """Public listing page for classificacaof1 entities.

    Fetches every classificacaof1 through the facade command, converts each
    model to a plain dict via the public form, and renders the default
    template with the list plus the admin route path.
    """
    cmd = facade.list_classificacaof1s_cmd()
    classificacaof1s = cmd()
    public_form = facade.classificacaof1_public_form()
    classificacaof1_public_dcts = [public_form.fill_with_model(classificacaof1) for classificacaof1 in classificacaof1s]
    context = {'classificacaof1s': classificacaof1_public_dcts,'admin_path':router.to_path(admin)}
    return TemplateResponse(context)
|
11,826 | 4a37fd7798268796b57220e9f08a88f4a645bafc | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2019 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains accessory functions for other parts of the library. Parser
users generally won't need stuff from here.
"""
from __future__ import unicode_literals
from .compat import bytes, str
from .nodes import Node
from .smart_list import SmartList
__all__ = ["parse_anything"]
def parse_anything(value, context=0, skip_style_tags=False):
    """Return a :class:`.Wikicode` for *value*, allowing multiple types.

    This differs from :meth:`.Parser.parse` in that we accept more than just a
    string to be parsed. Unicode objects (strings in py3k), strings (bytes in
    py3k), integers (converted to strings), ``None``, existing :class:`.Node`
    or :class:`.Wikicode` objects, as well as an iterable of these types, are
    supported. This is used to parse input on-the-fly by various methods of
    :class:`.Wikicode` and others like :class:`.Template`, such as
    :meth:`wikicode.insert() <.Wikicode.insert>` or setting
    :meth:`template.name <.Template.name>`.

    Additional arguments are passed directly to :meth:`.Parser.parse`.
    """
    # Imported locally (not at module level) to avoid circular imports.
    from .parser import Parser
    from .wikicode import Wikicode
    if isinstance(value, Wikicode):
        # Already parsed; return it untouched.
        return value
    elif isinstance(value, Node):
        # Wrap the single node in a fresh Wikicode node list.
        return Wikicode(SmartList([value]))
    elif isinstance(value, str):
        return Parser().parse(value, context, skip_style_tags)
    elif isinstance(value, bytes):
        # Byte strings are assumed to be UTF-8 encoded wikitext.
        return Parser().parse(value.decode("utf8"), context, skip_style_tags)
    elif isinstance(value, int):
        return Parser().parse(str(value), context, skip_style_tags)
    elif value is None:
        return Wikicode(SmartList())
    elif hasattr(value, "read"):
        # File-like object: parse its full contents.
        return parse_anything(value.read(), context, skip_style_tags)
    try:
        # Fall back to treating value as an iterable of parseable items and
        # concatenate the resulting node lists.
        nodelist = SmartList()
        for item in value:
            nodelist += parse_anything(item, context, skip_style_tags).nodes
        return Wikicode(nodelist)
    except TypeError:
        error = "Needs string, Node, Wikicode, file, int, None, or iterable of these, but got {0}: {1}"
        raise ValueError(error.format(type(value).__name__, value))
|
11,827 | de4dc0d7e09c2e5c2909673d0c6eef402351bf9c | #!/usr/bin/python3
# CGI script: reads the most recently uploaded log file (name taken from
# filelist.txt), renders it as an HTML table plus a severity pie chart, and
# prints an HTML results page to stdout.
import pandas as pd
from pandas.compat import StringIO
import os
from pathlib import Path
import subprocess
import re
# NOTE(review): StringIO, Path and subprocess appear unused in this script.
import matplotlib
matplotlib.use('Agg')  # headless backend: render to files only (CGI context)
import matplotlib.pyplot as plt
# identify folder path
filepath = '/var/www/wou/tmp/'
# identify upload target filename
with open("/var/www/wou/data/filelist.txt", "r") as file:
    for last_line in file:
        pass  # iterate to the end; last_line keeps the final line
file.close()  # NOTE(review): redundant -- the with-block already closed it
# assign uploaded target filename as variable
lastfilename = last_line
# open apache client_info file to get client info
command_output = os.popen('tail -n1 /var/www/wou/data/client_info.log')
# crude OS sniff: grab "lin..."/"win..." tokens from the last log line
r2 = re.findall(r" [lLwW][iI][nN].{4,11} ", command_output.read())
# merge folder path with filename
file_to_open = filepath + lastfilename.rstrip()
# open the target file
f = open(file_to_open, 'r')
datainfo = f.readlines(3)
lines = list(f)
f.close()
df = pd.read_csv(file_to_open, names=['Time','Severity','Text'], engine='python')
# export as html
df.to_html('../output.html', justify='center')
# generate pie chart and calculate percentage based on severity column
df.Severity.value_counts().plot.pie(y='Severities', figsize=(7, 7),autopct='%1.1f%%', startangle=90)
plt.savefig('../images/chart_output.png')
# generate HTML page (CGI response: header line, then the document)
print("Content-Type: text/html\n")
print("<html>\n")
print("""\
<head>
  <meta charset="utf-8">
  <title>Data Analytic Output</title>
  <meta name="description" content="Basic Data output">
  <meta name="author" content="vasikadedomena">
  <link rel="stylesheet" href="../css/styles.css" />
</head>
""")
print("<body>\n")
# NOTE(review): r2 is a LIST (rendered like "[' Windows ... ']") and is
# interpolated unescaped into the HTML -- same for lastfilename below;
# consider html.escape() if the log/upload contents are untrusted.
print("<p>You are using a {} system</p>".format(r2))
print("\n")
print("<p>Returns of your data filename: {}</p>".format(lastfilename))
print("""\
<object data="/tmp/{}" type="text/plain" width="800" style="height: 300px"></object>
""".format(lastfilename))
# NOTE(review): the "</ifram$" below is a truncated "</iframe>" closing tag
# (runtime string left byte-identical here; fix deliberately).
print("""\
<div class="box-1">
<iframe src="https://www.vasikadedomena.site/output.html" style="border: none; width: 600px; height: 300px;" ></ifram$
</div>
<div class="image-box-1">
<img src='../images/chart_output.png'>
</div>
<div class="box-2">
<button onclick="window.location.href='https://www.vasikadedomena.site';">Back To Main Page</button>
</div>
</body>
</html>
""")
|
11,828 | 166f1dca5c8c995701224886a4e8e1cff2c9b023 | import csv
# Build a traditional -> simplified Chinese character mapping: jian.txt holds
# the simplified characters and fan.txt the traditional ones; the result is
# appended to all.txt as a Python-dict-like literal {'fan':'jian', ...}.
file = open("./jian.txt","r")
file2 = open("./fan.txt","r")
read = csv.reader(file)
read2 = csv.reader(file2)
a = ""
b= ""
# NOTE(review): these loops overwrite a/b on every row, so only the LAST row
# of each file is used -- presumably each file contains a single long line of
# characters; confirm the input format.
for line in read:
    a = line[0]
for line2 in read2:
    b = line2[0]
print(a)
print(b)
with open("./all.txt","a+",encoding="utf-8") as f:
    f.write("{")
# Only emit pairs when the two character sequences line up one-to-one.
if len(a) == len(b):
    for i in range(len(a)):
        # NOTE(review): reopening the output for every character is slow but
        # harmless; append mode preserves earlier writes.
        with open("./all.txt","a+",encoding="utf-8") as f:
            f.write("'")
            f.write(b[i])
            f.write("'")
            f.write(":")
            f.write("'")
            f.write(a[i])
            f.write("'")
            f.write(",")
with open("./all.txt","a+",encoding="utf-8") as f:
f.write("}") |
11,829 | e8720c2715321e10c040c94db7a949ed9ba84a35 | from .flowsomtool import flowsom
from .__version__ import __version__
__all__ = ['flowsom', '__version__'] |
11,830 | f21f10effdc2dfcc8c258faad991a36964ccb9d4 | import numpy as np
from tf_util.tf_logging import tf_logging
class NLIPairingTrainConfig:
    """Static training configuration for the NLI pairing task (BERT-style model)."""
    vocab_filename = "bert_voca.txt"  # vocabulary file name
    vocab_size = 30522                # matches bert-base uncased vocab size
    seq_length = 300                  # maximum input sequence length
    max_steps = 100000                # total optimizer steps
    num_gpu = 1
    save_train_payload = False        # whether to persist training batches
class HPCommon:
    '''Hyperparameters shared across models (BERT-base sized transformer).'''
    # data
    # training
    batch_size = 16 # alias = N
    lr = 2e-5 # learning rate. In paper, learning rate is adjusted to the global step.
    logdir = 'logdir' # log directory
    # model
    seq_max = 300 # Maximum number of words in a sentence. alias = T.
    # Feel free to increase this if you are ambitious.
    hidden_units = 768 # alias = C
    num_blocks = 12 # number of encoder/decoder blocks
    num_heads = 12
    dropout_rate = 0.1
    sinusoid = False # If True, use sinusoid. If false, positional embedding.
    intermediate_size = 3072
    vocab_size = 30522
    type_vocab_size = 2
    num_classes = 3  # NLI labels: entailment / neutral / contradiction (presumably — confirm)
def find_padding(input_mask):
    """Return the index of the first padded position (mask value 0) in *input_mask*."""
    padded_positions = np.where(input_mask == 0)[0]
    return padded_positions[0]
def find_seg2(segment_ids):
    """Return the index where the second segment (segment id 1) starts."""
    seg2_positions = np.where(segment_ids == 1)[0]
    return seg2_positions[0]
def train_fn_factory(sess, loss_tensor, all_losses, train_op, batch2feed_dict, batch, step_i):
    """Run one optimizer step and log the total plus per-layer losses.

    Returns (total_loss_value, 0); the second element is a placeholder.
    """
    fetches = [loss_tensor, all_losses, train_op]
    loss_val, all_losses_val, _ = sess.run(fetches, feed_dict=batch2feed_dict(batch))
    per_layer = ["{0}: {1:.2f}".format(idx, val)
                 for idx, val in enumerate(all_losses_val)]
    verbose_loss_str = " ".join(per_layer)
    tf_logging.debug("Step {0} train loss={1:.04f} {2}".format(step_i, loss_val, verbose_loss_str))
    return loss_val, 0
11,831 | 37c91cacbd4ba5752809b3811df72794bc81da93 | #!/usr/bin/env python
# -*- coding: ascii -*-
"""
Festis.telestaff: downlaods data from telestaff
"""
__author__ = 'Joe Porcelli (porcej@gmail.com)'
__copyright__ = 'Copyright (c) 2017 Joe Porcelli'
__license__ = 'New-style BSD'
__vcs_id__ = '$Id$'
__version__ = '0.1.0' #Versioning: http://www.python.org/dev/peps/pep-0386/
from festis import telestaff |
11,832 | 5bf052868fbe272947118bc72c7fd3d6d6817182 | import itertools
import math
# Read N points and print "Yes" if any three of them are collinear, else "No".
# Fixes over the original: (1) "No" was printed once per non-collinear triple
# inside the loop instead of once at the end; (2) the slope/intercept test with
# round(..., 10) is replaced by an exact integer cross-product test, which also
# handles vertical lines without a special case.
points = []
n = int(input())
for _ in range(n):
    xy = list(map(int, input().split()))
    points.append(xy)
for p, q, r in itertools.combinations(points, 3):
    # Collinear iff the cross product of (q-p) and (r-p) is zero.
    if (q[0] - p[0]) * (r[1] - p[1]) == (q[1] - p[1]) * (r[0] - p[0]):
        print('Yes')
        exit()
print('No')
###---------------------------------------------------------
# Second attempt at the same task: detect any three collinear points.
# Fixes over the original: (1) a stale slope/intercept from the previous pair
# was appended (or NameError raised) whenever a pair was vertical;
# (2) "No" was printed once per candidate line instead of once overall.
import itertools

N = int(input())
pts = []
for _ in range(N):
    x, y = map(int, input().split())
    pts.append((x, y))
for (x1, y1), (x2, y2), (x3, y3) in itertools.combinations(pts, 3):
    # Exact collinearity test (no division, no vertical-line special case).
    if (x2 - x1) * (y3 - y1) == (y2 - y1) * (x3 - x1):
        print('Yes')
        exit()
print('No')
|
11,833 | 58580d3026c1ea44bfadf30562900bcc9f99b0dd | from django.conf.urls import url
from .views import ChannelsView
from .views import ChannelsDetailView
# URL routes for the channels app: list view at the root, detail by UUIDv4,
# and detail by slug as a fallback.
urlpatterns = [
    url(r'^$', ChannelsView.as_view(), name='list'),
    # UUIDv4 detail route (hyphens optional).
    # NOTE(review): this pattern has no trailing '/$' anchor, so any longer
    # path sharing the prefix also matches — confirm this is intentional.
    url(r'^channels/(?P<uid>[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12})', ChannelsDetailView.as_view(), name='detail'),
    # Slug-based detail route; evaluated only if the UUID pattern did not match.
    url(r'^channels/(?P<slug>[-\w]+)/$', ChannelsDetailView.as_view(),
        name='slug-detail'),
]
|
11,834 | df213ebee6ea1d39d86d854d407774e9896212df |
from flask import Flask, jsonify, request
import spotifyconnect
app = Flask('SpotifyConnect')
# #API routes
# Login routes
@app.route('/login/_zeroconf', methods=['GET', 'POST'])
def login_zeroconf():
    """Dispatch Spotify Connect zeroconf requests to getInfo/addUser handlers.

    The action may arrive either as a query parameter (GET) or form field (POST).
    """
    action = request.args.get('action') or request.form.get('action')
    if not action:
        return jsonify({
            'status': 301,
            'spotifyError': 0,
            'statusString': 'ERROR-MISSING-ACTION'})
    if request.method == 'GET' and action == 'getInfo':
        return get_info()
    if request.method == 'POST' and action == 'addUser':
        return add_user()
    return jsonify({
        'status': 301,
        'spotifyError': 0,
        'statusString': 'ERROR-INVALID-ACTION'})
def get_info():
    """Answer a zeroconf getInfo request with this device's connection details.

    Reads the zeroconf variables from the active spotifyconnect session and
    returns them as JSON (status 101 / "ERROR-OK" signals success).
    """
    zeroconf_vars = spotifyconnect._session_instance.get_zeroconf_vars()
    return jsonify({
        'status': 101,
        'spotifyError': 0,
        'activeUser': zeroconf_vars.active_user,
        'brandDisplayName': spotifyconnect._session_instance.config.brand_name,
        'accountReq': zeroconf_vars.account_req,
        'deviceID': zeroconf_vars.device_id,
        'publicKey': zeroconf_vars.public_key,
        'version': '2.0.1',
        'deviceType': zeroconf_vars.device_type,
        'modelDisplayName': spotifyconnect._session_instance.config.model_name,
        # Status codes are ERROR-OK (not actually an error),
        # ERROR-MISSING-ACTION, ERROR-INVALID-ACTION, ERROR-SPOTIFY-ERROR,
        # ERROR-INVALID-ARGUMENTS, ERROR-UNKNOWN, and ERROR_LOG_FILE
        'statusString': 'ERROR-OK',
        'remoteName': zeroconf_vars.remote_name,
    })
def add_user():
    """Handle a zeroconf addUser POST: log the user in via credential blob.

    Expects form fields userName, blob and clientKey; always reports success.
    """
    form = request.form
    # TODO: Add parameter verification
    username = str(form.get('userName'))
    blob = str(form.get('blob'))
    clientKey = str(form.get('clientKey'))
    spotifyconnect._session_instance.connection.login(
        username, zeroconf=(blob, clientKey))
    return jsonify({
        'status': 101,
        'spotifyError': 0,
        'statusString': 'ERROR-OK'
    })
|
11,835 | c97f4366a10ee02cc81685bc96c3561f75f0c3ad | __all__ = ['block', 'connection', 'entity', 'facing', 'minecraft', 'nbt', 'util', 'vec3']
|
11,836 | 25302cbaca1e8214af167b4bd0bd235b493821d4 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib64/python2.6/site-packages/PyQt4/QtGui.so
# by generator 1.136
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QPainterPath(): # skipped bases: <type 'sip.simplewrapper'>
    """Auto-generated stub for PyQt4.QtGui.QPainterPath (generator 1.136).

    Signatures are unknown to the generator, so all bodies are placeholders.
    Do not hand-edit beyond documentation — regenerate from the binding instead.
    """
    # no doc
    def addEllipse(self, *args, **kwargs): # real signature unknown
        pass
    def addPath(self, *args, **kwargs): # real signature unknown
        pass
    def addPolygon(self, *args, **kwargs): # real signature unknown
        pass
    def addRect(self, *args, **kwargs): # real signature unknown
        pass
    def addRegion(self, *args, **kwargs): # real signature unknown
        pass
    def addRoundedRect(self, *args, **kwargs): # real signature unknown
        pass
    def addRoundRect(self, *args, **kwargs): # real signature unknown
        pass
    def addText(self, *args, **kwargs): # real signature unknown
        pass
    def angleAtPercent(self, *args, **kwargs): # real signature unknown
        pass
    def arcMoveTo(self, *args, **kwargs): # real signature unknown
        pass
    def arcTo(self, *args, **kwargs): # real signature unknown
        pass
    def boundingRect(self, *args, **kwargs): # real signature unknown
        pass
    def closeSubpath(self, *args, **kwargs): # real signature unknown
        pass
    def connectPath(self, *args, **kwargs): # real signature unknown
        pass
    def contains(self, *args, **kwargs): # real signature unknown
        pass
    def controlPointRect(self, *args, **kwargs): # real signature unknown
        pass
    def cubicTo(self, *args, **kwargs): # real signature unknown
        pass
    def currentPosition(self, *args, **kwargs): # real signature unknown
        pass
    def elementAt(self, *args, **kwargs): # real signature unknown
        pass
    def elementCount(self, *args, **kwargs): # real signature unknown
        pass
    def fillRule(self, *args, **kwargs): # real signature unknown
        pass
    def intersected(self, *args, **kwargs): # real signature unknown
        pass
    def intersects(self, *args, **kwargs): # real signature unknown
        pass
    def isEmpty(self, *args, **kwargs): # real signature unknown
        pass
    def length(self, *args, **kwargs): # real signature unknown
        pass
    def lineTo(self, *args, **kwargs): # real signature unknown
        pass
    def moveTo(self, *args, **kwargs): # real signature unknown
        pass
    def percentAtLength(self, *args, **kwargs): # real signature unknown
        pass
    def pointAtPercent(self, *args, **kwargs): # real signature unknown
        pass
    def quadTo(self, *args, **kwargs): # real signature unknown
        pass
    def setElementPositionAt(self, *args, **kwargs): # real signature unknown
        pass
    def setFillRule(self, *args, **kwargs): # real signature unknown
        pass
    def simplified(self, *args, **kwargs): # real signature unknown
        pass
    def slopeAtPercent(self, *args, **kwargs): # real signature unknown
        pass
    def subtracted(self, *args, **kwargs): # real signature unknown
        pass
    def subtractedInverted(self, *args, **kwargs): # real signature unknown
        pass
    def toFillPolygon(self, *args, **kwargs): # real signature unknown
        pass
    def toFillPolygons(self, *args, **kwargs): # real signature unknown
        pass
    def toReversed(self, *args, **kwargs): # real signature unknown
        pass
    def toSubpathPolygons(self, *args, **kwargs): # real signature unknown
        pass
    def united(self, *args, **kwargs): # real signature unknown
        pass
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __iadd__(self, y): # real signature unknown; restored from __doc__
        """ x.__iadd__(y) <==> x+y """
        pass
    def __iand__(self, y): # real signature unknown; restored from __doc__
        """ x.__iand__(y) <==> x&y """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __ior__(self, y): # real signature unknown; restored from __doc__
        """ x.__ior__(y) <==> x|y """
        pass
    def __isub__(self, y): # real signature unknown; restored from __doc__
        """ x.__isub__(y) <==> x-y """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass
    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass
    def __ror__(self, y): # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass
    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass
    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
    CurveToDataElement = 3
    CurveToElement = 2
    LineToElement = 1
    MoveToElement = 0
|
11,837 | b9c5b85ad7a2caccac689e6b5391c55a66aad5f5 | import numpy as np
import os
from os.path import join as pjoin
import pandas as pd
import tqdm
from dipy.io.image import load_nifti
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as stats
# Dataset roots (local machine paths) and patient directory discovery.
# NOTE(review): the *_patients paths both point at the CCNA folder (the
# original PPMI/ADNI paths are kept in trailing comments) — confirm intent.
PPMI_PATH = '/media/theo/285EDDF95EDDC02C/Users/Public/Documents/PPMI'
ADNI_PATH = '/media/theo/285EDDF95EDDC02C/Users/Public/Documents/ADNI'
PPMI_PATH_patients = '/home/theo/Documents/Harmonisation/data/CCNA/'#'/home/theo/Documents/Data/PPMI/'
ADNI_PATH_patients = '/home/theo/Documents/Harmonisation/data/CCNA/'#'/home/theo/Documents/Data/ADNI/raw/'
patients_PPMI = [x for x in os.listdir(PPMI_PATH_patients) if os.path.isdir(
    pjoin(PPMI_PATH_patients, x))]
patients_ADNI = [x for x in os.listdir(ADNI_PATH_patients) if os.path.isdir(
    pjoin(ADNI_PATH_patients, x))]
# Keep disjoint subsets: first 10 for PPMI, the rest for ADNI.
patients_PPMI = patients_PPMI[:10]
patients_ADNI = patients_ADNI[10:]
# Map PPMI patient number -> acquisition center number (CNO).
sites = pd.read_csv(pjoin(PPMI_PATH, 'Center-Subject_List.csv'))
sites = sites.astype({'PATNO': 'str'})
sites = {p: s for p, s in zip(sites['PATNO'], sites['CNO']) if p in [
    p.split('_')[0] for p in patients_PPMI]}
sites_dict = {s: i for i, s in enumerate(sorted(set(sites.values())))}
# One record per patient: paths to the FA/MD/GFA metric volumes plus a site id.
# NOTE(review): the per-patient site lookup is commented out; all PPMI patients
# get site 0 and all ADNI patients site 1.
path_dicts = [
    {'name': patient,
     'fa': pjoin(PPMI_PATH, 'metrics', patient, 'metrics', 'fa.nii.gz'),
     'md': pjoin(PPMI_PATH, 'metrics', patient, 'metrics', 'md.nii.gz'),
     'gfa': pjoin(PPMI_PATH, 'metrics', patient, 'metrics', 'gfa.nii.gz'),
     'site': 0#sites_dict[sites[patient.split('_')[0]]],
     }
    for patient in patients_PPMI]
path_dicts += [
    {'name': patient,
     'fa': pjoin(ADNI_PATH, 'metrics', patient, 'metrics', 'fa.nii.gz'),
     'md': pjoin(ADNI_PATH, 'metrics', patient, 'metrics', 'md.nii.gz'),
     'gfa': pjoin(ADNI_PATH, 'metrics', patient, 'metrics', 'gfa.nii.gz'),
     'site': 1#sites_dict[sites[patient.split('_')[0]]],
     }
    for patient in patients_ADNI]
# Rebind sites_dict as site-id -> cohort label for plotting.
sites_dict = {0: 'PPMI', 1: 'ADNI'}#{i: s for s, i in sites_dict.items()}
# Plot styling and per-site accumulators.
sns.set_style("white")
kwargs = dict(hist_kws={'alpha': .6}, kde_kws={'linewidth': 2})
plt.figure(figsize=(10, 7), dpi=80)
colors = ['r', 'g', 'b', 'k', 'y', 'purple', 'pink', 'cyan', 'orange']
colors = {s: colors[i] for i, s in sites_dict.items()}
lfa = {}       # site label -> concatenated nonzero FA voxel values
lfa_mean = {}  # site label -> list of per-patient mean FA
nb_lfa = {}    # site label -> number of patients successfully loaded
# Load each patient's FA map, drop zero (background) voxels, and accumulate
# per-site voxel values, per-patient means, and patient counts.
for path in tqdm.tqdm(path_dicts):
    try:
        fa, affine = load_nifti(path['fa'])
    except Exception as e:
        # Missing/broken file: report and skip this patient.
        print('Error', path['name'])
        continue
    fa = fa.reshape(-1)
    fa = fa[fa != 0]
    site = sites_dict[path['site']]
    if site not in lfa.keys():
        lfa[site] = fa
        lfa_mean[site] = [np.mean(fa)]
        nb_lfa[site] = 1
    else:
        lfa[site] = np.concatenate((lfa[site], fa), axis=0)
        lfa_mean[site].append(np.mean(fa))
        nb_lfa[site] += 1
    #sns.distplot(fa, color=colors[site], label=str(site), **kwargs)
print(nb_lfa)
import operator as op  # NOTE(review): unused in this excerpt
# Sort sites alphabetically so plot order is deterministic.
sorted_keys, sorted_vals = zip(*sorted(lfa.items()))
sorted_keys, sorted_vals_mean = zip(*sorted(lfa_mean.items()))
# One-way ANOVA across sites on per-patient mean FA.
print('stats :', stats.f_oneway(*lfa_mean.values()))
#sns.boxplot(data=sorted_vals, width=.18)
sns.swarmplot(data=sorted_vals_mean,
              size=6, edgecolor="black", linewidth=.9)
# category labels
plt.xticks(plt.xticks()[0], sorted_keys)
plt.legend()
plt.show()
|
11,838 | bbe01509133f9bf461cb809e398cb5a7ab5b2969 | #线性插值法
def linear(x,x1,x2,y1,y2):
if x2-x1==0:
result=y1
else:
result=y1+(x-x1)*(y2-y1)/(x2-x1)
return result |
11,839 | 8c6db741398a6c8a336b7635f94cb603f487938e | # -*- coding: utf-8 -*-
"""
@author: Gregory Krulin
Added variable HumanCount to store count of rock paper or scissor choice as
naively counting is too costly when amount of rounds increases
Added function fequency to assign probability list
"""
import numpy as np
from random import randint
def UpdateGameRecord(GameRecord,ChoiceOfComputerPlayer,ChoiceOfHumanPlayer,Outcome, HumanCount):
    """Append one round to GameRecord, tally the human's choice, and print the result."""
    human_moves, computer_moves, outcomes = GameRecord
    human_moves.append(ChoiceOfHumanPlayer)
    computer_moves.append(ChoiceOfComputerPlayer)
    outcomes.append(Outcome)
    HumanCount[ChoiceOfHumanPlayer] += 1
    outcome_list = {0: "It is a tie", 1: "Computer wins", 2: "Human wins"}
    print()
    print('-'*5 + 'Outcome' + '-'*5)
    print("%s: Computer chose %s; Human chose %s" % (outcome_list[Outcome],
                                                     ChoiceOfComputerPlayer,
                                                     ChoiceOfHumanPlayer))
    print('-'*20)
def HumanPlayer(GameRecord):
    """Prompt the human for a move; return 'rock', 'paper', 'scissors' or 'quit'.

    Accepts one-letter shortcuts; 'g'/'game' prints the running game record
    (from GameRecord) and re-prompts instead of ending the loop.
    """
    end = ['r','s','p','q','rock', 'scissors', 'paper', 'quit']
    valid = ['r','s','p','q','g','game','rock', 'scissors', 'paper', 'quit']
    word = {'r':'rock', 's':'scissors', 'p':'paper', 'q':'quit'}
    ChoiceOfHumanPlayer = ' '
    print("\nLets play.....")
    while not(ChoiceOfHumanPlayer in end):
        print("Choose (r)rock, (s)scissors, or (p)paper\n \
        or choose (g)game to see game results so far\n \
        or choose (q)quit to quit the game.")
        ChoiceOfHumanPlayer = input("Please input a valid choice: ")
        if not(ChoiceOfHumanPlayer in valid):
            print("Not valid choice.")
            print()
        if ChoiceOfHumanPlayer == 'g' or ChoiceOfHumanPlayer =='game':
            if len(GameRecord[2]) != 0:
                # Summarize wins/draws and list every round played so far.
                print('-'*5 + 'Record Of the Game' + '-'*5)
                print('Number of rounds so far: %d' %(len(GameRecord[2])))
                print('Number of draws: %d' %(GameRecord[2].count(0)))
                print('Number of computer wins: %d' %(GameRecord[2].count(1)))
                print('Number of human wins: %d' % (GameRecord[2].count(2)))
                print('Human; Computer')
                for x in range(0, len(GameRecord[2])):
                    print('%d: %s; %s' %(x + 1, GameRecord[0][x], GameRecord[1][x]))
                print('-'*25)
            else:
                print("No rounds have been played so far")
        if ChoiceOfHumanPlayer == 'q' or ChoiceOfHumanPlayer =='p'or ChoiceOfHumanPlayer == 'r' or ChoiceOfHumanPlayer == 's':
            # Expand single-letter shortcuts to the full word.
            ChoiceOfHumanPlayer = word[ChoiceOfHumanPlayer]
    return ChoiceOfHumanPlayer
def ComputerPlayer(GameRecord,HumanCount):
    """Choose the computer's move.

    Round one is uniform random; afterwards the move distribution from
    Frequency() biases the computer toward countering the human's history.
    """
    if not GameRecord[2]:
        # randint(0,8) % 3 is uniform over {0, 1, 2}.
        first_move = {0: 'paper', 1: 'rock', 2: 'scissors'}
        return first_move[randint(0, 8) % 3]
    return np.random.choice(['rock', 'paper', 'scissors'], 1, p = Frequency(HumanCount))[0]
def Frequency(HumanCount):
    """Return move probabilities [P(rock), P(paper), P(scissors)] for the computer.

    Each move is weighted by the observed frequency of the human move it beats
    (rock beats scissors, paper beats rock, scissors beats paper), so the
    computer tends to counter the human's most common choice.
    """
    total = HumanCount['rock'] + HumanCount['scissors'] + HumanCount['paper']
    return [HumanCount['scissors'] / total,
            HumanCount['rock'] / total,
            HumanCount['paper'] / total]
def Judge(ChoiceOfComputerPlayer, ChoiceOfHumanPlayer):
    """Score a round: 0 tie, 1 computer wins, 2 human wins, -1 unknown computer move."""
    if ChoiceOfComputerPlayer == ChoiceOfHumanPlayer:
        return 0
    # The human wins only by playing the exact counter to the computer's move;
    # any other (including invalid) human choice counts as a computer win.
    counters = {'rock': 'paper', 'paper': 'scissors', 'scissors': 'rock'}
    if ChoiceOfComputerPlayer in counters:
        return 2 if ChoiceOfHumanPlayer == counters[ChoiceOfComputerPlayer] else 1
    return -1
def Tester(n):
    """Simulate n rounds against a human who always plays scissors, then print stats.

    Used to check that the adaptive computer strategy converges on countering
    a fixed human move.
    """
    GameRecord = [[],[],[]]
    HumanCount = { 'rock': 0, 'paper': 0,'scissors': 0}
    ChoiceOfHumanPlayer = 'scissors'
    ChoiceOfComputerPlayer = ''
    i = 0
    while i < n:
        ChoiceOfComputerPlayer = ComputerPlayer(GameRecord, HumanCount)
        Outcome = Judge(ChoiceOfComputerPlayer, ChoiceOfHumanPlayer)
        UpdateGameRecord(GameRecord,ChoiceOfComputerPlayer,ChoiceOfHumanPlayer,Outcome,HumanCount)
        i+=1
    print('Number of rounds so far: %d' %(len(GameRecord[2])))
    print('Number of draws: %d' %(GameRecord[2].count(0)))
    print('Number of computer wins: %d' %(GameRecord[2].count(1)))
    print('Number of human wins: %d' % (GameRecord[2].count(2)))
    print(HumanCount)
    print(Frequency(HumanCount))
def PlayGame():
    """Interactive game loop: prompt the human each round until they quit."""
    GameRecord = [[],[],[]]
    HumanCount = {'rock':0, 'paper':0,'scissors':0}
    ChoiceOfHumanPlayer = ' '
    print("Welcome to Rock-Paper-Scissors!")
    while ChoiceOfHumanPlayer != 'quit':
        ChoiceOfHumanPlayer = HumanPlayer(GameRecord)
        if ChoiceOfHumanPlayer == 'quit':
            print("Goodbye")
            return
        ChoiceOfComputerPlayer = ComputerPlayer(GameRecord, HumanCount)
        Outcome = Judge(ChoiceOfComputerPlayer, ChoiceOfHumanPlayer)
        UpdateGameRecord(GameRecord,ChoiceOfComputerPlayer,ChoiceOfHumanPlayer,Outcome, HumanCount)
11,840 | 49fa5dea62da0ee4902f20ca185615f972bace57 | # Enter your code here. Read input from STDIN. Print output to STDOUT
# input defect probability (p) and failure trial (n)
nums = list(map(int, input().split()))
p = nums[0] / nums[1]
n = int(input())
# print rounded geometric distribution probability
# P(X = n) = (1-p)^(n-1) * p: first success on exactly the nth trial.
print(round(((1-p)**(n-1)) * p, 3))
|
11,841 | 63f8be70d2c6630de6b98c90ff2a2f86ddb9ad25 | from kata import two_sum as ts
def test_two_sum_success():
    """Exercise Solution.twoSum with known target pairs.

    NOTE(review): two of the three assertions are commented out, so only the
    [3,2,4]/6 case is actually verified — re-enable or remove them.
    """
    nums, target = [2,7,11,15], 9
    summer = ts.Solution()
    # assert summer.twoSum(nums, target) == [0,1]
    nums, target = [3,2,4], 6
    assert summer.twoSum(nums, target) == [1,2]
    nums, target = [3, 3], 6
    # assert summer.twoSum(nums, target) == [0, 1]
|
11,842 | a682ac928073ddfed2ff44c24c1cbebb43a0671e | def get():
i=int(input("请输入当月利润为:"))
salary = 0
if i<=10:
salary=i*0.1
print("应发放奖金为%d"% salary)
elif i>10 and i<=20:
salary=(10*0.1)+(i-10)*0.075
print("应发放奖金为%d" %salary)
elif i>20 and i<=40:
salary=(10*0.1)+(10)*0.075+(i-20)*0.05
print("应发放奖金为%d" %salary)
else:
salary=(10*0.1)+(10)*0.075+(20)*0.2+(i-40)*0.03
print("应发放奖金为%d" %salary)
get()
|
11,843 | c69343ae5ecb50d8eb7a46a39c9209e4ee625b10 | def sum_array(array):
'''
Args:
array: an array or list containing values.
Returns:
int: the sum of the array/list
Examples:
>>> sum_array([1,2,3])
6
'''
if len(array) == 0:
return 0
else:
return array[0] + sum_array(array[1:])
def fibonacci(n):
    """Return the nth term of the Fibonacci sequence (fib(0)=0, fib(1)=1).

    Improvement: the original naive double recursion took exponential time;
    this iterative version is O(n) and returns identical values, including
    returning n itself for n <= 1 (matching the old base case).

    Args:
        n: index of the desired term.

    Returns:
        int: Fibonacci sequence number in position n.

    Examples:
        >>> fibonacci(6)
        8
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def factorial(n):
    """Return n! for a non-negative integer n.

    Fixes over the original: the unreachable trailing ``return`` is removed,
    and negative input now raises ValueError instead of recursing until
    RecursionError.

    Args:
        n: non-negative integer.

    Returns:
        int: factorial of n (0! == 1).

    Raises:
        ValueError: if n is negative.

    Example:
        >>> factorial(5)
        120
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def reverse(word):
    """Return *word* with its characters in reverse order.

    Args:
        word: a string literal.

    Returns:
        string: the reversed string.

    Examples:
        >>> reverse('talk')
        'klat'
    """
    return word[::-1]
11,844 | ae7ce020d2a245a04013e5cd0ee4ade9d5500e8c | class Locker:
def __init__(self, adminno, date, location, size, lockerno):
self.__id = ''
self.__adminno = adminno
self.__date = date
self.__location = location
self.__size = size
self.__lockerno = lockerno
def get_id(self):
return self.__id
def set_id(self, id):
self.__id = id
def get_adminno(self):
return self.__adminno
def set_adminno(self, adminno):
self.__adminno = adminno
def get_date(self):
return self.__date
def set_date(self, date):
self.__date = date
def get_location(self):
return self.__location
def set_location(self, location):
self.__location = location
def get_size(self):
return self.__size
def set_size(self, size):
self.__size = size
def get_lockerno(self):
return self.__lockerno
def set_lockerno(self, lockerno):
self.__lockerno = lockerno
|
11,845 | 417058ff65bfb0a7005c82e60c74a1e191229f5b | SlabColourChosen = ""
ConstSlabColourOptions = ["grey", "red", "green"]
SlabColourCustom = ""
SlabColourCheck = False
SlabColourCustomCheck = False
SlabDepthChosen = 0
ConstSlabDepthOptions = ["38", "45"]
SlabDepthCheck = False
ConstSlabShapeOptions = ["square", "rectangle", "round"]
SlabShapeChosen = ""
SlabSizeChosen = ""
SlabShapeCheck = False
SlabSizeCheck = False
SingleSlabVolume = 0
TotalSlabVolume = 0
GreySlabPrice = 0
FinalSlabPrice = 0
GreyConcreteCost = 0
print("Please follow the instructions to make the slab that you require")
# Colour selection: one of the stock colours, or a free-text custom colour.
while True:
    print("Please choose a colour from the following:\nGrey\nRed\nGreen\nCustom")
    SlabColourChosen = input().lower()
    if SlabColourChosen in ConstSlabColourOptions:
        print(f"Slab colour set to {SlabColourChosen}")
        SlabColourCheck = True
        break
    elif SlabColourChosen == "custom":
        print("Please enter your custom colour")
        SlabColourCustom = input().lower()
        print(f"Custom slab colour set to {SlabColourCustom}")
        SlabColourCheck = True
        SlabColourCustomCheck = True
        break
    else:
        print("That is not a valid colour")
# Depth selection. The input is matched as a string against
# ConstSlabDepthOptions, then converted to int for the volume maths below.
while True:
    print("Please choose one of the following depths for your slab (All measurements are in millimeters):\n38\n45")
    SlabDepthChosen = input()
    if SlabDepthChosen in ConstSlabDepthOptions:
        if SlabDepthChosen == "38":
            SlabDepthChosen = 38
            print(f"Slab depth has been set to {SlabDepthChosen}")
            SlabDepthCheck = True
            break
        elif SlabDepthChosen == "45":
            SlabDepthChosen = 45
            print(f"Slab depth has been set to {SlabDepthChosen}")
            SlabDepthCheck = True
            break
    else:
        print("That is not a valid depth")
# Shape + size selection. Each shape offers two fixed sizes; round sizes are
# stored as a bare diameter string ("300"/"450"), square/rectangle as "WxH".
while True:
    print("Please choose a slab shape from the following options:\nSquare\nRectangle\nRound")
    SlabShapeChosen = input().lower()
    if SlabShapeChosen in ConstSlabShapeOptions:
        if SlabShapeChosen == "square":
            SlabShapeCheck = True
            print("Please choose between one of the following sizes (All measurements are in millimeters):\nPress 'A' for 600x600\nPress 'B' for 450x450")
            SlabSizeChosen = input().lower()
            if SlabSizeChosen == "a":
                SlabSizeChosen = "600x600"
                print("Slab shape set to square\nSlab size set to 600mmx600mm")
                SlabSizeCheck = True
                break
            elif SlabSizeChosen == "b":
                SlabSizeChosen = "450x450"
                print("Slab shape set to square\nSlab size set to 450mmx450mm")
                SlabSizeCheck = True
                break
            else:
                # Invalid size: fall through and re-prompt for the shape too.
                print("That is not a valid size")
        elif SlabShapeChosen == "rectangle":
            SlabShapeCheck = True
            print("Please choose between one of the following sizes (All measurements are in millimeters):\nPress 'A' for 600x700\nPress 'B' for 600x450")
            SlabSizeChosen = input().lower()
            if SlabSizeChosen == "a":
                SlabSizeChosen = "600x700"
                print("Slab shape set to rectangle\nSlab size set to 600mmx700mm")
                SlabSizeCheck = True
                break
            elif SlabSizeChosen == "b":
                SlabSizeChosen = "600x450"
                SlabSizeCheck = True
                print("Slab shape set to rectangle\nSlab size set to 600mmx450mm")
                break
            else:
                print("That is not a valid size")
        elif SlabShapeChosen == "round":
            SlabShapeCheck = True
            print("Please choose between one of the following diameters (All measurements are in millimeters):\nPress 'A' for 300\nPress 'B' for 450")
            SlabSizeChosen = input().lower()
            if SlabSizeChosen == "a":
                SlabSizeChosen = "300"
                print("Slab shape set to round\nSlab diameter set to 300mm")
                SlabSizeCheck = True
                break
            elif SlabSizeChosen == "b":
                SlabSizeChosen = "450"
                print("Slab shape set to round\nSlab diameter set to 450mm")
                SlabSizeCheck = True
                break
            else:
                print("That is not a valid size")
    else:
        print("That is not a valid shape")
# Concrete base cost (dollars per 100,000 mm^3 of grey concrete).
# NOTE(review): the bare except also swallows KeyboardInterrupt — consider
# catching ValueError instead.
while True:
    try:
        print("The cost of concrete is variable")
        print("Please enter the cost of 100000 millimeters cubed of grey concrete in dollars")
        GreyConcreteCost = float(input())
        break
    except:
        print("That is not a valid cost")
print(f"Grey concrete cost set to ${GreyConcreteCost}")
# Concrete grade selection: "basic" or "best" (best adds a 7% surcharge later).
while True:
    print("Choose one of the following grades of concrete\n'A' for Basic\n'B' for Best")
    SlabGradeChosen = input().lower()
    if SlabGradeChosen == "a":
        SlabGradeChosen = "basic"
        print(f"Concrete grade set to: Basic")
        break
    elif SlabGradeChosen == "b":
        SlabGradeChosen = "best"
        print(f"Concrete grade set to: Best")
        break
    else:
        print("That is not a valid concrete grade")
# Volume of one slab in cubic millimetres: plan area times chosen depth.
# Round sizes are keyed by diameter and use the script's pi approximation
# (3.142) with the corresponding radius. Unknown keys leave the volume at 0,
# matching the original if/elif fall-through.
_AREA_BY_SIZE = {
    "600x600": 600 * 600,
    "450x450": 450 * 450,
    "600x700": 600 * 700,
    "600x450": 600 * 450,
    "300": 3.142 * (150 ** 2),
    "450": 3.142 * (225 ** 2),
}
SingleSlabVolume = _AREA_BY_SIZE.get(SlabSizeChosen, 0) * SlabDepthChosen
# Quote is always for a batch of 20 slabs.
TotalSlabVolume = SingleSlabVolume * 20
# Pricing: grey base price = (volume / 100000) * cost-per-100000mm^3.
# Stock colours add 10%; custom colours add 15% plus a $5 flat fee;
# "best" grade adds a further 7% on top of the colour-adjusted price.
if SlabGradeChosen == "basic":
    if SlabColourChosen == "grey":
        GreySlabPrice = (TotalSlabVolume/100000)*GreyConcreteCost
        FinalSlabPrice = GreySlabPrice
    elif SlabColourChosen == "red" or SlabColourChosen == "green":
        GreySlabPrice = ((TotalSlabVolume/100000)*GreyConcreteCost)
        FinalSlabPrice = ((10/100)*GreySlabPrice) + GreySlabPrice
    elif SlabColourCustomCheck == True:
        GreySlabPrice = ((TotalSlabVolume/100000)*GreyConcreteCost)
        FinalSlabPrice = 5 + ((15/100)*GreySlabPrice) + GreySlabPrice
elif SlabGradeChosen == "best":
    if SlabColourChosen == "grey":
        GreySlabPrice = (TotalSlabVolume/100000)*GreyConcreteCost
        FinalSlabPrice = ((7/100)*GreySlabPrice) + GreySlabPrice
    elif SlabColourChosen == "red" or SlabColourChosen == "green":
        GreySlabPrice = ((TotalSlabVolume/100000)*GreyConcreteCost)
        FinalSlabPrice = ((10/100)*GreySlabPrice) + GreySlabPrice
        FinalSlabPrice = ((7/100)*FinalSlabPrice) + FinalSlabPrice
    elif SlabColourCustomCheck == True:
        GreySlabPrice = ((TotalSlabVolume/100000)*GreyConcreteCost)
        FinalSlabPrice = 5 + ((15/100)*GreySlabPrice) + GreySlabPrice
        FinalSlabPrice = ((7/100)*FinalSlabPrice) + FinalSlabPrice
# Converted to a string for display; no further arithmetic after this point.
FinalSlabPrice = str(round(FinalSlabPrice, 2))
# Final summary. Bug fix: the original f-string ended with "Slab Grade = "
# and never interpolated the chosen grade; it is now included.
if SlabColourCheck == True and SlabDepthCheck == True and SlabShapeCheck == True and SlabSizeCheck == True:
    print(f"The options that you chose are:\nSlab Colour: {SlabColourChosen.capitalize()}\nSlab Depth: {SlabDepthChosen}\nSlab Shape: {SlabShapeChosen.capitalize()}\nSlab Size: {SlabSizeChosen}\nGrey Concrete Price: {GreyConcreteCost}\nSlab Grade = {SlabGradeChosen.capitalize()}")
    print(f"The price for your selection is ${FinalSlabPrice}")
|
11,846 | 74abd1329835a3b89531082ca883c31f4e4cf641 | #!/usr/bin/env python2.7
# ROS python API
import rospy
# Laser pixel coordinates message structure
from drone_laser_alignment.msg import Pixel_coordinates
# Joy message structure
from sensor_msgs.msg import Joy
# 3D point & Stamped Pose msgs
from geometry_msgs.msg import Point, Vector3, PoseStamped, TwistStamped
from gazebo_msgs.msg import *
# import all mavros messages and services
from mavros_msgs.msg import *
from mavros_msgs.srv import *
import numpy as np
import tf
from tf.transformations import quaternion_from_euler
import time
# Flight modes class
# Flight modes are activated using ROS services
class fcuModes:
    """Thin wrappers around the MAVROS arming and set_mode ROS services.

    Each method blocks until the service is available, then fires a single
    request; failures are only printed (Python 2 syntax throughout).
    """
    def __init__(self):
        pass
    def setArm(self):
        # Arm the flight controller's motors.
        rospy.wait_for_service('mavros/cmd/arming')
        try:
            armService = rospy.ServiceProxy('mavros/cmd/arming', mavros_msgs.srv.CommandBool)
            armService(True)
        except rospy.ServiceException, e:
            print "Service arming call failed: %s"%e
    def setDisarm(self):
        # Disarm the motors.
        rospy.wait_for_service('mavros/cmd/arming')
        try:
            armService = rospy.ServiceProxy('mavros/cmd/arming', mavros_msgs.srv.CommandBool)
            armService(False)
        except rospy.ServiceException, e:
            print "Service disarming call failed: %s"%e
    def setStabilizedMode(self):
        rospy.wait_for_service('mavros/set_mode')
        try:
            flightModeService = rospy.ServiceProxy('mavros/set_mode', mavros_msgs.srv.SetMode)
            flightModeService(custom_mode='STABILIZED')
        except rospy.ServiceException, e:
            print "service set_mode call failed: %s. Stabilized Mode could not be set."%e
    def setOffboardMode(self):
        # OFFBOARD: PX4 follows externally published setpoints.
        rospy.wait_for_service('mavros/set_mode')
        try:
            flightModeService = rospy.ServiceProxy('mavros/set_mode', mavros_msgs.srv.SetMode)
            flightModeService(custom_mode='OFFBOARD')
        except rospy.ServiceException, e:
            print "service set_mode call failed: %s. Offboard Mode could not be set."%e
    def setAltitudeMode(self):
        rospy.wait_for_service('mavros/set_mode')
        try:
            flightModeService = rospy.ServiceProxy('mavros/set_mode', mavros_msgs.srv.SetMode)
            flightModeService(custom_mode='ALTCTL')
        except rospy.ServiceException, e:
            print "service set_mode call failed: %s. Altitude Mode could not be set."%e
    def setPositionMode(self):
        rospy.wait_for_service('mavros/set_mode')
        try:
            flightModeService = rospy.ServiceProxy('mavros/set_mode', mavros_msgs.srv.SetMode)
            flightModeService(custom_mode='POSCTL')
        except rospy.ServiceException, e:
            print "service set_mode call failed: %s. Position Mode could not be set."%e
    def setAutoLandMode(self):
        rospy.wait_for_service('mavros/set_mode')
        try:
            flightModeService = rospy.ServiceProxy('mavros/set_mode', mavros_msgs.srv.SetMode)
            flightModeService(custom_mode='AUTO.LAND')
        except rospy.ServiceException, e:
            print "service set_mode call failed: %s. Autoland Mode could not be set."%e
# Main class: Converts joystick commands to position setpoints
class Controller:
    # initialization method
    def __init__(self):
        """Set up setpoint messages, geofence limits, and PI controller state
        for laser-alignment offboard control."""
        # Drone state
        # self.state = State()
        # Instantiate laser pixel coordinates
        self.coordinates = Pixel_coordinates()
        # Instantiate a setpoints message
        self.sp = PositionTarget()
        # set the flag to use velocity and position setpoints, and yaw angle
        # NOTE(review): bitmask semantics come from the MAVLink POSITION_TARGET
        # ignore flags — confirm against the mavros_msgs docs before changing.
        self.sp.type_mask = int('010111000000', 2) # int('010111111000', 2)
        # LOCAL_NED
        self.sp.coordinate_frame = 1
        # Joystick button
        self.alignment_flag = 0
        # Instantiate a velocity setpoint message
        #self.vel_sp = TwistStamped()
        # We will fly at a fixed altitude for now
        # Altitude setpoint, [meters]
        self.ALT_SP = 1.5
        # update the setpoint message with the required altitude
        self.sp.position.z = self.ALT_SP
        # Instantiate a joystick message
        self.joy_msg = Joy()
        # initialize
        self.joy_msg.axes = [0.0, 0.0, 0.0]
        # Step size for position update
        self.STEP_SIZE = 2.0
        # Fence. We will assume a rectangular fence [Cage flight area]
        self.FENCE_LIMIT_X = 1.5
        self.FENCE_LIMIT_Y = 2
        # A Message for the current local position of the drone(Anchor)
        self.local_pos = Point(0.0, 0.0, 0.0)
        self.local_vel = Vector3(0.0, 0.0, 0.0)
        self.modes = fcuModes()
        # Position controllers: shared timing state and anti-windup bound
        self.current_time = time.time()
        self.last_time_z = self.current_time
        self.last_time_y = self.current_time
        self.last_time_x = self.current_time
        self.windup_guard = 20
        # Per-axis PI state: control output u_*, integral term, setpoint.
        self.u_z = 0.0
        self.ITerm_z = 0.0
        self.SetPoint_z = self.ALT_SP
        self.u_x = 0.0
        self.ITerm_x = 0.0
        self.SetPoint_x = 0
        self.u_y = 0.0
        self.ITerm_y = 0.0
        self.SetPoint_y = 0
        # Controller values (shared x/y gains; pixel error deadband)
        self.kp_val = 0.0005 #0.0005
        self.ki_val = 0.0007 #0.0007
        self.pxl_err = 4
# Keep drone inside the cage area limits
def bound(self, v, low, up):
r = v
if v > up:
r = up
if v < low:
r = low
return r
# Callbacks
def PID_z(self, current_z):
Kp_z = 0.5 #prev 0.5
Ki_z = 0.1 #prev 0.1
self.current_time = time.time()
delta_time = self.current_time - self.last_time_z
self.last_time_z = self.current_time
error_z = self.SetPoint_z - current_z
PTerm_z = Kp_z * error_z
self.ITerm_z += error_z * delta_time
if (self.ITerm_z < -self.windup_guard):
self.ITerm_z = -self.windup_guard
elif (self.ITerm_z > self.windup_guard):
self.ITerm_z = self.windup_guard
self.u_z = PTerm_z + (Ki_z * self.ITerm_z)
def PID_x(self, current_x):
Kp_x = self.kp_val
Ki_x = self.ki_val*1.3
self.current_time = time.time()
delta_time = self.current_time - self.last_time_x
self.last_time_x = self.current_time
error_x = abs(self.SetPoint_x - current_x)
PTerm_x = Kp_x * error_x
self.ITerm_x += error_x * delta_time
if (self.ITerm_x < -self.windup_guard):
self.ITerm_x = -self.windup_guard
elif (self.ITerm_x > self.windup_guard):
self.ITerm_x = self.windup_guard
self.u_x = PTerm_x + (Ki_x * self.ITerm_x)
def PID_y(self, current_y):
Kp_y = self.kp_val #0.00033
Ki_y = self.ki_val*1.3 #0.0004
self.current_time = time.time()
delta_time = self.current_time - self.last_time_y
self.last_time_y = self.current_time
error_y = abs(self.SetPoint_y - current_y)
PTerm_y = Kp_y * error_y
self.ITerm_y += error_y * delta_time
if (self.ITerm_y < -self.windup_guard):
self.ITerm_y = -self.windup_guard
elif (self.ITerm_y > self.windup_guard):
self.ITerm_y = self.windup_guard
self.u_y = PTerm_y + (Ki_y * self.ITerm_y)
## local position callback
def posCb(self, msg):
self.local_pos.x = msg.pose.position.x
self.local_pos.y = msg.pose.position.y
self.local_pos.z = msg.pose.position.z
quater = (msg.pose.orientation.x, msg.pose.orientation.y,\
msg.pose.orientation.z, msg.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quater)
self.current_yaw = euler[2]
## local velocity callback
def velCb(self, msg):
self.local_vel.x = msg.twist.linear.x
self.local_vel.y = msg.twist.linear.y
self.local_vel.z = msg.twist.linear.z
## Pixel coordinates callback
def pxl_coordCb(self, msg):
self.coordinates.xp = msg.xp
self.coordinates.yp = msg.yp
self.coordinates.blob = msg.blob
## joystick callback
def joyCb(self, msg):
self.joy_msg = msg
if msg.buttons[0] > 0 :
self.modes.setArm()
if msg.buttons[1] > 0 :
self.modes.setAutoLandMode()
if msg.buttons[2] > 0 :
self.modes.setOffboardMode()
if msg.buttons[10] > 0 :
self.modes.setDisarm()
if msg.buttons[4] > 0 :
self.alignment_flag = 1
if msg.buttons[3] > 0 :
self.alignment_flag = 0
## Drone State callback
# def stateCb(self, msg):
#self.state = msg
## Update setpoint message
def updateSp(self):
x = 1.0*self.joy_msg.axes[1]
y = 1.0*self.joy_msg.axes[0]
# Switch to velocity setpoints (Laser coordinates)
if self.alignment_flag and self.coordinates.blob:
# Set the flag to use velocity setpoints and yaw angle
self.sp.type_mask = int('010111000111', 2)
print "Velocity Controller active"
# Altitude controller based on local position
self.SetPoint_z = self.ALT_SP
self.PID_z(self.local_pos.z)
ez = abs(self.ALT_SP - self.local_pos.z)
# if ez < 0.001 :
# self.sp.velocity.z = 0
# elif ez > 0.001 :
self.sp.velocity.z = self.u_z
# x and y controller based on distance from blob center to image center (0,0)
if ez < 0.1:
self.SetPoint_x = 0
self.SetPoint_y = 0
self.PID_x(self.coordinates.xp)
self.PID_y(self.coordinates.yp)
self.u_x= np.sign(self.SetPoint_x - self.coordinates.xp)*self.u_x
self.u_y= np.sign(self.SetPoint_y - self.coordinates.yp)*self.u_y
ex = abs(self.SetPoint_x - self.coordinates.xp)
ey = abs(self.SetPoint_x - self.coordinates.yp)
if ex < self.pxl_err:
self.sp.velocity.x = 0
elif ex > self.pxl_err:
self.sp.velocity.x = self.u_x
if ey < self.pxl_err:
self.sp.velocity.y = 0
elif ey > self.pxl_err:
self.sp.velocity.y = self.u_y
#print "ex : ",self.SetPoint_x - self.coordinates.xp, " u_x : ",self.u_x
#print "ey : ",self.SetPoint_y - self.coordinates.yp, " u_y : ",self.u_y
#print "ez : ",self.ALT_SP - self.local_pos.z," u_z : ",self.u_z
#landing
# if z < 0 or z == 0:
# #print("Landing mode")
# self.SetPoint_z = 1
# self.PID_z(self.local_pos.z)
# self.sp.velocity.z = self.u_z
# print "ez : ",self.ALT_SP-self.sp.position.z," u_z : ",self.u_z
# elif (z<0) and abs(self.local_pos.z - 0)<0.01:
# self.sp.velocity.z = 0
# Switch to position setpoints (Joystick)
else:
# set the flag to use position setpoints and yaw angle
print "Manual mode (Joystick)"
self.sp.type_mask = int('010111111000', 2)
# Update
xsp = self.local_pos.x + self.STEP_SIZE*x
ysp = self.local_pos.y + self.STEP_SIZE*y
# limit
self.sp.position.x = self.bound(xsp, -1.0*self.FENCE_LIMIT_X, self.FENCE_LIMIT_X)
self.sp.position.y = self.bound(ysp, -1.0*self.FENCE_LIMIT_Y, self.FENCE_LIMIT_Y)
# Main function
def main():
    """Initialise the ROS node, wire up callbacks/publishers, and stream setpoints."""
    rospy.init_node('setpoint_node', anonymous = True)
    controller = Controller()
    # ROS loop rate, [Hz]
    loop_rate = rospy.Rate(20.0)

    # Telemetry and input subscriptions.
    rospy.Subscriber('mavros/local_position/pose', PoseStamped, controller.posCb)
    rospy.Subscriber('mavros/local_position/velocity_local', TwistStamped, controller.velCb)
    rospy.Subscriber('laser_alignment/coordinates', Pixel_coordinates, controller.pxl_coordCb)
    rospy.Subscriber('joy', Joy, controller.joyCb)

    # Setpoint publisher, plus a joystick publisher (declared but unused below).
    sp_pub = rospy.Publisher('mavros/setpoint_raw/local', PositionTarget, queue_size = 1)
    joy_pub = rospy.Publisher('/joy', Joy, queue_size=1)

    # A few setpoints must already be streaming before OFFBOARD can engage.
    for _ in range(10):
        sp_pub.publish(controller.sp)
        loop_rate.sleep()

    # Activate OFFBOARD mode.
    controller.modes.setOffboardMode()
    print("OFFBOARD mode active")

    # ROS main loop: recompute and publish the setpoint at the loop rate.
    while not rospy.is_shutdown():
        controller.updateSp()
        sp_pub.publish(controller.sp)
        loop_rate.sleep()
# Entry point: run the node until ROS shuts down; a ROS interrupt (Ctrl-C)
# is absorbed as a normal shutdown.
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
|
11,847 | 7c5406c01403c200d7ff3b6c9f98c5f3e89e03ec | name = ""
output = "string"
if not name:
output = "One for you, one for me."
else:
output = "One for " + name + " , one for me."
print(output) |
11,848 | 365728666e057160e6e487ec2beae62dee47e980 | # -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from rest_framework import serializers
from formidable.models import Formidable
from formidable.serializers import fields
from formidable.serializers.common import WithNestedSerializer
from formidable.serializers.presets import PresetModelSerializer
class FormidableSerializer(WithNestedSerializer):
    """Serializer for Formidable forms, including nested fields and presets."""

    fields = fields.FieldSerializer(many=True)
    presets = PresetModelSerializer(many=True, required=False)

    nested_objects = ['fields', 'presets']

    class Meta:
        model = Formidable
        fields = ('label', 'description', 'fields', 'id', 'presets')
        depth = 2
        extra_kwargs = {'id': {'read_only': True}}

    def validate(self, data):
        """
        Cross-validate presets against the form's fields.

        The nested serializers already guarantee that presets are correctly
        defined and their arguments well-formed; what they cannot know is
        whether a preset argument refers to a field that actually exists on
        the form, so that check happens here.
        """
        # Run the nested (fields / presets) validation first.
        data = super(FormidableSerializer, self).validate(data)
        if 'fields' in data and 'presets' in data:
            data = self.check_presets_cohesion(data)
        return data

    def check_presets_cohesion(self, data):
        """Raise if any preset argument's field_id names an undeclared field."""
        # Field validation already ran, so every field has a 'slug'; the
        # same holds for preset arguments.
        declared_slugs = {field['slug'] for field in data['fields']}
        for preset in data['presets']:
            for argument in preset['arguments']:
                field_id = argument.get('field_id')
                if field_id and field_id not in declared_slugs:
                    raise ValidationError(
                        'Preset ({}) argument is using an undefined field ({})'.format(  # noqa
                            preset['slug'], field_id
                        )
                    )
        return data
class ContextFormSerializer(serializers.ModelSerializer):
    """Read-only serializer that renders a form for a specific user role."""

    fields = fields.ContextFieldSerializer(read_only=True, many=True)

    class Meta:
        model = Formidable
        fields = ('id', 'label', 'description', 'fields')
        depth = 2

    def __init__(self, *args, **kwargs):
        super(ContextFormSerializer, self).__init__(*args, **kwargs)
        # Propagate the caller-supplied role into the nested field
        # serializer so each field is rendered according to that role.
        self.fields['fields'].set_context('role', self._context['role'])
|
11,849 | d6486d9b8d87fb982880829ba1a44aefb303278e | from Bio.Seq import Seq #Using Biopython to find the reverse complement.
f = open("rosalind_dbru.txt","r") #extracting the input from a file.
inputv = f.readlines()
s = []
for i in inputv:
s.append(i.strip())
srv = []
for i in s: #determining the revese complement.
a = Seq(i)
srv.append("%s" %a.reverse_complement())
a = []
b = []
for i in range(len(s)):
p = s[i][:-1]
q = s[i][1:]
pq = (p,q)
a.append(pq)
for i in range(len(s)): #Using set to determine adjcency list and to idenotify unique sets.
r = srv[i][:-1]
s = srv[i][1:]
rs = (r, s)
b.append(rs)
result = set(a) #Using set to identify unique sets.
result.update(b)
for (a,b) in sorted(result): #displaying the result.
print '(%s, %s)' %(a,b)
|
11,850 | 4c50a6e03937b97f47a184a3752897f6077a8032 | #Atividade 1 - 08/04 - Marcos Silva 1902671
# Atividade 1 - 08/04: multiply num1 by num2 using repeated addition,
# printing every partial sum along the way.
num1 = int(input())
num2 = int(input())
total = 0
# One addition per count; range() is empty when num2 <= 0, matching the
# original while-loop behaviour.
for _ in range(num2):
    total = total + num1
    print(total)
|
11,851 | a086065db79c5bdc4938ac961beefcb741a698da | import requests
import json
import sys
# Login payload.
# NOTE(review): credentials are hard-coded -- move them to environment
# variables or a config file before sharing/deploying this script.
userData = {
    "username": "wz634",
    "password": "root",
}
# Send JSON, accept a plain-text reply.
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# POST the credentials to the local login endpoint (response is ignored).
requests.post('http://localhost:3000/login', data = json.dumps(userData), headers = headers)
11,852 | 394b90a65ebe5a8e9df3e8bf61b163b57dfcd4b2 | # Generated by Django 2.2.5 on 2019-09-18 18:30
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``Profile.profile_picture`` URL field."""

    dependencies = [
        ('CareerFlash', '0014_auto_20190918_0310'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='profile_picture',
            # NOTE(review): URLValidator is passed as a class, not an
            # instance (URLValidator()); calling a class just constructs an
            # object, so this extra validator is effectively inert (the
            # URLField's own default validator still applies).
            field=models.URLField(blank=True, null=True, validators=[django.core.validators.URLValidator]),
        ),
    ]
|
11,853 | 4a611644722a15e30454c3048cd2b158a4a9b94c | #coding:utf8
import socket
import time
import threading
class Client:
def __init__(self,address,port,nickname):
self.address=address
self.port=port
self.client_socket=None
self.nickname=nickname
def socketConnect(self):
self.client_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server_addr=(self.address,self.port)
print 'service address{},port{}'.format(self.address,self.port)
self.client_socket.connect(server_addr)
def socketReceive(self):
while True:
data=self.client_socket.recv(512)
print data
def socketSend(self):
while True:
send_message=raw_input()
#send_message=raw_input()
self.client_socket.sendall(nickname+":"+send_message)
def start(self):
self.socketConnect()
thread_receive=threading.Thread(target=self.socketReceive)
thread_receive.start()
self.socketSend()
def closeSocket(self):
self.client_socket.close()
if __name__=='__main__':
    # Connection settings for the public chat server.
    address='www.fyc.pub'
    port=5004
    print 'Please input your nickname.......'
    nickname=raw_input()
    client=Client(address,port,nickname)
    try:
        client.start()
    except:
        # NOTE(review): a bare except also swallows KeyboardInterrupt and
        # hides real errors; at minimum this should catch a named exception
        # and log it before closing. Also, if connect never ran,
        # client_socket is None and closeSocket() itself will fail.
        client.closeSocket()
|
11,854 | b9177113405bf270f1adfeba5f46408c5d27eecc | #!/usr/bin/python
import socket
import cPickle
import os
import sys
import signal
PORT = 54321
def handle(cs, addr):
    """Serve one client: read a pickled list from the socket, reply with its sum.

    SECURITY: cPickle.loads() on untrusted socket data allows arbitrary code
    execution. This is only acceptable in a deliberately vulnerable / CTF
    service -- never in production.
    """
    print "Conn from", addr
    cs.sendall("HAI\n")
    try:
        l = cPickle.loads(cs.recv(1024))
        #cs.sendall("--- pickle.loads() --- %s\n" % l)
        s = sum(l)
        #cs.sendall("--- sum() --- %d\n" % s)
        cs.sendall("%d\n" % s)
    except Exception as e:
        # Any unpickling/summing failure yields a generic error reply.
        #cs.sendall("--- EXCEPTION --- %s\n" % e)
        cs.sendall("fail :(\n")
    cs.sendall("bye\n")
    cs.close()
# Reap forked children automatically so no zombies accumulate.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", PORT))
s.listen(100)
# Fork-per-connection accept loop: the child closes the listening socket
# and serves the client; the parent closes its copy of the client socket.
while 1:
    (cs, addr) = s.accept()
    pid = os.fork()
    if pid == 0:
        s.close()
        handle(cs, addr)
        sys.exit(0)
    cs.close()
|
11,855 | 826a0b36b408e499ef850e9eccd117f696037f10 | from rest_framework import views
from rest_framework.routers import DefaultRouter
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
class SecuredDefaultRouter(DefaultRouter):
    """
    Extend the `DefaultRouter` so that the API root view
    is only visible for the admin users
    """

    def get_api_root_view(self):
        """
        Return a view to use as the API root.
        """
        list_route_name = self.routes[0].name
        root_url_names = {
            prefix: list_route_name.format(basename=basename)
            for prefix, viewset, basename in self.registry
        }

        class APIRoot(views.APIView):
            _ignore_model_permissions = True
            # only accept admin users
            permission_classes = (IsAdminUser,)

            def get(self, request, format=None):
                return Response({
                    key: reverse(url_name, request=request, format=format)
                    for key, url_name in root_url_names.items()
                })

        return APIRoot.as_view()
|
11,856 | afcebef571f357e317951a469bc820b77218ea57 | #!/usr/bin/env python
import numpy as np
import math
import sys
# Read a Quantum-ESPRESSO-style coordinate file (argv[2]) and displace the
# oxygen atoms in the z ~ 1/4 and z ~ 3/4 layers by +-delta, where delta
# corresponds to a tilt of `angle` degrees (argv[1]) over a quarter cell.
# The modified file is printed to stdout.
filein=sys.argv[2];
infile_fs=open(filein,"r");
# NOTE(review): this first read is dead code -- `inlines` and `length` are
# never used; the file is simply reopened below.
inlines=infile_fs.readlines();
length=len(inlines);
infile_fs.close();
infile_fs=open(filein,"r");
# Lattice parameter used to map coordinates onto quarter-cell indices.
cell_p=8.386494280;
angle=float(sys.argv[1]);
# In-plane displacement producing the requested tilt over a quarter cell.
delta=math.tan(angle/180.0*math.pi)*cell_p/4;
inline=infile_fs.readline();
while(inline):
    if inline.find("ATOMIC_POSITIONS")!=-1:
        print inline.replace('\n','');
        inline=infile_fs.readline();
        # Every following line is a coordinate record: "<symbol> <x> <y> <z>".
        while inline:
            stream=inline.split();
            # Oxygen in the z ~ 1/4 layer: shift x or y by +-delta depending
            # on its (n, k) quarter-cell sublattice position.
            # NOTE(review): an 'O' in this layer whose (n, k) matches none of
            # the cases below is silently dropped from the output -- TODO
            # confirm those positions cannot occur in the input.
            if stream[0]=='O' and round(float(stream[3])/cell_p*4)==1:
                n=round(float(stream[1])/cell_p*4);
                k=round(float(stream[2])/cell_p*4);
                if n==1 and k==0:
                    print 'O'+"\t"+str(float(stream[1])-delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==3 and k==0:
                    print 'O'+"\t"+str(float(stream[1])+delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==0 and k==1:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])+delta)+"\t"+stream[3];
                elif n==2 and k==1:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])-delta)+"\t"+stream[3];
                elif n==1 and k==2:
                    print 'O'+"\t"+str(float(stream[1])+delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==3 and k==2:
                    print 'O'+"\t"+str(float(stream[1])-delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==0 and k==3:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])-delta)+"\t"+stream[3];
                elif n==2 and k==3:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])+delta)+"\t"+stream[3];
            # Oxygen in the z ~ 3/4 layer: same pattern with opposite signs.
            elif stream[0]=='O' and round(float(stream[3])/cell_p*4)==3:
                n=round(float(stream[1])/cell_p*4);
                k=round(float(stream[2])/cell_p*4);
                if n==1 and k==0:
                    print 'O'+"\t"+str(float(stream[1])+delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==3 and k==0:
                    print 'O'+"\t"+str(float(stream[1])-delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==0 and k==1:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])-delta)+"\t"+stream[3];
                elif n==2 and k==1:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])+delta)+"\t"+stream[3];
                elif n==1 and k==2:
                    print 'O'+"\t"+str(float(stream[1])-delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==3 and k==2:
                    print 'O'+"\t"+str(float(stream[1])+delta)+"\t"+stream[2]+"\t"+stream[3];
                elif n==0 and k==3:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])+delta)+"\t"+stream[3];
                elif n==2 and k==3:
                    print 'O'+"\t"+stream[1]+"\t"+str(float(stream[2])-delta)+"\t"+stream[3];
            else:
                # Non-oxygen (or other-layer) lines pass through unchanged.
                print inline.replace('\n','');
            inline=infile_fs.readline();
    else:
        # Header lines before ATOMIC_POSITIONS pass through unchanged.
        print inline.replace('\n','');
        inline=infile_fs.readline();
|
11,857 | d1c9f5375234c806f64df505a6b5af4c5914fe1d | #sending a mail
import smtplib
# Sender/recipient and message pieces.
# NOTE(review): `pwd` is a placeholder -- a real Gmail app password is
# required for login to succeed; never commit real credentials.
FROM="bogadipayal573@gmail.com"
TO="xxx@gmail.com"
SUBJECT="mail test"
TEXT="pyhton"
pwd="xxxxxxxxxxxxx"
# Minimal RFC-822 style payload: header line, blank line, body.
message="SUBJECT:%s\n\n%s"%(SUBJECT,TEXT)
print(message)
# Gmail submission port with STARTTLS upgrade before login.
server=smtplib.SMTP("smtp.gmail.com",587)
server.ehlo()
server.starttls()
server.login(FROM,pwd)
server.sendmail(FROM,TO,message)
# NOTE(review): server.quit() would send a proper SMTP QUIT; close() just
# drops the connection.
server.close()
print("successfully sent mail")
11,858 | 7640460ccfd68ec5eb290aaac5871102d6317ad5 | from pinkfrog.targetgroup.group_creator import TargetGroup |
11,859 | 5b897e5d26238a89ef063bed30d79746b8bc2cc7 | #mayor y nemor
campo1 = raw_input('primer numero: ')
campo2 = raw_input('segundo numero: ')
salir = str(raw_input('y'))
if campo1 < campo2:
print ("el menor es" ,campo1)
elif campo1 > campo2:
print ('el mayor es' ,campo1)
elif campo1 == campo2:
print 'ambos son iguales'
print 'desea salir?'
elif salir == 'y':
exit()
|
11,860 | d5bf92200957b2ce27b8b846295a9e98a14b3e8a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
#models.Model to convert a class to a Model
class Article(models.Model):
    """A simple article with a like counter."""
    id = models.AutoField(primary_key=True)
    # NOTE(review): max_length is ignored on TextField (it is only enforced
    # by CharField); CharField was probably intended here.
    title = models.TextField(max_length=254)
    body = models.TextField()
    likes = models.IntegerField()
class StaffInfo(models.Model):
    """Hospital staff record (credentials plus role/qualification data)."""
    id = models.AutoField(primary_key=True)
    uid = models.TextField(max_length=25)
    username = models.TextField(max_length=254)
    # SECURITY NOTE(review): the password is stored in plain text; it should
    # be hashed (e.g. via Django's auth machinery) before production use.
    # Also, max_length is ignored on TextField -- CharField was likely meant
    # throughout this model.
    password = models.TextField(max_length=25)
    role = models.TextField(max_length=50)
    certification = models.TextField(max_length=25)
    specialization = models.TextField(max_length=50)
    hward = models.TextField(max_length=25)
class PatientInfo(models.Model):
    """Patient record: identity, visit purpose, assigned doctor and ward."""
    id = models.AutoField(primary_key=True)
    patientName = models.TextField(max_length=254)
    # SECURITY NOTE(review): plain-text password storage; hash before use.
    patientPassword = models.TextField(max_length=25)
    purpose = models.TextField(max_length=500)
    doctorName = models.TextField(max_length=254)
    patientHward = models.TextField(max_length=25)
|
11,861 | 37525033a33c665afc27eccfdf2867ce34cd00d2 | from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
# Poll option labels, overridable via environment variables
# (EXP_* = current Docker experience, WANT_* = desired Docker usage).
exp_docker_low = os.getenv('EXP_OPTION_A', "exp_docker_low")
exp_docker_medium = os.getenv('EXP_OPTION_B', "exp_docker_medium")
exp_docker_high = os.getenv('EXP_OPTION_C', "exp_docker_high")
want_docker_low = os.getenv('WANT_OPTION_A', "want_docker_low")
want_docker_medium = os.getenv('WANT_OPTION_B', "want_docker_medium")
want_docker_high = os.getenv('WANT_OPTION_C', "want_docker_high")
# Shown in the page footer so users can see which container served them.
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
    """Return the per-app-context Redis client, creating it on first use."""
    client = getattr(g, 'redis', None)
    if client is None:
        client = Redis(host="redis", db=0, socket_timeout=5)
        g.redis = client
    return client
@app.route("/", methods=['POST','GET'])
def hello():
    """Render the voting page; on POST, push the vote onto the Redis queue.

    A random 64-bit hex cookie (voter_id) identifies returning voters.
    """
    voter_id = request.cookies.get('voter_id')
    if not voter_id:
        # Bug fix: hex(...)[2:-1] assumed a Python 2 long's trailing 'L'
        # and silently dropped the last hex digit on Python 3; format()
        # yields just the hex digits on both versions.
        voter_id = format(random.getrandbits(64), 'x')

    exp_vote = None
    want_vote = None

    if request.method == 'POST':
        redis = get_redis()
        exp_vote = request.form['exp_vote']
        want_vote = request.form['want_vote']
        data = json.dumps({'voter_id': voter_id, 'exp_vote': exp_vote, 'want_vote': want_vote})
        redis.rpush('votes', data)

    resp = make_response(render_template(
        'index.html',
        exp_docker_low=exp_docker_low,
        exp_docker_medium=exp_docker_medium,
        exp_docker_high=exp_docker_high,
        want_docker_low=want_docker_low,
        want_docker_medium=want_docker_medium,
        want_docker_high=want_docker_high,
        hostname=hostname,
        exp_vote=exp_vote,
        want_vote=want_vote,
    ))
    resp.set_cookie('voter_id', voter_id)
    return resp
# Dev-server entry point (port 80, threaded, debug enabled).
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
|
11,862 | 80778df11dc069024bda791556d4003b2918ca47 | import re
def main():
'''Program to check weather the enter text is a valid email id or not'''
string = input('Enter the text to be checked for email id: ')
if re.findall(r'[a-z0-9]+(\.[a-z0-9]+)?@[a-z]+(\.[a-z]+)+',string) and not re.findall(r'\.\.',string):
print('The entered string is a valid email id')
else:
print('The entered string is not a valid email id')
if __name__ == '__main__':
main()
|
11,863 | e824107edc1bb1536e61676dd9009d41c14ef2a0 | # 494. 双队列实现栈
# 中文English
# 利用两个队列来实现一个栈的功能
#
# 例1:
# 输入:
# push(1)
# pop()
# push(2)
# isEmpty() // return false
# top() // return 2
# pop()
# isEmpty() // return true
# 例2:
#
# 输入:
# isEmpty()
from collections import deque
class Stack:
    """A LIFO stack implemented on top of two FIFO queues (LintCode 494)."""

    def __init__(self):
        # `active` holds the stack contents (oldest first);
        # `backup` is scratch space used while rotating elements.
        self.active = deque()
        self.backup = deque()

    def push(self, x):
        """Push integer *x* onto the stack."""
        self.active.append(x)

    def pop(self):
        """Remove the most recently pushed element (returns nothing)."""
        # Drain all but the newest element into the scratch queue, drop
        # the newest, then swap the queues back.
        for _ in range(len(self.active) - 1):
            self.backup.append(self.active.popleft())
        self.active.popleft()
        self.active, self.backup = self.backup, self.active

    def top(self):
        """Return the most recently pushed element without removing it."""
        for _ in range(len(self.active) - 1):
            self.backup.append(self.active.popleft())
        newest = self.active.popleft()
        # Keep the element: re-append it so the stack is unchanged.
        self.backup.append(newest)
        self.active, self.backup = self.backup, self.active
        return newest

    def isEmpty(self):
        """Return True if the stack holds no elements."""
        return len(self.active) == 0
# Ad-hoc smoke test (return values are computed but not printed).
s = Stack()
s.push(1)
s.push(2)
s.pop()
s.push(3)
s.isEmpty() # False
s.top() # 3 (note: the original inline comment claimed 2)
s.pop()
s.isEmpty() # False -- 1 is still on the stack (original comment said True)
# Second example, mirroring the problem statement.
s = Stack()
s.push(1)
s.pop()
s.push(2)
s.isEmpty() # False
s.top() # 2
s.pop()
s.isEmpty() # True
11,864 | 7e6ae50604f752609b4ef64df51ab7638bb03804 | '''
Maximum sum of contiguous sub-array using DnC
'''
def helper(nums, l, m, h):
    """Best sum of a subarray of nums[l..h] that touches the split at m.

    Returns the max over: subarrays ending at index m (best suffix of
    nums[l..m]), subarrays starting at m+1 (best prefix of nums[m+1..h]),
    and subarrays crossing the m / m+1 boundary.
    """
    tot = 0
    leftsum = float('-inf')
    # Best suffix sum of nums[l..m], scanning right-to-left from m.
    for i in range(m, l - 1, -1):
        tot += nums[i]
        leftsum = max(leftsum, tot)
    tot = 0
    rightsum = float('-inf')
    # Best prefix sum of nums[m+1..h].
    for i in range(m + 1, h + 1):
        tot += nums[i]
        rightsum = max(rightsum, tot)
    return max(leftsum, rightsum, leftsum + rightsum)

def maxSubArray(nums, l, h):
    """Return the maximum contiguous-subarray sum of nums[l..h] (divide & conquer)."""
    if l == h:
        # Bug fix: the base case must return the element at index l, not
        # always nums[0] (the final answer happened to survive because the
        # helper covers boundary segments, but the subproblem value was wrong).
        return nums[l]
    m = (l + h) // 2
    lsum = maxSubArray(nums, l, m)
    rsum = maxSubArray(nums, m + 1, h)
    return max(lsum, rsum, helper(nums, l, m, h))
# Read test cases forever: a length line, then the space-separated numbers.
# NOTE(review): terminates only via an exception (EOFError/ValueError)
# when input runs out.
while True:
    n=int(input())
    nums=list(map(int,input().split()))
    print(maxSubArray(nums,0,n-1))
|
11,865 | 53cadab79b8c53c58f113cc6ab7f632e10f036de | import re
# Unused column header kept for reference (the original defined but never
# used it).
delimiter = "id is_deleted creator_id input_params log execution_finished_at"
count = 0
# Split every "Query:" occurrence out of the log into queries/qN.txt.
# Fix: use `with` so neither the log file nor the per-query output files
# leak open handles.
with open("allLogs.txt") as log_file:
    for line in log_file:
        parts = line.split("Query:")
        # parts[0] is the text before the first "Query:" -- skip it.
        for query_text in parts[1:]:
            with open('queries/q' + str(count) + '.txt', 'w') as out:
                out.write(query_text)
            count += 1
|
11,866 | fdc0fde5bc40dfba16cbc29e2c8b620b6c7e19fc | # Generated by Django 3.2 on 2021-05-02 04:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0005_auto_20210501_1348'),
]
operations = [
migrations.AlterField(
model_name='home',
name='url',
field=models.URLField(default='https://adoring-hawking-6d5afd.netlify.app/', max_length=210),
),
]
|
11,867 | e894d4cfe2804956e7c585513aad5af43fa0d00f | import datetime
import flask
import rds.mailChecker
import rds.load_datapage
from flask import Flask, g, request
from dateutil import relativedelta
import sqlite3
import flask_excel
from flask.json import jsonify
from flask_cors import CORS
# Flask app with Excel/CSV export support and permissive CORS.
app = Flask(__name__)
flask_excel.init_excel(app)
CORS(app)
@app.route('/download', methods=['GET'])
def download():
    """Export payments between ?from= and ?to= (ddmmYYYY) as a CSV attachment."""
    from_date = datetime.datetime.strptime(request.args.get('from'), '%d%m%Y')
    to_date = datetime.datetime.strptime(request.args.get('to'), '%d%m%Y')
    conn = create_connection("dbfiles/db_file.db")
    curs = conn.cursor()
    curs.execute("SELECT * FROM main.payments")#3 город,
    # Keep only rows whose timestamp (column 1, format " dd.mm.YYYY HH:MM:SS")
    # falls strictly inside the requested window.
    rows = [i for i in curs.fetchall() #if i[3] != "Санкт-Петербург"
            if from_date < datetime.datetime.strptime(i[1], " %d.%m.%Y %H:%M:%S") < to_date]
    print(str(from_date) + str(to_date))
    # NOTE(review): leftover debug code -- the cursor is already exhausted,
    # so this second fetchall() yields nothing, and the argument is a
    # generator expression, so print() shows its repr rather than any rows.
    # The connection is also never closed.
    print((str(from_date) + "<" + str(datetime.datetime.strptime(i[1], " %d.%m.%Y %H:%M:%S")) + "<" + str(to_date))
          for i in curs.fetchall() # if i[3] != "Санкт-Петербург"
          if from_date < datetime.datetime.strptime(i[1], " %d.%m.%Y %H:%M:%S") < to_date)
    output = flask_excel.make_response_from_array(rows, "csv")
    output.headers["Content-Disposition"] = "attachment; filename=export.csv"
    output.headers["Content-type"] = "text/csv"
    return output
def create_connection(db_file):
    """Open a connection to the SQLite database at *db_file*.

    :param db_file: database file path
    :return: a sqlite3 Connection, or None when opening fails
    """
    try:
        return sqlite3.connect(db_file)
    except sqlite3.Error as exc:
        # Report and fall through: callers receive None on failure.
        print(exc)
        return None
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on *conn*, printing any SQLite error.

    :param conn: open sqlite3 Connection
    :param create_table_sql: a CREATE TABLE statement
    """
    try:
        conn.cursor().execute(create_table_sql)
    except sqlite3.Error as exc:
        # Errors (e.g. malformed SQL) are reported, not raised.
        print(exc)
def setting_up_db():
    """Create the SQLite connection and ensure both tables exist.

    :return: tuple ("ok" or an error message, Connection or None)
    """
    result = "ok"
    sql_create_payments_table = """ CREATE TABLE IF NOT EXISTS payments (
                                        id integer PRIMARY KEY,
                                        time_date text NOT NULL,
                                        email text,
                                        city text,
                                        amount integer
                                    );"""
    sql_create_daily_payments_table = """ CREATE TABLE IF NOT EXISTS dailypayments (
                                        id integer PRIMARY KEY,
                                        time_date text NOT NULL,
                                        times_that_day integer,
                                        amount integer
                                    );"""
    conn = None
    # NOTE(review): create_connection() returns None on failure instead of
    # raising, so this try/except can never fire; the None branch below is
    # what actually handles connection failure.
    try:
        conn = create_connection("dbfiles/db_file.db")
    except:
        result = "error! cannot create db connection"
        print(result)
    if conn is not None:
        create_table(conn, sql_create_payments_table)
        create_table(conn, sql_create_daily_payments_table)
    else:
        result = "Error! cannot create tables"
        print(result)
    return result, conn
@app.route("/react")
def reacted():
    """Serve the React bundle's index page."""
    return flask.render_template("index.html", token="flask + react")
@app.route("/")
def hello():
    """Root endpoint: return a plain greeting string."""
    return "zdravstvuyte"
@app.route("/checkMail")
def mail():
    """Trigger a mailbox poll that ingests new payment mails into the DB."""
    result, conn = setting_up_db()
    rds.mailChecker.regular_check(conn)
    # Returns the DB-setup status string ("ok" or an error message).
    return result
@app.route("/donations")
def showDonations():
    """Render the donations dashboard page."""
    return flask.render_template("index.html")
@app.route("/api/getList")
def getListOfDonations():
    """Return donation items plus per-day aggregates as JSON."""
    result, conn = setting_up_db()
    curs = conn.cursor()
    # load_page returns a pair: (items, donations_per_day).
    temp = rds.load_datapage.load_page(curs)
    return jsonify({"items": temp[0],
                    "donations_per_day": temp[1]})
@app.route("/daysLeft")
def days():
    """Report days/weeks/months remaining until 2020-09-24, alongside the
    total span since 2018-12-24, as an HTML snippet.

    NOTE(review): the literal "- 32" offsets look like an ad-hoc correction
    (roughly one month); their exact meaning cannot be derived from this
    file -- confirm with the author before touching them.
    """
    date = datetime.date(2020, 9, 24)
    diff = relativedelta.relativedelta(datetime.datetime.strptime(str('2020-09-24'), '%Y-%m-%d'),
                                       datetime.date.today())
    full_sentence = relativedelta.relativedelta(datetime.datetime.strptime(str('2020-09-24'), '%Y-%m-%d'),
                                                datetime.datetime.strptime(str('2018-12-24'), '%Y-%m-%d'))
    return "days: " + str(abs((date.today() - date).days)) + "(" + str(abs((date.today() - date).days) - 32) + ")" + " \\ " + str(
        abs((datetime.date(2018, 12, 24) - date).days)) + "</br>" \
        + "weeks: " + str(abs(date.today() - date).days // 7) + "(" + str((abs((date.today() - date).days) - 32) // 7) + ")" + " \\ " + str(
        abs((datetime.date(2018, 12, 24) - date).days) // 7) + "</br>" \
        + "months: " + str(diff.years * 12 + diff.months + round(diff.days / 30, 1)) + "(" + str(diff.years * 12 + diff.months + round((diff.days - 32) / 30, 1)) + ")" + " \\ " + str(
        full_sentence.years * 12 + full_sentence.months + round(full_sentence.days / 30, 1))
# Dev-server entry point (init_excel is re-run here; a harmless duplicate
# of the module-level call).
if __name__ == "__main__":
    flask_excel.init_excel(app)
    app.run(debug=True, host='0.0.0.0', port=5000)
|
# Iterative Fibonacci: print F(n) where F(1) = F(2) = 1.
A1 = 1
A2 = 1
# Bug fix: input() returns a string on Python 3 (and eval()s on Python 2);
# convert explicitly so the comparison below is numeric and safe.
n = int(input(""))
# Bug fix: seed A_sum so n <= 2 prints F(n) = 1 instead of raising
# NameError when the loop body never runs.
A_sum = A2
i = 2
while i < n:
    A_sum = A2 + A1
    A1 = A2
    A2 = A_sum
    i += 1
print (A_sum)
11,869 | 737a140fd620cde47d7894794a60a0434462a6e7 | from microbit import *
# Event-name constants returned by ButtonHandler.get_button_press().
button_long_a_pressed = "button_long_a"
button_long_b_pressed = "button_long_b"
button_a_pressed = "button_a"
button_b_pressed = "button_b"
button_together_pressed = "button_together"
button_long_together_pressed = "button_long_together"
class ButtonHandler(object):
    """Debounce micro:bit buttons and classify a press as short or long,
    single (A or B) or simultaneous (A+B)."""

    # Window during which a second button may join to count as "together" [ms].
    both_button_push_grace_ms = 50
    # Hold duration that upgrades a press to a "long" press [ms].
    long_push_ms = 400
    # True once both buttons have been released; prevents one physical hold
    # from generating repeated events.
    button_reset = True

    def get_button_press(self):
        """Return an event-name string for a new press, or None while idle
        or while a previously reported press is still held."""
        if (button_a.is_pressed() or button_b.is_pressed()):
            if self.button_reset:
                self.button_reset = False
                print("Got a button push at %s" % running_time())
                # delay to catch up if both are being pushed
                sleep(self.both_button_push_grace_ms)
                return self.identify_button_press()
        else:
            self.button_reset = True
            return None

    def identify_button_press(self):
        """Classify the current press: A / B / both, short or long."""
        press = None
        if (button_a.is_pressed()
                and button_b.is_pressed()):
            if self.is_long([button_a.is_pressed, button_b.is_pressed]):
                press = button_long_together_pressed
            else:
                press = button_together_pressed
        elif button_a.is_pressed():
            if self.is_long([button_a.is_pressed]):
                press = button_long_a_pressed
            else:
                press = button_a_pressed
        elif button_b.is_pressed():
            if self.is_long([button_b.is_pressed]):
                press = button_long_b_pressed
            else:
                press = button_b_pressed
        return press

    def is_long(self, conditions):
        """Busy-wait up to long_push_ms; the press is "long" iff every
        condition callable stays true for the whole window.

        Returns None (falsy, i.e. treated as short) when called with an
        empty condition list.
        """
        if len(conditions) > 0:
            # NOTE: shadows the stdlib name `time` locally with a timestamp.
            time = running_time()
            print('Time = %s' % time)
            while running_time() - time < self.long_push_ms:
                print('Time = %s, Conditions = %s' %
                      (running_time(),
                       [condition() for condition in conditions]))
                if not all(cond() for cond in conditions):
                    print("short")
                    return False
            print("long")
            return True
# Poll the buttons and drive the two motor channels through the GPIO pins:
# button A switches both motors on, button B switches both off.
buttonActionHandler=ButtonHandler()
while True:
    press = buttonActionHandler.get_button_press()
    if press == button_a_pressed:
        print("Motors on")
        pin0.write_digital(0)
        pin16.write_digital(1)
        pin8.write_digital(0)
        pin12.write_digital(1)
    if press == button_b_pressed:
        print("Motors off")
        pin0.write_digital(0)
        pin16.write_digital(0)
        pin8.write_digital(0)
        pin12.write_digital(0)
11,870 | dcd9971f0950373c77d8232a81304a753aaddd41 | # coding=utf-8
from concurrent import futures
import urllib2
import datetime
import time
import requests
from lxml import html
import urllib,urllib2,httplib,cookielib,os,sys
from bs4 import BeautifulSoup
import lxml.html
import socket, traceback
import random
import linecache
import base64
from pyDes import *
from xml.dom.minidom import parse, parseString
from socket import *
import re
import json
def get_page(opener,url,data={}):
    """Fetch *url* through *opener* with a random User-Agent; when *data*
    is non-empty the request becomes a POST of its urlencoded form.

    NOTE(review): the mutable default `data={}` is shared across calls --
    harmless here because it is never mutated, but fragile.
    """
    # Pick a random line from the local user-agent list to vary the
    # request fingerprint.
    a = random.randrange(1, 9173)
    ua = linecache.getline(r'ua_list.txt', a)
    headers = {'User-Agent': ua}
    postdata=urllib.urlencode(data)
    if postdata:
        request=urllib2.Request(url,postdata,headers=headers)
    else:
        request=urllib2.Request(url,headers=headers)
    f = opener.open(request)
    content = f.read()
    #log(content,url);
    return content
def amazonDe(arg,path):
    """Run the local helper script `./amazon.sh <arg> <path>` and return its
    stdout with the trailing newline stripped.

    SECURITY: the command line is built by string interpolation and executed
    through a shell (os.popen); only ever call this with trusted,
    hard-coded arguments -- never with user-supplied input.
    """
    t = 'amazon.sh'
    cmd = "./%s %s %s" % (t,arg,path)
    session = os.popen(cmd).read()
    session = session.strip('\n')
    return session
# Product page(s) to poll for stock availability.
URLS = ['http://m.rossmannversand.de/produkt/364396/aptamil-mit-prebiotics-ha-3-folgenahrung-mit-hydrolysiertem-eiweiss.aspx']
#URLS = ['http://m.rossmannversand.de/produkt/359207/aptamil-pronutra-folgemilch-2.aspx']
def load_url(url, timeout):
    """Fetch the product page with saved cookies, archive the HTML locally,
    and inspect the add-to-cart anchor: trigger the mail alert (via
    amazon.sh) when the item appears to be in stock.

    NOTE(review): the `timeout` parameter is accepted but never used.
    """
    OFFERID_SELECTOR = '//div[@class="col-xs-7 col-sm-offset-1 col-sm-6 col-lg-offset-4 col-lg-3"]/a/@disabled'
    HREF_SELECTOR = '//div[@class="col-xs-7 col-sm-offset-1 col-sm-6 col-lg-offset-4 col-lg-3"]/a/@*'
    filename = 'cookiejp.txt'
    cookiejar=cookielib.MozillaCookieJar(filename)
    file = open(filename)
    cookielines = file.readlines(100)
    # Reuse the saved cookie jar when the file is non-empty.
    if cookielines:
        cookiejar.load('cookiejp.txt', ignore_discard=True, ignore_expires=True)
    else:
        cookiejar=cookielib.MozillaCookieJar(filename)
    cj=urllib2.HTTPCookieProcessor(cookiejar)
    opener=urllib2.build_opener(cj)
    price = get_page(opener,url)
    print price
    tree = html.fromstring(price)
    # Archive the fetched page under ./mobildata for later inspection.
    asinpath = './mobildata/Rossman_Aptamil_HA3'
    datapath = asinpath + '/data'
    if os.path.isdir(asinpath):
        pass
    else:
        os.mkdir(asinpath)
    if os.path.exists(datapath):
        os.remove(datapath)
        os.mknod(datapath)
    else:
        os.mknod(datapath)
    local2 = open(datapath, 'w')
    local2.write(price)
    local2.close()
    # All attributes of the add-to-cart anchor: a @disabled attribute means
    # out of stock; the modal href means the product can be added.
    inStock = tree.xpath(HREF_SELECTOR)
    print inStock
    if "disabled" in inStock:
        print datetime.datetime.now(),"NO STOCK"
    else:
        if "#ctl00_Main_mbAddToCart_modal" in inStock:
            amazonDe("sendmailRosman","HA3")
            print("OK")
        else:
            print datetime.datetime.now(),"NO STOCK"
# Poll every URL roughly every 10 seconds using a small thread pool.
# NOTE(review): the `60` passed to executor.submit becomes load_url's
# `timeout` argument, which that function never uses.
while True:
    with futures.ThreadPoolExecutor(max_workers=10) as executor:
        time.sleep(10)
        future_to_url = dict((executor.submit(load_url, url, 60), url)
                             for url in URLS)
|
11,871 | 511e3a7f9480acb2dbd2d35ef97d5bc03705bf1e | # -*- coding: utf-8 -*-
"""
RNA Library Item
================
"""
import numpy
from typing import List, Dict
from neoRNA.library.shape_mapper.shape_profile_item import ShapeProfileItem
from neoRNA.library.shape_mapper.shape_reactivity_item import ShapeReactivityItem
from neoRNA.sequence.sequence import Sequence
from neoRNA.sequence.barcode import Barcode
class LibraryItem(object):
    """
    RNA library Item Object.
    Each item should include the following elements:
    - RNA ID: The unique ID (usually a number) throughout the library.
    - RNA Barcode: The unique "barcode" sequence that represents this RNA
    - RNA sequence: The actual target sequence of this RNA
    - Notes: A string to describe this RNA, supplementary info.
    """

    # The default value for "invalid value"
    # Sentinel used wherever a per-nt rate cannot be computed; chosen far
    # outside the plausible reactivity range so it is easy to filter out.
    INVALID_VALUE = -999.0

    # ----------------------------------
    # region Init
    def __init__(self, rna_id: str, rna_barcode_string: str, rna_sequence_string: str,
                 notes: str = None):
        r"""
        Init
        Parameters
        ----------
        rna_id: str
            RNA ID
        rna_barcode_string: str
            The RNA Library Item barcode string
        rna_sequence_string: str
            The RNA Library Item sequence string
        notes: str
            Notes content
        """
        self.rna_id: str = rna_id
        self.barcode: Barcode = Barcode(rna_barcode_string)
        self.sequence: Sequence = Sequence(rna_sequence_string)
        self.notes: str = notes

        # ----------
        # Profile data from ShapeMapper 2.x
        # - The list of profile data for each of "nt".
        # - The elements in the list follows the "ordering" of nt position.
        # - The `dict` is indexed by "nt position"
        self.profile_list: List[ShapeProfileItem] = []
        self.profile_dict: Dict[str, ShapeProfileItem] = {}

        # ----------
        # Shape Reactivity
        # - The value is from ShapeMapper 2.x
        # - The `dict` is indexed by "nt position"
        self.shape_reactivity_list: List[ShapeReactivityItem] = []
        self.shape_reactivity_dict: Dict[str, ShapeReactivityItem] = {}

        # ----------
        # Reactivity from "OWN" method - a diff. method from ShapeMapper 2.x
        #
        # NOTE:
        # - Based on the calculation algorithm, the elements inside this list may not
        #   include the "entire" original sequence.
        self.neo_reactivity_list: List[float] = []

        # ----------------------------------
        # Stats
        # Read depths; initialized to None and presumably populated by an
        # external loader - confirm against the caller.
        self.modified_read_depth = None
        self.untreated_read_depth = None
    # endregion

    # ----------------------------------
    # region Properties
    @property
    def total_nt(self) -> int:
        r"""
        Get the total number of the "nt" - the length of sequence.
        Returns
        -------
        nt_length: int
            The "length" of the sequence.
        """
        return self.sequence.length
    # endregion

    # ----------------------------------
    # region Methods - Stats
    def shape_profile_list_low_quality(self, nt_a_c_only: bool = True) -> List[ShapeProfileItem]:
        r"""
        Retrieve the list of "ShapeMapper 2.x profile" which have "low quality".
        "low quality" is determined by the flag - `in_high_quality` inside the "Profile Item".
        Parameters
        ----------
        nt_a_c_only: bool
            If only do the calculation based on "A", "C" nt.
        Returns
        -------
        low_quality_list: List[ShapeProfileItem]
            The list of "low quality" profile item, by ShapeMapper 2.x.
        """
        #
        if nt_a_c_only:
            # NOTE(review): presumably refreshes the cached sequence length
            # before the `is_nt_ac` lookups - confirm against Sequence.
            self.sequence.calculate_length()
            # nt positions are 1-based, hence `index + 1`.
            return [item for index, item in enumerate(self.profile_list)
                    if self.sequence.is_nt_ac(index + 1) and item.in_high_quality is False]
        #
        return [item for item in self.profile_list if item.in_high_quality is False]

    def total_nt_with_condition(self, nt_a_c_only: bool = True) -> int:
        r"""
        Get the "total #" of "nt", with condition.
        Parameters
        ----------
        nt_a_c_only: bool
            If only do the calculation based on "A", "C" nt.
        Returns
        -------
        nt_length: int
            The "length" of the sequence, with condition.
        """
        #
        if not nt_a_c_only:
            return self.total_nt
        #
        # Count only A/C positions; iterates profile_list for its length, the
        # `item` values themselves are unused.
        total_nt = 0
        for index, item in enumerate(self.profile_list):
            #
            if self.sequence.is_nt_ac(index + 1):
                total_nt += 1
        #
        return total_nt
    # endregion

    # ----------------------------------
    # region Methods - Reactivity List
    def flatten_reactivity_list(self, reactivity_type: str = 'own') -> List[float]:
        r"""
        Flatten the "reactivity object list" to a list of "single reactivity value".
        Parameters
        ----------
        reactivity_type: str
            Which type of "reactivity" data to get.
        Returns
        -------
        flatten_reactivity_list: List[float]
            The list of "reactivity" data, ordered by "nt position"
        """
        if reactivity_type == 'shape':
            # Convert "None"
            return [item.shape_reactivity if item.shape_reactivity is not None else ShapeReactivityItem.NUMBER_FOR_NONE
                    for item in self.shape_reactivity_list]
            # return [item.shape_reactivity for item in self.shape_reactivity_list]
        if reactivity_type == 'own':
            return self.neo_reactivity_list
        # NOTE(review): any other `reactivity_type` falls through and returns
        # None implicitly - callers must pass 'shape' or 'own'.
    # endregion

    # ----------------------------------
    # region Methods Reactivity Calculation
    def calculate_reactivity_v1(self,
                                sequence_slice: slice = slice(0, None),
                                nt_a_c_only: bool = True):
        r"""
        Calculate "reactivity" with own method.
        Version 1 - The simplest version.
        General Algorithm:
        - For the rate of "each" nt, calculate a `adjusted rate` = `mod rate` - `non_mod rate`
            - if it is `< 0`, use `0`
        - Normalize the "rate" by dividing it against the **maximum rate** front the "targeting" list.
        - Use this `normalized rate` list as the "1D Reactivity" data.
        NOTE:
        - Since this algorithm needs to find the "max rate" from the "targeting" list,
          it needs to pass the actual "slice" to help define the "targeting" list.
        Parameters
        ----------
        sequence_slice: slice
            The target "sequence slice".
            Default to `slice(0, None)` - the entire sequence.
        nt_a_c_only: bool
            If only do the calculation based on "A", "C" nt.
        Returns
        -------
        """
        # Get the list of "rates" - "modified" and "untreated"
        # - Directly use `reactivity_profile` (= modified - untreated)
        # modified_rate = [item.modified_rate for item in self.profile_list]
        # untreated_rate = [item.untreated_rate for item in self.profile_list]
        reactivity_profile = [item.reactivity_profile for item in self.profile_list]

        # --------------
        # Adjusted profile rate
        # Rules
        # - negative value -> 0
        # - "None" -> INVALID
        # - If "AC-only", INVALID for "GU" nt.
        #
        # - negative value -> 0
        # NOTE(review): the docstring/comments say negative rates become 0,
        # but this line takes the absolute value instead - confirm intent.
        reactivity_profile_adjusted \
            = [abs(rate) if rate is not None and rate < 0.0 else rate for rate in reactivity_profile]
        # - "None" -> INVALID
        reactivity_profile_adjusted \
            = [rate if rate is not None else self.INVALID_VALUE for rate in reactivity_profile_adjusted]
        if nt_a_c_only:
            # INVALID for "GU" nt.
            self.sequence.calculate_length()
            reactivity_profile_adjusted \
                = [rate if rate is not None and self.sequence.is_nt_ac(index + 1) else self.INVALID_VALUE for index, rate in enumerate(reactivity_profile_adjusted)]

        # Convert it to "numpy array"
        reactivity_profile_adjusted = numpy.array(reactivity_profile_adjusted, dtype=float)
        # Normalize the rates
        reactivity_profile_adjusted = reactivity_profile_adjusted[sequence_slice]  # Apply the "sequence slice"
        # NOTE(review): if every entry in the slice is INVALID_VALUE, max_rate
        # is INVALID_VALUE and the division below is meaningless - verify
        # upstream guarantees at least one valid rate.
        max_rate = numpy.amax(reactivity_profile_adjusted)
        self.neo_reactivity_list = \
            numpy.array([rate / max_rate if rate != self.INVALID_VALUE else rate for rate in reactivity_profile_adjusted])
        self.neo_reactivity_list = self.neo_reactivity_list.tolist()
    # endregion

    # ----------------------------------
    # region Method
    # endregion
|
11,872 | 70584bad5605f5c6855f60dfb8a5cb90e9ceaad7 | #!/usr/bin/env python3
import discord
from discord.ext import commands
from db import insertNewPlayer, findCode, findDiscordId
TOKEN = "ENTERTOKENHERE"
client = commands.Bot(command_prefix="&")
@client.event
async def on_ready():
    """Log that the bot is up and advertise the help command as its presence."""
    print(":: Bot launched")
    presence = discord.Game("&howto")
    await client.change_presence(activity=presence)
@client.command(pass_context=True)
async def howto(ctx):
    """Send an embed listing every CodeMan command and what it does."""
    embed = discord.Embed(
        title="CodeMan",
        description="CodeMan is a BOT to manage slippi connect codes for ssbm.",
        color=0x44a963
    )
    command_help = [
        ("&add", "Adds your connect code to the database."),
        ("&code", "Shows your code or the one of someone else."),
        ("&whois", "Finds a discord username from a code."),
        ("&ask", "Asks for you if someone want to play."),
        ("&howto", "Shows this message."),
    ]
    for cmd, description in command_help:
        embed.add_field(name=cmd, value=description, inline=False)
    await ctx.send(embed=embed)
@client.command()
async def add(ctx, code = None):
    """Register the invoking user's Slippi connect code in the database."""
    if code is None:
        await ctx.send("You must enter a code !")
        return
    insertNewPlayer(ctx.message.author.id, code)
    await ctx.send("Done !")
@client.command()
async def code(ctx, user: discord.User = None):
    """Reply with the caller's connect code, or another user's if given."""
    if user is None:
        # No target: look up the invoking user's own code.
        await ctx.send(findCode(ctx.message.author.id))
        return
    looked_up = findCode(user.id)
    if looked_up is None:
        await ctx.send("This user has no connect code !")
    else:
        await ctx.send(looked_up)
@client.command(pass_context=True)
async def ask(ctx, role: discord.Role = None):
    """Delete the invocation and announce that the caller wants to play."""
    caller = ctx.message.author
    caller_name = caller.name
    await ctx.message.delete()
    caller_code = findCode(caller.id)
    if role is None:
        message = "{} wants to play, code: {}".format(caller_name, caller_code)
    else:
        message = "{} {} wants to play, code: {}".format(role.mention, caller_name, caller_code)
    await ctx.send(message)
@client.command()
async def whois(ctx, code = None):
    """Resolve a connect code back to the Discord user who registered it."""
    if code is None:
        await ctx.send("You must enter a code !")
        return
    owner_id = findDiscordId(code)
    if owner_id is None:
        await ctx.send("This code has no player attached to it :/")
    else:
        owner = client.get_user(owner_id)
        await ctx.send("{} is {}".format(code, owner))
# Blocking call: connects to Discord and runs the event loop until shutdown.
client.run(TOKEN)
11,873 | 2de94e0eb629b89563421fb4e708be891337a338 | # https://www.codewars.com/kata/59c633e7dcc4053512000073/train/python
'''
Given a lowercase string that has alphabetic characters only and no spaces,
return the highest value of consonant substrings. Consonants are any letters
of the alphabet except "aeiou".
We shall assign the following values: a = 1, b = 2, c = 3, .... z = 26.
For example, for the word "zodiacs", let's cross out the vowels. We get:
"z o d ia cs"
-- The consonant substrings are: "z", "d" and "cs" and the values are z = 26,
d = 4 and cs = 3 + 19 = 22. The highest is 26.
solve("zodiacs") = 26
For the word "strength", solve("strength") = 57
-- The consonant substrings are: "str" and "ngth" with values "str" = 19 +
20 + 18 = 57 and "ngth" = 14 + 7 + 20 + 8 = 49. The highest is 57.
For C: do not mutate input.
More examples in test cases. Good luck!
If you like this Kata, please try:
Word values
Vowel-consonant lexicon
'''
import re
def solve(s):
    """Return the highest value among the consonant substrings of *s*.

    Letters are valued a=1, b=2, ..., z=26 and *s* is assumed to be a
    lowercase alphabetic string (per the kata statement).  Splitting on
    vowels yields the consonant runs directly; the value of a run is the
    sum of its letter values.  An empty or all-vowel string yields 0.

    >>> solve("zodiacs")
    26
    >>> solve("strength")
    57
    """
    # re.split produces empty runs between adjacent vowels; they sum to 0 and
    # never win unless every run is empty, in which case 0 is the right answer.
    return max(
        sum(ord(ch) - ord("a") + 1 for ch in run)
        for run in re.split(r"[aeiou]", s)
    )
|
11,874 | b0edfe48cb688e3a1cde9d1c71749970c57cbd37 | from netmiko import Netmiko
# SECURITY(review): credentials are hard-coded in source and already exposed;
# rotate this password and load credentials from the environment or a vault
# instead of committing them.
# Open an SSH session to the Cisco IOS device via netmiko.
net_connect = Netmiko(
    "10.223.252.122",
    username="DSV.API",
    password="bale-pE3WFx!",
    device_type="cisco_ios",
)
# Smoke test: print the device prompt to confirm the session is live.
print(net_connect.find_prompt())
net_connect.disconnect()
11,875 | f63c3c4fd3ff6fc1607fa98d74632f9bf93b9af2 | #!/usr/bin/env python
# Copyright (c) 2017, Daniel Liew
# This file is covered by the license in LICENSE.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read two result info files and generate a scatter plot of execution time
"""
from load_smtrunner import add_smtrunner_to_module_search_path
add_smtrunner_to_module_search_path()
from smtrunner import ResultInfo, DriverUtil, ResultInfoUtil, analysis, event_analysis
import smtrunner.util
import matplotlib.pyplot as plt
import argparse
import json
import logging
import math
import os
import pprint
import random
import re
import sys
import yaml
_logger = None
def strip(prefix, path):
    """Return *path* with *prefix* removed from its front.

    Args:
        prefix (str): leading substring to remove; "" removes nothing.
        path (str): the path to strip.

    Returns:
        str: *path* without *prefix* when it starts with it, otherwise
        *path* unchanged.

    Fix: the original fell off the end and implicitly returned None when
    *path* did not start with *prefix*, which broke callers expecting a
    string; now the path is returned unchanged in that case.
    """
    if prefix and path.startswith(prefix):
        return path[len(prefix):]
    return path
def main(args):
    """Parse CLI arguments, load the two result-info files, and render a
    scatter plot comparing per-benchmark execution times.

    Returns 0 on success, 1 on any validation or usage error.
    """
    global _logger
    # NOTE(review): `_fail_count` is declared global but never defined or
    # used in this function - likely leftover from a copied template.
    global _fail_count
    parser = argparse.ArgumentParser(description=__doc__)
    DriverUtil.parserAddLoggerArg(parser)
    parser.add_argument('first_result_info',
                        type=argparse.FileType('r'))
    parser.add_argument('second_result_info',
                        type=argparse.FileType('r'))
    parser.add_argument('--base', type=str, default="")
    parser.add_argument('--point-size', type=float, default=25.0, dest='point_size')
    parser.add_argument('--allow-merge-failures',
                        dest='allow_merge_failures',
                        default=False,
                        action='store_true',
                        )
    parser.add_argument('--max-exec-time',
                        default=None,
                        type=float,
                        dest='max_exec_time',
                        )
    parser.add_argument('--title',
                        default="{num_keys} benchmarks, {num_points} jointly SAT or timeout"
                        )
    parser.add_argument("--xlabel",
                        type=str,
                        default=None,
                        )
    parser.add_argument("--ylabel",
                        type=str,
                        default=None,
                        )
    parser.add_argument("--axis-label-suffix",
                        type=str,
                        default=" execution time (s)",
                        dest="axis_label_suffix",
                        )
    parser.add_argument("--axis-label-colour",
                        type=str,
                        default="black",
                        dest="axis_label_colour",
                        )
    parser.add_argument("--annotate",
                        default=False,
                        action='store_true',
                        )
    parser.add_argument("--annotate-use-legacy-values",
                        default=False,
                        action='store_true',
                        )
    parser.add_argument("--output",
                        default=None,
                        type=argparse.FileType('wb'),
                        )
    parser.add_argument("--error-bars",
                        default=False,
                        action='store_true',
                        )
    parser.add_argument("--annotate-timeout-point",
                        dest='annotate_timeout_point',
                        default=False,
                        action='store_true',
                        )
    parser.add_argument("--require-time-abs-diff",
                        dest="require_time_abs_diff",
                        default=0.0,
                        type=float
                        )
    parser.add_argument('--true-type-fonts',
                        default=False,
                        action='store_true'
                        )
    pargs = parser.parse_args(args)
    DriverUtil.handleLoggerArgs(pargs, parser)
    _logger = logging.getLogger(__name__)
    if pargs.max_exec_time is None:
        _logger.error('--max-exec-time must be specified')
        return 1
    if pargs.true_type_fonts:
        smtrunner.util.set_true_type_font()
    # Load both result-info files; index 0 -> x-axis, index 1 -> y-axis.
    index_to_raw_result_infos = []
    index_to_file_name = []
    for index, result_infos_file in enumerate([pargs.first_result_info, pargs.second_result_info]):
        try:
            _logger.info('Loading "{}"'.format(result_infos_file.name))
            result_infos = ResultInfo.loadRawResultInfos(result_infos_file)
            index_to_raw_result_infos.append(result_infos)
            index_to_file_name.append(result_infos_file.name)
        except ResultInfo.ResultInfoValidationError as e:
            _logger.error('Validation error:\n{}'.format(e))
            return 1
    _logger.info('Loading done')
    result_infos = None
    # Perform grouping by benchmark name
    key_to_results_infos, rejected_result_infos = ResultInfoUtil.group_result_infos_by(
        index_to_raw_result_infos)
    if len(rejected_result_infos) > 0:
        _logger.warning('There were rejected result infos')
        num_merge_failures = 0
        for index, l in enumerate(rejected_result_infos):
            _logger.warning('Index {} had {} rejections'.format(index, len(l)))
            num_merge_failures += len(l)
        if num_merge_failures > 0:
            if pargs.allow_merge_failures:
                _logger.warning('Merge failures being allowed')
            else:
                _logger.error('Merge failures are not allowed')
                return 1
    # Generate scatter points
    x_scatter_points = []
    x_scatter_errors = [[], [] ]
    y_scatter_points = []
    y_scatter_errors = [[], []]
    count_dual_timeout = 0
    count_x_lt_y_not_dt = 0
    count_x_gt_y_not_dt = 0
    count_x_eq_y_not_dt = 0
    # New counting vars
    bounds_incomparable_keys = set()
    x_gt_y_keys = set()
    x_lt_y_keys = set()
    x_eq_y_keys = set()
    x_eq_y_and_is_timeout_keys = set()
    for key, raw_result_info_list in sorted(key_to_results_infos.items(), key=lambda kv:kv[0]):
        _logger.info('Ranking on "{}" : '.format(key))
        indices_to_use = []
        # Compute indices to use
        modified_raw_result_info_list = [ ]
        # Handle "unknown"
        # Only compare results that gave sat/unsat
        for index, ri in enumerate(raw_result_info_list):
            if isinstance(ri['event_tag'], str):
                # single result
                event_tag = ri['event_tag']
            else:
                assert isinstance(ri['event_tag'], list)
                event_tag, _ = event_analysis.merge_aggregate_events(
                    ri['event_tag'])
            # Event must be sat or timeout
            _logger.info('index {} is {}'.format(index, event_tag))
            if event_tag not in { 'sat', 'timeout', 'soft_timeout'}:
                # Skip this. We can't do a meaningful comparison here
                continue
            indices_to_use.append(index)
            # Normalise timeouts to have fixed values for the time.
            if event_tag in {'timeout', 'soft_timeout'}:
                modified_ri = analysis.get_result_with_modified_time(
                    ri,
                    pargs.max_exec_time)
                _logger.debug('modified_ri: {}'.format(
                    pprint.pformat(modified_ri)))
                _logger.debug(
                    'Treating index {} for {} due to unknown as having max-time'.format(
                        index,
                        key))
                modified_raw_result_info_list.append(modified_ri)
            else:
                modified_raw_result_info_list.append(ri)
        _logger.debug('used indices_to_use: {}'.format(indices_to_use))
        if len(indices_to_use) != 2:
            # Skip this one. One of the result infos can't be compared
            # against.
            continue
        assert len(indices_to_use) == 2
        # Get execution times
        index_to_execution_time_bounds = analysis.get_index_to_execution_time_bounds(
            modified_raw_result_info_list,
            indices_to_use,
            pargs.max_exec_time,
            analysis.get_arithmetic_mean_and_99_confidence_intervals,
            ['dsoes_wallclock', 'wallclock'])
        assert isinstance(index_to_execution_time_bounds, list)
        # Each bounds triple is (lower CI, mean, upper CI).
        x_scatter_point_bounds = index_to_execution_time_bounds[0]
        y_scatter_point_bounds = index_to_execution_time_bounds[1]
        x_scatter_point = x_scatter_point_bounds[1] # mean
        y_scatter_point = y_scatter_point_bounds[1] # mean
        x_scatter_lower_error = x_scatter_point_bounds[1] - x_scatter_point_bounds[0]
        assert x_scatter_lower_error >= 0
        x_scatter_higher_error = x_scatter_point_bounds[2] - x_scatter_point_bounds[1]
        assert x_scatter_higher_error >= 0
        y_scatter_lower_error = y_scatter_point_bounds[1] - y_scatter_point_bounds[0]
        assert y_scatter_lower_error >= 0
        y_scatter_higher_error = y_scatter_point_bounds[2] - y_scatter_point_bounds[1]
        assert y_scatter_higher_error >= 0
        x_scatter_points.append(x_scatter_point)
        y_scatter_points.append(y_scatter_point)
        # Error bar points
        #x_scatter_errors.append((x_scatter_lower_error, x_scatter_higher_error))
        x_scatter_errors[0].append(x_scatter_lower_error)
        x_scatter_errors[1].append(x_scatter_higher_error)
        #y_scatter_errors.append((y_scatter_lower_error, y_scatter_higher_error))
        y_scatter_errors[0].append(y_scatter_lower_error)
        y_scatter_errors[1].append(y_scatter_higher_error)
        # LEGACY: Now do some counting
        if x_scatter_point == y_scatter_point:
            if x_scatter_point == pargs.max_exec_time:
                assert x_scatter_lower_error == 0
                assert x_scatter_higher_error == 0
                assert y_scatter_lower_error == 0
                assert y_scatter_higher_error == 0
                count_dual_timeout += 1
            else:
                _logger.info('Found count_x_eq_y_not_dt: x: {}, key: {}'.format(
                    x_scatter_point,
                    key))
                count_x_eq_y_not_dt += 1
        elif x_scatter_point > y_scatter_point:
            count_x_gt_y_not_dt += 1
        else:
            assert x_scatter_point < y_scatter_point
            count_x_lt_y_not_dt += 1
        # SMARTER counting: uses error bounds
        if analysis.bounds_overlap(x_scatter_point_bounds, y_scatter_point_bounds):
            # Bounds overlap, we can't compare the execution times in a meaningful way
            bounds_incomparable_keys.add(key)
            # However if both are timeouts we can note this
            # NOTE(review): only x is tested against max_exec_time here; the
            # comment claims "both" - confirm y cannot differ in this branch.
            if x_scatter_point == pargs.max_exec_time:
                x_eq_y_and_is_timeout_keys.add(key)
        else:
            # Compare the means
            if x_scatter_point > y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
                x_gt_y_keys.add(key)
            elif x_scatter_point < y_scatter_point and abs(x_scatter_point - y_scatter_point) > pargs.require_time_abs_diff:
                x_lt_y_keys.add(key)
            else:
                if pargs.require_time_abs_diff == 0.0:
                    assert x_scatter_point == y_scatter_point
                x_eq_y_keys.add(key)
    # Report counts
    print("# of points : {}".format(len(x_scatter_points)))
    print("LEGACY: count_dual_timeout: {}".format(count_dual_timeout))
    print("LEGACY: count_x_eq_y_not_dt: {}".format(count_x_eq_y_not_dt))
    print("LEGACY: count_x_gt_y_not_dt: {}".format(count_x_gt_y_not_dt))
    print("LEGACY: count_x_lt_y_not_dt: {}".format(count_x_lt_y_not_dt))
    print("")
    print("# x > y and no bound overlap: {}".format(len(x_gt_y_keys)))
    print("# x < y and no bound overlap: {}".format(len(x_lt_y_keys)))
    print("# x = y and no bound overlap: {}".format(len(x_eq_y_keys)))
    print("# incomparable: {}".format(len(bounds_incomparable_keys)))
    print("# of x = y and is timeout: {}".format(len(x_eq_y_and_is_timeout_keys)))
    # Now plot
    extend = 100
    tickFreq = 100
    assert len(x_scatter_points) == len(y_scatter_points)
    fig, ax = plt.subplots()
    fig.patch.set_alpha(0.0) # Transparent
    if pargs.error_bars:
        splot = ax.errorbar(
            x_scatter_points,
            y_scatter_points,
            xerr=x_scatter_errors,
            yerr=y_scatter_errors,
            fmt='o',
            picker=5,
            ms=pargs.point_size/2.0, # HACK
            ecolor='black',
            capsize=5,
            #capthick=10,
        )
    else:
        splot = ax.scatter(x_scatter_points, y_scatter_points, picker=5, s=pargs.point_size)
    # Axis labels default to the input file names.
    xlabel = index_to_file_name[0] if pargs.xlabel is None else pargs.xlabel
    ylabel = index_to_file_name[1] if pargs.ylabel is None else pargs.ylabel
    xlabel += pargs.axis_label_suffix
    ylabel += pargs.axis_label_suffix
    ax.xaxis.label.set_color(pargs.axis_label_colour)
    ax.yaxis.label.set_color(pargs.axis_label_colour)
    ax.tick_params(axis='x', colors=pargs.axis_label_colour)
    ax.tick_params(axis='y', colors=pargs.axis_label_colour)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0,pargs.max_exec_time + extend)
    ax.set_ylim(0,pargs.max_exec_time + extend)
    # +1 is just so the pargs.max_exec_time is included because range()'s end is not inclusive
    ax.set_xticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
    ax.set_yticks(range(0, int(pargs.max_exec_time) + 1, tickFreq))
    # Construct title keyword args
    title_kwargs = {
        'num_points': len(x_scatter_points),
        'xlabel': xlabel,
        'ylabel': ylabel,
        'num_keys': len(key_to_results_infos.keys()),
    }
    ax.set_title(pargs.title.format(**title_kwargs))
    # Identity line
    ax.plot([ 0 , pargs.max_exec_time + extend], [0, pargs.max_exec_time + extend], linewidth=1.0, color='black')
    if pargs.annotate:
        if pargs.annotate_use_legacy_values:
            _logger.warning('Displaying legacy values')
            x_lt_value_to_display = count_x_lt_y_not_dt
            x_gt_value_to_display = count_x_gt_y_not_dt
        else:
            _logger.info('Displaying new values')
            x_lt_value_to_display = len(x_lt_y_keys)
            x_gt_value_to_display = len(x_gt_y_keys)
        ax.annotate(
            '{}'.format(x_lt_value_to_display),
            xy=(200,550),
            fontsize=40
        )
        ax.annotate(
            '{}'.format(x_gt_value_to_display),
            xy=(550,200),
            fontsize=40
        )
    # timeout point annotation
    if pargs.annotate_timeout_point:
        num_dual_timeouts = len(x_eq_y_and_is_timeout_keys)
        dual_timeout_txt = None
        if num_dual_timeouts == 1:
            dual_timeout_txt = '{} dual timeout'.format(num_dual_timeouts)
        else:
            dual_timeout_txt = '{} dual timeouts'.format(num_dual_timeouts)
        ax.annotate(dual_timeout_txt,
                    # HACK -5 is to offset arrow properly
                    xy=(pargs.max_exec_time - 15.00, pargs.max_exec_time), xycoords='data',
                    xytext=(-50, 0), textcoords='offset points',
                    arrowprops=dict(facecolor='black', shrink=0.05, width=1.5, headwidth=7.0),
                    horizontalalignment='right', verticalalignment='center',
                    bbox=dict(boxstyle='round',fc='None'),
                    fontsize=15)
    # Finally show
    if pargs.output is None:
        plt.show()
    else:
        # For command line usage
        fig.show()
        fig.savefig(pargs.output, format='pdf')
    return 0
if __name__ == '__main__':
    # Standard CLI entry point; process exit status comes from main().
    sys.exit(main(sys.argv[1:]))
|
11,876 | fd4a95a01065019a61ccd184f767dab0e5c978a9 | import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn import linear_model
import matplotlib.pyplot as plt
def create_dataset(samples, tile_size):
    """Sample a random training set of flattened tiles and scalar targets.

    Draws `samples` random (sample, row, col) coordinates from the first
    40000 time steps, staying `margin` cells away from the 80x120 grid
    border, and returns the flattened tile_size x tile_size x 3 input tiles
    with the matching target value at the tile centre.

    Reads the module-level arrays ``x`` and ``y`` loaded in __main__.
    NOTE: Python 2 - "/" below is integer division; tile_size is assumed odd.
    """
    margin = (tile_size - 1) / 2
    n = np.random.randint(0, high=40000, size=(samples))
    i = np.random.randint(margin, high=120 - margin, size=(samples))
    j = np.random.randint(margin, high=80 - margin, size=(samples))
    # One (sample, row, col) coordinate triple per training example.
    c = np.dstack((n, j, i))[0]
    x_tiles = np.zeros((samples, tile_size*tile_size*3), dtype=np.float32)
    y_tiles = np.zeros((samples), dtype=np.float32)
    for k, idx in enumerate(c):
        x_tiles[k, :] = x[idx[0], idx[1] - margin:idx[1] + (margin+1), idx[2] - margin:idx[2] + (margin+1), :].flatten()
        y_tiles[k] = y[idx[0], idx[1], idx[2]]
    return x_tiles, y_tiles
def predict(model, n, tile_size):
    """Predict the full 80x120 field for sample *n*, one tile at a time.

    Reads the module-level input array ``x``.  Cells within ``margin`` of
    the border are left at 0 (no full tile is available for them).
    NOTE: Python 2 integer division for ``margin``; one model.predict call
    per cell, so this is slow but simple.
    """
    margin = (tile_size - 1) / 2
    pred = np.zeros((80, 120), dtype=np.float32)
    for i in range(margin, 120-margin):
        for j in range(margin, 80-margin):
            pred[j, i] = model.predict([x[n, j-margin:j+(margin+1), i-margin:i+(margin+1), :].flatten()])[0]
    return pred
def mae(y, y_hat, tile_size):
    # Sum of absolute errors over the interior region (the border within
    # `margin` cells is excluded, matching what predict() fills in).
    # NOTE(review): despite the name this is a *sum*, not a mean - callers
    # divide by the sample count themselves.  Parameter `y` shadows the
    # module-level target array of the same name.
    margin = (tile_size - 1) / 2
    return np.sum(np.absolute((y[margin:80-margin, margin:120-margin] - y_hat[margin:80-margin, margin:120-margin])))
def save_precip(name, y, y_hat):
    # Save the reference field and the prediction as images with a shared
    # colour scale (vmax=17) so the two are visually comparable.
    plt.imsave(name+"era", y, vmax=17, cmap='Blues')
    plt.imsave(name+"pred", y_hat, vmax=17, cmap='Blues')
if __name__ == "__main__":
    # Python 2 script: for each tile size, train RF / LASSO / linear models
    # on 30000 random tiles from the first 40000 samples, then report the
    # summed-absolute-error averaged over 100 held-out samples (40000-40099).
    x = np.load("/Users/pablo/Downloads/3zlevels.npy")
    # Convert to mm and clip negative precipitation to zero.
    y = (np.load("/Users/pablo/Downloads/full_tp_1980_2016.npy") * 1000).clip(min=0)
    for tile_size in [3, 5, 7, 9, 11]:
        x_tiles, y_tiles = create_dataset(30000, tile_size)
        rf = RandomForestRegressor(max_depth=25, random_state=0)
        rf.fit(x_tiles, y_tiles)
        maes = 0
        for n in range(40000, 40100):
            y_hat = predict(rf, n, tile_size)
            save_precip("output/RF_{}".format(n), y[n, :], y_hat)
            maes += mae(y[n, :], y_hat, tile_size)
        print "Random Forest", tile_size, ":", maes/100.0
        ls = linear_model.Lasso(alpha=0.1)
        ls.fit(x_tiles, y_tiles)
        maes = 0
        for n in range(40000, 40100):
            y_hat = predict(ls, n, tile_size)
            save_precip("output/LASSO_{}".format(n), y[n, :], y_hat)
            maes += mae(y[n, :], y_hat, tile_size)
        print "LASSO", tile_size, ":", maes/100.0
        lr = linear_model.LinearRegression()
        lr.fit(x_tiles, y_tiles)
        maes = 0
        for n in range(40000, 40100):
            y_hat = predict(lr, n, tile_size)
            save_precip("output/LR_{}".format(n), y[n, :], y_hat)
            maes += mae(y[n, :], y_hat, tile_size)
        print "Linear Regression", tile_size, ":", maes/100.0
|
11,877 | a575a42ad7c24df1d1ccfdfa9bfa1786f25834ee | import copy
import numbers
import astropy.units as u
import numpy as np
from EXOSIMS.util.get_dirs import get_cache_dir
from EXOSIMS.util.get_module import get_module
from EXOSIMS.util.keyword_fun import get_all_args
from EXOSIMS.util.vprint import vprint
class PlanetPopulation(object):
r""":ref:`PlanetPopulation` Prototype
Args:
arange (list(float)):
[Min, Max] semi-major axis (in AU). Defaults to [0.1,100.]
erange (list(float)):
[Min, Max] eccentricity. Defaults to [0.01,0.99]
Irange (list(float)):
[Min, Max] inclination (in degrees). Defaults to [0.,180.]
Orange (list(float)):
[Min, Max] longitude of the ascending node (in degrees).
Defaults to [0.,360.]
wrange (list(float)):
[Min, Max] argument of periapsis. Defaults to [0.,360.]
prange (list(float)):
[Min, Max] geometric albedo. Defaults to [0.1,0.6]
Rprange (list(float)):
[Min, Max] planet radius (in Earth radii). Defaults to [1.,30.]
Mprange (list(float)):
[Min, Max] planet mass (in Earth masses). Defaults to [1.,4131.]
scaleOrbits (bool):
Scale orbits by :math:`\sqrt{L}` where :math:`L` is the stellar luminosity.
This has the effect of matching insolation distnaces and preserving the
habitable zone of the population. Defaults to False.
constrainOrbits (bool):
Do not allow orbits where orbital radius can exceed the ``arange`` limits.
Defaults to False
eta (float):
Overall occurrence rate of the population. The expected number of planets
per target star. Must be strictly positive, but may be greater than 1
(if more than 1 planet is expected per star, on average). Defaults to 0.1.
cachedir (str, optional):
Full path to cachedir.
If None (default) use default (see :ref:`EXOSIMSCACHE`)
**specs:
:ref:`sec:inputspec`
Attributes:
_outspec (dict):
:ref:`sec:outspec`
arange (astropy.units.quantity.Quantity):
[Min, Max] semi-major axis
cachedir (str):
Path to the EXOSIMS cache directory (see :ref:`EXOSIMSCACHE`)
constrainOrbits (bool):
Do not allow orbits where orbital radius can exceed the ``arange`` limits.
erange (numpy.ndarray):
[Min, Max] eccentricity.
eta (float):
Overall occurrence rate of the population. The expected number of planets
per target star. Must be strictly positive, but may be greater than 1
(if more than 1 planet is expected per star, on average).
Irange (astropy.units.quantity.Quantity):
[Min, Max] inclination
Mprange (astropy.units.quantity.Quantity):
[Min, Max] planet mass
Orange (astropy.units.quantity.Quantity):
[Min, Max] longitude of the ascending node
pfromRp (bool):
Albedo is dependent on planetary radius
PlanetPhysicalModel (:ref:`PlanetPhysicalModel`):
Planet physical model object
prange (numpy.ndarray):
[Min, Max] geometric albedo.
Rprange (astropy.units.quantity.Quantity):
[Min, Max] planet radius
rrange (astropy.units.quantity.Quantity):
[Min, Max] orbital radius
scaleOrbits (bool):
Scale orbits by :math:`\sqrt{L}` where :math:`L` is the stellar luminosity.
This has the effect of matching insolation distnaces and preserving the
habitable zone of the population.
wrange (astropy.units.quantity.Quantity):
[Min, Max] argument of periapsis.
"""
_modtype = "PlanetPopulation"
    def __init__(
        self,
        arange=[0.1, 100.0],
        erange=[0.01, 0.99],
        Irange=[0.0, 180.0],
        Orange=[0.0, 360.0],
        wrange=[0.0, 360.0],
        prange=[0.1, 0.6],
        Rprange=[1.0, 30.0],
        Mprange=[1.0, 4131.0],
        scaleOrbits=False,
        constrainOrbits=False,
        eta=0.1,
        cachedir=None,
        **specs
    ):
        # NOTE(review): the list defaults are mutable; harmless here because
        # checkranges copies each into a fresh numpy array, but callers should
        # not rely on these list objects being fresh per call.
        # start the outspec
        self._outspec = {}
        # get the cache directory
        self.cachedir = get_cache_dir(cachedir)
        self._outspec["cachedir"] = self.cachedir
        specs["cachedir"] = self.cachedir
        # load the vprint function (same line in all prototype module constructors)
        self.vprint = vprint(specs.get("verbose", True))
        # check range of parameters (each returns a sorted float array;
        # astropy units are attached here)
        self.arange = self.checkranges(arange, "arange") * u.AU
        self.erange = self.checkranges(erange, "erange")
        self.Irange = self.checkranges(Irange, "Irange") * u.deg
        self.Orange = self.checkranges(Orange, "Orange") * u.deg
        self.wrange = self.checkranges(wrange, "wrange") * u.deg
        self.prange = self.checkranges(prange, "prange")
        self.Rprange = self.checkranges(Rprange, "Rprange") * u.earthRad
        self.Mprange = self.checkranges(Mprange, "Mprange") * u.earthMass
        assert isinstance(scaleOrbits, bool), "scaleOrbits must be boolean"
        # scale planetary orbits by sqrt(L)
        self.scaleOrbits = scaleOrbits
        assert isinstance(constrainOrbits, bool), "constrainOrbits must be boolean"
        # constrain planetary orbital radii to sma range
        self.constrainOrbits = constrainOrbits
        assert isinstance(eta, numbers.Number) and (
            eta > 0
        ), "eta must be strictly positive"
        # global occurrence rate defined as expected number of planets per
        # star in a given universe
        self.eta = eta
        # populate outspec with all inputs (Quantity values are stored unitless)
        kws = get_all_args(self.__class__)
        ignore_kws = ["self", "cachedir"]
        kws = list((set(kws) - set(ignore_kws)))
        for att in kws:
            if att not in ["vprint", "_outspec"]:
                dat = copy.copy(self.__dict__[att])
                self._outspec[att] = dat.value if isinstance(dat, u.Quantity) else dat
        # albedo is independent of planetary radius range
        self.pfromRp = False
        # derive orbital radius range
        ar = self.arange.to("AU").value
        er = self.erange
        if self.constrainOrbits:
            self.rrange = [ar[0], ar[1]] * u.AU
        else:
            # periapsis of the most eccentric inner orbit to apoapsis of the
            # most eccentric outer orbit
            self.rrange = [ar[0] * (1.0 - er[1]), ar[1] * (1.0 + er[1])] * u.AU
        # define prototype distributions of parameters (uniform and log-uniform)
        self.uniform = lambda x, v: np.array(
            (np.array(x) >= v[0]) & (np.array(x) <= v[1]), dtype=float, ndmin=1
        ) / (v[1] - v[0])
        self.logunif = lambda x, v: np.array(
            (np.array(x) >= v[0]) & (np.array(x) <= v[1]), dtype=float, ndmin=1
        ) / (x * np.log(v[1] / v[0]))
        # import PlanetPhysicalModel
        self.PlanetPhysicalModel = get_module(
            specs["modules"]["PlanetPhysicalModel"], "PlanetPhysicalModel"
        )(**specs)
def checkranges(self, var, name):
"""Helper function provides asserts on all 2 element lists of ranges
Args:
var (list):
2-element list
name (str):
Variable name
Returns:
list:
Sorted input variable
Raises AssertionError on test fail.
"""
# reshape var
assert len(var) == 2, "%s must have two elements," % name
var = np.array([float(v) for v in var])
# check values
if name in ["arange", "Rprange", "Mprange"]:
assert np.all(var > 0), "%s values must be strictly positive" % name
if name in ["erange", "prange"]:
assert np.all(var >= 0) and np.all(var <= 1), (
"%s values must be between 0 and 1" % name
)
# the second element must be greater or equal to the first
if var[1] < var[0]:
var = var[::-1]
return var
def __str__(self):
"""String representation of the Planet Population object
When the command 'print' is used on the Planet Population object, this
method will print the attribute values contained in the object"""
for att in self.__dict__:
print("%s: %r" % (att, getattr(self, att)))
return "Planet Population class object attributes"
def gen_input_check(self, n):
"""
Helper function checks that input is integer, casts to int, is >= 0
Args:
n (float):
An integer to validate
Returns:
int:
The input integer as an integer
Raises AssertionError on test fail.
"""
assert (
isinstance(n, numbers.Number) and float(n).is_integer()
), "Input must be an integer value."
assert n >= 0, "Input must be nonnegative"
return int(n)
def gen_mass(self, n):
"""Generate planetary mass values in units of Earth mass.
The prototype provides a log-uniform distribution between the minimum and
maximum values.
Args:
n (int):
Number of samples to generate
Returns:
~astropy.units.Quantity(~numpy.ndarray(float)):
Planet mass values in units of Earth mass.
"""
n = self.gen_input_check(n)
Mpr = self.Mprange.to("earthMass").value
Mp = (
np.exp(np.random.uniform(low=np.log(Mpr[0]), high=np.log(Mpr[1]), size=n))
* u.earthMass
)
return Mp
def gen_angles(self, n, commonSystemPlane=False, commonSystemPlaneParams=None):
    """Generate inclination, longitude of the ascending node, and argument
    of periapse in degrees

    The prototype generates inclination as sinusoidally distributed and
    longitude of the ascending node and argument of periapse as uniformly
    distributed.

    Args:
        n (int):
            Number of samples to generate
        commonSystemPlane (bool):
            Generate delta inclinations from common orbital plane rather than
            fully independent inclinations and Omegas. Defaults False. If True,
            commonSystemPlaneParams must be supplied.
        commonSystemPlaneParams (None or list):
            4 element list of [I mean, I standard deviation,
            O mean, O standard deviation]
            in units of degrees, describing the distribution of
            inclinations and Omegas relative to a common orbital plane.
            Ignored if commonSystemPlane is False.

    Returns:
        tuple:
            I (~astropy.units.Quantity(~numpy.ndarray(float))):
                Inclination in units of degrees OR deviation in inclination (deg)
            O (~astropy.units.Quantity(~numpy.ndarray(float))):
                Longitude of the ascending node (deg)
            w (~astropy.units.Quantity(~numpy.ndarray(float))):
                Argument of periapsis (deg)
    """
    n = self.gen_input_check(n)
    # inclination
    # normalization constant for the sinusoidal inclination distribution
    C = 0.5 * (np.cos(self.Irange[0]) - np.cos(self.Irange[1]))
    if commonSystemPlane:
        assert (
            len(commonSystemPlaneParams) == 4
        ), "commonSystemPlaneParams must be a four-element list"
        # draw I and O as normal deviations about the common plane
        I = (  # noqa: 741
            np.random.normal(
                loc=commonSystemPlaneParams[0],
                scale=commonSystemPlaneParams[1],
                size=n,
            )
            * u.deg
        )
        O = (
            np.random.normal(
                loc=commonSystemPlaneParams[2],
                scale=commonSystemPlaneParams[3],
                size=n,
            )
            * u.deg
        )
    else:
        # inverse-CDF sampling of the sinusoidal inclination distribution
        I = (  # noqa: 741
            np.arccos(np.cos(self.Irange[0]) - 2.0 * C * np.random.uniform(size=n))
        ).to("deg")

        # longitude of the ascending node: uniform over Orange
        Or = self.Orange.to("deg").value
        O = np.random.uniform(low=Or[0], high=Or[1], size=n) * u.deg  # noqa: 741

    # argument of periapse: uniform over wrange in both branches
    wr = self.wrange.to("deg").value
    w = np.random.uniform(low=wr[0], high=wr[1], size=n) * u.deg

    return I, O, w
def gen_plan_params(self, n):
    """Generate semi-major axis (AU), eccentricity, geometric albedo, and
    planetary radius (earthRad)

    The prototype generates semi-major axis and planetary radius with
    log-uniform distributions and eccentricity and geometric albedo with
    uniform distributions.

    Args:
        n (int):
            Number of samples to generate

    Returns:
        tuple:
            a (~astropy.units.Quantity(~numpy.ndarray(float))):
                Semi-major axis in units of AU
            e (~numpy.ndarray(float)):
                Eccentricity
            p (~numpy.ndarray(float)):
                Geometric albedo
            Rp (~astropy.units.Quantity(~numpy.ndarray(float))):
                Planetary radius in units of earthRad
    """
    n = self.gen_input_check(n)
    # generate samples of semi-major axis
    ar = self.arange.to("AU").value
    # check if constrainOrbits == True for eccentricity
    if self.constrainOrbits:
        # restrict semi-major axis limits so orbital radius can stay inside
        # [arange[0], arange[1]] given the minimum eccentricity
        arcon = np.array(
            [ar[0] / (1.0 - self.erange[0]), ar[1] / (1.0 + self.erange[0])]
        )
        # log-uniform sma over the constrained interval
        a = (
            np.exp(
                np.random.uniform(
                    low=np.log(arcon[0]), high=np.log(arcon[1]), size=n
                )
            )
            * u.AU
        )
        tmpa = a.to("AU").value

        # upper limit for eccentricity given sma
        # below the mean sma the periapsis constraint binds; above it,
        # the apoapsis constraint binds
        elim = np.zeros(len(a))
        amean = np.mean(ar)
        elim[tmpa <= amean] = 1.0 - ar[0] / tmpa[tmpa <= amean]
        elim[tmpa > amean] = ar[1] / tmpa[tmpa > amean] - 1.0
        # clamp the per-planet limit into the configured eccentricity range
        elim[elim > self.erange[1]] = self.erange[1]
        elim[elim < self.erange[0]] = self.erange[0]

        # uniform distribution up to the per-planet eccentricity limit
        e = np.random.uniform(low=self.erange[0], high=elim, size=n)
    else:
        # unconstrained case: independent log-uniform sma, uniform e
        a = (
            np.exp(np.random.uniform(low=np.log(ar[0]), high=np.log(ar[1]), size=n))
            * u.AU
        )
        e = np.random.uniform(low=self.erange[0], high=self.erange[1], size=n)

    # generate geometric albedo (uniform)
    pr = self.prange
    p = np.random.uniform(low=pr[0], high=pr[1], size=n)

    # generate planetary radius (log-uniform)
    Rpr = self.Rprange.to("earthRad").value
    Rp = (
        np.exp(np.random.uniform(low=np.log(Rpr[0]), high=np.log(Rpr[1]), size=n))
        * u.earthRad
    )

    return a, e, p, Rp
def dist_eccen_from_sma(self, e, a):
    """Probability density function for eccentricity constrained by
    semi-major axis, such that orbital radius always falls within the
    provided sma range.

    The prototype provides a uniform distribution between the minimum and
    maximum allowable values.

    Args:
        e (~numpy.ndarray(float)):
            Eccentricity values
        a (~numpy.ndarray(float)):
            Semi-major axis value in AU. Not an astropy quantity.

    Returns:
        ~numpy.ndarray(float):
            Probability density of eccentricity constrained by semi-major axis
    """
    # Cast a and e to (at least) 1D arrays.  This replaces the former
    # np.array(..., ndmin=1, copy=False): under NumPy >= 2.0, copy=False
    # means "never copy" and raises ValueError whenever a copy is actually
    # required (e.g. for list input); asarray keeps the old
    # copy-only-if-needed behavior.
    e = np.atleast_1d(np.asarray(e))
    a = np.atleast_1d(np.asarray(a))

    # if a is length 1, copy a to make the same shape as e
    if a.ndim == 1 and len(a) == 1:
        a = a * np.ones(e.shape)

    # unitless sma range
    ar = self.arange.to("AU").value
    # sma interval over which constrained orbits are possible
    arcon = np.array(
        [ar[0] / (1.0 - self.erange[0]), ar[1] / (1.0 + self.erange[0])]
    )

    # upper limit for eccentricity given sma
    elim = np.zeros(a.shape)
    amean = np.mean(arcon)
    # NOTE(review): gen_plan_params uses amean = np.mean(ar) for the same
    # split; the asymmetry is preserved here -- confirm which is intended.
    elim[a <= amean] = 1.0 - ar[0] / a[a <= amean]
    elim[a > amean] = ar[1] / a[a > amean] - 1.0
    elim[elim > self.erange[1]] = self.erange[1]
    elim[elim < self.erange[0]] = self.erange[0]

    # if e and a are two arrays of different size, create a 2D grid
    if a.size not in [1, e.size]:
        elim, e = np.meshgrid(elim, e)

    # density is uniform on [erange[0], elim] wherever a is constrainable
    f = np.zeros(e.shape)
    mask = np.where((a >= arcon[0]) & (a <= arcon[1]))
    f[mask] = self.uniform(e[mask], (self.erange[0], elim[mask]))

    return f
def dist_sma(self, a):
    """Probability density function for semi-major axis in AU

    The prototype provides a log-uniform distribution between the minimum
    and maximum values.

    Args:
        a (~numpy.ndarray(float)):
            Semi-major axis value(s) in AU. Not an astropy quantity.

    Returns:
        ~numpy.ndarray(float):
            Semi-major axis probability density
    """
    # log-uniform density over the unitless sma range
    bounds = self.arange.to("AU").value
    return self.logunif(a, bounds)
def dist_eccen(self, e):
    """Probability density function for eccentricity

    The prototype provides a uniform distribution between the minimum and
    maximum values.

    Args:
        e (~numpy.ndarray(float)):
            Eccentricity value(s)

    Returns:
        ~numpy.ndarray(float):
            Eccentricity probability density
    """
    # uniform density over the configured eccentricity bounds
    bounds = self.erange
    return self.uniform(e, bounds)
def dist_albedo(self, p):
    """Probability density function for albedo

    The prototype provides a uniform distribution between the minimum and
    maximum values.

    Args:
        p (~numpy.ndarray(float)):
            Albedo value(s)

    Returns:
        ~numpy.ndarray(float):
            Albedo probability density
    """
    # uniform density over the configured albedo bounds
    bounds = self.prange
    return self.uniform(p, bounds)
def dist_radius(self, Rp):
    """Probability density function for planetary radius in Earth radius

    The prototype provides a log-uniform distribution between the minimum
    and maximum values.

    Args:
        Rp (~numpy.ndarray(float)):
            Planetary radius value(s) in Earth radius. Not an astropy quantity.

    Returns:
        ~numpy.ndarray(float):
            Planetary radius probability density
    """
    # log-uniform density over the unitless radius range
    bounds = self.Rprange.to("earthRad").value
    return self.logunif(Rp, bounds)
def dist_mass(self, Mp):
    """Probability density function for planetary mass in Earth mass

    The prototype provides an unbounded power law distribution. Note
    that this should really be a function of a density model and the radius
    distribution for all implementations that use it.

    Args:
        Mp (~numpy.ndarray(float)):
            Planetary mass value(s) in Earth mass. Not an astropy quantity.

    Returns:
        ~numpy.ndarray(float):
            Planetary mass probability density
    """
    masses = np.array(Mp, ndmin=1) * u.earthMass
    # 1.0 where the mass lies inside the allowed range, 0.0 elsewhere
    inrange = ((masses >= self.Mprange[0]) & (masses <= self.Mprange[1])).astype(float)
    # power law evaluated in Jupiter masses
    return inrange * masses.to("jupiterMass").value ** (-1.3)
|
11,878 | a217570b465bc9c19cb1cb9bcfc88ac00e036e7a | class NumStr(object):
    # NOTE: Python 2 source -- uses the old "raise E, msg" statement, the
    # removed builtin cmp(), __cmp__ (dropped in Python 3) and __nonzero__
    # (renamed __bool__ in Python 3).  Kept as-is.
    def __init__(self, num = 0, string = ''):
        # private payload: a number and a string (name-mangled attributes)
        self.__num = num
        self.__string = string
    def __str__(self):
        # e.g. NumStr(3, 'foo') -> "[3 :: 'foo']"
        return '[%d :: %r]' % (self.__num, self.__string)
    __repr__ = __str__
    def __add__(self, other):
        # '+' adds the numbers and concatenates the strings
        if isinstance(other, NumStr):
            return self.__class__(self.__num + other.__num, \
                self.__string + other.__string)
        else:
            raise TypeError, \
                'Illegal argument type for built-in operation'
    def __mul__(self, num):
        # '*' by an int scales the number and repeats the string
        if isinstance(num, int):
            return self.__class__(self.__num * num, self.__string * num)
        else:
            raise TypeError, \
                'Illegal argument for build-in operation'
    def __nonzero__(self):
        # truthiness: false only when num == 0 and the string is empty
        return self.__num or len(self.__string)
    def __norm_cva(self, cmpres):
        # normalize a comparison result to -1, 0 or 1
        return cmp(cmpres, 0)
    def __cmp__(self, other):
        # overall ordering: sum of the normalized num and string comparisons
        return self.__norm_cva(cmp(self.__num, other.__num)) + \
            self.__norm_cva(cmp(self.__string, other.__string))
# demo script (Python 2 print statements): exercise construction, operators,
# truthiness and comparisons of NumStr
a = NumStr(3, 'foo')
b = NumStr(3, 'goo')
c = NumStr(2, 'foo')
d = NumStr()                 # falsy: num 0 and empty string
e = NumStr(string = 'boo')   # truthy: non-empty string
f = NumStr(1)                # truthy: non-zero num
print a
print b
print c
print d
print e
print f
# rich comparisons are derived from __cmp__ in Python 2
print a < b
print b < c
print a == a
# multiplication and (non-commutative string) addition
print b * 2
print a * 3
print b + e
print e + b
# __nonzero__ drives the truth tests below
if d: print 'not false'
else: print 'false'
if e: print 'not false'
else: print 'false'
print cmp(a,b)
print cmp(a,c)
print cmp(a,a)
g = NumStr(2,'moo')
print g == b
|
11,879 | df3c6199abf5b1fe76e04cf910ff19e1c1b05845 | import asyncio
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import datetime
import sys, traceback
import time
########################################
def get_prefix(bot, message):
    """Return the command prefixes the bot responds to for *message*."""
    # direct messages (no guild) accept only the plain '?' prefix
    if not message.guild:
        return '?'
    # guild messages: custom prefixes plus an @-mention of the bot
    guild_prefixes = ['>?', 'lol ', '##']
    return commands.when_mentioned_or(*guild_prefixes)(bot, message)
# cog extensions loaded at startup
initial_extensions = ['cogs.music', 'cogs.translate','cogs.mod','cogs.fun']

bot = commands.Bot(command_prefix=get_prefix, description='A Fuj-Bot!')

@bot.event
async def on_ready():
    # announce successful login with the bot identity and library version
    print(f'\n\nLogged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}\n')

if __name__ == '__main__':
    # load each cog; failures are logged but do not abort startup
    for extension in initial_extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            print(f'Failed to load extension {extension}.', file=sys.stderr)
            traceback.print_exc()
    print(f'Successfully logged in and booted...!')
@bot.event
async def on_guild_join(server):
    # DM an embed describing the newly joined guild to the bot owner
    print("New Server Joined: {}!".format(server))
    owner=bot.get_user(162939111680901122)  # bot owner's user id
    servername= server.name
    serverreg= server.region
    serverid= server.id
    channel=discord.utils.get(server.text_channels)  # NOTE(review): unused -- confirm intent
    serverowner= server.owner
    ownerid= server.owner_id  # NOTE(review): unused -- confirm intent
    joinedguild = discord.Embed(colour = discord.Colour(0xA522B3))
    joinedguild.set_author(name = '[SERVER JOINED]')
    joinedguild.add_field(name="Server Name:", value= servername)
    joinedguild.add_field(name="Server ID:", value= serverid)
    joinedguild.add_field(name="Server Region:", value= serverreg)
    joinedguild.add_field(name="Server Owner:", value= serverowner)
    joinedguild.set_footer(text = time.strftime("%d/%m/%Y - %I:%M:%S %p CET"))
    await owner.send(embed = joinedguild)

# SECURITY: the bot token is hard-coded (and now public in version control).
# It should be revoked and loaded from an environment variable instead.
bot.run('MzkyMTA1ODg2ODk5NzY1MjU4.DRrSsA.aawWm282Ht923J1sc_eC3GD6x6A', bot=True, reconnect=True)
11,880 | 400b0c60ebe273f8999c0d21465ae7f633d22e3e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib import admin

from .models import CategoryChallenge, PlotChallenge, Challenge
# Register your models here.
#admin.site.register(Person)

# expose the challenge models in the Django admin with the default ModelAdmin
admin.site.register(CategoryChallenge)
admin.site.register(PlotChallenge)
admin.site.register(Challenge)
|
11,881 | b2da068d2661b35b2a593308a59fb9bf14f3b087 | import os
import traceback
from click.testing import CliRunner
import show.main as show
show_interfaces_alias_output="""\
Name Alias
----------- -------
Ethernet0 etp1
Ethernet4 etp2
Ethernet8 etp3
Ethernet12 etp4
Ethernet16 etp5
Ethernet20 etp6
Ethernet24 etp7
Ethernet28 etp8
Ethernet32 etp9
Ethernet36 etp10
Ethernet40 etp11
Ethernet44 etp12
Ethernet48 etp13
Ethernet52 etp14
Ethernet56 etp15
Ethernet60 etp16
Ethernet64 etp17
Ethernet68 etp18
Ethernet72 etp19
Ethernet76 etp20
Ethernet80 etp21
Ethernet84 etp22
Ethernet88 etp23
Ethernet92 etp24
Ethernet96 etp25
Ethernet100 etp26
Ethernet104 etp27
Ethernet108 etp28
Ethernet112 etp29
Ethernet116 etp30
Ethernet120 etp31
Ethernet124 etp32
"""
show_interfaces_alias_Ethernet0_output="""\
Name Alias
--------- -------
Ethernet0 etp1
"""
show_interfaces_neighbor_expected_output="""\
LocalPort Neighbor NeighborPort NeighborLoopback NeighborMgmt NeighborType
----------- ---------- -------------- ------------------ -------------- --------------
Ethernet112 ARISTA01T1 Ethernet1 None 10.250.0.51 LeafRouter
Ethernet116 ARISTA02T1 Ethernet1 None 10.250.0.52 LeafRouter
Ethernet120 ARISTA03T1 Ethernet1 None 10.250.0.53 LeafRouter
Ethernet124 ARISTA04T1 Ethernet1 None 10.250.0.54 LeafRouter
"""
show_interfaces_neighbor_expected_output_Ethernet112="""\
LocalPort Neighbor NeighborPort NeighborLoopback NeighborMgmt NeighborType
----------- ---------- -------------- ------------------ -------------- --------------
Ethernet112 ARISTA01T1 Ethernet1 None 10.250.0.51 LeafRouter
"""
show_interfaces_neighbor_expected_output_etp29="""\
LocalPort Neighbor NeighborPort NeighborLoopback NeighborMgmt NeighborType
----------- ---------- -------------- ------------------ -------------- --------------
etp29 ARISTA01T1 Ethernet1 None 10.250.0.51 LeafRouter
"""
show_interfaces_portchannel_output="""\
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports
----- --------------- ----------- --------------
0001 PortChannel0001 LACP(A)(Dw) Ethernet112(D)
0002 PortChannel0002 LACP(A)(Up) Ethernet116(S)
0003 PortChannel0003 LACP(A)(Up) Ethernet120(S)
0004 PortChannel0004 LACP(A)(Up) N/A
1001 PortChannel1001 N/A
"""
show_interfaces_portchannel_in_alias_mode_output="""\
Flags: A - active, I - inactive, Up - up, Dw - Down, N/A - not available,
S - selected, D - deselected, * - not synced
No. Team Dev Protocol Ports
----- --------------- ----------- --------
0001 PortChannel0001 LACP(A)(Dw) etp29(D)
0002 PortChannel0002 LACP(A)(Up) etp30(S)
0003 PortChannel0003 LACP(A)(Up) etp31(S)
0004 PortChannel0004 LACP(A)(Up) N/A
1001 PortChannel1001 N/A
"""
class TestInterfaces(object):
    """CLI tests for the 'show interfaces' command tree.

    Each test invokes the click commands through CliRunner and compares
    result.output against the expected-output constants defined above.
    Alias-mode tests toggle the SONIC_CLI_IFACE_MODE environment variable
    around the invocation.

    NOTE(review): the env var is reset to "default" only on the success
    path; an exception inside invoke would leave alias mode set for later
    tests -- consider a try/finally (or fixture) -- confirm before changing.
    """

    @classmethod
    def setup_class(cls):
        print("SETUP")

    def test_show_interfaces(self):
        # base command should simply succeed; output itself is not pinned
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"], [])
        print(result.exit_code)
        print(result.output)
        assert result.exit_code == 0

    def test_show_interfaces_alias(self):
        # full name -> alias table
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["alias"], [])
        print(result.exit_code)
        print(result.output)
        assert result.exit_code == 0
        assert result.output == show_interfaces_alias_output

    def test_show_interfaces_alias_Ethernet0(self):
        # filter by SONiC interface name
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["alias"], ["Ethernet0"])
        print(result.exit_code)
        print(result.output)
        assert result.exit_code == 0
        assert result.output == show_interfaces_alias_Ethernet0_output

    def test_show_interfaces_alias_etp1(self):
        # filter by alias while the CLI runs in alias naming mode
        runner = CliRunner()
        os.environ['SONIC_CLI_IFACE_MODE'] = "alias"
        result = runner.invoke(show.cli.commands["interfaces"].commands["alias"], ["etp1"])
        os.environ['SONIC_CLI_IFACE_MODE'] = "default"
        print(result.exit_code)
        print(result.output)
        assert result.exit_code == 0
        assert result.output == show_interfaces_alias_Ethernet0_output

    def test_show_interfaces_alias_invalid_name(self):
        # Ethernet3 is not a valid port on this platform
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["alias"], ["Ethernet3"])
        print(result.exit_code)
        print(result.output)
        assert result.exit_code != 0
        assert "Error: Invalid interface name Ethernet3" in result.output

    def test_show_interfaces_naming_mode_default(self):
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["naming_mode"], [])
        print(result.exit_code)
        print(result.output)
        assert result.exit_code == 0
        assert result.output.rstrip() == "default"

    def test_show_interfaces_naming_mode_alias(self):
        runner = CliRunner()
        os.environ['SONIC_CLI_IFACE_MODE'] = "alias"
        result = runner.invoke(show.cli.commands["interfaces"].commands["naming_mode"], [])
        os.environ['SONIC_CLI_IFACE_MODE'] = "default"
        print(result.exit_code)
        print(result.output)
        assert result.exit_code == 0
        assert result.output.rstrip() == "alias"

    def test_show_interfaces_neighbor_expected(self):
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["neighbor"].commands["expected"], [])
        print(result.exit_code)
        print(result.output)
        # traceback.print_tb(result.exc_info[2])
        assert result.exit_code == 0
        assert result.output == show_interfaces_neighbor_expected_output

    def test_show_interfaces_neighbor_expected_Ethernet112(self):
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["neighbor"].commands["expected"], ["Ethernet112"])
        print(result.exit_code)
        print(result.output)
        # traceback.print_tb(result.exc_info[2])
        assert result.exit_code == 0
        assert result.output == show_interfaces_neighbor_expected_output_Ethernet112

    def test_show_interfaces_neighbor_expected_etp29(self):
        # same query as above, but addressed (and rendered) by alias
        runner = CliRunner()
        os.environ['SONIC_CLI_IFACE_MODE'] = "alias"
        result = runner.invoke(show.cli.commands["interfaces"].commands["neighbor"].commands["expected"], ["etp29"])
        os.environ['SONIC_CLI_IFACE_MODE'] = "default"
        print(result.exit_code)
        print(result.output)
        # traceback.print_tb(result.exc_info[2])
        assert result.exit_code == 0
        assert result.output == show_interfaces_neighbor_expected_output_etp29

    def test_show_interfaces_neighbor_expected_Ethernet0(self):
        # Ethernet0 has no configured neighbor in the test config
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["neighbor"].commands["expected"], ["Ethernet0"])
        print(result.exit_code)
        print(result.output)
        # traceback.print_tb(result.exc_info[2])
        assert result.exit_code == 0
        assert result.output.rstrip() == "No neighbor information available for interface Ethernet0"

    def test_show_interfaces_portchannel(self):
        runner = CliRunner()
        result = runner.invoke(show.cli.commands["interfaces"].commands["portchannel"], [])
        print(result.exit_code)
        print(result.output)
        traceback.print_tb(result.exc_info[2])
        assert result.exit_code == 0
        assert result.output == show_interfaces_portchannel_output

    def test_show_interfaces_portchannel_in_alias_mode(self):
        runner = CliRunner()
        os.environ['SONIC_CLI_IFACE_MODE'] = "alias"
        result = runner.invoke(show.cli.commands["interfaces"].commands["portchannel"], [])
        os.environ['SONIC_CLI_IFACE_MODE'] = "default"
        print(result.exit_code)
        print(result.output)
        traceback.print_tb(result.exc_info[2])
        assert result.exit_code == 0
        assert result.output == show_interfaces_portchannel_in_alias_mode_output

    @classmethod
    def teardown_class(cls):
        print("TEARDOWN")
|
11,882 | ed5356356efb918281fe52cb01afc123bcfcf8a2 | #!/usr/bin/env python3
import _thread
import sys
import cfg
import uuid
from xmlrpc.client import ServerProxy
from enums import ReduceStatus, Status
from fake_fs import FakeFS
import map_libs.word_count
import os
import importlib.util
import json
# fake mapper client for testing
# communication reducer and mapper
class FakeMapperClient:
    """In-memory stand-in for the mapper RPC endpoints, used in tests.

    Mapped data is stored in a nested dict keyed by
    mapper address -> task id -> region.
    """

    def __init__(self):
        # data[map_addr][task_id][region] -> list of (key, value) pairs
        self.data = {}

    def load_mapped_data(self, map_addr, task_id, region):
        """Return the mapped pairs for (map_addr, task_id, region).

        Returns {'status': Status.not_found} when any level is missing.
        """
        # Bug fix: the original chained the membership checks with 'and',
        # which both inverted the intended logic and raised KeyError on a
        # missing map_addr.  'or' short-circuits, so each deeper lookup only
        # runs once the shallower key is known to exist.
        if (map_addr not in self.data
                or task_id not in self.data[map_addr]
                or region not in self.data[map_addr][task_id]):
            return {'status': Status.not_found}
        return self.data[map_addr][task_id][region]

    def put(self, map_addr, task_id, region, data):
        """Store mapped pairs for (map_addr, task_id, region)."""
        self.data.setdefault(map_addr, {}).setdefault(task_id, {})[region] = data
class RPCMapperClient:
    """Loads mapped data from a remote mapper over XML-RPC."""

    def load_mapped_data(self, map_addr, task_id, region):
        """Fetch the mapped pairs for (task_id, region) from map_addr."""
        proxy = ServerProxy(map_addr)
        response = proxy.read_mapped_data(task_id, region)
        return response['data']
class ReduceTask:
    """State for one reduce assignment (a task id plus a key region)."""

    def __init__(self, task_id, region, mappers, script_path):
        # identifiers for this unit of work
        self.task_id = task_id
        self.region = region
        # mapper addresses holding the intermediate data for this task
        self.mappers = mappers
        # DFS path of the user's reduce script
        self.script_path = script_path
        # every task starts out merely accepted
        self.status = ReduceStatus.accepted
class Reducer:
    """Executes reduce tasks: loads mapped data from mappers, runs the
    user-supplied reduce script, stores the result in DFS and notifies
    the job tracker."""

    def __init__(self, fs, name, addr, opts, mapper_cl):
        self.fs = fs
        self.name = name
        self.addr = addr
        self.job_tracker = ServerProxy(opts["jt_addr"])
        # tasks[task_id][region] -> ReduceTask
        self.tasks = {}
        self.mapper_cl = mapper_cl  # client for loading data from mappers
        self.work_dir = opts["base_dir"] + name

    def log(self, task_id, msg):
        print("Task", task_id, ":", msg)

    def err(self, task_id, msg, e=None):
        print("Task", task_id, ":", msg, e, file=sys.stderr)

    # signal from JT for starting reducing
    # task_id - unique task_id
    # region for which reducer is responsible
    # mappers which contain data for current task
    # path in DFS to files
    def reduce(self, task_id, region, mappers, script_path):
        self.log(task_id, "Get request for start reducing of region " + str(region))
        if task_id not in self.tasks:
            self.tasks[task_id] = {}
        task = ReduceTask(task_id, region, mappers, script_path)
        self.tasks[task_id][region] = task
        # run the pipeline off-thread so the RPC call returns immediately
        _thread.start_new_thread(self._process_reduce_task, (task, ))
        return {'status': ReduceStatus.accepted}

    def _process_reduce_task(self, task):
        # pipeline: load data -> load script -> reduce -> save -> notify JT;
        # each stage runs only if the previous one advanced task.status
        data = self._load_data_from_mappers(task)
        if task.status == ReduceStatus.data_loaded:
            reducer = self._load_reduce_script(task)
        if task.status == ReduceStatus.reducer_loaded:
            result = self.execute_reduce_script(reducer, task, data)
        if task.status == ReduceStatus.data_reduced:
            self._save_result_to_dfs(task, result)
        if task.status == ReduceStatus.data_saved:
            self._send_reducing_done(task)

    def _load_data_from_mappers(self, task):
        """Collect this task's mapped pairs for task.region from all mappers."""
        try:
            self.log(task.task_id, "Start loading data from mappers to region " + str(task.region))
            task.status = ReduceStatus.start_data_loading
            result = []
            for mapper in task.mappers:
                data = self.mapper_cl.load_mapped_data(mapper, task.task_id, task.region)
                result.extend(data)
            task.status = ReduceStatus.data_loaded
            return result
        except Exception as e:
            task.status = ReduceStatus.err_data_loading
            # bug fix: the original referenced an undefined name 'region'
            # here, raising NameError inside the handler
            self.err(task.task_id, "Error during loading data for region " + str(task.region), e)

    def _load_reduce_script(self, task):
        """Download the reduce script from DFS and import it as a module.

        Returns an instance of the script's Reducer class, or None on error.
        """
        try:
            l_path = self.work_dir + "/" + str(task.task_id) + "/reduce.py"
            r = self.fs.download_to(task.script_path, l_path)
            if r['status'] == Status.not_found:
                task.status = ReduceStatus.reduce_script_not_found
                return None
            # import the downloaded file under a task-unique module name
            spec = importlib.util.spec_from_file_location("reduce" + str(task.task_id), l_path)
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
            task.status = ReduceStatus.reducer_loaded
            return mod.Reducer()
        except Exception as e:
            self.err(task.task_id, "error during script execution", e)
            task.status = ReduceStatus.err_reducer_loading
            return None

    def execute_reduce_script(self, reducer, task, data):
        """Run the user-provided reducer over the loaded data."""
        try:
            self.log(task.task_id, "Start loading reducing script for executing " + task.script_path)
            r = reducer.run_reduce(data)
            task.status = ReduceStatus.data_reduced
            return r
        except Exception as e:
            task.status = ReduceStatus.err_reduce_script
            # bug fix: the original omitted the task_id argument, so the
            # message was logged as the task id and the exception was lost
            self.err(task.task_id, "Error during executing reducer script", e)

    # save reduced result to dfs
    def _save_result_to_dfs(self, task, result):
        try:
            path = "/" + str(task.task_id) + "/result/" + str(task.region)
            self.log(task.task_id, "Save result of region " + str(task.region) + " to " + path)
            self.fs.save(json.dumps(result), path)
            task.status = ReduceStatus.data_saved
        except Exception as e:
            task.status = ReduceStatus.err_save_result
            # bug fix: pass the exception through so it is actually logged
            self.err(task.task_id, "Error during saving region " + str(task.region) + " to DFS", e)

    def _send_reducing_done(self, task):
        """Notify the job tracker that this region has been reduced."""
        try:
            self.job_tracker.reducing_done(self.addr, str(task.task_id), task.region)
            self.log(task.task_id, "Sent message to job tracker about finishing reducing of region " + str(task.region))
            task.status = ReduceStatus.finished
        except Exception as e:
            task.status = ReduceStatus.err_send_done
            self.err(task.task_id, "Failed to send result to JT for region " + str(task.region), e)

    # get status of current reducer execution
    # task_id - unique task_id
    # region - regions of keys which reducer should reduce
    # returns dict {'status': ReduceStatus }
    def get_status(self, task_id, region):
        # bug fix: the original combined the membership checks with 'and',
        # which inverted the logic and raised KeyError for unknown task ids;
        # 'or' short-circuits safely
        if task_id not in self.tasks or region not in self.tasks[task_id]:
            return {'status': ReduceStatus.reduce_not_found}
        t = self.tasks[task_id][region]
        return {'status': t.status}
if __name__ == '__main__':
    # CLI: <reducer_name> <port> <config_path> <reduce_script_path>
    name = sys.argv[1]
    port = int(sys.argv[2])
    cfg_path = sys.argv[3]
    script_path = sys.argv[4]
    opts = cfg.load(cfg_path)
    print("JT address", opts["jt_addr"])
    # stage the reduce script into the fake DFS
    fs = FakeFS()
    task_id = uuid.uuid4()
    with open(script_path, "r") as file:
        data = file.read()
    fs.save(data, "/scripts/word_count.py")
    # seed fake mappers with word-count pairs for a single region
    region = 1
    mapper_cl = FakeMapperClient()
    mapper_cl.put("map1", task_id, region, [('a', 1), ('a', 1), ('a', 1), ('b', 1), ('b', 1)])
    mapper_cl.put("map2", task_id, region, [('a', 1), ('b', 1), ('b', 1), ('d', 1)])
    mapper_cl.put("map3", task_id, region, [('a', 1), ('d', 1)])
    reducer = Reducer(fs, name, "http://localhost:" + str(port), opts, mapper_cl)
    r = reducer.reduce(task_id, region, ["map1", "map2"], "/scripts/word_count.py")
    # busy-wait until the pipeline reaches its terminal state
    # NOTE(review): this waits for err_send_done (the JT notification fails
    # because no job tracker runs in this test setup) -- confirm intent
    while reducer.get_status(task_id, region)['status'] != ReduceStatus.err_send_done:
        pass
    reg_1 = fs.get_chunk("/"+str(task_id) + "/result/" + str(region))
    print('reduce has finished', reg_1)
|
11,883 | 7ea2331e02160fae3cb2c8f49aa850a76081c2b0 | import numpy as np
from typing import List
from classifier import Classifier
class DecisionStump(Classifier):
    """Axis-aligned decision stump: predicts s where x[d] > b, else -s."""

    def __init__(self, s: int, b: float, d: int):
        self.clf_name = "Decision_stump"
        self.s = s  # sign/label emitted above the threshold
        self.b = b  # threshold value
        self.d = d  # index of the feature dimension examined

    def train(self, features: List[List[float]], labels: List[int]):
        # stump parameters are fixed at construction; nothing to fit
        pass

    def predict(self, features: List[List[float]]) -> List[int]:
        """Return +s / -s per sample, based on column d versus threshold b."""
        column = np.array(features)[:, self.d]
        # float dtype matches the original np.zeros-based implementation
        labels = np.where(column > self.b, self.s, -self.s).astype(float)
        return labels.tolist()
|
11,885 | c57eaea649fe5e2df37a9609db2b2924c74dd74a | def align_legend(legend):
    """
    Aligns text in a legend

    Parameters
    ----------
    legend : matplotlib.legend.Legend
    """
    renderer = legend.get_figure().canvas.get_renderer()
    # widest label width in display units; used as the common right edge
    shift = max([t.get_window_extent(renderer).width for t in legend.get_texts()])
    for t in legend.get_texts():
        # right-align every label and pin it to the shared edge
        t.set_ha('right')  # ha is alias for horizontalalignment
        t.set_position((shift,0))
11,885 | b248c076e85cc61bc942113c77103b20e210dbfb | # 学校:四川轻化工大学
# 学院:自信学院
# 学生:胡万平
# 开发时间:2021/10/7 14:23
'''add () 通过重写 add ()方法,可使用自定义对象具有“+”功能
通过重写 len ()方法,让内置函数len()的参数可以是白定义类型
'''
a = 20
b = 100
c =a + b  # addition of two integer objects via the '+' operator
d = a.__add__(b)  # the same operation, calling the special method directly
print(c)
print(d)
class Student:
    """Demonstrates operator overloading via __add__ and __len__."""

    def __init__(self, name):
        self.name = name

    def __add__(self, other):
        # '+' concatenates the two students' names
        return self.name + other.name

    def __len__(self):
        # len(student) is the length of the student's name
        return len(self.name)
stu1 = Student('Jack')
stu2 = Student('李四')
s = stu1 + stu2  # '+' works because Student defines the __add__() special method
print(s)
s = stu1.__add__(stu2)  # equivalent explicit special-method call
print(s)
print('---------------------------')
lst = [1, 2, 3, 4]
print(len(lst))  # len() delegates to the list's __len__
print(lst.__len__())
print(len(stu1))  # len() works on Student because it defines __len__()
11,887 | 28379fc36e57704ef7bae7468f92036a8cc92ab1 | hungry= input("Are you really hungry")
# branch on the literal answer "yes"; any other input falls through to else
if hungry=="yes":
    print("Eat Pizza")
else:
    print("contimue github")  # NOTE(review): typo "contimue" in the output string
11,887 | 27ec697bf60bfd38e0b286b9a54a62baa0f12c63 | import logging
from typing import List
import boto3
from item import Item
db = boto3.client("dynamodb")

# Ensure the backing table exists at import time.  Creation failures (most
# commonly "table already exists") are logged and otherwise ignored so the
# module can still be imported.
try:
    db.create_table(
        TableName="items",
        AttributeDefinitions=[
            {
                "AttributeName": "Id",
                "AttributeType": "S"
            }
        ],
        KeySchema=[
            {
                "AttributeName": "Id",
                "KeyType": "HASH"
            }
        ],
        ProvisionedThroughput={
            "ReadCapacityUnits": 5,
            "WriteCapacityUnits": 5
        }
    )
except Exception as ex:
    logging.error(ex)

# name of the DynamoDB table used by the CRUD helpers below
TABLE = "items"
def create_item(item: Item) -> Item:
    """Persist *item* to DynamoDB and return it unchanged."""
    record = item.to_record()
    db.put_item(TableName=TABLE, Item=record)
    return item
def get_all_items() -> List[Item]:
    """Return every record in the table, converted to Item objects."""
    records = db.scan(TableName=TABLE)["Items"]
    return [Item.from_record(record) for record in records]
def get_one_item(item_id: str) -> Item:
    """Return the item with the given id, or None if it does not exist.

    Args:
        item_id: Value of the table's "Id" hash key.

    Returns:
        The matching Item, or None when no record was found.
    """
    response = db.query(TableName=TABLE,
                        KeyConditionExpression="Id = :item_id",
                        ExpressionAttributeValues={
                            ":item_id": {
                                "S": item_id
                            }
                        })
    # Bug fix: boto3 returns a plain dict -- the match count and records live
    # under the "Count" and "Items" keys, not as attributes.  The original
    # also used "is 1" (identity) where "== 1" (equality) is required.
    if response["Count"] == 1:
        return Item.from_record(response["Items"][0])
    else:
        return None
|
11,888 | 1a004af88ae3bbae54b005c489683d67c3caac80 | # This program is used to read 2 files. the first file is "Top25HalloweenSongs
#Contains halloween songs and the second one file is Top25HalloweenSongs_Comments
# contains a matching comment to the song information
# written on 10/16/17 by john paul lucia
HeaderStr = "## Welcome to my scary Halloween song selection program...Boo! ##"
print("#" * len(HeaderStr))
print(HeaderStr)
print("#" * len(HeaderStr))
print()

# Read in the song information file (one "number,artist,title" line per song)
SongsFP = open("Top25HalloweenSongs.txt")
Songs = SongsFP.readlines()
SongsFP.close()

# Read in the song comments file (comment text is the '%'-delimited field)
SongsCommentsFP = open("Top25HalloweenSongs_Comments.txt")
SongComments = SongsCommentsFP.readlines()
SongsCommentsFP.close()

# Get the user's song selection.....
SongNumber = int(input("Please enter the song number:"))

# Bug fix: valid selections are 1..len(Songs).  The original test
# (SongNumber >= len(Songs)) wrongly rejected the last song and accepted
# zero/negative numbers, which would index the lists from the end.
while SongNumber < 1 or SongNumber > len(Songs):
    print("Invalid input. Please try again...")
    SongNumber = int(input("Please enter the song number:"))

# Split the selected song line into its comma-separated fields
SongLine = Songs[SongNumber - 1]
SongParts = SongLine.split(',')

# Print the song information.....
SongPrintStr = "Song number " + SongParts[0] + " is from artist " + SongParts[1] + " and the song is " + SongParts[2]
print(SongPrintStr)
print()
print("----------\n")

# Split the matching comment line; the comment is the second '%' field
SongCommentLine = SongComments[SongNumber - 1]
SongCommentParts = SongCommentLine.split('%')

# Print the song comments
print(SongCommentParts[1])

# Save the song selection to a file named "MyFavoriteHalloweenSong.txt"
SongSelectionFP = open("MyFavoriteHalloweenSong.txt", 'w')
SongSelectionFP.writelines(SongPrintStr)  # Song Info
SongSelectionFP.writelines("\n")
SongSelectionFP.writelines(SongCommentParts[1])  # Song Comment
SongSelectionFP.close()

#print(Songs[SongNumber - 1], SongComments[SongNumber - 1])
EnderStr = "## Thank you...Boo! ##"
print("#" * len(EnderStr))
print(EnderStr)
print("#" * len(EnderStr))
11,889 | c999cfe205a90a313253c00034b4aa1382b1e5ff | from django.apps import AppConfig
class PetConfig(AppConfig):
    """Django AppConfig for the 'pet' application."""
    name = 'pet'
11,890 | 98d7ebdc7c12e4847c8e341dfd2e72f891850196 | n = int(input())
# bala is a found-flag: set to 1 when n matches some power of two
bala = 0
for i in range(n):
    if 2**i == n:
        bala = 1
        break
# NOTE(review): n <= 0 always prints "no"; n == 1 prints "yes" (2**0).
# The scan is O(n); the bit trick n > 0 and n & (n - 1) == 0 would be O(1).
if bala == 1:
    print("yes")
else:
    print("no")
11,891 | 3bfb2bba57d4b1d83954f381b9f09fb8023cdaba | # -*- coding:utf-8 _*-
"""
@author:Runqiu Hu
@license: Apache Licence
@file: data.py
@time: 2020/10/07
@contact: hurunqiu@live.com
@project: bikeshare rebalancing
* Cooperating with Dr. Matt in 2020
"""
import numpy as np
import pandas as pd
# Pairwise station distance matrix (300 x 300), loaded as a plain ndarray.
# NOTE(review): hardcoded absolute macOS path - unportable; consider a
# path relative to the project resources directory.
distance_matrix = pd.read_csv("/Users/hurunqiu/aaai/ffbs_dynamic/resources/data_set/station_dist_matrix_300.csv",
                              header=None).to_numpy()
# Rebalancing-truck parameters; units are not stated in this file -
# presumably distance/time units consistent with distance_matrix (verify).
truck_velocity = 420
reserved_time = 5
truck_capacity = 60
station_info = [
{'cluster': 3, 'demand': 12, 'diversity': None, 'full_empty_time': 40, 'key_distance': None, 'latest_time': 45,
'priority': 1.41, 'ratio': 0.18, 'station_id': 2, 'velocity': 0.18, 'warning_time': 0, 'distance': 0.52},
{'cluster': 3, 'demand': 82, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.63, 'ratio': 0, 'station_id': 5, 'velocity': 1.02, 'warning_time': 0, 'distance': 1.39},
{'cluster': 3, 'demand': 3, 'diversity': None, 'full_empty_time': 53.33, 'key_distance': None, 'latest_time': 58.33,
'priority': 1.06, 'ratio': 0.36, 'station_id': 13, 'velocity': 0.08, 'warning_time': 24, 'distance': 1.03},
{'cluster': 3, 'demand': 9, 'diversity': None, 'full_empty_time': 38.18, 'key_distance': None, 'latest_time': 43.18,
'priority': 1.39, 'ratio': 0.3, 'station_id': 14, 'velocity': 0.18, 'warning_time': 13.09, 'distance': 0.46},
{'cluster': 3, 'demand': 13, 'diversity': None, 'full_empty_time': 28.8, 'key_distance': None, 'latest_time': 33.8,
'priority': 1.48, 'ratio': 0.19, 'station_id': 17, 'velocity': 0.21, 'warning_time': 0, 'distance': 0.94},
{'cluster': 3, 'demand': 12, 'diversity': None, 'full_empty_time': 30, 'key_distance': None, 'latest_time': 35,
'priority': 1.43, 'ratio': 0.42, 'station_id': 22, 'velocity': 0.27, 'warning_time': 15.75, 'distance': 1.32},
{'cluster': 3, 'demand': -19, 'diversity': None, 'full_empty_time': 18, 'key_distance': None, 'latest_time': 23,
'priority': 1.7, 'ratio': 0.71, 'station_id': 26, 'velocity': -0.33, 'warning_time': 5.4, 'distance': 0.71},
{'cluster': 3, 'demand': 3, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.03, 'ratio': 0.71, 'station_id': 32, 'velocity': 0.22, 'warning_time': 48, 'distance': 1.0},
{'cluster': 3, 'demand': 6, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.0, 'ratio': 0.19, 'station_id': 38, 'velocity': -0.18, 'warning_time': 0, 'distance': 1.72},
{'cluster': 3, 'demand': -23, 'diversity': None, 'full_empty_time': 3.33, 'key_distance': None, 'latest_time': 8.33,
'priority': 1.99, 'ratio': 0.96, 'station_id': 40, 'velocity': -0.3, 'warning_time': 0, 'distance': 1.41},
{'cluster': 3, 'demand': 14, 'diversity': None, 'full_empty_time': 30, 'key_distance': None, 'latest_time': 35,
'priority': 1.55, 'ratio': 0.3, 'station_id': 48, 'velocity': 0.27, 'warning_time': 9.75, 'distance': 0.71},
{'cluster': 3, 'demand': 12, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.67, 'ratio': 0, 'station_id': 55, 'velocity': 0.15, 'warning_time': 0, 'distance': 1.45},
{'cluster': 3, 'demand': 5, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.05, 'ratio': 0.08, 'station_id': 56, 'velocity': 0.02, 'warning_time': 0, 'distance': 1.08},
{'cluster': 3, 'demand': -8, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.65, 'ratio': 1.22, 'station_id': 59, 'velocity': 0, 'warning_time': 0, 'distance': 0.7},
{'cluster': 3, 'demand': -10, 'diversity': None, 'full_empty_time': 20, 'key_distance': None, 'latest_time': 25,
'priority': 1.52, 'ratio': 0.82, 'station_id': 63, 'velocity': -0.15, 'warning_time': 0, 'distance': 1.11},
{'cluster': 3, 'demand': 90, 'diversity': None, 'full_empty_time': 9, 'key_distance': None, 'latest_time': 14,
'priority': 2.68, 'ratio': 0.11, 'station_id': 69, 'velocity': 1.33, 'warning_time': 0, 'distance': 0.48},
{'cluster': 3, 'demand': -7, 'diversity': None, 'full_empty_time': 45, 'key_distance': None, 'latest_time': 50,
'priority': 1.12, 'ratio': 0.75, 'station_id': 70, 'velocity': -0.13, 'warning_time': 9, 'distance': 1.61},
{'cluster': 3, 'demand': -11, 'diversity': None, 'full_empty_time': 20, 'key_distance': None, 'latest_time': 25,
'priority': 1.5, 'ratio': 0.86, 'station_id': 75, 'velocity': -0.15, 'warning_time': 0, 'distance': 1.51},
{'cluster': 3, 'demand': -23, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.12, 'ratio': 1.15, 'station_id': 83, 'velocity': -0.22, 'warning_time': 0, 'distance': 0.21},
{'cluster': 3, 'demand': 13, 'diversity': None, 'full_empty_time': 56.67, 'key_distance': None,
'latest_time': 61.67, 'priority': 1.32, 'ratio': 0.29, 'station_id': 87, 'velocity': 0.3, 'warning_time': 17.33,
'distance': 0.76},
{'cluster': 3, 'demand': 1, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.1, 'ratio': 0.26, 'station_id': 89, 'velocity': 0.02, 'warning_time': 56, 'distance': 0.82},
{'cluster': 3, 'demand': 21, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.96, 'ratio': 0, 'station_id': 95, 'velocity': 0.22, 'warning_time': 0, 'distance': 1.17},
{'cluster': 3, 'demand': -49, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.48, 'ratio': 1.61, 'station_id': 97, 'velocity': -0.37, 'warning_time': 0, 'distance': 1.27},
{'cluster': 3, 'demand': -35, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.36, 'ratio': 1.64, 'station_id': 98, 'velocity': -0.23, 'warning_time': 0, 'distance': 1.49},
{'cluster': 3, 'demand': 5, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.62, 'ratio': 0, 'station_id': 115, 'velocity': 0.02, 'warning_time': 0, 'distance': 1.47},
{'cluster': 3, 'demand': -16, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.35, 'ratio': 0.82, 'station_id': 117, 'velocity': 0.02, 'warning_time': 0, 'distance': 1.3},
{'cluster': 3, 'demand': -16, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.45, 'ratio': 0.84, 'station_id': 122, 'velocity': 0.01, 'warning_time': 0, 'distance': 0.19},
{'cluster': 3, 'demand': -13, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.76, 'ratio': 1.36, 'station_id': 126, 'velocity': -0.08, 'warning_time': 0, 'distance': 1.59},
{'cluster': 3, 'demand': 3, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.15, 'ratio': 0.27, 'station_id': 135, 'velocity': 0.05, 'warning_time': 16, 'distance': 0.66},
{'cluster': 3, 'demand': -44, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.44, 'ratio': 1.55, 'station_id': 137, 'velocity': -0.18, 'warning_time': 0, 'distance': 1.08},
{'cluster': 3, 'demand': -8, 'diversity': None, 'full_empty_time': 42.35, 'key_distance': None,
'latest_time': 47.35, 'priority': 1.37, 'ratio': 0.78, 'station_id': 139, 'velocity': -0.14, 'warning_time': 4.24,
'distance': 0.43},
{'cluster': 3, 'demand': -16, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.87, 'ratio': 1.62, 'station_id': 142, 'velocity': 0.05, 'warning_time': 0, 'distance': 0.77},
{'cluster': 3, 'demand': -28, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.18, 'ratio': 1.42, 'station_id': 152, 'velocity': -0.26, 'warning_time': 0, 'distance': 1.76},
{'cluster': 3, 'demand': 2, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.08, 'ratio': 0.89, 'station_id': 153, 'velocity': 0.25, 'warning_time': 0, 'distance': 0.89},
{'cluster': 3, 'demand': -34, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.4, 'ratio': 1.13, 'station_id': 162, 'velocity': -0.34, 'warning_time': 0, 'distance': 0.77},
{'cluster': 3, 'demand': 15, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.84, 'ratio': 0, 'station_id': 167, 'velocity': 0.18, 'warning_time': 0, 'distance': 0.98},
{'cluster': 3, 'demand': -8, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.24, 'ratio': 0.37, 'station_id': 170, 'velocity': -0.77, 'warning_time': 49.83, 'distance': 0.49},
{'cluster': 3, 'demand': -10, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.13, 'ratio': 0.84, 'station_id': 172, 'velocity': -0.11, 'warning_time': 0, 'distance': 1.31},
{'cluster': 3, 'demand': 4, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.19, 'ratio': 0.53, 'station_id': 177, 'velocity': 0.23, 'warning_time': 42.86, 'distance': 0.6},
{'cluster': 3, 'demand': -10, 'diversity': None, 'full_empty_time': 58.06, 'key_distance': None,
'latest_time': 63.06, 'priority': 1.21, 'ratio': 0.67, 'station_id': 180, 'velocity': -0.26, 'warning_time': 23.23,
'distance': 1.11},
{'cluster': 3, 'demand': 3, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.6, 'ratio': 0, 'station_id': 183, 'velocity': 0.02, 'warning_time': 0, 'distance': 1.19},
{'cluster': 3, 'demand': 56, 'diversity': None, 'full_empty_time': 3.04, 'key_distance': None, 'latest_time': 8.04,
'priority': 2.51, 'ratio': 0.02, 'station_id': 188, 'velocity': 0.66, 'warning_time': 0, 'distance': 0.33},
{'cluster': 3, 'demand': -1, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.02, 'ratio': 0.77, 'station_id': 195, 'velocity': -0.02, 'warning_time': 40, 'distance': 0.96},
{'cluster': 3, 'demand': -1, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.26, 'ratio': 0.52, 'station_id': 196, 'velocity': -0.14, 'warning_time': 57.88, 'distance': 0.09},
{'cluster': 3, 'demand': -10, 'diversity': None, 'full_empty_time': 40, 'key_distance': None, 'latest_time': 45,
'priority': 1.3, 'ratio': 0.85, 'station_id': 203, 'velocity': -0.12, 'warning_time': 0, 'distance': 0.97},
{'cluster': 3, 'demand': -19, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.93, 'ratio': 1.48, 'station_id': 207, 'velocity': -0.08, 'warning_time': 0, 'distance': 1.12},
{'cluster': 3, 'demand': 60, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.55, 'ratio': 0, 'station_id': 214, 'velocity': 0.77, 'warning_time': 0, 'distance': 1.32},
{'cluster': 3, 'demand': 76, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.59, 'ratio': 0, 'station_id': 222, 'velocity': 0.96, 'warning_time': 0, 'distance': 0.49},
{'cluster': 3, 'demand': 12, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.73, 'ratio': 0, 'station_id': 228, 'velocity': 0.12, 'warning_time': 0, 'distance': 1.23},
{'cluster': 3, 'demand': -7, 'diversity': None, 'full_empty_time': 33.33, 'key_distance': None,
'latest_time': 38.33, 'priority': 1.28, 'ratio': 0.62, 'station_id': 230, 'velocity': -0.15, 'warning_time': 16,
'distance': 1.46},
{'cluster': 3, 'demand': 26, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.15, 'ratio': 0, 'station_id': 232, 'velocity': 0.28, 'warning_time': 0, 'distance': 0.96},
{'cluster': 3, 'demand': 13, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.78, 'ratio': 0, 'station_id': 233, 'velocity': 0.13, 'warning_time': 0, 'distance': 1.12},
{'cluster': 3, 'demand': -28, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.22, 'ratio': 1.89, 'station_id': 237, 'velocity': -0.11, 'warning_time': 0, 'distance': 0.85},
{'cluster': 3, 'demand': 34, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.33, 'ratio': 0, 'station_id': 241, 'velocity': -0.04, 'warning_time': 0, 'distance': 0.97},
{'cluster': 3, 'demand': -23, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.08, 'ratio': 1.3, 'station_id': 254, 'velocity': -0.16, 'warning_time': 0, 'distance': 0.35},
{'cluster': 3, 'demand': 13, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.23, 'ratio': 0.2, 'station_id': 263, 'velocity': -0.03, 'warning_time': 0, 'distance': 1.24},
{'cluster': 3, 'demand': -24, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.02, 'ratio': 1.67, 'station_id': 267, 'velocity': -0.12, 'warning_time': 0, 'distance': 1.03},
{'cluster': 3, 'demand': -18, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.9, 'ratio': 1.27, 'station_id': 283, 'velocity': -0.12, 'warning_time': 0, 'distance': 1.07},
{'cluster': 3, 'demand': -38, 'diversity': None, 'full_empty_time': 26.81, 'key_distance': None,
'latest_time': 31.81, 'priority': 2.29, 'ratio': 0.65, 'station_id': 284, 'velocity': -0.78, 'warning_time': 11.49,
'distance': 1.34},
{'cluster': 3, 'demand': 25, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.05, 'ratio': 0, 'station_id': 285, 'velocity': -0.38, 'warning_time': 0, 'distance': 1.16},
{'cluster': 3, 'demand': 28, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 2.25, 'ratio': 0, 'station_id': 286, 'velocity': 0.33, 'warning_time': 0, 'distance': 0.74},
{'cluster': 3, 'demand': 3, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.57, 'ratio': 0, 'station_id': 292, 'velocity': -0.05, 'warning_time': 0, 'distance': 1.44},
{'cluster': 3, 'demand': 5, 'diversity': None, 'full_empty_time': 60, 'key_distance': None, 'latest_time': 65,
'priority': 1.17, 'ratio': 0.29, 'station_id': 296, 'velocity': 0.13, 'warning_time': 24, 'distance': 0.65},
{'cluster': 3, 'demand': -15, 'diversity': None, 'full_empty_time': 0, 'key_distance': None, 'latest_time': 5,
'priority': 1.81, 'ratio': 1.12, 'station_id': 298, 'velocity': -0.16, 'warning_time': 0, 'distance': 1.34}]
initial_bike = np.array((
[2, 25, 7, 17, 10, 0, 0, 35, 15, 60, 22, 6, 39, 4, 7, 0, 48, 6, 15, 0, 37, 16, 8, 4, 12, 22, 15, 12, 1, 15, 23, 30,
15, 75, 77, 29, 63, 0, 5, 0, 25, 25, 17, 0, 42, 4, 29, 15, 8, 9, 14, 30, 26, 23, 31, 0, 2, 40, 2, 22, 13, 28, 0,
14, 16, 32, 24, 32, 12, 12, 18, 13, 25, 3, 19, 18, 0, 15, 8, 0, 3, 45, 57, 30, 34, 8, 17, 17, 23, 6, 2, 23, 15, 28,
42, 0, 18, 53, 41, 19, 18, 40, 16, 22, 13, 4, 16, 10, 10, 1, 27, 18, 11, 15, 44, 0, 4, 23, 0, 10, 43, 12, 21, 0, 5,
0, 19, 11, 46, 11, 30, 0, 11, 26, 0, 3, 21, 68, 2, 21, 5, 10, 21, 27, 0, 21, 16, 10, 42, 0, 2, 18, 27, 17, 58, 0,
31, 30, 0, 0, 49, 0, 44, 61, 44, 25, 62, 0, 12, 0, 33, 44, 68, 37, 10, 0, 46, 16, 8, 0, 30, 0, 29, 0, 0, 5, 27, 47,
2, 0, 7, 42, 31, 22, 0, 23, 15, 11, 37, 22, 21, 3, 0, 29, 40, 21, 48, 31, 21, 17, 42, 58, 10, 24, 0, 0, 10, 19, 0,
0, 45, 0, 0, 43, 3, 0, 42, 28, 0, 10, 8, 3, 0, 0, 59, 15, 16, 36, 8, 0, 52, 0, 24, 12, 41, 18, 13, 27, 33, 65, 0,
13, 15, 45, 35, 15, 0, 4, 61, 0, 17, 0, 7, 5, 0, 0, 0, 30, 0, 2, 23, 0, 62, 28, 0, 0, 14, 5, 20, 2, 0, 58, 20, 28,
39, 0, 0, 18, 28, 12, 26, 0, 0, 0, 0, 0, 10, 15, 18, 17]))
max_capacity = np.array([57, 36, 38, 20, 51, 98, 30, 104, 37, 68, 18, 67, 35,
11, 23, 20, 61, 32, 27, 54, 64, 22, 19, 45, 42, 76,
21, 50, 11, 57, 16, 43, 21, 53, 44, 18, 44, 61, 27,
106, 26, 79, 57, 23, 40, 34, 25, 40, 27, 21, 31, 51,
59, 40, 50, 14, 26, 52, 9, 18, 36, 46, 68, 17, 27,
51, 40, 45, 14, 106, 24, 33, 26, 79, 42, 21, 50, 21,
12, 36, 32, 53, 43, 26, 53, 18, 37, 59, 29, 23, 36,
50, 14, 39, 35, 37, 73, 33, 25, 43, 128, 34, 40, 30,
32, 87, 24, 38, 29, 57, 29, 39, 41, 53, 35, 14, 52,
28, 55, 81, 75, 21, 25, 55, 30, 146, 14, 36, 35, 40,
28, 40, 43, 35, 33, 11, 31, 44, 44, 27, 23, 27, 13,
47, 51, 100, 28, 46, 24, 6, 23, 42, 19, 19, 62, 49,
20, 56, 50, 52, 38, 54, 39, 43, 40, 28, 63, 19, 61,
39, 89, 37, 81, 40, 13, 51, 46, 30, 44, 91, 45, 62,
29, 9, 57, 44, 32, 33, 89, 78, 58, 40, 53, 57, 72,
30, 29, 58, 22, 33, 41, 23, 35, 34, 56, 29, 50, 21,
38, 46, 35, 59, 39, 38, 70, 60, 38, 49, 40, 30, 43,
78, 91, 27, 65, 34, 40, 31, 23, 43, 13, 65, 47, 24,
45, 117, 53, 19, 41, 79, 53, 45, 33, 24, 41, 51, 32,
22, 37, 54, 119, 30, 44, 31, 27, 32, 49, 53, 34, 34,
40, 67, 39, 25, 62, 82, 50, 18, 48, 39, 41, 59, 46,
59, 70, 66, 23, 27, 13, 37, 35, 53, 44, 22, 60, 59,
38, 47, 32, 65, 62, 60, 7, 85, 54, 48, 34, 26, 16,
29])
# Cost coefficients and fleet size for the rebalancing model; units are not
# documented here - confirm against the model that consumes them.
travel_cost = 0.42
working_cost = 0.67
station_count = 300
# Station coordinates (columns 1 and 2 of the CSV: presumably lat, lon).
# NOTE(review): another hardcoded absolute macOS path - unportable.
lat_lon = pd.read_csv('/Users/hurunqiu/project/bs_rebalancing_platform/bs_server/resources/dataset/lat_lon.csv',
                      header=0, usecols=[1, 2]).to_numpy()
# Re-key the station records by their station_id and enrich each record
# with its maximum capacity and starting inventory; the id itself moves
# out of the record and into the dict key.
final_station_info = {}
for record in station_info:
    sid = record.pop('station_id')
    record['max_capacity'] = max_capacity[sid]
    record['init_inventory'] = initial_bike[sid]
    final_station_info[sid] = record
station_info = final_station_info |
11,892 | 5e657dbc100ecc58eecf22366b3e231623a6c51f | class atm(money):
    def __init__(self, model, color, company, speed_limit):
        """Store the basic attributes of the vehicle."""
        self.model = model              # model designation, e.g. "A6"
        self.color = color              # exterior colour
        self.company = company          # manufacturer name
        self.speed_limit = speed_limit  # maximum permitted speed
    def start(self):
        """Print a start message and return status code 1."""
        print("started")
        return 1
    def stop(self):
        """Print a stop message and return status code 2."""
        print("stopped")
        return 2
    def accelerate(self):
        """Print an acceleration message (returns None)."""
        print("accelerating")
    def change_gear(self,gear_type):
        """Print a gear-change message; ``gear_type`` is currently unused."""
        print("gear changed")
# Define some cars.
# Bug fix: the class defined above is named ``atm``, but the original code
# instantiated an undefined name ``Car``, raising NameError at runtime.
audi = atm("A6", "red", "audi", 80)
print(audi.start())       # prints "started", then the returned status code 1
print(audi.accelerate())  # prints "accelerating", then None (no return value)
print(audi.stop())        # prints "stopped", then the returned status code 2
|
11,893 | 9c2c1007c4ca63f91c52ef62022ba28ed126a9d4 | def user_groups(request):
context = {
'user_groups': request.user.groups.values_list('name', flat=True)
}
return context
|
11,894 | 707935dd66e2acf746dfd1e6869620f7824e3289 |
def temporal_iou(span_A, span_B):
    """
    Calculates the intersection over union of two temporal "bounding boxes"
    span_A: (start, end)
    span_B: (start, end)
    """
    inter_start = max(span_A[0], span_B[0])
    inter_end = min(span_A[1], span_B[1])
    # Disjoint (or merely touching) spans share no time: IoU is zero.
    if inter_start >= inter_end:
        return 0
    union_len = max(span_A[1], span_B[1]) - min(span_A[0], span_B[0])
    return float(inter_end - inter_start) / float(union_len)
|
11,895 | 8beb75233bccee2b5a79a068dd510eb4e84a4a22 | #
# commands.py
# Parses !sdvxin commands and does work based on the command
# # Only discord related work should be done, such as sending/editing/reacting messages
# # sdvx.in related work should be sent to sdvx.py
#
# External imports
import re
import configparser
import discord
# Internal imports
from command import Command
from CroBot.features.sdvxin import sdvx, embeds, regex
# Module-wide flag: True while a database update is in progress, so
# concurrent update requests can be rejected by ongoing_update().
sdvx_db_update = False
# Dispatcher for every "!sdvxin ..." subcommand in this module.
sdvx_command = Command('!sdvxin')
######################
# DATABASE FUNCTIONS #
######################
async def ongoing_update(message):
    """
    ongoing_update: Notify the user when a database update is already running.
    :param message: The message to respond to
    :return: True if an update is ongoing (a notice embed was sent)
             False if no update is in progress
    """
    if not sdvx_db_update:
        return False
    await message.channel.send(embed=embeds.db_update_ongoing())
    return True
async def error_check(errors, message, song=None):
    """
    error_check: Edit *message* with the embed matching the update outcome.
    :param errors: A list of errors collected during the update
    :param message: The discord message to edit
    :param song: Optional song the update concerned; selects the
                 song-specific success embed when present
    :return: N/A
    """
    # Any error means the update failed as a whole.
    if errors:
        await message.edit(embed=embeds.db_update_failed(errors))
    elif song is not None:
        await message.edit(embed=embeds.db_update_song_success(song=song))
    else:
        await message.edit(embed=embeds.db_update_success())
async def update_song(song, message):
    """
    update_song: Shared helper that refreshes a single song's entry and then
    reports the outcome; used by several branches of the update command.
    :param song: The song to be updated
    :param message: The message to respond to
    :return: N/A
    """
    global sdvx_db_update
    # Post a progress message first, then edit it with the final result.
    progress_msg = await message.channel.send(embed=embeds.db_update_song_start(song=song))
    sdvx_db_update = True
    errors = await sdvx.update_song(song.song_id)
    sdvx_db_update = False
    await error_check(errors, progress_msg, song)
@sdvx_command.register('update')
async def update(client, message):
    """
    update: For the request to update the database
    :param client: Client to update game status
    :param message: The message to reply to
    :return: N/A
    """
    global sdvx_db_update
    # Reject the request outright if an update is already in progress.
    if await ongoing_update(message):
        return
    # Advertise the update in the bot's presence for the duration.
    await client.change_presence(activity=discord.Game(name='Updating SDVX DB'))
    # If the message is requesting a light update (nothing after "update"):
    if message.content == '!sdvxin update':
        # Send the progress message, run the full-database update, then edit
        # the message into the success/failure embed.
        message_update = await message.channel.send(embed=embeds.db_update_start())
        sdvx_db_update = True
        errors = await sdvx.update()
        sdvx_db_update = False
        await error_check(errors, message_update)
    # Otherwise, find the song the user is trying to manually update.
    else:
        # If the passed value is a url:
        if re.search(regex.link, message.content) is not None:
            # Search for the song given the url.
            link = re.search(regex.link, message.content).group(0)
            song = await sdvx.search_song_link(link)
            # If the song exists, update it.
            if song is not None:
                await update_song(song, message)
            # If the song does not exist, add it.
            else:
                message_update = await message.channel.send(embed=embeds.db_update_song_start(name=link))
                # Attempt the add.
                # NOTE(review): this assumes the url also matches regex.song_id;
                # a link without an extractable id would make group(0) raise.
                sdvx_db_update = True
                song_id = re.search(regex.song_id, message.content).group(0)
                errors = await sdvx.add_song(song_id)
                sdvx_db_update = False
                song = await sdvx.search_song_id(song_id)
                await error_check(errors, message_update, song)
        # If the passed value is a song_id:
        elif re.search(regex.song_id, message.content) is not None:
            # Attempt to update the song based on song_id.
            song_id = re.search(regex.song_id, message.content).group(0)
            song = await sdvx.search_song_id(song_id)
            # If the song exists, update it.
            if song is not None:
                await update_song(song, message)
            # If it does not exist, return a song-not-found embed.
            # Adding by bare id is deliberately not offered here, in case the
            # "id" is really a song title that is all numbers (444 gets close).
            else:
                await message.channel.send(embed=embeds.search_not_found())
        # Otherwise, treat it as a general update query.
        else:
            query = re.search(regex.update, message.content).group(2)
            song_list = await sdvx.search_song(query)
            # Exactly one match: update it directly.
            if len(song_list) == 1:
                song = song_list[0]
                await update_song(song, message)
            # Fewer than 10 matches: list them for the user to disambiguate.
            elif len(song_list) < 10:
                await message.channel.send(embed=embeds.search_list(song_list))
            # Too many matches: ask for a narrower query.
            else:
                await message.channel.send(embed=embeds.search_too_many())
    # Clear the presence once every branch above has finished.
    await client.change_presence(activity=None)
######################
# QUERY FUNCTIONS #
######################
@sdvx_command.register('random')
async def random(client, message):
    """
    random: Pick a random song - optionally constrained to a level - and
    post it as an embed.
    :param client: Not used, sent by default from commands
    :param message: The message to reply to
    :return: N/A
    """
    # Bare "!sdvxin random": any song at all.
    if message.content == '!sdvxin random':
        song = await sdvx.fetch_random()
        if song is None:
            await message.channel.send(embed=embeds.search_not_found())
        else:
            await message.channel.send(embed=embeds.song(song))
        return
    # "!sdvxin random <level>": constrain the pick to the requested level.
    level_match = re.search(regex.random, message.content)
    if level_match is None:
        # Not a recognizable random query - fall back to a normal search,
        # in case the message is actually a song lookup.
        await search(message)
        return
    song = await sdvx.fetch_random(level_match.group(1))
    if song is None:
        await message.channel.send(embed=embeds.search_not_found())
    else:
        await message.channel.send(embed=embeds.song(song))
@sdvx_command.register('')
async def default(client, message):
    """
    default: The default query for sdvx.in, it should have a search query behind it
    - Due to how command configuration is done, this should be the last to be instantiated
    :param client: Not used, sent by default from commands
    :param message: The message to reply to
    :return: N/A
    """
    # All real work happens in search(); this wrapper exists only so the
    # empty subcommand can be registered with the dispatcher.
    await search(message)
async def search(message):
    """
    search: Helper function for default, so that it can be used with random
    as a fallback (otherwise the dispatch would hit a NoneType handler).
    :param message: The message to reply to
    :return: N/A
    """
    # Pull the free-text query out of the message; do nothing when absent.
    query = re.search(regex.query, message.content).group(2)
    if query is None:
        return
    # A song id in the message takes priority; when the id does not resolve,
    # fall through to the general text query below.
    id_match = re.search(regex.song_id, message.content)
    if id_match is not None:
        song = await sdvx.search_song_id(id_match.group(0))
        if song is not None:
            await message.channel.send(embed=embeds.song(song))
            return
    else:
        # Next preference: an embedded link; again fall through on a miss.
        link_match = re.search(regex.link, message.content)
        if link_match is not None:
            song = await sdvx.search_song_link(link_match.group(0))
            if song is not None:
                await message.channel.send(embed=embeds.song(song))
                return
    # General text search over the song database.
    song_list = await sdvx.search_song(query)
    if len(song_list) == 1:
        # Exactly one hit: show it directly.
        await message.channel.send(embed=embeds.song(song_list[0]))
    elif len(song_list) == 0:
        await message.channel.send(embed=embeds.search_not_found())
    elif len(song_list) < 10:
        # A manageable number of hits: list them.
        await message.channel.send(embed=embeds.search_list(song_list))
    else:
        await message.channel.send(embed=embeds.search_too_many())
|
11,896 | 18160c1a6e85d26f38962570129a619af01a8655 | # Generated by Django 2.2.4 on 2021-06-05 20:26
from django.db import migrations
class Migration(migrations.Migration):
    """Appears to be a merge migration: two parent migrations of the same
    app and no operations - it only reconciles the divergent 0012 branches
    of ``build_pc_app`` (confirm this was generated by ``makemigrations --merge``).
    """

    dependencies = [
        ('build_pc_app', '0012_auto_20210605_1100'),
        ('build_pc_app', '0012_order_user_order'),
    ]

    operations = [
    ]
|
11,897 | 0e67127c1fe4667ff633cda6528f12de5a664035 | from django.db import models
# Create your models here.
class diagnosis(models.Model):
    """Patient measurements stored for a diagnosis record."""
    glucose = models.FloatField()   # blood glucose reading
    insulin = models.FloatField()   # insulin level
    bmi = models.FloatField()       # body-mass index
    age = models.IntegerField()     # patient age in years (presumably - confirm)

    def __str__(self):
        # Bug fix: __str__ must return a str; the original returned the raw
        # float, so str(instance) (e.g. in the Django admin) raised TypeError.
        return str(self.glucose)
|
11,898 | 649d518fadae649669c00645c40d958dd8ac2058 | from __future__ import print_function
import math
import pandas as pd
import numpy as np
import random
import time
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, PolynomialFeatures, LabelBinarizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
def next_batch(train_data, train_target, batch_size):
    """Draw a random mini-batch.

    Shuffles the example indices with numpy's global RNG and returns the
    first ``batch_size`` samples and their matching targets as two
    parallel lists.
    """
    order = list(range(len(train_target)))
    np.random.shuffle(order)
    picked = order[:batch_size]
    batch_data = [train_data[i] for i in picked]
    batch_target = [train_target[i] for i in picked]
    return batch_data, batch_target
if __name__ == '__main__':
    # Train a softmax (multinomial logistic) classifier over 28 features /
    # 6 log-level classes using the TensorFlow 1.x graph/session API.
    print('Start read data')
    learning_rate = 0.01
    training_epochs = 1000
    batch_size = 100
    display_step = 50
    n_input = 28      # feature count after preprocessing (must match the CSV)
    n_classes = 6     # number of distinct LogLevel labels
    # NOTE(review): hardcoded Windows path; also contains a single backslash
    # before "py-test" (\p is not an escape, so Python keeps it literally).
    loadpath = "E:\\graduate-design\\test-pro\py-test\\test.csv"
    encoder = LabelEncoder()
    one_hot = OneHotEncoder(categories='auto')
    data = pd.read_csv(loadpath)
    data.columns = ["CheckType", "BlockType", "BlockSLOC", "ExceptionRatio", "ReturnInBlock", "ThrowInBlock",
                    "SettingFlag", "MethodCallCount", "MethodParameterCount", "VariableDeclarationCount", "Logdensity",
                    "LogNumber", "AverageLogLength", "AverageeLogParameterCount", "LogLevel"]
    # sess = tf.InteractiveSession()
    # Standard-scale the numeric columns and one-hot the categoricals;
    # everything else passes through unchanged.
    numeric_features = ["BlockSLOC", "MethodCallCount", "MethodParameterCount", "VariableDeclarationCount", "LogNumber",
                        "AverageLogLength", "AverageeLogParameterCount"]
    numeric_transformer = Pipeline(steps=[('scaler', StandardScaler())])
    categorical_features = ["CheckType", "BlockType"]
    categorical_transformer = Pipeline(steps=[('onehot', OneHotEncoder(handle_unknown='ignore'))])
    preprocessor = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features),
            ('cat', categorical_transformer, categorical_features)], remainder='passthrough')
    # clf = Pipeline(steps=[('preprocessor', preprocessor)])
    X = data.drop("LogLevel", axis=1)
    X = preprocessor.fit_transform(X)
    X = np.reshape(X, (X.shape[0], -1)).astype(np.float32)
    Y = data["LogLevel"].values
    # Y = encoder.fit_transform(Y)
    # One-hot encode the labels into a dense (N, n_classes) array.
    Y = np.reshape(Y, (-1, 1))
    Y = one_hot.fit_transform(Y).toarray()
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
    # Build the TF1 computation graph: y = softmax(xW + b).
    sess = tf.InteractiveSession()
    x = tf.placeholder(tf.float32, [None, 28])
    W = tf.Variable(tf.random_normal([n_input, n_classes]))
    b = tf.Variable(tf.random_normal([n_classes]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    y_ = tf.placeholder(tf.float32, [None, 6])
    # Cross-entropy computed manually on the softmax output.
    # NOTE(review): tf.log(y) can hit log(0); tf.nn.softmax_cross_entropy_with_logits
    # would be numerically safer - confirm before changing results.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y),
                                                  reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    tf.global_variables_initializer().run()
    # 1000 SGD steps on random mini-batches of 100 examples.
    for i in range(1000):
        batch_xs, batch_ys = next_batch(X_train, y_train, 100)
        train_step.run({x: batch_xs, y_: batch_ys})
    # Report held-out accuracy (fraction of argmax matches).
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(accuracy.eval({x: X_test, y_: y_test}))
|
11,899 | 90b9437265487678ab458ffd044e83dbed0304f5 | x:int = 1
y:bool = True
# NOTE(review): the assignments below deliberately contradict the declared
# annotations (bool into an int, int into a bool, None into both). This
# reads like a fixture for exercising a static type checker rather than
# runnable application logic - confirm before "fixing" anything here.
x = False
y = 2
z = 3
x = z = 4
x = z = None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.