index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
21,200 | 4baa41577e1e66166961fd58c630b59de6f1a621 | from rp import *
paths=get_file_paths("/Users/Ryan/Desktop/CleanCode/Zebra/2019/Images/CautionSign/Inputs")
images=load_images(paths)
print("DONE!") |
21,201 | 68ae05b6b974d1da9dace5550fa31ffe8b684400 | import glob
import os
import numpy as np
from skimage import io, transform # conda install -c conda-forge scikit-image
from tqdm import tqdm
max_wh = 2000  # longest allowed image dimension, in pixels

# Walk every file under images/ (recursive) and clean the dataset up:
# delete unsupported/corrupt files and downscale oversized images in place.
files = list(glob.iglob('images/**/*.*', recursive=True))
for f in tqdm(files, desc='Scanning images', total=len(files)):
    # Remove formats we cannot (or do not want to) process.
    suffix = f.split('.')[-1]
    if suffix in ['gif', 'svg']:
        print('Removing %s' % f)
        os.remove(f)
        continue
    # Read Image
    try:
        img = io.imread(f)
        # Downsize so the longest dimension is at most max_wh.
        # NOTE(review): max(img.shape) includes the channel axis for colour
        # images; harmless here because max_wh >> 4, but worth confirming.
        r = max_wh / max(img.shape)  # scale ratio (< 1 means too large)
        if r < 1:  # resize
            print('Resizing %s' % f)
            # preserve_range keeps the original 0-255 value range; the
            # previous code cast resize()'s float [0, 1] output straight
            # to uint8, which truncated almost every pixel to 0 or 1 and
            # wrote near-black images.
            img = transform.resize(
                img,
                (round(img.shape[0] * r), round(img.shape[1] * r)),
                preserve_range=True,
            )
            io.imsave(f, img.astype(np.uint8))
    # Remove corrupted (narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit)
    except Exception:
        print('Removing corrupted %s' % f)
        os.remove(f)
|
21,202 | f97f972da05cdcc19e4fa003ad8b74b637cb2fa7 | #/bin/python
import sys
import boto3

# Region is passed as the first CLI argument, e.g. `python script.py us-east-1`.
Region = str(sys.argv[1])
# This will create a client of aws ec2 for the specified region
ec2 = boto3.client('ec2', region_name=Region)

# Instances (by Name tag) that belong to the cluster.
InstanceList = ['etcd-0', 'controller-0', 'worker-0', 'worker-1', 'worker-2']

data = ""
workers = {}  # Name tag -> public IP, filled while scanning reservations

response = ec2.describe_instances()
# Build an Ansible inventory ("groups" file) from the public IPs of the
# running etcd, controller and worker instances.
for reservation in response["Reservations"]:
    for instance in reservation["Instances"]:
        for tags in instance['Tags']:
            if tags['Key'] == 'Name' and tags['Value'] in InstanceList \
                    and instance['State']['Name'] == "running":
                if tags['Value'].startswith('etcd'):
                    data = data + "[etcd]\netcd ansible_ssh_host=" + instance['PublicIpAddress'] + "\n"
                elif tags['Value'].startswith('cont'):
                    data = data + "\n[controller]\ncontroller ansible_ssh_host=" + instance['PublicIpAddress'] + "\n"
                elif tags['Value'].startswith('worker'):
                    workers[tags['Value']] = instance['PublicIpAddress']

# Emit workers in a fixed order so the inventory is deterministic
# (replaces the previous copy-pasted worker-0/1/2 if-chain).
data = data + "\n[worker]\n"
for name in ['worker-0', 'worker-1', 'worker-2']:
    if name in workers:
        # "worker-0" becomes the inventory host name "worker0".
        data = data + name.replace('-', '') + " ansible_ssh_host=" + workers[name] + "\n"

# Context manager guarantees the inventory file is flushed and closed.
with open('groups', 'w') as f:
    f.write(data)
|
21,203 | 700c476fbc02b598578c7f1329e28bdc9fd300f1 | from django.apps import AppConfig
class ComarcaConfig(AppConfig):
    """Django application configuration for the ``comarca`` app."""
    name = 'comarca'
|
21,204 | 68e69bb36f6b7e95593f19cf310e11963b93bea7 | '''
@Author: Yitao Qiu
'''
import numpy as np
import pandas as pd
import matplotlib as plt
import datetime
import gym
import gym.spaces
eps = 1e-8
# A class that is to perform the calculation of the portfolio
class Portfolio(object):
    """Tracks portfolio weights/value across steps of a trading episode
    and computes the per-step reward used by the gym environment."""

    def __init__(self, steps, trading_cost, mode):
        # steps: total steps per episode (used to scale the reward)
        # trading_cost: proportional transaction cost per unit of turnover
        # mode: e.g. "Test" — changes the cost term on the first step after reset
        self.steps = steps
        self.cost = trading_cost
        self.mode = mode

    def _step(self, w1, y1, reset):
        """Advance one step.

        w1 -- new target weight vector; index 0 is cash
        y1 -- price-relative vector for this step; y1[0] must be 1 (cash)
        reset -- 1 on the first step after a reset (only relevant in "Test" mode)
        Returns (reward, info, done).
        """
        assert w1.shape == y1.shape, 'w1 and y1 must have same number of products'
        assert y1[0] == 1.0, 'y1[0] should be 1'
        w0 = self.w0
        p0 = self.p0
        y0 = self.y0
        # Weights after last step's price moves, before rebalancing to w1.
        dw1 = (y0 * w0) / (np.dot(y0, w0) + eps)
        if self.mode == "Test" and reset == 1:
            # First test step: pay cost on the whole non-cash position.
            mu1 = self.cost * (np.abs(w1[1:])).sum()
        else:
            # Cost proportional to turnover, ignoring the cash component.
            mu1 = self.cost * (np.abs(dw1[1:] - w1[1:])).sum()
        p1 = p0 * (1 - mu1) * np.dot(y1, w1)  # portfolio value after the step
        rho1 = p1 / p0 - 1                    # simple rate of return
        r1 = np.log((p1 + eps) / (p0 + eps))  # log return (eps avoids log(0))
        reward = r1 / self.steps * 1000.      # scaled per-step reward
        self.w0 = w1
        self.p0 = p1
        self.y0 = y1
        # Run out of money, done
        done = p1 == 0
        info = {
            "portfolio_value": p1,
            "rate_of_return": rho1,
            "log_return": r1,
        }
        self.infos.append(info)
        return reward, info, done
def reset(self):
self.w0 = np.array([1.0] + [0.0] * 9)
self.infos = []
self.p0 = 1.0
self.y0 = np.zeros((10,), dtype=float)
self.y0[0] = 1 |
21,205 | 1a6393e18ed015e5a88b9a4c06f3c95baa81d79c | """Utilities. Mostly periodic checks. Everything that is neither core nor gui
contents (for use):
- run() -- call once on startup. takes care of all automatic tasks
- send_email() -- send an email
- get_name() -- get a pretty name
- get_book_data() -- attempt to get data about a book based on the ISBN (first local DB, then DNB).
to add late handlers, append them to late_handlers. they will receive arguments as specified in late_books()
"""
import base64
import tempfile
import email
import smtplib
import ssl
from datetime import datetime, timedelta, date
import time
import threading
import shutil
import os
import ftplib
import ftputil
import requests
import logging
import re
import bs4
import string
try:
from cryptography import fernet
except ImportError:
fernet = None
from buchschloss import core, config
class FormattedDate(date):
    """A ``datetime.date`` whose string form follows config.core.date_format."""

    def __str__(self):
        # Delegate formatting to the application-configured pattern.
        return self.strftime(config.core.date_format)

    @classmethod
    def fromdate(cls, date_: date):
        """Build a FormattedDate from a plain date; ``None`` passes through."""
        if date_ is None:
            return None
        return cls(date_.year, date_.month, date_.day)

    def todate(self):
        """Convert back to a plain ``datetime.date``."""
        return date(self.year, self.month, self.day)
def run_checks():
    """Run stuff to do as specified by times set in config"""
    # Runs forever; intended to live in a daemon thread (see run()).
    while True:
        # 45-minute grace past the persisted check date before firing tasks.
        if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):
            for stuff in stuff_to_do:
                threading.Thread(target=stuff).start()
            core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every
        time.sleep(5*60*60)  # poll again in five hours
def late_books():
    """Check for late and nearly late books.
    Call the functions in late_handlers with arguments (late, warn).
    late and warn are sequences of core.Borrow instances.
    """
    late = []
    warn = []
    today = date.today()
    # NOTE(review): 'gt' selects borrows due *after* today + warn_time; for a
    # "nearly late" warning one would expect 'lt' here. Confirm the semantics
    # of core.Borrow.search before changing anything.
    for b in core.Borrow.search((
            ('is_back', 'eq', False),
            'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))):
        if b.return_date < today:
            late.append(b)
        else:
            warn.append(b)
    for h in late_handlers:
        h(late, warn)
def backup():
    """Local backups.
    Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config
    """
    # Rotate existing name.1 .. name.<depth> backups up one slot.
    backup_shift(os, config.utils.tasks.backup_depth)
    if config.utils.tasks.secret_key is None:
        shutil.copyfile(config.core.database_name, config.core.database_name+'.1')
    else:
        # Encrypt before writing the newest backup slot.
        data = get_encrypted_database()
        with open(config.core.database_name+'.1', 'wb') as f:
            f.write(data)
def get_encrypted_database():
    """get the encrypted contents of the database file"""
    if fernet is None:
        raise RuntimeError('encryption requested, but no cryptography available')
    with open(config.core.database_name, 'rb') as f:
        plain = f.read()
    # Fernet requires a urlsafe-base64-encoded 32-byte key.
    key = base64.urlsafe_b64encode(config.utils.tasks.secret_key)
    cipher = fernet.Fernet(key).encrypt(plain)
    # NOTE(review): the Fernet token is itself base64 text; decoding it here
    # stores raw token bytes (smaller file), so whatever restores the backup
    # must re-encode before calling Fernet.decrypt — confirm this is intended.
    return base64.urlsafe_b64decode(cipher)
def web_backup():
    """Remote backups.
    Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config
    """
    conf = config.utils
    if conf.tasks.secret_key is None:
        upload_path = config.core.database_name
        file = None
    else:
        # Write the encrypted payload to a temp file so ftputil can upload it.
        file = tempfile.NamedTemporaryFile(delete=False)
        file.write(get_encrypted_database())
        file.close()
        upload_path = file.name
    factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP
    # noinspection PyDeprecation
    with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password,
                         session_factory=factory, use_list_a_option=False) as host:
        backup_shift(host, conf.tasks.web_backup_depth)
        host.upload(upload_path, config.core.database_name+'.1')
    if file is not None:
        os.unlink(file.name)  # clean up the delete=False temp file
def backup_shift(fs, depth):
    """Shift all name.number backups up one number, up to the given depth,
    in the given filesystem (the ``os`` module or a remote ftputil host —
    anything exposing ``remove`` and ``rename``).
    """
    def number_name(n):
        # '<database_name>.<n>' — the n-th backup slot.
        return '.'.join((config.core.database_name, str(n)))

    # Drop the oldest slot so the rename chain below never collides.
    try:
        fs.remove(number_name(depth))
    except FileNotFoundError:
        pass
    # Rename name.<depth-1> -> name.<depth>, ..., name.1 -> name.2.
    for f in range(depth, 1, -1):
        try:
            fs.rename(number_name(f-1), number_name(f))
        except FileNotFoundError:
            pass
def send_email(subject, text):
    """Send an email to the recipient specified in config"""
    cfg = config.utils.email
    msg = email.message.Message()
    # 'from' is a Python keyword, so attribute-style access is impossible here.
    msg['From'] = cfg['from']
    msg['To'] = cfg.recipient
    msg['Subject'] = subject
    msg.set_payload(text)
    try:
        with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn:
            if cfg.smtp.tls:
                # Upgrade to TLS with default certificate validation.
                conn.starttls(context=ssl.create_default_context())
            if cfg.smtp.username is not None:
                conn.login(cfg.smtp.username, cfg.smtp.password)
            conn.send_message(msg)
    except smtplib.SMTPException as e:
        # Log and swallow: a failed notification must not crash the caller.
        logging.error('error while sending email: {}: {}'.format(type(e).__name__, e))
def get_name(internal: str):
    """Get an end-user suitable name.
    Try lookup in config.utils.names.
    "__" is replaced by ": " with components looked up individually
    If a name isn't found, a warning is logged and the internal name returned, potentially modified
    "<namespace>::<name>" may specify a namespace in which lookups are performed first,
    falling back to the global names if nothing is found
    "__" takes precedence over "::"
    """
    # Compound names: resolve each component separately, join with ": ".
    if '__' in internal:
        return ': '.join(get_name(s) for s in internal.split('__'))
    *path, name = internal.split('::')
    current = config.utils.names
    # Collect namespaces from outermost to innermost...
    look_in = [current]
    try:
        for k in path:
            current = current[k]
            look_in.append(current)
    except KeyError:
        # noinspection PyUnboundLocalVariable
        logging.warning('invalid namespace {!r} of {!r}'.format(k, internal))
    # ...then search innermost first so more specific entries win.
    look_in.reverse()
    for ns in look_in:
        try:
            val = ns[name]
            if isinstance(val, str):
                return val
            elif isinstance(val, dict):
                # A dict is a sub-namespace; '*this*' holds its display name.
                return val['*this*']
            else:
                raise TypeError('{!r} is neither dict nor str'.format(val))
        except KeyError:
            pass
    logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name])))
    return '::'.join(path+[name])
def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace):
    """Insert newlines every `size` characters.
    Insert '\n' before the given amount of characters
    if a character in `break_char` is encountered.
    If the character is in `cut_char`, it is replaced by the newline.
    """
    # TODO: move to misc
    breakable = break_char + cut_char
    lines = []
    while len(text) > size:
        # Scan backwards from the limit for a breakable character.
        pos = size
        drop_char = False
        while pos:
            if text[pos] in breakable:
                drop_char = text[pos] in cut_char
                break
            pos -= 1
        else:
            # No break point found: hard-cut at the size limit.
            pos = size - 1
        pos += 1
        # drop_char (a bool) subtracts one char when the break replaces it.
        lines.append(text[:pos - drop_char])
        text = text[pos:]
    lines.append(text)
    return '\n'.join(lines)
def get_book_data(isbn: int):
    """Attempt to get book data via the ISBN from the DB, if that fails,
    try the DNB (https://portal.dnb.de)"""
    try:
        book = next(iter(core.Book.search(('isbn', 'eq', isbn))))
    except StopIteration:
        pass  # actually, I could put the whole rest of the function here
    else:
        # Found locally: strip the fields that are not book data.
        data = core.Book.view_str(book.id)
        del data['id'], data['status'], data['return_date'], data['borrowed_by']
        del data['borrowed_by_id'], data['__str__']
        return data
    try:
        r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'
                         + str(isbn) + '&method=simpleSearch&cqlMode=true')
        r.raise_for_status()
    except requests.exceptions.RequestException:
        raise core.BuchSchlossError('no_connection', 'no_connection')
    # Matches "Surname, Given (Role)" entries in the person list.
    person_re = re.compile(r'(\w*, \w*) \((\w*)\)')
    results = {'concerned_people': []}
    # NOTE(review): no parser argument — bs4 picks whichever parser is
    # installed, which can change results between environments.
    page = bs4.BeautifulSoup(r.text)
    table = page.select_one('#fullRecordTable')
    if table is None:
        # see if we got multiple results
        link_to_first = page.select_one('#recordLink_0')
        if link_to_first is None:
            raise core.BuchSchlossError(
                'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)
        # Follow the first hit. NOTE(review): unlike the first request, this
        # one has no raise_for_status / RequestException handling.
        r = requests.get('https://portal.dnb.de'+link_to_first['href'])
        page = bs4.BeautifulSoup(r.text)
        table = page.select_one('#fullRecordTable')
    # Scrape the label/value rows of the full record table.
    for tr in table.select('tr'):
        td = [x.get_text('\n').strip() for x in tr.select('td')]
        if len(td) == 2:
            if td[0] == 'Titel':
                results['title'] = td[1].split('/')[0].strip()
            elif td[0] == 'Person(en)':
                for p in td[1].split('\n'):
                    g = person_re.search(p)
                    if g is None:
                        continue
                    g = g.groups()
                    if g[1] == 'Verfasser':
                        # "Verfasser" (author) gets its own field.
                        results['author'] = g[0]
                    else:
                        results['concerned_people'].append(g[1]+': '+g[0])
            elif td[0] == 'Verlag':
                results['publisher'] = td[1].split(':')[1].strip()
            elif td[0] == 'Zeitliche Einordnung':
                results['year'] = td[1].split(':')[1].strip()
            elif td[0] == 'Sprache(n)':
                results['language'] = td[1].split(',')[0].split()[0].strip()
    results['concerned_people'] = '; '.join(results['concerned_people'])
    return results
def run():
    """handling function."""
    # Launch the configured startup tasks plus the periodic checker, all as
    # daemon threads so they terminate with the main process.
    for k in config.utils.tasks.startup:
        threading.Thread(target=globals()[k], daemon=True).start()
    threading.Thread(target=run_checks, daemon=True).start()
def _default_late_handler(late, warn):
    """Write late and soon-due borrows to late.txt / warn.txt.

    Each file starts with the current date (formatted per config) and a
    blank line, followed by one borrow per line.
    """
    # Fix: str.join on a 1-tuple returns the element unchanged, so the
    # original header was just '\n\n' and the date was silently dropped.
    head = datetime.now().strftime(config.core.date_format) + '\n\n'
    with open('late.txt', 'w') as f:
        f.write(head)
        f.write('\n'.join(str(L) for L in late))
    with open('warn.txt', 'w') as f:
        f.write(head)
        f.write('\n'.join(str(w) for w in warn))
# Handlers invoked by late_books() with (late, warn) sequences.
late_handlers = [_default_late_handler]
# Recurring tasks run by run_checks(), resolved by name from config.
stuff_to_do = [globals()[k] for k in config.utils.tasks.recurring]
|
21,206 | 8a9ebe750c04c162289fb1e3e0454ee9ffada306 | # views.team.team
from flask import redirect, url_for, flash
from app.forms import (
CreateTeam as CreateTeamForm,
UpdateTeam as UpdateTeamForm,
AddTeamMember as AddMemberForm,
)
from app.util import session as session_util
from app.views.generic import AccountFormView
class TeamView(AccountFormView):
    """View for Team management.

    Displays forms for creating, updating, leaving, and adding members
    to a team. POSTs are not accepted for any of those actions here;
    they are handled by separate subclassed views.
    """

    def get_template_name(self):
        return 'form2/team.html'

    def get_form(self):
        return CreateTeamForm()

    def post(self):
        return redirect(url_for('team'))

    def render_template(self, **kwargs):
        """Render a different form set depending on team membership.

        Accounts without a team get the creation form (CreateTeamForm);
        accounts on a team get the management options: editing team
        details, adding/removing members, or leaving the team.
        """
        account = session_util.get_account()
        if not account.team:
            form = kwargs.get('create_form') or self.get_form()
            return super().render_template(create_form=form)
        update_form = kwargs.get('update_form') or UpdateTeamForm(
            team_name=account.team.team_name,
            division=account.team.division,
        )
        add_form = kwargs.get('add_form') or AddMemberForm()
        return super().render_template(edit_form=update_form,
                                       add_form=add_form)
|
21,207 | 4ee03a43200073ed813ee34bb9ddfcf329a07095 | from setuptools import setup
package_name = 'rqt_gui_py'

setup(
    name=package_name,
    version='1.0.6',
    # Sources live under src/ (ament/colcon Python package layout).
    package_dir={'': 'src'},
    packages=['rqt_gui_py'],
    data_files=[
        # Register the package with the ament resource index.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        ('share/' + package_name, ['plugin.xml'])
    ],
    install_requires=['setuptools'],
    zip_safe=False,
    author='Dirk Thomas',
    author_email='dthomas@osrfoundation.org',
    maintainer='Dirk Thomas',
    maintainer_email='dthomas@osrfoundation.org',
    keywords=['ROS'],
    classifiers=[
        'Intended Audience :: Developers',
        # NOTE(review): this classifier says Apache while `license` below
        # says BSD — confirm which is correct against package.xml.
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
    description=(
        'rqt_gui_py enables GUI plugins to use the Python client library for ROS.'
    ),
    license='BSD',
    tests_require=['pytest'],
)
|
21,208 | 736a0112b2b8fcc7fed383e8aaf57686c1655b12 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
class UndeletableUserAdmin(UserAdmin):
    """Admin for django.contrib.auth.User with deletion disabled."""
    # Customise the admin for django.contrib.auth.user to never provide the delete button.
    # Instead of deleting, users should be disabled by setting the 'active' flag to false.
    def has_delete_permission(self, request, obj=None):
        # Returning False hides the delete button and blocks bulk deletes.
        return False

# Swap the stock User admin for the delete-proof variant.
admin.site.unregister(User)
admin.site.register(User, UndeletableUserAdmin)
|
21,209 | d23bc22c3f1e961ed12941e095d373acffce789f | import decimal
import json
import requests
from blockchain import util
from .models import Transaction
from payments.tasks import send_webhook
from payments.tasks import send_receipt
# 1 satoshi in BTC. Constructed from a string: Decimal(0.00000001) would
# inherit the binary-float error (1.0000000000000000209...E-8).
SATOSHI = decimal.Decimal('0.00000001')
def set_tx_details(history_data, transaction):
    """Does check for 1 transaction"""
    txid = None
    # history_data may be one history dict or a list of them.
    if isinstance(history_data, dict):
        # NOTE(review): 'addr' is indexed with [0]; if it is a plain string
        # this compares only its first character — presumably it is a list
        # of addresses. Confirm against the Blockonomics response shape.
        if history_data['addr'][0] == transaction.to_address:
            txid = history_data['txid']
    else:
        for item in history_data:
            if item['addr'][0] == transaction.to_address:
                txid = item['txid']
    if not txid:
        return
    # Map API status strings onto the model's status constants.
    mapping = {
        'Confirmed': Transaction.STATUS_CONFIRMED,
        'Partially Confirmed': Transaction.STATUS_PARTIALLY_CONFIRMED,
        'Unconfirmed': Transaction.STATUS_UNCONFIRMED
    }
    r = requests.get("https://www.blockonomics.co/api/tx_detail",
                     params={'txid': txid})
    tx_detail = json.loads(r.content.decode('utf-8'))
    status = tx_detail['status']
    for item in tx_detail['vout']:
        if item['address'] == transaction.to_address:
            # value * SATOSHI: presumably 'value' is in satoshis — confirm.
            value = decimal.Decimal(item['value'])
            amount = value * SATOSHI
            amount = round(amount, 8)
            if mapping[status] == Transaction.STATUS_CONFIRMED:
                send_receipt.apply_async(kwargs={'transaction_id': transaction.id})
            transaction.txid = txid
            transaction.status = mapping[status]
            transaction.amount_paid = amount
            transaction.save()
            if transaction.status == Transaction.STATUS_CONFIRMED:
                send_webhook.apply_async(kwargs={'transaction_id': transaction.id})
        else:
            # NOTE(review): this fallback runs for *every* vout entry not
            # paying our address — confirm the else was not meant to be a
            # for/else (i.e. run only when no entry matched).
            blockchain_set_tx_detail(transaction)
def blockchain_set_tx_detail(transaction):
    """Check transaction details and save updates using blockchain.info API"""
    info_endpoint = "address/%s?format=json" % transaction.to_address
    try:
        info = json.loads(util.call_api(info_endpoint))
    except Exception:
        # Best effort: API/JSON failures leave the transaction untouched.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return
    transaction.txid = info['txs'][0]['hash']
    transaction.amount_paid = round(info['total_received'] * SATOSHI, 8)
    if transaction.amount_paid >= transaction.amount_btc:
        transaction.status = Transaction.STATUS_CONFIRMED
        send_webhook.apply_async(kwargs={'transaction_id': transaction.id})
    transaction.save()
def check(transaction):
    """check transaction status based on to_address"""
    if not isinstance(transaction, Transaction):
        # Allow passing a primary key instead of a model instance.
        transaction = Transaction.objects.get(id=transaction)
    r = requests.post("https://www.blockonomics.co/api/searchhistory",
                      data=json.dumps({"addr": transaction.to_address}))
    try:
        history_data = json.loads(r.content.decode('utf-8'))['history'][0]
    except (ValueError, KeyError, IndexError):
        # Invalid JSON, missing key, or empty history: nothing to update.
        # (Narrowed from a bare except.)
        return
    set_tx_details(history_data, transaction)
def checks(transactions):
    """check transactions status based on to_address (bulk variant)"""
    txs = transactions.values_list('to_address', flat=True)
    addrs = ' '.join(tx for tx in txs if tx)
    r = requests.post("https://www.blockonomics.co/api/searchhistory",
                      data=json.dumps({"addr": addrs}))
    try:
        history_data = json.loads(r.content.decode('utf-8'))['history']
    except (ValueError, KeyError):
        # Bulk lookup failed: fall back to per-transaction blockchain.info
        # checks. The original fell through here and then referenced the
        # unbound ``history_data`` below, raising NameError.
        for transaction in transactions:
            blockchain_set_tx_detail(transaction)
        return
    for transaction in transactions:
        set_tx_details(history_data, transaction)
|
21,210 | cc948591a8d1aa8e929949f78d55b2052312e256 | import numpy as np
from signal_processing import U_
from signal_processing.extensions import pint_extension
from signal_processing.segment import Segment
def test_Segment():
    """Exercise Segment construction, containment and comparison helpers."""
    segment_1 = Segment(np.array([3, 5]) * U_.meter)
    # Basic geometric properties.
    assert pint_extension.allclose(segment_1.start, 3 * U_.meter)
    assert pint_extension.allclose(segment_1.end, 5 * U_.meter)
    assert pint_extension.allclose(segment_1.edges, np.array([3,5]) * U_.meter)
    assert str(segment_1) == str(segment_1.edges)
    assert pint_extension.allclose(segment_1.center, 4 *U_.meter)
    assert pint_extension.allclose(segment_1.width, 2 * U_.meter)
    assert pint_extension.allclose(segment_1.width_half, 1 * U_.meter)
    #print len(segment_1)
    #assert pint_extension.allclose(len(segment_1) , 2 * U_.meter)
    # Membership tests (scalar and element-wise).
    assert 4 * U_.meter in segment_1
    assert not 2 * U_.meter in segment_1
    assert np.allclose(np.array([True, True]), segment_1.is_each_in(np.array([4, 4]) * U_.meter))
    # is_close across the different constructor forms.
    assert segment_1.is_close(segment_1)
    segment_2 = Segment(np.array([3, 4]) * U_.meter)
    assert not segment_1.is_close(segment_2)
    assert segment_1.is_close(Segment((3 * U_.meter, 5 * U_.meter)))
    assert segment_1.is_close(Segment((3, 5), U_.meter))
    assert Segment([0, 1 * U_.meter]).is_close(Segment([0, 1], U_.meter))
    segment_3 = Segment(np.array([2.5, 6.5]) * U_.sec)  # NOTE(review): unused
def test_from_center():
    """from_center: default mode takes a half-width; mode='width' the full width."""
    segment_1 = Segment(np.array([3, 5]) * U_.meter)
    segment_2 = Segment.from_center(4, 1, U_.meter)
    segment_3 = Segment.from_center(4, 2, U_.meter, mode='width')
    segment_4 = Segment.from_center(4, 1, U_.meter, mode='width')
    assert segment_1.is_close(segment_2)
    assert segment_1.is_close(segment_3)
    assert not segment_1.is_close(segment_4)
|
21,211 | 8ecf751eb75530b2e04fde2f8953f2154f3cdc06 | ## program 5 - to find the first occurrence of sub string in a given string using index() method
# to find first occurrence of sub string in a main string
# (variable renamed from ``str`` so the ``str`` builtin is not shadowed)
text = input('Enter main string: ')
sub = input('Enter sub string: ')
# find position of sub in text
# search from 0th to last characters in text
try:
    n = text.index(sub, 0, len(text))
except ValueError:
    # index() raises ValueError when the substring is absent
    print('Sub string not found')
else:
    # report the 1-based position
    print('Sub string found at position :', (n+1))
'''
F:\PY>py string_finding_substring_5.py
Enter main string: This is a boook
Enter sub string: s
Sub string found at position : 4
F:\PY>
'''
|
21,212 | f5e5bd47a5da4b3ddd41540f4bf504fd8b9c6ab9 | import threading
import time
from DataBase import database
from source import source
from threadings import Tnuls
from threads import qthreadt
import threading
import crawler
# Module-level crawl state shared by the functions below.
ustr=""  # accumulated listing text shown in the UI
strl=""  # currently selected combo-box text
nustr=""  # accumulated pagination-link text
bool=True  # NOTE(review): shadows the builtin `bool`; guards single start
urllist=""  # crawled (url, title) pairs; starts as str despite the name
def bigin_click(self):
    """Start the crawl in a daemon thread, at most once (guarded by self.bool)."""
    # starttime=time.time();  # record the start time
    if self.bool==True:
        threads = []  # list of worker threads to run
        # Create the crawler thread; no parentheses after the target function
        # name, or it would be called immediately.
        t1 = threading.Thread(target=self.threadcl)
        threads.append(t1)  # add the thread to the list
        t1.setDaemon(True)
        t1.start()
        self.bool=False
    # for t in threads:  # iterate over the thread list
    #     t.setDaemon(True)  # must be set before start(), or the program may hang
    #     t.start()  # start the worker thread
def show_click(self):
    """Copy the combo-box selection and the crawl results into the text widgets."""
    self.strl=self.comboBox.currentText()
    self.textEdit_get.setText(self.strl)
    self.textEdit_getstr.setText(self.ustr)
def crawling(self, url):
    """Crawl `url`, collect listing links and format them into self.ustr."""
    cl=crawler.crawler()
    cl.gethtml(url)
    self.urllist=cl.geturllist()
    nexturllist=cl.getnexturl()
    # Recursively follow pagination links, extending self.urllist.
    self.getnexturl(nexturllist, cl)
    for i in range(len(self.urllist)):
        ul=self.urllist[i]
        # ul appears to be (href, title); rendered as "<n>、<title> :<href>".
        self.ustr=self.ustr+str(i)+"、"+ul[1]+" :"+ul[0]+"\n\n"
    #for ur in self.urllist:
    #    cl.gethtml(ur[0])
    #    sl=cl.getstrlist()
    #    self.strl=self.strl+sl
def getnexturl(self, nexturllist, cl):
    """Walk pagination links, accumulating listing URLs into self.urllist."""
    for i in range(len(nexturllist)):
        nul=nexturllist[i]
        self.nustr=self.nustr+nul[1]+nul[0]+"\n"
        cl.gethtml("http://gz.58.com"+nul[0])
        uls=cl.geturllist()
        if cl.isend():
            # Last page of this batch: re-fetch, take the next batch of
            # pagination links past index i, and recurse on the remainder.
            if i==(len(nexturllist)-1):
                cl.gethtml("http://gz.58.com"+nul[0])
                nus=cl.getnexturl()
                del nus[0:(i+1)]
                self.getnexturl(nus, cl)
        self.urllist=self.urllist+uls
def threadcl(self):
    """Thread entry point: crawl the hard-coded 58.com java-engineer search."""
    url="http://gz.58.com/tech/?key=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&cmcskey=java%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B%E5%B8%88&final=1&jump=2&specialtype=gls&canclequery=isbiz%3D0&sourcetype=4"
    self.crawling(url)
'''#时针
transform.translate(50,50)
transform.rotate(hour_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(Qt.darkRed))
painter.drawPolygon(QPolygonF(hourPoints))
transform.reset()
#分针
transform.translate(50,50)
transform.rotate(minite_angle)
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setBrush(QBrush(Qt.darkGreen))
painter.drawPolygon(QPolygonF(minPoints))
transform.reset()
#秒针
transform.translate(50,50)
transform.rotate(-53)#second_angle
transform.translate(-50,-50)
painter.setWorldTransform(transform)
painter.setPen(QPen(Qt.darkCyan,1))
painter.drawLine(50,50,90,20)
'''
class Window(QMainWindow):
    """Main window that paints an analogue clock face.

    NOTE(review): the Qt names used here (QMainWindow, QPainter, QPen, ...)
    are never imported in this file — presumably a star-import was lost when
    the file was assembled; confirm before running.
    """
    def __init__(self):
        super(Window, self).__init__()
        self.initUI()

    def initUI(self):
        # Fixed initial geometry: x, y, width, height.
        self.setGeometry(100, 35, 1200, 670)

    def paintEvent(self,event):
        # Hide the "show" group box while painting the clock.
        source.ui.groupBox_show.close()
        pt=QPainter(self)
        pt.begin(self)
        #self.drawRect(pt)
        self.drawclock(pt)
        pt.end()

    def drawRect(self, pt):
        # NOTE(review): a second drawRect below redefines (and replaces)
        # this method.
        pen1=QPen(QColor(225, 225, 225, 225))
        rec=QRect(500, 500,500, 500)
        pt.setPen(pen1)
        pt.drawRect(rec)
        pt.setBrush(QColor(0, 0, 0, 255))
        pt.drawRect(300, 300, 300, 600)

    def drawclock(self, painter):
        """Draw the dial, tick marks, numerals and the second hand."""
        painter.setRenderHint(QPainter.Antialiasing)
        # Font for the numerals on the dial.
        font=QFont("Times",6)
        fm=QFontMetrics(font)
        fontRect=fm.boundingRect("99")  # bounding rect of the widest numeral
        # Minute-hand polygon points.
        minPoints=[QPointF(50,25),
        QPointF(48,50),
        QPointF(52,50)]
        # Hour-hand polygon points.
        hourPoints=[QPointF(50,35),
        QPointF(48,50),
        QPointF(52,50)]
        side=min(self.width(),self.height())
        painter.setViewport((2*self.width())/5,self.height()/16,
        (4*side)/7, (4*side)/7)  # keep the clock centred in the window
        # Logical coordinate system: regardless of window size the top-left
        # is (0,0) and bottom-right is (100,100), so the dial centre is (50,50).
        painter.setWindow(0,0,100,100)
        # Paint the dial with a radial gradient.
        niceBlue=QColor(150,150,200)
        haloGrident=QRadialGradient(50,50,50,50,50)
        haloGrident.setColorAt(0.0,Qt.lightGray)
        haloGrident.setColorAt(0.5,Qt.darkGray)
        haloGrident.setColorAt(0.9,Qt.white)
        haloGrident.setColorAt(1.0,niceBlue)
        painter.setBrush(haloGrident)
        painter.setPen(QPen(Qt.darkGray,1))
        painter.drawEllipse(0,0,100,100)
        transform=QTransform()
        # Draw the "0" numeral and its tick mark.
        painter.setPen(QPen(Qt.black,1.5))
        fontRect.moveCenter(QPoint(50,10+fontRect.height()/2))
        painter.setFont(font)
        painter.drawLine(50,2,50,8)#
        painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"0")
        # Remaining hour numerals: rotate 30° about the centre each step.
        for i in range(1,12):
            transform.translate(50, 50)
            transform.rotate(30)
            transform.translate(-50,-50)
            painter.setWorldTransform(transform)
            painter.drawLine(50,2,50,8)
            painter.drawText(QRectF(fontRect),Qt.AlignHCenter|Qt.AlignTop,"%d" % i)
        transform.reset()
        # Draw the minute tick marks (skip positions shared with hour marks).
        painter.setPen(QPen(Qt.blue,1))
        for i in range(1,60):
            transform.translate(50,50)
            transform.rotate(6)
            transform.translate(-50,-50)
            if i%5!=0:
                painter.setWorldTransform(transform)
                painter.drawLine(50,2,50,5)
        transform.reset()
        # Current time (only the second hand is drawn below).
        currentTime=QTime().currentTime()
        #hour=currentTime.hour() if currentTime.hour()<12 else currentTime.hour()-12
        #minite=currentTime.minute()
        second=currentTime.second()
        # Rotation angles for the hands.
        #hour_angle=hour*30.0+(minite/60.0)*30.0
        #minite_angle=(minite/60.0)*360.0
        second_angle=second*6.0-53
        source.ui.textEdit_get.setText(str(second))
        self.draw_line(painter, transform, second_angle)
        self.draw_line(painter, transform)

    def draw_line(self, painter, transform, angle=-53):
        # Second hand: rotate about the dial centre by `angle` degrees.
        transform.reset()
        transform.translate(50,50)
        transform.rotate(angle)#second_angle
        transform.translate(-50,-50)
        painter.setWorldTransform(transform)
        painter.setPen(QPen(Qt.darkCyan,1))
        painter.drawLine(50,50,90,20)

    def drawRect(self, pt):
        # Debug helper: draw two rectangles; overrides the earlier drawRect.
        pen1=QPen(QColor(225, 225, 225, 225))
        rec=QRect(500, 500,500, 500)
        pt.setPen(pen1)
        pt.drawRect(rec)
        pt.setBrush(QColor(0, 0, 0, 255))
        pt.drawRect(300, 300, 300, 600)
#**********************************************************************8
from PyQt5 import QtCore, QtGui, QtWidgets
from source import source
from threadings import Tnuls
import threading
import time
from ui_paint import Window
def bigin_click(self):
    """Start a crawl for the job site selected in comboBox_2 (once per run).

    NOTE(review): module-level function using `self` and the global
    MainWindow — looks pasted out of a class; confirm the intended scope.
    """
    if not source.isbigan:
        # Dispatch to the crawler thread matching the selected site.
        if self.comboBox_2.currentText ()=="58同城":
            t=Tnuls(0)
            t.start()
            source.isbigan=True
        if self.comboBox_2.currentText ()=="中华英才网":
            t=Tnuls(1)
            t.start()
            source.isbigan=True
        if self.comboBox_2.currentText ()=="智联招聘":
            t=Tnuls(2)
            t.start()
            source.isbigan=True
        if self.comboBox_2.currentText ()=="猎聘猎头网":
            t=Tnuls(3)
            t.start()
            source.isbigan=True
        if self.comboBox_2.currentText ()=="卓博人才网":
            t=Tnuls(4)
            t.start()
            source.isbigan=True
        if self.comboBox_2.currentText ()=="前程无忧":
            t=Tnuls(5)
            t.start()
            source.isbigan=True
    if source.isbigan:
        t1 = threading.Thread(target=self.threadcl)  # watcher thread for the crawl
        t1.setDaemon(True)
        t1.start()
        MainWindow.update()
def show_click(self):
    """Refresh the window and show the analysis text for the current selection."""
    MainWindow.getdata()
    # Toggle updates to force a full repaint.
    MainWindow.setUpdatesEnabled(False);
    MainWindow.setUpdatesEnabled(True);
    MainWindow.repaint();
    if len(MainWindow.bestlist)>0:
        bestlist=MainWindow.bestlist
        # Interleave the canned analysis text with the computed best values.
        antext=source.Eanalyze[self.comboBox.currentText ()]
        analyze_text=""
        for i in range(len(antext)):
            analyze_text=analyze_text+antext[i]
            if i<len(bestlist):
                analyze_text=analyze_text+bestlist[i]
        self.textEdit_get.setText(analyze_text)
def threadcl(self):
    """Watcher: wait until all crawl threads finish, then close the output files."""
    #database.open()  # open the database
    source.open_txt()
    '''
    for i in range(1, 2):#len(source.urllist)
        t=Tnuls(i)
        t.start()
        time.sleep(0.5)
        while source.threadnum>800:
            time.sleep(0.3)
    '''
    # Poll until the crawler thread count drops to zero.
    while source.isbigan:
        time.sleep(1)
        if source.threadnum<1:
            source.isgetweb=True
            source.isbigan=False
    # database.close()  # close the database
    source.close_txt()
    source.copy_txt()
#------------------------------------------------------------------------------------------
# NOTE(review): these lines reference `self` at module level — they appear to
# be pasted out of a Ui_MainWindow.setupUi method and cannot run here as-is.
self.pushButton_bigin.clicked.connect(MainWindow.bigin_click)
self.pushButton_show.clicked.connect(MainWindow.show_click)
self.pushButton_back.clicked.connect(MainWindow.ui_reshow)
self.pushButton.clicked.connect(MainWindow.deep_ay)
self.pushButton_2.clicked.connect(MainWindow.duibi_hx)
self.pushButton_3.clicked.connect(MainWindow.duibi_zx)
self.pushButton_4.clicked.connect(MainWindow.duibi_nl)
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow =Window() #QtWidgets.QMainWindow()
    # NOTE(review): Ui_MainWindow is neither defined nor imported in this
    # file — confirm where it comes from.
    source.ui = Ui_MainWindow()
    source.ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
21,213 | b05ab7909b12a05ce2ee57e9909ae4541d73af42 | from entities import *
from functools import lru_cache
from reducers import *
@lru_cache(maxsize=None)
def increment(term):
    """Return `term` with every Variable index incremented by one.

    Used by augment() to shift context entries under a new binder.
    NOTE(review): the shift has no cutoff — variables under an Abstraction
    are incremented too; confirm that matches the de Bruijn convention used
    by `entities`.
    """
    assert isinstance(term, Term)
    if isinstance(term, Variable):
        return Variable(term.index + 1)
    elif isinstance(term, Application):
        return Application(increment(term.function), increment(term.argument))
    elif isinstance(term, Abstraction):
        return Abstraction(term.type, increment(term.body))
    elif isinstance(term, Constant):
        return term
    # Previously an unhandled Term subclass silently returned None.
    raise TypeError('unhandled term: {!r}'.format(term))
@lru_cache(maxsize=None)
def augment(context, type):
    """Extend `context` with a fresh Variable(0) of `type`, shifting the
    existing entries' variable indices up by one."""
    assert isinstance(type, Type)
    return ((Variable(0), type),) + tuple(
        (increment(e), t)
        for e, t in context
    )
@lru_cache(maxsize=None)
def abstractions(context, steps):
    """All (abstraction, function-type) pairs of exactly `steps` steps:
    spend n steps on the argument type, one on the binder, and the rest
    on the body enumerated in the augmented context."""
    assert isinstance(steps, int) and steps >= 0
    return tuple(
        (Abstraction(t, e), FunctionType(t, e_t))
        for n in range(steps)
        for t in types(n)
        for e, e_t in terms(augment(context, t), steps - 1 - n)
    )
@lru_cache(maxsize=None)
def functions(context, steps, argument):
    """Terms of size `steps` whose type is a function accepting `argument`."""
    assert isinstance(steps, int) and steps >= 0
    assert isinstance(argument, Type)
    return tuple(
        (e, t)
        for e, t in terms(context, steps)
        if isinstance(t, FunctionType) and t.argument == argument
    )
@lru_cache(maxsize=None)
def terms(context, steps):
    """Enumerate (term, type) pairs buildable in exactly `steps` steps,
    pruning terms whose normal form already appeared at a smaller size or
    that are inductively equal to a known normal form of the same type."""
    assert isinstance(steps, int) and steps >= 0
    # Non-debug version
    return context if steps == 0 else tuple(
        (e, t)
        for e, t in abstractions(context, steps) + applications(context, steps)
        if (normalize(e), t) not in normal_forms(context, steps - 1)
        if not any(inductively_equal(e, t, e2, t2) for e2, t2 in normal_forms_type(context, steps - 1, t))
    )
# Should also make sure that only 1 inductively-equivalent function makes it per round
'''
# Debug version
if steps == 0:
return context
else:
results = tuple(
(e, t)
for e, t in
abstractions(context, steps) + applications(context, steps)
if (normalize(e), t) not in normal_forms(context, steps - 1)
if not any(inductively_equal(e, t, e2, t2) for e2, t2 in normal_forms(context, steps - 1))
)
#for e, t in results:
# for n in range(steps):
# for e2, t2 in terms(context, n):
# if inductively_equal(e, t, e2, t2):
# raise Exception
#for e, t in results:
# if str(e) == '(λ:ℕ (λ:ℕ (((iter 0) succ) 1)))':
# #print('{} : {}'.format(e, t))
# for n in range(steps):
# for e2, t2 in terms(context, n):
# if str(e2) == '(λ:ℕ ((iter 0) succ))':
# #print('{} : {}'.format(e2, t2))
# print('Inductively equal: {}'.format(inductively_equal(e, t, e2, t2, log=True)))
# exit()
#return results
'''
@lru_cache(maxsize=None)
def normal_forms(context, steps):
    """All (normal form, type) pairs for terms of size up to `steps`:
    the (cached) smaller sizes plus the normalized terms of exactly `steps`.
    """
    assert isinstance(steps, int) and steps >= 0
    # The original wrapped the concatenation in a redundant tuple() call,
    # copying an already-tuple value; the concatenation alone suffices.
    previous = () if steps == 0 else normal_forms(context, steps - 1)
    return previous + tuple((normalize(e), t) for e, t in terms(context, steps))
@lru_cache(maxsize=None)
def normal_forms_type(context, steps, type):
    """Normal forms up to size `steps`, restricted to the given type."""
    return tuple(
        (e, t)
        for e, t in normal_forms(context, steps)
        if t == type
    )
@lru_cache(maxsize=None)
def applications(context, steps):
    """All (application, result-type) pairs of exactly `steps` steps:
    spend n steps on the argument, one on the application node, and the
    rest on a function accepting the argument's type."""
    assert isinstance(steps, int) and steps >= 0
    return tuple(
        (Application(e1, e2), e1_t.result)
        for n in range(steps)
        for e2, e2_t in terms(context, n)
        for e1, e1_t in functions(context, steps - 1 - n, e2_t)
    )
# The single base type (natural numbers) and the cost charged per arrow.
natural = BaseType('\u2115')
penalty = 1


@lru_cache(maxsize=None)
def types(steps):
    """Enumerate every type constructible in exactly `steps` steps:
    depth 0 is the base type; deeper levels pair argument and result types
    whose depths sum to steps - penalty."""
    assert isinstance(steps, int) and steps >= 0
    if steps == 0:
        return (natural,)
    results = []
    for n in range(steps - penalty + 1):
        for arg in types(n):
            for res in types(steps - penalty - n):
                results.append(FunctionType(arg, res))
    return tuple(results)
|
class Solution:
    """LeetCode 96: count structurally unique BSTs over the values 1..n."""

    def numTrees(self, n: int) -> int:
        """Return the number of unique BSTs on ``n`` nodes (the n-th Catalan
        number), via the standard DP C(m) = sum_{k=0}^{m-1} C(k)*C(m-1-k).

        BUGFIX: the original built ``dp`` of length ``n`` and returned
        ``dp[-1]``, so ``n == 0`` raised IndexError; this version returns 1
        (the empty tree) for n == 0.
        """
        # dp[m] = number of BSTs on m nodes; dp[0] = 1 for the empty tree
        dp = [1] * (n + 1)
        for m in range(1, n + 1):
            # pick each node count for the left subtree; the rest go right
            dp[m] = sum(dp[left] * dp[m - 1 - left] for left in range(m))
        return dp[n]
21,215 | 08939bd7324be48595fed4b7365a6c3f4f1b2327 | import sys
sys.path.insert(0, '/anaconda/lib/python3.6/site-packages')
import pandas as pd
import numpy as np
from searchBeer import *
from InvertedIndexFoodsearch import *
from locationSearch import *
def mainPrompt():
    """Interactively dispatch beer searches until the user quits.

    Fixes over the original: the menu choice is re-read on every pass of
    the loop (it was read once and then the function recursed into itself);
    the error branch no longer fires after every valid 1-3 choice (the
    ``else`` was paired only with the ``== 4`` test); and a non-numeric
    entry no longer crashes with ValueError.
    """
    while True:
        try:
            user_input = int(input("How would you like to search for a particular beer?\n[1] By name\n[2] By location\n[3] By food pairing\n[4] Quit \nEnter numeric value:"))
        except ValueError:
            user_input = 0  # fall through to the retry prompt below
        if user_input == 1:
            runBeerSearch()
        elif user_input == 2:
            searchBreweriesNearby()
        elif user_input == 3:
            RunPairingSearch()
        elif user_input == 4:
            print("Exiting...")
            sys.exit()
        else:
            selection = input("Error. Would you like to retry your search? [y/n]").lower()
            if selection == 'n':
                break
mainPrompt()
|
21,216 | fc3f998a4c127e991a9290786246da6f2d851503 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# Author : rusi_
# 案例描述:
# 这里有几种不同面额的钞票,1元、5元、10元、20元、100元、200元的钞票无穷多张,现在使用这些钞票去支付X元的面额,问最少需要多少张?
# 解决方案:
# 尽可能多的使用面值较大的钞票。
# 例如X=628,我们通常会尝试从面额最大的钞票(200元)开始尝试,此时发现200元的钞票只能使用3张,此时剩余的面额是628-3*200=28元,
# 再依次查看次最大面额100元,因为100>28,因此不能用100元构成结果,再次以查看20元,发现可以使用1张,······,
# 以此类推,我们得到最终的最小钞票张数的结果是8(628 = 200*3 + 20*1 + 5*1 + 1*3)
# 问题拓展思考:如果增加一个7元面值的,贪心还能成立不?
# 因为之前的面值是,如果使用小的面额替换大的面额的时候,一定需要更多的其它面额的钞票。但是当有了七元面值的时候这就不一定了,所以是不成立的。
# 当然这种问题可以通过 dynamic programming 来解决。
def money_(amount=628, denominations=(200, 100, 20, 10, 5, 1)):
    """
    Greedy change-making: minimum number of bills needed to pay `amount`
    with unlimited bills of the given denominations.

    Generalized (backward-compatibly) from the hard-coded demo: calling
    ``money_()`` still solves the original 628 case.  The greedy strategy
    is optimal for this canonical denomination set because every bill
    divides the next larger one; it can fail for arbitrary sets (e.g. one
    containing a 7-unit bill), where dynamic programming is needed.

    :param amount: total to pay (non-negative integer)
    :param denominations: bill values, largest first
    :return: number of bills used
    """
    count = 0
    for bill in denominations:
        use = amount // bill  # take as many of this bill as fit
        amount -= use * bill
        count += use
        if amount == 0:
            break
    return count


if __name__ == '__main__':
    print(money_())
|
21,217 | 4a686d5d416fa4bce9308f6dc9dca554d3c4349f | # -*- coding: utf-8 -*-
"""Day.05 Assignment.01.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bIZ4JPAjbpZifZ1nlDnG-pucI8o8r--b
"""
# WAP to identify whether the sub list [1, 1, 5] appears in the given list in
# the same order (as a subsequence); print "its match" if yes, else "its gone".


def contains_subsequence(seq, sub):
    """Return True if every element of `sub` occurs in `seq`, in order."""
    it = iter(seq)
    # each `in` scan resumes where the previous one stopped, enforcing order
    return all(item in it for item in sub)


lst = [1, 4, 3, 5, 7, 1, 6, 5, 6]
sublst = [1, 1, 5]
# BUGFIX: the original nested loops broke out unconditionally after the first
# candidate for each element (missing later matches) and its for-else printed
# "its gone" only when sublst[0] was absent from lst entirely.
print("its match" if contains_subsequence(lst, sublst) else "its gone")
class post_author_scorer:
    """Heuristically score how likely a text node names the post's author."""

    def __init__(self, text_node, text, node):
        self.text_node = text_node  # selector for the candidate node
        self.text = text            # extracted text content
        self.node = node            # tag / node name (e.g. 'a', 'author')

    def total_score(self):
        """Return the weighted sum of five authorship signals."""
        # microformat rel="author" attribute check
        rel_attr = self.text_node.xpath('@rel').extract_first()
        rel_value = '' if rel_attr is None else str(rel_attr)
        rel_score = 1 if rel_value.lower() == 'author' else 0
        # author-related keywords anywhere in the node's attributes
        attributes = ''.join(self.text_node.xpath('@*').extract())
        auth_keywords = ['author', 'byline', 'source', 'courtesy', 'writer', 'written',
                         'published by', 'published', 'by', 'contributor', 'originator',
                         'creator', 'builder', 'editor']
        attr_score = 1 if any(attributes.find(keyword) != -1 for keyword in auth_keywords) else 0
        # meta author tag check
        node_score = 1 if self.node == 'author' else 0
        # anchor tag check
        link_score = 1 if self.node == 'a' else 0
        # short strings are more likely to be a bare name
        len_score = 1 if len(self.text) <= 50 else 0
        signals = [rel_score, attr_score, node_score, link_score, len_score]
        weights = [4, 3, 3, 1, 1]
        # weighted total of all signals
        return sum(signal * weight for signal, weight in zip(signals, weights))
21,219 | c72aa88544178cd51928b6be69210691a8a8d1ca | #!/usr/bin/env python
#coding:utf-8
"""
使用方法:
1. 如果系统没有python,请先安装python。
2. 启动命令行,windows操作系统中是:开始 > 命令提示符。Mac操作系统中是:应用程序 > 终端。
3. 在命令行中输入以下命令: python pwgen.py -s
4. 正常情况下,会有一些输出信息,找到类似于 http://0.0.0.0:3722 的网址,复制到浏览器打开即可。
"""
from __future__ import print_function
import argparse
import base64
from Crypto.Cipher import AES
from Crypto import Random
# padding algorithm
BS = AES.block_size  # AES data block length is 128 bits (16 bytes)
# zero-pad the plaintext up to a whole number of AES blocks
# NOTE(review): chr(0) padding is ambiguous for messages ending in NUL bytes
# (decode() strips them all); PKCS#7 would be unambiguous — confirm before changing.
pad = lambda s: s + (BS - len(s) % BS) * chr(0)
aes_mode = AES.MODE_CBC  # cipher-block chaining mode
def preprocess_key(key):
    """Right-pad `key` with '0' characters up to the nearest valid AES key
    length (16, 24 or 32); raise if it is longer than 32."""
    for size in (16, 24, 32):
        if len(key) <= size:
            return key + '0' * (size - len(key))
    raise Exception('length of key must no more than 32')
def encode(key, msg):
    """Encrypt `msg` with AES-CBC under the zero-padded `key`.

    A fresh random IV is generated per call; the return value is
    base64(iv || ciphertext), so decode() can recover the IV from the
    first block.

    NOTE(review): written against Python 2 string semantics — under
    Python 3, AES.encrypt expects bytes, not str.  Confirm the target
    interpreter before reuse.
    """
    key = preprocess_key(key)
    iv = Random.new().read(AES.block_size)
    cryptor = AES.new(key, aes_mode, iv)
    out = cryptor.encrypt(pad(msg))
    return base64.b64encode(iv + out)
def decode(key, msg):
    """Decrypt a base64(iv || ciphertext) payload produced by encode().

    NOTE(review): rstrip(chr(0)) removes ALL trailing NULs, including any
    that were legitimately part of the plaintext — a consequence of the
    zero-padding scheme used by pad().
    """
    key = preprocess_key(key)
    msg = base64.b64decode(msg)
    # the IV occupies the first AES block of the payload
    iv = msg[0:AES.block_size]
    ciphertext = msg[AES.block_size:len(msg)]
    cryptor = AES.new(key, aes_mode, iv)
    out = cryptor.decrypt(ciphertext)
    return out.rstrip(chr(0))
def run_server():
    """Serve a tiny Flask UI for interactive encrypt/decrypt.

    GET returns a minimal HTML form (Chinese labels); POST returns the
    result as JSON.  Binds to 0.0.0.0 on a random port in [2049, 10000].
    """
    from flask import Flask, render_template, request
    import json, random
    app = Flask(__name__)

    @app.route('/', methods = ['GET', 'POST'])
    def index():
        if request.method == 'GET':
            tpl = u'''
            <form action="/" method="post">
            秘钥: <input name="key" type="password">
            数据: <input name="msg">
            模式: <input type="radio" name="mode" value="encode" checked>加密 <input type="radio" name="mode" value="decode">解密
            <input type="submit" value="提交" onclick="">
            </form>
            '''
            # NOTE(review): 'Context-Type' is a typo for 'Content-Type';
            # browsers fall back to their default and still render the form.
            return tpl, {'Context-Type': 'text/html;charset:utf-8'}
        else:
            mode = request.form['mode']
            key = str(request.form['key'])
            msg = str(request.form['msg'])
            if mode == 'encode':
                out = encode(key, msg)
            elif mode == 'decode':
                out = decode(key, msg)
            # NOTE(review): if mode is neither value, `out` is unbound and a
            # NameError follows — the form only ever submits the two modes.
            data = {
                'out' : out,
                'status' : 200
            }
            return json.dumps(data, ensure_ascii=False), {'Content-Type':'application/json'}

    app.run('0.0.0.0', port=random.randint(2049, 10000))
if __name__ == '__main__':
    # CLI: -s runs the web UI; otherwise -k KEY plus a positional message
    # encrypts by default, or decrypts with -d.
    parse = argparse.ArgumentParser(description=u'密码加密工具')
    parse.add_argument('-s', '--server', action='store_true', help=u'启动web服务')
    parse.add_argument('-d', '--decode', action='store_true', help=u'解密模式,默认是加密模式')
    parse.add_argument('-k', '--key', help=u'秘钥')
    parse.add_argument('msg', nargs='?', help=u'消息内容')
    args = parse.parse_args()
    if args.server:
        run_server()
        exit(0)
    if args.key is None or args.msg is None:
        # both a key and a message are required outside server mode
        parse.print_help()
        exit(1)
    elif args.decode:
        print(decode(args.key, args.msg))
    else:
        print(encode(args.key, args.msg))
|
21,220 | 328f24b9c481a573bf19f93856498e8e4effd3db |
# Hyper-parameters for the siamese LSTM model.
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 50  # 100 or 50
VALIDATION_SPLIT = 0.1
RATE_DROP_LSTM = 0.25  # decrease to 0.1
RATE_DROP_DENSE = 0.25
NUMBER_LSTM = 50
NUMBER_DENSE_UNITS = 50
ACTIVATION_FUNCTION = 'relu'

# Bundle the settings so callers can pass a single config mapping around.
siamese_config = dict(
    EMBEDDING_DIM=EMBEDDING_DIM,
    MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH,
    VALIDATION_SPLIT=VALIDATION_SPLIT,
    RATE_DROP_LSTM=RATE_DROP_LSTM,
    RATE_DROP_DENSE=RATE_DROP_DENSE,
    NUMBER_LSTM=NUMBER_LSTM,
    NUMBER_DENSE_UNITS=NUMBER_DENSE_UNITS,
    ACTIVATION_FUNCTION=ACTIVATION_FUNCTION,
)
|
21,221 | 3b7b764bc5e59d1d30c2d45413c5e3d15e1b3d77 |
import gzip
import random
import tempfile
from random import shuffle
import pandas as pd
import numpy as np
from .SequenceMethods import SequenceMethods
from .DataFrameAnalyzer import DataFrameAnalyzer
class FastaAnalyzer:
    """Helpers for reading, writing and converting FASTA/BED genomic files."""

    def trim_fasta_sequence(self, fasta_path, bp_width, output_path):
        '''
        Split every sequence of a FASTA file into consecutive chunks of
        exactly ``bp_width`` bases and write them to a new FASTA file.

        Chunk headers become ``<header>_<chunk index>``; a trailing
        remainder shorter than ``bp_width`` is discarded.

        :param fasta_path: input FASTA path
        :param bp_width: chunk width in base pairs
        :param output_path: output FASTA path
        '''
        output_rows = []
        fastas = self.get_fastas_from_file(fasta_path, uppercase=True)
        print(len(fastas))
        for header, seq in fastas:
            # BUGFIX: integer division — ``len(seq) / bp_width`` is a float
            # under Python 3 and range() raises TypeError on it.
            for i in range(len(seq) // bp_width):
                subsequence = seq[i * bp_width: (i + 1) * bp_width]
                if len(subsequence) == bp_width:
                    output_rows.append([header, str(i), subsequence])
        # write chunks to the output FASTA
        writer = open(output_path, "w")
        for r in output_rows:
            writer.write(">" + r[0] + "_" + r[1] + "\n" + r[2] + "\n")
        writer.close()

    def convert_bed_to_bed_max_position(self, bed_peaks_path, bed_peaks_output_path,
                                        compression=None):
        """
        Convert a BED6+4 (narrowPeak) file into a 4-column BED holding only
        the 1-bp position of each peak maximum.

        :param bed_peaks_path: input narrowPeak path
        :param bed_peaks_output_path: output BED path
        :param compression: forwarded to ``DataFrame.to_csv`` (e.g. 'gzip')
        """
        # narrowPeak columns; 'peak' is the summit offset from chromStart
        df = pd.read_csv(bed_peaks_path, sep='\t', index_col=False,
                         names=['chrom', 'chromStart', 'chromEnd', 'name', 'score',
                                'strand', 'signalValue', 'pValue', 'qValue', 'peak'])
        print('here...')
        df['startPeak'] = df['chromStart'] + df['peak']
        df['endPeak'] = df['startPeak'] + 1
        df['id'] = (df['chrom'].astype(str) + ":" +
                    df['startPeak'].astype(str) + "-" + df['endPeak'].astype(str))
        df = df[['chrom', 'startPeak', 'endPeak', 'id']]
        print('saving tmp file...')
        df.to_csv(bed_peaks_output_path, header=False, sep='\t', index=False,
                  compression=compression)

    def convert_fasta_to_bed(self, fasta_path, bed_path):
        """Write a 3-column BED from FASTA headers of the form 'chrom:start-end'."""
        headers = [fa[0] for fa in self.get_fastas_from_file(fasta_path)]
        writer = open(bed_path, 'w')
        for h in headers:
            chromosome, peak_range = h.split(":")
            start, end = peak_range.split("-")
            writer.write('\t'.join([chromosome, start, end]) + "\n")
        writer.close()

    def convert_bed_to_peaks_from_summit(self, bed_path, bp_flanking=50,
                                         stop_at=None):
        '''
        Build a temporary 3-column BED of fixed-width windows centred on
        each narrowPeak summit.
        (See https://www.biostars.org/p/102710/ for the format description.)

        :param bed_path: path to a BED6+4 (narrowPeak) file
        :param bp_flanking: bases added on each side of the summit
        :param stop_at: optionally read only the first N rows
        :return: path of the temporary BED file that was written
        '''
        print('reading tmp bed file...')
        df = pd.read_csv(bed_path, sep='\t', index_col=False,
                         names=['chrom', 'chromStart', 'chromEnd', 'name', 'score',
                                'strand', 'signalValue', 'pValue', 'qValue', 'peak'],
                         nrows=stop_at)
        print('here...')
        df['startFromPeak'] = df['chromStart'] + df['peak'] - bp_flanking
        df['endFromPeak'] = df['chromStart'] + df['peak'] + bp_flanking
        df = df[['chrom', 'startFromPeak', 'endFromPeak']]
        tmp_bed_path = tempfile.mkstemp()[1]
        print('saving tmp file...')
        df.to_csv(tmp_bed_path, header=False, sep='\t', index=False)
        return tmp_bed_path

    def scrambled_fasta_order(self, p, tmp_path=None, random_seed=None):
        """Shuffle the ORDER of FASTA entries (sequences untouched) and write
        them, uppercased, to ``tmp_path`` (a fresh temp file by default)."""
        fasta = self.get_fastas_from_file(p)
        if random_seed is not None:
            random.seed(random_seed)
        random.shuffle(fasta)
        tmp_path = tempfile.mkstemp()[1] if tmp_path is None else tmp_path
        self.write_fasta_from_sequences(fasta,
                                        tmp_path, uppercase=True)
        return tmp_path

    def randomize_fasta(self, p, tmp_path=None):
        """Write a FASTA whose sequences are per-sequence character shuffles.

        NOTE(review): original headers are dropped — entries are re-numbered
        by write_fasta_from_sequences.
        """
        fasta = self.get_fastas_from_file(p)
        seqs = [self.randomize_sequence(s[1]) for s in fasta]
        tmp_path = tempfile.mkstemp()[1] if tmp_path is None else tmp_path
        self.write_fasta_from_sequences(seqs,
                                        tmp_path)
        return tmp_path

    @staticmethod
    def get_sequences_from_bed(bed_path_or_dataframe, genome='hg19', **kwargs):
        """Fetch sequences for BED coordinates via an intermediate FASTA file.

        NOTE(review): ``FastaAnalyzer.convert_bed_to_fasta`` is not defined in
        this class — confirm it is provided elsewhere before calling this.
        """
        fasta_path = tempfile.mkstemp()[1] if kwargs.get('fasta_path') is None else kwargs.get('fasta_path')
        if 'fasta_path' in kwargs:
            del kwargs['fasta_path']
        FastaAnalyzer.convert_bed_to_fasta(bed_path_or_dataframe, fasta_path, genome=genome, **kwargs)
        return FastaAnalyzer.get_fastas(fasta_path, **kwargs)

    def create_bed_file(self, coordinates_table, bed_output_path):
        """
        Write a 3-column BED (CHROMOSOME_ID START END) from either a
        DataFrame (first three columns of each row) or an iterable of
        row triples.
        """
        if isinstance(coordinates_table, pd.DataFrame):
            coordinates_table = [r.values[:3] for ri, r in coordinates_table.iterrows()]
        is_gzip = bed_output_path.endswith(".gz")
        # BUGFIX: gzip output must be opened in text mode ('wt') to accept
        # the str rows written below; binary 'w' raises TypeError on Python 3.
        writer = gzip.open(bed_output_path, "wt") if is_gzip else open(bed_output_path, "w")
        for r in coordinates_table:
            writer.write("\t".join(map(str, [r[0], r[1], r[2]])) + "\n")
        writer.close()

    @staticmethod
    def get_fastas(fasta_path, uppercase=False,
                   stop_at=None, na_remove=False, is_gzip=False, **kwargs):
        """Static convenience wrapper around ``get_fastas_from_file``.

        BUGFIX: ``na_remove`` was accepted but silently dropped; it is now
        forwarded to the instance method.
        """
        fa_analyzer = FastaAnalyzer()
        return fa_analyzer.get_fastas_from_file(fasta_path, uppercase=uppercase, stop_at=stop_at,
                                                na_remove=na_remove, is_gzip=is_gzip, **kwargs)

    def get_fastas_from_file(self, fasta_path,
                             uppercase=False, stop_at=None, na_remove=False, is_gzip=False, **kwargs):
        """
        Parse an (optionally gzipped) FASTA file.

        :param uppercase: uppercase every sequence
        :param stop_at: stop after this many complete entries
        :param na_remove: drop entries whose sequence contains 'N'
        :param is_gzip: force gzip decoding even without a '.gz' suffix
        :param kwargs: ``as_dict=True`` returns {header: sequence}
        :return: list of [header, sequence] pairs (or a dict)
        """
        fastas = []
        seq = None
        header = None
        for r in (
                gzip.open(fasta_path, mode='rt') if (fasta_path.endswith(".gz") or is_gzip) else open(
                    fasta_path)):
            r = r.strip()
            if r.startswith(">"):
                # flush the previous record before starting a new one
                if seq is not None and header is not None:
                    fastas.append([header, seq])
                    if stop_at is not None and len(fastas) >= stop_at:
                        break
                seq = ""
                header = r[1:]
            else:
                line = r.upper() if uppercase else r
                seq = line if seq is None else seq + line
        # append the final record (the loop only flushes on seeing a new '>');
        # guarding on `header` also avoids a bogus [None, None] for empty files
        if header is not None and (stop_at is None or len(fastas) < stop_at):
            fastas.append([header, seq])
        if kwargs.get('as_dict', False):
            if na_remove:
                return {h: s for h, s in fastas if "N" not in s}
            return {h: s for h, s in fastas}
        if na_remove:
            return [t for t in fastas if "N" not in t[1]]
        return fastas

    @staticmethod
    def find_all(substring, dna):
        """Return every start index at which `substring` occurs in `dna`
        (overlapping occurrences included)."""
        subs = [dna[i:i + len(substring)] for i in range(0, len(dna))]
        return [ind for ind, ele in enumerate(subs) if ele == substring]

    @staticmethod
    def write_fasta_from_sequences(sequences, fasta_path,
                                   add_headers=True, uppercase=False):
        """
        Write sequences to FASTA.  Each element is either a
        (header, sequence) pair or a bare sequence string, in which case
        its list index becomes the header.

        BUGFIX: gzip output is opened in text mode ('wt') so the str
        payload can be written under Python 3.
        """
        writer = open(fasta_path, "w") if not fasta_path.endswith('.gz') \
            else gzip.open(fasta_path, 'wt')
        for i, entry in enumerate(sequences):
            # NOTE: a bare 2-character sequence is ambiguous here — it is
            # treated as a (header, sequence) pair, matching the original.
            snp_id, seq = entry if len(entry) == 2 else [i, entry]
            next_seq = (seq if not uppercase else seq.upper())
            s = ((">" + str(snp_id) + "\n") if add_headers else "") + (next_seq + "\n")
            writer.write(s)
        writer.close()

    @staticmethod
    def concatenate(fasta_paths, output_path=None):
        """Concatenate FASTA files into one (a temp file unless given) and
        return the output path."""
        output_path = tempfile.mkstemp()[1] if output_path is None else output_path
        with open(output_path, 'w') as writer:
            for i, p in enumerate(fasta_paths):
                print(i, p)
                for r in open(p):
                    writer.write(r)
        return output_path

    def randomize_sequence(self, sequence):
        """Return `sequence` with its characters shuffled uniformly."""
        nucleotides = [nt for nt in sequence]
        shuffle(nucleotides)
        return "".join(nucleotides)

    @staticmethod
    def get_gene_tss(upstream, downstream=0, genome='hg19'):
        """
        Strand-aware windows around every hg19 gene TSS, as a DataFrame
        with a 'range' coordinate column.

        NOTE(review): reads a hard-coded cluster path and depends on the
        project-local DataFrameAnalyzer/SequenceMethods helpers.
        """
        assert genome == 'hg19'
        all_ids_path = '/g/scb2/zaugg/rio/EclipseProjects/zaugglab/moritz_collaboration/data/all_genes_hg19.tsv.gz'
        df = DataFrameAnalyzer.read_tsv_gz(all_ids_path)
        chromosome_ids = list(map(str, list(range(1, 23)))) + ['X', 'Y']
        df = df[df['chromosome_name'].isin({i for i in chromosome_ids})]
        # on the + strand the window extends `upstream` bases before the TSS;
        # on the - strand the roles of upstream/downstream flip
        df['start'] = np.where(df['strand'] == 1, df['transcription_start_site'] - upstream, df['transcription_start_site'] - downstream)
        df['start'] = np.where(df['start'] < 0, 0, df['start'])
        df['end'] = np.where(df['strand'] == 1, df['transcription_start_site'] + downstream, df['transcription_start_site'] + upstream)
        df['chromosome_name'] = 'chr' + df['chromosome_name']
        df = SequenceMethods.parse_range2coordinate(df, ['chromosome_name', 'start', 'end'], 'range')
        return df
|
21,222 | 16c3e5c997c0ea2705d46e5e1537ed7b05301788 | """
Figure 4A: right hemisphere VTC maps
"""
# Repo imports
import os
import numpy as np
from submm.constants import PATHS, PRIMARY_METRIC
from submm.utils.os_utils import savefig
# MPL imports
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt # noqa:E402
from matplotlib.pyplot import imread # noqa:E402
def main():
    """
    Entry point for script: tile the per-contrast activation images for
    right VTC into a 2x5 grid and save the result as figure_4a.png.
    """
    # define bounding box for right VTC: [y_start, x_start, height, width]
    width = 225
    y_start = 245
    x_start = 50
    bbox = [y_start, x_start, width, width]
    # first figure: betas for each predictor
    fig, axes = plt.subplots(figsize=(27.7, 8), ncols=5, nrows=2)
    # redefining contrast order to match figures
    # NOTE(review): "corridorVSall" appears twice (positions 5 and 10) —
    # confirm the duplicate is intentional for filling the 2x5 grid.
    contrasts = [
        "wordVSall",
        "bodyVSall",
        "adultVSall",
        "carVSall",
        "corridorVSall",
        "numberVSall",
        "limbVSall",
        "childVSall",
        "instrumentVSall",
        "corridorVSall",
    ]
    for contrast, ax in zip(contrasts, axes.ravel()):
        image_path = (
            f"{PATHS['figures_misc']}/figure_4_images/"
            f"C1051_20160212_{contrast}_depth_1.png"
        )
        assert os.path.isfile(image_path)
        img = imread(image_path)
        # pixels whose channels are all zero (pure black) are set to 1.0 (white)
        img[np.where(np.sum(img, axis=2) == 0.0)] = 1.0
        # crop to the right-VTC bounding box before displaying
        ax.imshow(img[bbox[0] : bbox[0] + bbox[2], bbox[1] : bbox[1] + bbox[3], :])
        ax.axis("off")
    savefig(f"{PATHS['figures']}/figure_4a.png")
    plt.close(fig)


if __name__ == "__main__":
    main()
|
21,223 | e991a7b9110cf749c0e84166b383dc5a7894d02f | from kafka import KafkaConsumer
from json import loads # topic, broker list
# Consume raw byte messages from the "test" topic, starting at the earliest
# offset and giving up after 5 s without new messages (consumer_timeout_ms).
consumer_byte = KafkaConsumer("test",
                              bootstrap_servers="127.0.0.1:9092",
                              group_id=None,                # no consumer group: offsets are not shared
                              enable_auto_commit=True,
                              auto_offset_reset='earliest',
                              consumer_timeout_ms=5000)
try:
    for msg in consumer_byte:
        print(msg.value.decode("unicode_escape"))
        print(msg.topic, msg.partition, msg.offset, msg.key)
        print("")
except Exception as exc:
    # BUGFIX: the original bare `except:` swallowed every error (including
    # KeyboardInterrupt) and mislabelled failures as a normal finish; report
    # the actual error alongside the original marker message.
    print("finished --- 1", exc)
|
21,224 | b5d13634f7d9556f87c8905d6a6867cfdc5024b6 | # Generated by Django 2.2.5 on 2019-10-06 08:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow NULL in News.time
    (follow-up to 0004_news_time)."""

    dependencies = [
        ('cmsadmain', '0004_news_time'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='time',
            # null=True lets existing rows keep an empty timestamp
            field=models.DateTimeField(null=True),
        ),
    ]
|
21,225 | d62cb985fd4fc0ffc728cacc53bcad0f8a7b1115 | from Neural_networks.LSTM_neural_network.data_preperation import DataPrep
from Neural_networks.LSTM_neural_network.model import Model
from keras.layers import Flatten
from loguru import logger
def main():
    """Train the LSTM model on the WM.csv series and show the data head."""
    data = DataPrep('WM.csv', train_pct=0.8)  # 80/20 train split
    m = Model(data.X_train.shape)
    m.build()
    x, y = data.get_training_data()
    # NOTE(review): applying a Keras Flatten layer to the raw training data
    # outside a model graph looks wrong — confirm whether flattening belongs
    # inside Model.build() instead.
    x = Flatten()(x)
    m.train(x, y, 2, 32)  # presumably epochs=2, batch_size=32 — confirm in Model.train
    print(data.frame.head())


if __name__ == '__main__':
    logger.info('Started.')
    main()
    logger.info('Ended.')
def lecture1():
    """Print every mailbox line containing 'From:' (using str.find)."""
    fhand = open('/Users/zhuerika/Desktop/mbox-short.txt')
    for raw in fhand:
        stripped = raw.rstrip()
        if stripped.find('From:') >= 0:
            print(stripped)
def lecture2():
    """Print every mailbox line containing 'From:' (using re.search)."""
    import re
    fhand = open('/Users/zhuerika/Desktop/mbox-short.txt')
    for raw in fhand:
        stripped = raw.rstrip()
        if re.search('From:', stripped):
            print(stripped)
def lecture3():
    """Print every mailbox line that STARTS with 'From:' (str.startswith)."""
    fhand = open('/Users/zhuerika/Desktop/mbox-short.txt')
    for raw in fhand:
        stripped = raw.rstrip()
        if stripped.startswith('From:'):
            print(stripped)
def lecture4():
    """Print every mailbox line starting with 'From:' (regex anchor '^')."""
    import re
    fhand = open('/Users/zhuerika/Desktop/mbox-short.txt')
    for raw in fhand:
        stripped = raw.rstrip()
        if re.search('^From:', stripped):
            print(stripped)
# ^X.*: means start with X then follow by any character for 0 or more times:
# ^X-/S+ start with X then follow by a dash, non-white space character for one or more times :
def lecture5():
    """Demonstrate re.findall: extract digit runs, then uppercase-vowel runs.

    BUGFIX: the second findall result was computed but never printed, so
    the vowel half of the demo silently did nothing; it now prints its
    result just like the digit half (an empty list for this sentence).
    """
    import re
    x = 'My 2 favourite numbers are 19 and 42'
    y = re.findall('[0-9]+', x)  # extract one or more digits
    print(y)
    y = re.findall('[AEIOU]+', x)  # extract one or more uppercase vowels
    print(y)
def lecture6():
    """Greedy vs non-greedy matching: '.+' grabs the longest possible match,
    while '.+?' stops at the first opportunity."""
    import re
    sample = 'From: Using the: character'
    print(re.findall('^F.+:', sample))   # greedy: extends to the LAST colon
    print(re.findall('^F.+?:', sample))  # non-greedy: stops at the first colon
def lecture7():
    """Extract an email address: runs of non-whitespace around an '@'."""
    import re
    line = 'From yzz5070@gmail.com Thu Jan 11 08:08:08 2018'
    print(re.findall(r'\S+@\S+', line))  # \S matches any non-blank character
def lecture8():
    """Capture groups: parentheses make findall return only the group,
    not the whole match."""
    import re
    line = 'From yzz5070@gmail.com Thu Jan 11 08:08:08 2018'
    print(re.findall(r'^From (\S+@\S+)', line))
def lecture9():
    """Extract the host part of an email three ways: find+slice, double
    split, and regex."""
    # 1) find + string slicing
    data = 'From yzz5070@gmail.com Thu Jan 11 08:08:08 2018'
    at_index = data.find('@')
    print(at_index)
    space_index = data.find(' ', at_index)
    print(space_index)
    print(data[at_index + 1: space_index])
    # 2) double split: line -> words -> email -> domain
    email = data.split()[1]
    print(email.split('@')[1])
    # 3) regex, two equivalent patterns ([^ ] = any non-blank character)
    import re
    print(re.findall('@([^ ]*)', data))
    print(re.findall('^From .*@([^ ]*)', data))
def exercise1():
    """Print the maximum X-DSPAM-Confidence value found in the mailbox file.
    The pattern captures a run of digits and periods after the header name."""
    import re
    scores = []
    for raw in open('/Users/zhuerika/Desktop/mbox-short.txt'):
        match = re.findall('^X-DSPAM-Confidence: ([0-9.]+)', raw.rstrip())
        if len(match) == 1:
            scores.append(float(match[0]))
    print('Maximum:', max(scores))
def lecture10():
    """Escape a regex metacharacter with '\\' to match a literal dollar sign."""
    import re
    text = 'we just receive $10.00 for cookie'
    print(re.findall(r'\$[0-9.]+', text))
def assignment1():
    """Sum every integer found in the chosen text file (regex assignment).

    BUGFIX: ``raw_input`` is Python 2 only; this file otherwise uses
    Python 3 ``print()`` calls, so prompting crashed with NameError.
    Replaced with ``input``.
    """
    import re
    fname = input('Enter file:')
    if len(fname) < 1:
        fname = '/Users/zhuerika/Desktop/regex_sum_2554.txt'
    found = []
    for line in open(fname):
        # collect every run of digits on the line as an int
        found.extend(int(n) for n in re.findall('[0-9]+', line.rstrip()))
    print(sum(found))
if __name__ == '__main__':
    # Run one demo when executed directly; change the call to try the others.
    lecture8()
|
21,227 | 4520daddcb7b20f3cba6d1b42c739d9cb5e86639 | # List Functions
# List method demonstrations — each section works on a fresh copy of the data.
lucky_numbers = [4, 8, 15, 16, 23, 42]
friends = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
# extend() would append every element of lucky_numbers (left disabled here)
# friends.extend(lucky_numbers)
print(friends)
# append - add a single item to the end of the list
friends1 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
friends1.append("Creed")
print(friends1)
# insert - place an item at a specific index
friends2 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
friends2.insert(1, "Kelly")
print(friends2)
# remove - delete the first occurrence of a value
friends3 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
friends3.remove("Jim")
print(friends3)
# clear - empty the list in place
friends4 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
friends4.clear()
print(friends4)
# pop - remove (and return) the last element
friends5 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
friends5.pop()
print(friends5)
# index - position of the first occurrence of an element
friends6 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
print(friends6.index("Kevin"))
print(friends6.index("Jim"))
# count - how many times an element appears in the list
friends7 = ["Kevin", "Karen", "Jim", "Jim", "Oscar", "Toby"]
print(friends7.count("Jim"))
# sort - ascending order, in place
friends8 = ["Kevin", "Karen", "Jim", "Jim", "Oscar", "Toby"]
friends8.sort()
lucky_numbers.sort()
print(friends8)
print(lucky_numbers)
# reverse - NOTE: reverse() only flips the current order; it only LOOKS like a
# descending sort here because the list was already ascending
lucky_numbers1 = [4, 8, 15, 16, 23, 42]
lucky_numbers1.reverse()
print(lucky_numbers1)
# copy - shallow copy of a list
friends9 = friends.copy()
print(friends9)
21,228 | 6e64799e8b5c667582e74f8050b1f09325180f61 | from django.shortcuts import render, redirect
from django.contrib import messages
from .forms import UserRegistrationForm, UserUpdateForm, ProfileUpdateForm
from django.contrib.auth.decorators import login_required
from .models import Courses
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the public landing page of the web app."""
    home_template = 'crud/index.html'
    return render(request, home_template)
def register(request):
    '''
    Create a new user account via UserRegistrationForm.

    BUGFIX: the original wrapped the form constructor in
    ``except UserRegistrationForm.DoesNotExist():`` — Django forms have no
    ``DoesNotExist`` attribute, the constructor does not raise, and
    ``except`` needs an exception class, so the handler was dead code that
    would itself crash if ever reached.  It has been removed; invalid POST
    data re-renders the bound form so validation errors are shown.
    '''
    template_url = 'crud/register.html'
    if request.method == 'POST':
        registration_form = UserRegistrationForm(request.POST)
        if registration_form.is_valid():
            registration_form.save()
            username = registration_form.cleaned_data.get('username')
            messages.success(request, f'Successfully account created {username}!')
            return redirect('login')
    else:
        registration_form = UserRegistrationForm()
    return render(request, template_url, {'form': registration_form})
@login_required(login_url='/login/')
def dashboard(request):
    '''
    Main home page after login: list every course.

    BUGFIX: ``Courses.objects.all()`` returns a lazy queryset and never
    raises ``DoesNotExist``, and the original handler used
    ``except Courses.DoesNotExist():`` (an instance, which is invalid in an
    except clause) — the try/except was dead and broken, so it is removed.
    '''
    template_url = 'crud/dashboard.html'
    courses = Courses.objects.all()
    return render(request, template_url, {'courses': courses})
@login_required(login_url='/login/')
def profile(request):
    '''
    Show and process the logged-in user's account + profile update forms.

    BUGFIX: the original used ``except UserUpdateForm.DoesNotExist() or
    ProfileUpdateForm.DoesNotExist():`` — ``A or B`` evaluates to just A,
    forms have no ``DoesNotExist`` attribute, and the constructors do not
    raise, so the handler was dead and broken; it has been removed.
    Invalid POST data re-renders the bound forms so errors are visible.
    '''
    if request.method == 'POST':
        user_form = UserUpdateForm(request.POST, instance=request.user)
        profile_form = ProfileUpdateForm(request.POST, request.FILES,
                                         instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, f'Your details has been updated')
            return redirect('profile')
    else:
        user_form = UserUpdateForm(instance=request.user)
        profile_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'user_form': user_form,
        'profile_form': profile_form
    }
    template_url = 'crud/profile.html'
    return render(request, template_url, context)
|
21,229 | 0cf457447e1e427d52c0070c1675d9ce9145ad1e | from sense_hat import SenseHat
from time import sleep
import random
class RollDice:
    """Sense HAT dice: animates a roll on the 8x8 LED matrix, then settles on
    a random face 1-6.

    NOTE(review): ``sense`` and ``dice_rolled`` are CLASS attributes shared by
    every instance — fine for a single dice object, but worth knowing.
    """
    sense = SenseHat()
    dice_rolled = 0
    # LED colours: b = off/black (pips), w = dim white (background)
    b = [0, 0, 0]
    w = [150, 150, 150]
    # 8x8 pixel maps for faces 1..6 (list index 0 shows face value 1)
    dice = [
        [
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,w,w,w,
            w,w,w,b,b,w,w,w,
            w,w,w,b,b,w,w,w,
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,w,w,w
        ],
        [
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,w,w,w,
            w,b,b,w,w,w,w,w,
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,b,b,w,
            w,w,w,w,w,b,b,w,
            w,w,w,w,w,w,w,w
        ],
        [
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,w,w,w,
            w,b,b,w,w,w,w,w,
            w,w,w,b,b,w,w,w,
            w,w,w,b,b,w,w,w,
            w,w,w,w,w,b,b,w,
            w,w,w,w,w,b,b,w,
            w,w,w,w,w,w,w,w
        ],
        [
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w,
            w,w,w,w,w,w,w,w,
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w,
            w,w,w,w,w,w,w,w
        ],
        [
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w,
            w,w,w,b,b,w,w,w,
            w,w,w,b,b,w,w,w,
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w,
            w,w,w,w,w,w,w,w
        ],
        [
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w,
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w,
            w,w,w,w,w,w,w,w,
            w,b,b,w,w,b,b,w,
            w,b,b,w,w,b,b,w
        ]
    ]

    def dice_roll(self):
        """Animate 12 quick face changes, then settle on a random face.

        Stores the 0-based face index in ``dice_rolled`` and returns the
        rolled value (1-6).
        """
        rolling_counter = 0
        while rolling_counter < 12:
            # cycle through the six faces to fake a spinning dice
            self.sense.set_pixels(self.dice[rolling_counter % 6])
            sleep(.1)
            rolling_counter += 1
        self.dice_rolled = random.randrange(0, 6)  # 0..5 maps to faces 1..6
        self.sense.set_pixels(self.dice[self.dice_rolled])
        return self.dice_rolled + 1

    def detect_shake(self):
        """Block until the accelerometer reports a shake (>2 g on any axis),
        roll the dice, pause, clear the display, and return the rolled value."""
        value = 0
        while value == 0:
            # NOTE(review): unpacking dict.values() into x, y, z relies on the
            # value order — harmless here since all three axes are treated alike
            x, y, z = self.sense.get_accelerometer_raw().values()
            x = abs(x)
            y = abs(y)
            z = abs(z)
            if x > 2 or y > 2 or z > 2:
                value = self.dice_roll()
                sleep(1)
                self.sense.clear()
        return value
def main():
    """Clear the display, then roll the dice forever on each detected shake."""
    rollDice = RollDice()
    rollDice.sense.clear()
    while True:
        rollDice.detect_shake()


if __name__ == '__main__':
    main()
21,230 | 34ffdab25bcc85d529665afe5c4855807b188521 | from konlpy.tag import Twitter
import nltk
# Shared KoNLPy Twitter (Okt) tagger instance plus quick smoke tests.
twitter = Twitter()
print(twitter.morphs(u'한글형태소분석기 테스트 중 입니다'))  # morphemes
print(twitter.nouns(u'한글형태소분석기 테스트 중 입니다!'))  # nouns
print(twitter.pos(u'한글형태소분석기 테스트 중 입니다.'))  # POS-tagged morphemes
def read_data(filename):
    """Read a tab-separated file into a list of row lists (one list of
    fields per line).

    BUGFIX: the NSMC corpus is Korean text, so decode it explicitly as
    UTF-8 rather than relying on the platform default encoding (e.g.
    cp949 on Korean Windows, where this would previously mis-decode).
    """
    with open(filename, 'r', encoding='utf-8') as f:
        data = [line.split('\t') for line in f.read().splitlines()]
    return data
def tokenize(doc):
    """Tokenize `doc` into 'morpheme/POS-tag' strings using the module-level
    KoNLPy Twitter tagger."""
    # norm and stem are optional: they normalize spelling variants and
    # reduce inflected words to their stems
    return ['/'.join(t) for t in twitter.pos(doc, norm=True, stem=True)]
def term_exists(doc):
    """Binary bag-of-words features: for each globally selected word, whether
    it occurs in the tokenized document `doc`.

    NOTE(review): depends on the module-level ``selected_words``, which is
    only assigned further down the script — callable only after that point.
    """
    return {'exists({})'.format(word): (word in set(doc)) for word in selected_words}
# Read the training and test data
train_data = read_data('data/ratings_train.txt')
test_data = read_data('data/ratings_test.txt')
# Check that the rows and columns were read correctly
print(len(train_data))  # nrows: 150000
print(len(train_data[0]))  # ncols: 3
print(len(test_data))  # nrows: 50000
print(len(test_data[0]))  # ncols: 3
# Morphological analysis (row[1] is the review text, row[2] the label;
# [1:] skips the header row)
train_docs = [(tokenize(row[1]), row[2]) for row in train_data[1:]]
test_docs = [(tokenize(row[1]), row[2]) for row in test_data[1:]]
# Collect all tokens of the training data
tokens = [t for d in train_docs for t in d[0]]
print(len(tokens))
# Load tokens with nltk.Text()
text = nltk.Text(tokens, name='NMSC')
print(text.vocab().most_common(10))
# Collocations: word pairs that frequently appear together
text.collocations()
# Classify documents by which selected terms they contain
selected_words = [f[0] for f in text.vocab().most_common(2000)]  # use the 2000 most frequent words as features
train_docs = train_docs[:10000]  # shortcut: use only part of the training corpus to save time
train_xy = [(term_exists(d), c) for d, c in train_docs]
test_xy = [(term_exists(d), c) for d, c in test_docs]
# Train nltk's NaiveBayesClassifier and evaluate on the test data
classifier = nltk.NaiveBayesClassifier.train(train_xy)  # apply the Naive Bayes classifier
print(nltk.classify.accuracy(classifier, test_xy))
# => 0.80418
classifier.show_most_informative_features(10)
#nltk.polarity_scores("i love you")
|
21,231 | 4c3f8951d89127958593ea988935434921be0bba | #!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info, debug
from mininet.node import Host, RemoteController, OVSSwitch
# Location of the Quagga daemon binaries (zebra, bgpd)
QUAGGA_DIR = '/usr/lib/quagga'
# Must exist and be owned by quagga user (quagga:quagga by default on Ubuntu)
QUAGGA_RUN_DIR = '/var/run/quagga'
# Directory holding the per-router quagga/zebra configuration files
CONFIG_DIR = 'configs-ipv6'
class SdnIpHost(Host):
    """Mininet host with a static IP address and a default route via its
    attachment router."""

    def __init__(self, name, ip, route, *args, **kwargs):
        Host.__init__(self, name, ip=ip, *args, **kwargs)
        self.name = name
        self.ip = ip        # CIDR address assigned to <name>-eth0
        self.route = route  # next-hop for the default route

    def config(self, **kwargs):
        """Assign the address and install the default route at startup."""
        Host.config(self, **kwargs)
        debug("configuring route %s" % self.route)
        self.cmd('ip addr add %s dev %s-eth0' % (self.ip, self.name))
        self.cmd('ip route add default via %s' % self.route)
class Router(Host):
    """Mininet host acting as a Quagga BGP router (one zebra + one bgpd
    daemon per instance)."""

    def __init__(self, name, quaggaConfFile, zebraConfFile, intfDict, *args, **kwargs):
        Host.__init__(self, name, *args, **kwargs)
        self.quaggaConfFile = quaggaConfFile  # bgpd configuration file
        self.zebraConfFile = zebraConfFile    # zebra (RIB/kernel) configuration file
        self.intfDict = intfDict              # interface name -> {mac?, ipAddrs}

    def config(self, **kwargs):
        """Enable forwarding, configure each interface, then launch the
        zebra and bgpd daemons with per-router pid/api files."""
        Host.config(self, **kwargs)
        # allow the node to route both IPv4 and IPv6 traffic
        self.cmd('sysctl net.ipv4.ip_forward=1')
        self.cmd('sysctl net.ipv6.conf.all.forwarding=1')
        for intf, attrs in self.intfDict.items():
            self.cmd('ip addr flush dev %s' % intf)
            if 'mac' in attrs:
                # the link must be down while the MAC address is changed
                self.cmd('ip link set %s down' % intf)
                self.cmd('ip link set %s address %s' % (intf, attrs['mac']))
                self.cmd('ip link set %s up ' % intf)
            for addr in attrs['ipAddrs']:
                self.cmd('ip addr add %s dev %s' % (addr, intf))
        self.cmd('/usr/lib/quagga/zebra -d -f %s -z %s/zebra%s.api -i %s/zebra%s.pid' % (self.zebraConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
        self.cmd('/usr/lib/quagga/bgpd -d -f %s -z %s/zebra%s.api -i %s/bgpd%s.pid' % (self.quaggaConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))

    def terminate(self):
        """Kill this router's zebra/bgpd daemons before tearing the host down."""
        self.cmd("ps ax | egrep 'bgpd%s.pid|zebra%s.pid' | awk '{print $1}' | xargs kill" % (self.name, self.name))
        Host.terminate(self)
class SdnSwitch(OVSSwitch):
    """Open vSwitch instance pinned to OpenFlow 1.3."""

    def __init__(self, name, dpid, *args, **kwargs):
        OVSSwitch.__init__(self, name, dpid=dpid, *args, **kwargs)

    def start(self, controllers):
        OVSSwitch.start(self, controllers)
        # restrict the bridge to OpenFlow 1.3 after it comes up
        self.cmd("ovs-vsctl set Bridge %s protocols=OpenFlow13" % self.name)
class SdnIpTopo( Topo ):
    "SDN-IP tutorial topology"

    def build( self ):
        # six-switch OpenFlow core fabric, dpids 0xa1..0xa6
        s1 = self.addSwitch('s1', cls=SdnSwitch, dpid='00000000000000a1')
        s2 = self.addSwitch('s2', cls=SdnSwitch, dpid='00000000000000a2')
        s3 = self.addSwitch('s3', cls=SdnSwitch, dpid='00000000000000a3')
        s4 = self.addSwitch('s4', cls=SdnSwitch, dpid='00000000000000a4')
        s5 = self.addSwitch('s5', cls=SdnSwitch, dpid='00000000000000a5')
        s6 = self.addSwitch('s6', cls=SdnSwitch, dpid='00000000000000a6')
        zebraConf = '%s/zebra.conf' % CONFIG_DIR
        # Switches we want to attach our routers to, in the correct order
        attachmentSwitches = [s1, s2, s5, s6]
        # one external AS (router r<i> + host h<i>) per attachment switch
        for i in range(1, 4+1):
            name = 'r%s' % i
            # eth0 faces the fabric; eth1 faces the AS-internal host
            eth0 = { 'mac' : '00:00:00:00:0%s:01' % i,
                     'ipAddrs' : ['2001:%s::1/48' % i] }
            eth1 = { 'ipAddrs' : ['2001:10%s::101/48' % i] }
            intfs = { '%s-eth0' % name : eth0,
                      '%s-eth1' % name : eth1 }
            quaggaConf = '%s/quagga%s.conf' % (CONFIG_DIR, i)
            router = self.addHost(name, cls=Router, quaggaConfFile=quaggaConf,
                                  zebraConfFile=zebraConf, intfDict=intfs)
            host = self.addHost('h%s' % i, cls=SdnIpHost,
                                ip='2001:10%s::1/48' % i,
                                route='2001:10%s::101' % i)
            self.addLink(router, attachmentSwitches[i-1])
            self.addLink(router, host)
        # Set up the internal BGP speaker: one address per external peering subnet
        bgpEth0 = { 'mac':'00:00:00:00:00:01',
                    'ipAddrs' : ['2001:1::101/48',
                                 '2001:2::101/48',
                                 '2001:3::101/48',
                                 '2001:4::101/48',] }
        bgpEth1 = { 'ipAddrs' : ['10.10.10.1/24'] }
        bgpIntfs = { 'bgp-eth0' : bgpEth0,
                     'bgp-eth1' : bgpEth1 }
        bgp = self.addHost( "bgp", cls=Router,
                            quaggaConfFile = '%s/quagga-sdn.conf' % CONFIG_DIR,
                            zebraConfFile = zebraConf,
                            intfDict=bgpIntfs )
        self.addLink( bgp, s3 )
        # Connect BGP speaker to the root namespace so it can peer with ONOS
        root = self.addHost( 'root', inNamespace=False, ip='10.10.10.2/24' )
        self.addLink( root, bgp )
        # Wire up the switches in the topology
        self.addLink( s1, s2 )
        self.addLink( s1, s3 )
        self.addLink( s2, s4 )
        self.addLink( s3, s4 )
        self.addLink( s3, s5 )
        self.addLink( s4, s6 )
        self.addLink( s5, s6 )
# Expose the topology to `mn --custom <this file> --topo sdnip`.
topos = { 'sdnip' : SdnIpTopo }

if __name__ == '__main__':
    setLogLevel('debug')
    topo = SdnIpTopo()
    net = Mininet(topo=topo, controller=RemoteController)
    net.start()
    CLI(net)   # interactive prompt; blocks until the user exits
    net.stop()
    info("done\n")
|
21,232 | f021e06488c6cd32d08aaf30cda61b91f59a836e | num = int(input("Enter a number : "))
# Primality check for `num` (read above) by trial division.
prime = True
if num < 2:
    # 0, 1 and negatives are not prime by definition.
    prime = False
else:
    # A composite n always has a divisor <= sqrt(n), so checking up to
    # int(sqrt(n)) suffices (the original scanned all of range(2, num)).
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            prime = False
            break
if prime:
    print("The number is prime")
else:
    print("The number is not prime")
|
21,233 | ebf65f927b02198e8e24d1ad018dae704a836460 | __author__ = 'vladimir'
import math
class A:
    """Demo class: func_one prints, creates a class attribute, and reads it back."""

    # NOTE(review): indentation was lost in this dump; the loop below is
    # assumed to be part of func_one — confirm against the original file.
    @classmethod
    def func_one(cls):
        print("Test")
        A.field = 50   # class-level attribute, shared by all instances
        for i in range(0, 10):
            print(A().field)

A().func_one()
def is_magic_number(n):
    """Return True when n equals the sum of its digits, each raised to the
    number of digits (a narcissistic/Armstrong number, e.g. 153)."""
    digits = []
    m = n
    while m > 0:
        digits.append(m % 10)   # was a-(a/10)*10: float division broke this in Py3
        m //= 10                # integer division keeps the loop terminating
    return sum(d ** len(digits) for d in digits) == n

def magic_num(self, a):
    """Print whether *a* is a "magic" (narcissistic) number.

    Bound onto class A below as an instance method.  Fixes the original,
    which (1) used float division for digit extraction under Python 3 and
    (2) compared the digit-power sum against `a` — a variable that is
    always 0 after the extraction loop — instead of the preserved input.
    """
    p = a
    if is_magic_number(p):
        print("Number %d is magic" % p)
    else:
        print("Number %d is not magic" % p)
# Rebind func_one so A().func_one(365) now runs the magic-number check.
A.func_one = magic_num
A().func_one(365)
#
# #A.method_1 = lambda p: print "Hi %d" % p
#
# A().method_1()
|
21,234 | 58ded71cca7195677c90528590776c804f9987cc | from wagtail.core import blocks
from wagtail.snippets.blocks import SnippetChooserBlock
class CardListBlock(blocks.StructBlock):
    """StreamField block holding a list of ``general.Card`` snippets."""
    cards = blocks.ListBlock(SnippetChooserBlock('general.Card'))

    class Meta:
        # Template used to render the block and the admin chooser icon.
        template = 'general/blocks/card_list.html'
        icon = 'placeholder'
|
21,235 | 8c5e3e14accb4ab37e0cfa3d4fa5582b6e0f1b6f | '''
Write a function to tokenize a given string and return a dictionary with the frequency of
each word
'''
import re
def tokenize(string):
    """Split *string* on spaces and count occurrences of each cleaned token.

    Each token is stripped of non-alphanumeric characters before counting.
    Returns a dict mapping token -> frequency.
    """
    string_dic = {}
    for word in string.split(" "):
        # The original pattern "[^a-z,A-Z,0-9]" listed commas inside the
        # class, so commas survived cleaning (tokens like "hi," kept the
        # comma); strip all non-alphanumerics instead.
        cleaned = re.sub("[^a-zA-Z0-9]", "", word)
        string_dic[cleaned] = string_dic.get(cleaned, 0) + 1
    return string_dic
def main():
    """Read a line count, then that many lines, and print token frequencies."""
    lines = int(input())
    string = ""
    for _ in range(lines):
        string += input()
        string += '\n'
    # Turn line breaks into spaces so words on different lines stay separate
    # tokens.  (The original looped over `string.replace(each, "")` and
    # discarded the result — strings are immutable, so it did nothing.)
    string = string.replace('\n', ' ')
    print(tokenize(string))

if __name__ == '__main__':
    main()
|
21,236 | 003b8ffc2aea790cf99619e3930896b5109d3eb6 | class User:
    def __init__(self, name, email):
        self.name = name
        self.email = email
        self.books = {}   # book -> rating given by this user (may be None)

    def get_email(self):
        return self.email

    def change_email(self, address):
        """Replace the stored address and announce the change."""
        self.email = address
        print("this users email has been updated")

    def __repr__(self):
        return "User {}, email: {}, + books read : {}".format(
            self.name, self.email, len(self.books))

    def __eq__(self, other_user):
        # Users are equal when both name and email match.
        if self.name == other_user.name and self.email == other_user.email:
            return True
        else:
            return False

    def read_book(self,book,rating= 0):
        # Re-reading a book overwrites the previous rating.
        self.books.update({book:rating})
def get_average_rating(self):
total = 0
for i in self.books.values():
if type(i) == 'int':
total += i
return total/len(self.books)
# grayson = User('Grayson A Walker','gaw3aa@virginia.edu')
# tom = User('Grayson A Walker','gaw3aa@virginia.edu')
class Book:
    """A titled book identified by ISBN, accumulating integer ratings 0-4."""

    def __init__(self, title, isbn):
        self.title = title
        self.isbn = isbn
        self.ratings = []

    def get_title(self):
        return self.title

    def get_isbn(self):
        return self.isbn

    def set_isbn(self, new_isbn):
        self.isbn = new_isbn
        print("this books ISBN has been updated.")

    def add_rating(self, rating):
        # Only ratings in the closed range [0, 4] are accepted.
        if 0 <= rating <= 4:
            self.ratings.append(rating)
        else:
            print("Invalid Rating")

    def __eq__(self, other_book):
        return self.title == other_book.title and self.isbn == other_book.isbn

    def get_average_rating(self):
        # With zero or one rating the divisor is 1, matching the original's
        # behaviour (an empty list yields 0.0).
        divisor = len(self.ratings) if len(self.ratings) > 1 else 1
        return sum(self.ratings) / divisor

    def __hash__(self):
        # Books serve as dict keys elsewhere, so hash must agree with __eq__.
        return hash((self.title, self.isbn))
# dharma_bums = Book("Dharma Bums",69)
# dharma_bum = Book("Dharma Bums",420)
# dharma_bums.add_rating(2)
# dharma_bums.add_rating(4)
class Fiction(Book):
    """A fiction Book that also records its author."""

    def __init__(self,title,isbn,author):
        super().__init__(title,isbn)
        self.author = author

    def get_author(self):
        return self.author

    def __repr__(self):
        return "{} by {}".format(self.title,self.author)
class Non_Fiction(Book):
    """A non-fiction Book with a subject and a difficulty level."""

    def __init__(self,title,isbn,subject,level):
        super().__init__(title,isbn)
        self.subject = subject
        self.level = level

    def get_subject(self):
        return self.subject

    def get_level(self):
        return self.level

    def __repr__(self):
        return "{title}, a {level} manual on {subject}".format(title=self.title,level=self.level,subject=self.subject)
# ulysses = Fiction("Ulysses",1234,"Joyce")
# superforecasting = Non_Fiction("Superforecasting",1234,"Statistics","intermed")
# grayson.read_book('dharma_bums',3)
# grayson.read_book('dharma',4)
class TomeRater():
    """Registry tying users (email -> User) to books (Book -> read count)."""

    def __init__(self):
        self.users = {}
        self.books = {}

    def __repr__(self):
        return "Users: {users}\n\nBooks: {books}".format(users=len(self.users),books=len(self.books))

    def __eq__(self, diff):
        return self.users == diff.users and self.books == diff.books

    def create_book(self,title,isbn):
        return Book(title,isbn)

    def create_novel(self,title,author,isbn):
        # NOTE(review): argument order (title, author, isbn) differs from the
        # other create_* helpers, which take isbn second — easy to misuse.
        return Fiction(title,isbn,author)

    def create_non_fiction(self,title,isbn,subject,level):
        return Non_Fiction(title,isbn,subject,level)

    def add_book_to_user(self,book,email,rating=None):
        """Record that the user with *email* read *book*; unknown emails are
        silently ignored."""
        # NOTE(review): indentation was lost in this dump; everything below
        # is assumed to sit inside the email check — confirm.
        if email in self.users.keys():
            self.users[email].read_book(book,rating)
            if rating is not None:
                book.add_rating(rating)
            if book in self.books:
                self.books[book] += 1
            else:
                self.books[book] = 1

    def add_user(self,name,email,user_books=None):
        """Create a User and optionally register a starting list of books."""
        user = User(name,email)
        self.users[email] = user
        if user_books != None:
            for i in user_books:
                self.add_book_to_user(i,email)
                print(i)

    def print_catalog(self):
        for i in self.books.keys():
            print(i)

    def print_users(self):
        for i in self.users.values():
            print(i)

    def most_read_book(self):
        """Return the Book with the highest read count (None when empty)."""
        max_book = None
        max_book_value = 0
        for book,value in self.books.items():
            if value > max_book_value:
                max_book = book
                max_book_value = value
        return max_book

    def most_positive_user(self):
        """Return a message naming the user with the highest average rating.

        NOTE(review): relies on User.get_average_rating, which divides by the
        user's book count — a user with no books raises ZeroDivisionError.
        """
        highest_avg_rating = 0
        highest_name = None
        for user in self.users.values():
            my_avg = user.get_average_rating()
            if my_avg > highest_avg_rating:
                highest_avg_rating = my_avg
                highest_name = user.name
        return ("Most positive user is {name} with average rating of {rating}".format(name=highest_name, rating=highest_avg_rating))

    def highest_rated_book(self):
        """Return the Book with the highest average rating ('' when empty)."""
        max_avg = 0
        max_book = ''
        for i,value in self.books.items():
            avg = i.get_average_rating()
            if avg > max_avg:
                max_avg = avg
                max_book = i
        return max_book

    def get_most_read_book(self):
        """Return a message naming the title of the most-read book."""
        high_read_count = 0
        high_read_name = None
        for book in self.books.keys():
            if self.books[book] > high_read_count:
                high_read_count = self.books[book]
                high_read_name = book.title
        return ("Book with highest read count is {name}".format(name=high_read_name))
# Smoke test: the "books" here are plain strings, which only works because
# add_book_to_user merely uses them as dict keys.
tom_book = TomeRater()
tom_book.add_user("Grayson","gaw3aa@",["Lolita","Last Exit","Ulysses"])
|
21,237 | 50ffb4fcd4b180b26cdf7451d1fccf14a3dc2a91 | import tkinter as tk
# Mouse event bindings.
def callback_func(event):
    """Show the pointer coordinates of the last left-click in `label`."""
    label["text"] = "x:{0},y{1}".format(event.x, event.y)
def callback_func1(event):
    """Show which mouse button was pressed in `label1`."""
    button_num = event.num
    label1["text"] = "button num{0}".format(button_num)
root = tk.Tk()
root.geometry("300x300")
root.title("mouse x,y")
label = tk.Label(root, text="mouse", relief="groove")
# Left-button clicks anywhere in the window update the coordinate label.
root.bind("<Button-1>", callback_func)
label.pack()
label1 = tk.Label(root, text="button num", relief="groove")
# Any mouse button press updates the button-number label.
root.bind("<Button>", callback_func1)
label1.pack()
root.mainloop()
|
21,238 | a8ffa4b2d088f9ef75ccfde53c5aee1db2843450 | #!/usr/bin/env python3
#*-*coding: utf-8 *-*
#Autor: EdwardRamos
#Date: 10/10/2016 U.S.A
#Faça um programa para o cálculo de uma folha de pagamento, sabendo que os descontos são do Imposto de Renda,
#que depende do salário bruto (conforme tabela abaixo) e 3% para o Sindicato e que o FGTS corresponde a 11% do Salário Bruto,
#mas não é descontado (é a empresa que deposita). O Salário Líquido corresponde ao Salário Bruto menos os descontos.
#O programa deverá pedir ao usuário o valor da sua hora e a quantidade de horas trabalhadas no mês.
# Desconto do IR:
# Salário Bruto até 900 (inclusive) - isento
# Salário Bruto até 1500 (inclusive) - desconto de 5%
# Salário Bruto até 2500 (inclusive) - desconto de 10%
# Salário Bruto acima de 2500 - desconto de 20% Imprima na tela as informações, dispostas conforme o exemplo abaixo.
# No exemplo o valor da hora é 5 e a quantidade de hora é 220.
# Salário Bruto: (5 * 220) : R$ 1100,00
# (-) IR (5%) : R$ 55,00
# (-) INSS ( 10%) : R$ 110,00
# FGTS (11%) : R$ 121,00
# Total de descontos : R$ 165,00
# Salário Liquido : R$ 935,00
# Pay-slip script: reads hours worked and the hourly rate, then prints a pay
# slip.  Deductions: bracketed IR and INSS (10%); FGTS (11%) is shown but not
# deducted (employer-paid); union dues (sindicato, 3%) are shown separately.
hora = float(input("Insira quantidade da hora trabalhada: "))
print(hora) #DEBUG
v_horas = float(input("Insira o valor da hora trabalhada: "))
print(v_horas) #DEBUG
salario = float(v_horas * hora)
print(salario) #DEBUG

# Deductions that do not depend on the IR bracket.
inss = salario * 0.10
sindicato = salario * 0.03
fgts = salario * 0.11

# IR bracket.  The original used four independent `if`s with `>=` on both
# edges, so salaries of exactly 900, 1500 or 2500 matched two brackets and
# printed two pay slips; an if/elif chain makes each salary match once.
if salario <= 900:
    isento = True
    ir = 0.0
    ir_pct = None   # exempt bracket: no IR charged
elif salario <= 1500:
    isento = False
    ir = salario * 0.05
    ir_pct = "5%"
elif salario <= 2500:
    isento = False
    ir = salario * 0.10
    ir_pct = "10%"
else:
    isento = False
    ir = salario * 0.20
    ir_pct = "20%"

total_desc = inss + ir
salario_desc = salario - total_desc

if isento:
    # Extra diagnostics, printed only in the exempt bracket (as before).
    print(inss) #DEBUG
    print(sindicato) #DEBUG
    print(ir) #DEBUG
    print(fgts) #DEBUG
    print(total_desc) #DEBUG
    print(salario_desc) #DEBUG

print("Salario Bruto: (5 * 220) : R$ %r" % salario)
if isento:
    print("(-) IR (5%) : R$ ISENTO")
else:
    # Label now shows the bracket's real rate (the original always said 5%).
    print("(-) IR (" + ir_pct + ") : R$ " + str(ir))
# Label fixed: the INSS rate applied is 10%, not the 9% the original printed.
print("(-) INSS (10%) : R$ " + str(inss))
print("FGTS (11%) : R$ " + str(fgts))
print("Total de descontos: : R$ " + str(total_desc))
print("Salário Liquido: : R$ " + str(salario_desc))
print("(+) Valor referente ao sindicato: R$" + str(sindicato))
|
21,239 | bc7c0d78cb2fdade72a927282a149322a3436b5f | # Sum of two numbers
# Write a program to print the sum of two integer inputs A and B
A = int(input())
B = int(input())
# `total` rather than `sum`, so the built-in sum() is not shadowed.
total = A + B
print(total)
|
21,240 | 34845c5d7c50cc7e90eae3ecd295b951bf69d8f6 | from Lex import Lex
from Node import Node
import TokenType
import re
#TODO: Add exception handling for if we run out of lexemes. That will handle som errors
class Parser:
    """Recursive-descent parser for a simplified XML-like language.

    Consumes the token list produced by ``Lex`` destructively (pop(0)) and
    builds a tree of ``Node`` objects rooted at ``self.headNode``.  Any
    unexpected token aborts the process via exit(1).
    """

    def __init__(self, fn):
        self.lex = Lex(fn)
        self.parse()
        self.headNode.out()   # print the resulting tree

    def parse(self):
        """Parse the document's root element and its children."""
        tok = self.lex.tokens.pop(0)
        self.match(tok, TokenType.ot)
        name = self.lex.tokens.pop(0)
        attributes = self.getAttributes()
        self.match(self.lex.tokens.pop(0), TokenType.ct) # Should have a close tag here
        self.headNode = Node(name, attributes)
        self.getChildren(self.headNode)

    def getAttributes(self):
        """Collect key=value pairs until '>' or '/>' is reached (not consumed)."""
        attributes = {}
        tok = self.lex.tokens[0]   # peek
        while tok != '>' and tok != '/>':
            key = self.lex.tokens.pop(0)
            tok = self.lex.tokens.pop(0)
            #print("ket: " + key)
            #print(tok)
            if tok == '=':
                tok = self.lex.tokens.pop(0)
                val = tok   # NOTE(review): unused; the value is stored via tok
                attributes[key] = tok
            else:
                print("Unexpected token: " + tok)
                exit(1)
            tok = self.lex.tokens[0]   # peek at the next token without consuming
        return attributes

    def getChildren(self, node):
        """Parse child elements of *node* until its closing '</name>' tag."""
        tok = self.lex.tokens.pop(0)
        self.match(tok, TokenType.ot)
        while tok != '</':
            name = self.lex.tokens.pop(0)
            attributes = self.getAttributes()
            n = Node(name, attributes)
            node.children.append(n)
            tok = self.lex.tokens.pop(0)
            if tok == '>':
                # Open tag: recurse into the new element's children.
                self.getChildren(n)
            elif tok == '/>':
                pass   # self-closing element: no children
            else:
                print('Unexpected token: ' + tok)
                exit(1)
            tok = self.lex.tokens.pop(0)
        tok = self.lex.tokens.pop(0)
        # The name in the close tag must match the element being closed.
        if tok != node.tag:
            print ("Missmatch close tag: " + tok)
            exit(1)
        self.match(self.lex.tokens.pop(0), TokenType.ct)

    def match(self, tok, tokType):
        """Abort with an error unless *tok* matches the token-type pattern."""
        if tokType.match(tok):
            pass
        else:
            print("Invalid token: " + tok)
            exit(1)
# Manual smoke test: parse the bundled sample document.
if __name__ == '__main__':
    p = Parser('sample.xml')
|
21,241 | 5d9568a4bb2b665fdaae03b5d46767cbd3db4dff | from django.urls import path, reverse_lazy
from .views import LoginView, RegisterUserView, LogoutView
from django.contrib.auth import views as auth_views
# URL namespace, e.g. reverse('auth:login').
app_name = 'auth'

urlpatterns = [
    # Custom authentication views.
    path('login/', LoginView.as_view(), name="login"),
    path('register/', RegisterUserView.as_view(), name="register"),
    path('logout/', LogoutView, name="logout"),
    # Password-reset flow (request -> done -> confirm -> complete) built on
    # the stock django.contrib.auth views with project templates.
    path('reset/',
        auth_views.PasswordResetView.as_view(
            template_name='accounts/reset_password.html',
            email_template_name='accounts/reset_password_email.html',
            subject_template_name='accounts/reset_password_subject.txt',
            success_url = reverse_lazy('auth:password_reset_done')
        ),
        name="password_reset"),
    path('reset/done/',
        auth_views.PasswordResetDoneView.as_view(
            template_name='accounts/reset_password_done.html'
        ),
        name='password_reset_done'),
    path('reset/<uidb64>/<token>/',
        auth_views.PasswordResetConfirmView.as_view(
            template_name='accounts/reset_password_confirm.html',
            success_url = reverse_lazy('auth:password_reset_complete')
        ),
        name='password_reset_confirm'),
    path('reset/complete/',
        auth_views.PasswordResetCompleteView.as_view(
            template_name='accounts/reset_password_complete.html'
        ),
        name='password_reset_complete'),
    # Password change for already-authenticated users.
    path('password/', auth_views.PasswordChangeView.as_view(
            template_name='accounts/change_password.html',
            success_url = reverse_lazy('auth:password_change_done')
        ),
        name='password_change'
    ),
    path('password/done/', auth_views.PasswordChangeDoneView.as_view(
            template_name='accounts/change_password_done.html',
        ),
        name='password_change_done'
    )
]
|
21,242 | e916fc1bac1e1d421667c80833fb30fb210358b8 | class Computer:
def __init__(self, name, age, height):
self.name = name
self.age = age
self.height = height
print("Name : {}\nAge : {}\nHeight : {}".format(self.name, self.age, self.height))
# Creating an instance prints its details from __init__.
obj1 = Computer('sai', 25, 5)
obj2 = Computer('ram', 65, 6)
# obj1.display()
# obj2.display2()
class Car:
    """Toy class: every car starts as a Skoda Laura; update() upgrades it."""

    def __init__(self):
        self.brand = 'Skoda'
        self.model = 'Laura'

    def update(self):
        # Only the model changes; the brand stays fixed.
        self.model = 'Laura Elegance'
c1 = Car()
c2 = Car()
# Per-instance attributes set outside __init__; `name` is new on c2.
c2.name = 'Rapid'
c2.model = 'High_End'
# update() overwrites model on both cars (c2's 'High_End' is replaced).
c1.update()
c2.update()
# print(c1.brand, c1.model)
# print(c2.brand, c2.model)
class School:
    """Toy class: fixed marks and rank for every instance."""

    def __init__(self):
        self.marks = 540
        self.rank = 'First_rank'
# Both instances get identical attribute values from __init__.
sch1 = School()
sch2 = School()
# print(sch1.marks,sch1.rank)
|
21,243 | b4caaebb0469d745456eee26a0a08f828484ba94 | #gihanchanaka@gmail.com
import random
import math
import PoissonDristribution
import random
class Item_Demand():
    #The only functions you can use are
    # constructor(averagePerDay)------ 0<=averagePerDay<=100
    # salesOfDay(day) ----- 0<=day<=364
    def __init__(self,averagePerDay):
        """Build one year (365 days) of simulated daily sales.

        Draws a Poisson sample per day, then rearranges the sorted values
        into a low-high-low "seasonal" curve and rotates it by a random
        offset so the peak lands on a random part of the year.
        """
        self.mean=averagePerDay
        self.sales = [0] * 365
        self.temp = [0] * 365
        for day in range(365):
            self.sales[day]=round(PoissonDristribution.getNumberInPoissonDistribution(self.mean))
        self.sales.sort()
        # Interleave the sorted values: smallest at both ends of the year,
        # largest in the middle (index 182).
        d=0
        while True:
            self.temp[math.floor(d/2)]=self.sales[d]
            d += 1
            if d==365: break
            self.temp[364-math.floor(d/2)] = self.sales[d]
            d += 1
        # Rotate the curve by a random number of days.
        randomShift=round(random.random()*365)
        for d in range(365):
            self.sales[d]=self.temp[(d+randomShift)%365]

    def salesOfDay(self,day):
        """Return the simulated sales count for day 0..364."""
        return self.sales[day]
|
21,244 | 0fab4313aa2ad7dc21c85e0060eefe4b0d9a163f |
import numpy as np
from sys import argv
import time
# This lets us turn on or off jit (just-in time compilation)
# using a single variable rather than having to adjust inside the code
# Toggle for just-in-time compilation of the core functions below.
use_jit = False

if use_jit == True:
    #Use jit on core
    print("Jitting core function")
    from numba import jit
else:
    #Or don't
    print("No jitting")
    # Identity decorator standing in for numba.jit.  The original returned
    # a wrapper taking only *args, which silently dropped keyword
    # arguments; returning the function unchanged is equivalent for
    # positional calls and keeps the original signature intact.
    def jit(fn):
        return fn
#----------------------------------
def create_two_valued_array(n_items, n_high):
    """Return a shuffled float array of n_items values: n_high tens, rest ones."""
    low_part = np.ones(n_items - n_high)
    high_part = np.ones(n_high) * 10
    return np.random.permutation(np.concatenate((low_part, high_part)))
@jit
def exceeds(array, exceeds_val):
    """True when any element is greater than exceeds_val.

    Deliberately scans the whole array with no early exit, for comparison
    against the early-terminating variant in the benchmark.
    """
    found = False
    for value in array:
        if value > exceeds_val:
            found = True
    return found
@jit
def early_terminating_exceeds(array, exceeds_val):
    """True at the first element greater than exceeds_val (returns immediately)."""
    for value in array:
        if value > exceeds_val:
            return True
    return False
@jit
def compiled_in(array, in_val):
    """Membership test (`in`), optionally jit-compiled for the benchmark."""
    return in_val in array
def main(num_items, num_high):
    """Benchmark several ways of asking "does the array contain a 10"."""
    array = create_two_valued_array(num_items, num_high)
    # NOTE(review): `max` and `any` below shadow the built-ins; harmless in
    # this function but worth renaming.
    start_time = time.time()
    max = np.max(array)
    end_time = time.time()
    print("Found max in ", end_time-start_time, "s")
    start_time = time.time()
    any = np.any(array >= 10)
    end_time = time.time()
    print("Found any in ", end_time-start_time, "s")
    start_time = time.time()
    any = (10 in array)
    end_time = time.time()
    print("Found 'in' in ", end_time-start_time, "s")
    #Run compile before we time it
    any = compiled_in(array, 10)
    start_time = time.time()
    any = compiled_in(array, 10)
    end_time = time.time()
    print("Found 'in' w. compilation in ", end_time-start_time, "s")
    #Ensure jit-ter has run
    any = exceeds(array, 5)
    start_time = time.time()
    any = exceeds(array, 5)
    end_time = time.time()
    print("Found first exceeds in ", end_time-start_time, "s")
    #Ensure jit-ter has run
    any = early_terminating_exceeds(array, 5)
    start_time = time.time()
    any = early_terminating_exceeds(array, 5)
    end_time = time.time()
    print("Found first exceeds (w term) in ", end_time-start_time, "s")
if __name__ == "__main__":
    # Optional CLI args: total item count and number of high (10-valued) items.
    try:
        num_items = int(argv[1])
        num_high = int(argv[2])
    except (IndexError, ValueError):
        # Targeted handling replaces the bare `except:`, which would also
        # have swallowed KeyboardInterrupt/SystemExit.
        num_items = 10000
        num_high = 10
    print("Using ", num_items, num_high)
    main( num_items, num_high)
|
21,245 | 41898c341708271a0420ef27a5c2d4ed478da621 | from common import *
# `Flask` is assumed to come from the wildcard import of `common` at the top
# of this file — TODO confirm.
app = Flask(__name__)

# Blueprints are imported only after `app` exists (avoids circular imports
# when blueprint modules import from this package).
from app.main.cafe import cafe
from app.main.customer import customer
from app.main.kakaoPay import kakaoPay
from app.main.server import server
from app.main.kakaoLocation import kakaoLocation
from app.main.sharedInformation import sharedInformation

app.register_blueprint(cafe, url_prefix = '/cafe')
app.register_blueprint(customer, url_prefix = '/customer')
app.register_blueprint(kakaoPay)   # mounted at the application root (no prefix)
app.register_blueprint(server, url_prefix = '/server')
app.register_blueprint(kakaoLocation, url_prefix = '/kakaoLocation')
app.register_blueprint(sharedInformation, url_prefix = '/sharedInformation')
|
21,246 | e8ebcee6f8518335bb8ea09be44c6ab5e8721187 | import discord
import os
import traceback
import asyncio
import requests
import re
import json
import random
import math
import time
import datetime
import dateparser
import pytz
import time as time_module
import wikipedia as wikip
import matplotlib.pyplot as plt
import numpy as np
from aioify import aioify
from dotenv import load_dotenv
from discord import channel
from discord.ext import commands, tasks
from src.sheets.events import get_events
from src.sheets.tournaments import get_tournament_channels
from src.sheets.censor import get_censor
from src.sheets.sheets import send_variables, get_variables, get_tags
from src.forums.forums import open_browser
from src.wiki.stylist import prettify_templates
from src.wiki.tournaments import get_tournament_list
from src.wiki.wiki import implement_command, get_page_tables
from src.wiki.schools import get_school_listing
from src.wiki.scilympiad import make_results_template, get_points
from src.wiki.mosteditstable import run_table
from info import get_about
from doggo import get_doggo, get_shiba
from bear import get_bear_message
from embed import assemble_embed
from commands import get_list, get_quick_list, get_help
from lists import get_state_list
import xkcd as xkcd_module # not to interfere with xkcd method
from commanderrors import CommandNotAllowedInChannel
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
DEV_TOKEN = os.getenv('DISCORD_DEV_TOKEN')
dev_mode = os.getenv('DEV_MODE') == "TRUE"
##############
# SERVER VARIABLES
##############
# Roles
ROLE_WM = "Wiki/Gallery Moderator"
ROLE_GM = "Global Moderator"
ROLE_AD = "Administrator"
ROLE_VIP = "VIP"
ROLE_STAFF = "Staff"
ROLE_BT = "Bots"
ROLE_LH = "Launch Helper"
ROLE_AT = "All Tournaments"
ROLE_GAMES = "Games"
ROLE_MR = "Member"
ROLE_UC = "Unconfirmed"
ROLE_DIV_A = "Division A"
ROLE_DIV_B = "Division B"
ROLE_DIV_C = "Division C"
ROLE_EM = "Exalted Member"
ROLE_ALUMNI = "Alumni"
ROLE_MUTED = "Muted"
ROLE_PRONOUN_HE = "He / Him / His"
ROLE_PRONOUN_SHE = "She / Her / Hers"
ROLE_PRONOUN_THEY = "They / Them / Theirs"
ROLE_SELFMUTE = "Self Muted"
ROLE_QUARANTINE = "Quarantine"
ROLE_ALL_STATES = "All States"
# Channels
CHANNEL_TOURNAMENTS = "tournaments"
CHANNEL_BOTSPAM = "bot-spam"
CHANNEL_SUPPORT = "site-support"
CHANNEL_GAMES = "games"
CHANNEL_DMLOG = "dm-log"
CHANNEL_WELCOME = "welcome"
CHANNEL_LOUNGE = "lounge"
CHANNEL_LEAVE = "member-leave"
CHANNEL_DELETEDM = "deleted-messages"
CHANNEL_EDITEDM = "edited-messages"
CHANNEL_REPORTS = "reports"
CHANNEL_JOIN = "join-logs"
CHANNEL_UNSELFMUTE = "un-self-mute"
# Categories
CATEGORY_TOURNAMENTS = "tournaments"
CATEGORY_SO = "Science Olympiad"
CATEGORY_STATES = "states"
CATEGORY_GENERAL = "general"
CATEGORY_ARCHIVE = "archives"
CATEGORY_STAFF = "staff"
# Emoji reference
EMOJI_FAST_REVERSE = "\U000023EA"
EMOJI_LEFT_ARROW = "\U00002B05"
EMOJI_RIGHT_ARROW = "\U000027A1"
EMOJI_FAST_FORWARD = "\U000023E9"
EMOJI_UNSELFMUTE = "click_to_unmute"
EMOJI_FULL_UNSELFMUTE = "<:click_to_unmute:799389279385026610>"
# Rules
RULES = [
"Treat *all* users with respect.",
"No profanity or inappropriate language, content, or links.",
"Treat delicate subjects delicately. When discussing religion, politics, instruments, or other similar topics, please remain objective and avoid voicing strong opinions.",
"Do not spam or flood (an excessive number of messages sent within a short timespan).",
"Avoid intentional repeating pinging of other users (saying another user’s name).",
"Avoid excessive use of caps, which constitutes yelling and is disruptive.",
"Never name-drop (using a real name without permission) or dox another user.",
"No witch-hunting (requests of kicks or bans for other users).",
"While you are not required to use your Scioly.org username as your nickname for this Server, please avoid assuming the username of or otherwise impersonating another active user.",
"Do not use multiple accounts within this Server, unless specifically permitted. A separate tournament account may be operated alongside a personal account.",
"Do not violate Science Olympiad Inc. copyrights. In accordance with the Scioly.org Resource Policy, all sharing of tests on Scioly.org must occur in the designated Test Exchanges. Do not solicit test trades on this Server.",
"Do not advertise other servers or paid services with which you have an affiliation.",
"Use good judgment when deciding what content to leave in and take out. As a general rule of thumb: 'When in doubt, leave it out.'"
]
##############
# DEV MODE CONFIG
##############
intents = discord.Intents.default()
intents.members = True
if dev_mode:
BOT_PREFIX = "?"
SERVER_ID = int(os.getenv('DEV_SERVER_ID'))
else:
BOT_PREFIX = "!"
SERVER_ID = 698306997287780363
bot = commands.Bot(command_prefix=(BOT_PREFIX), case_insensitive=True, intents=intents)
##############
# CHECKS
##############
async def is_bear(ctx):
    """Checks to see if the user is bear, or pepperonipi (for debugging purposes)."""
    return ctx.message.author.id in (353730886577160203, 715048392408956950)
async def is_staff(ctx):
    """True when the invoking member holds the VIP or Staff role."""
    member = ctx.message.author
    vip = discord.utils.get(member.guild.roles, name=ROLE_VIP)
    staff = discord.utils.get(member.guild.roles, name=ROLE_STAFF)
    return any(role in member.roles for role in (vip, staff))
async def is_launcher(ctx):
    """True when the invoking member is staff or a launch helper.

    Returns an explicit False (instead of the original's implicit None)
    when the member qualifies for neither; callers only test truthiness,
    so this is backward-compatible.
    """
    member = ctx.message.author
    staff = await is_staff(ctx)
    lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)
    if staff or lhRole in member.roles:
        return True
    return False
async def is_launcher_no_ctx(member):
    """Launch-helper check for a raw member id (no command context available)."""
    server = bot.get_guild(SERVER_ID)
    qualifying_names = (ROLE_WM, ROLE_GM, ROLE_AD, ROLE_VIP, ROLE_LH)
    qualifying = [discord.utils.get(server.roles, name=n) for n in qualifying_names]
    member = server.get_member(member)
    return any(role in member.roles for role in qualifying)
async def is_admin(ctx):
    """Checks to see if the user is an administrator, or pepperonipi (for debugging purposes).

    Returns an explicit False (instead of the original's implicit None)
    when the member is neither; callers only test truthiness.
    """
    member = ctx.message.author
    aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)
    if aRole in member.roles or member.id == 715048392408956950:
        return True
    return False
def not_blacklisted_channel(blacklist):
    """Command check: reject invocation inside any channel named in *blacklist*."""
    async def predicate(ctx):
        invoked_in = ctx.message.channel
        server = bot.get_guild(SERVER_ID)
        blocked = [discord.utils.get(server.text_channels, name=name) for name in blacklist]
        if invoked_in in blocked:
            raise CommandNotAllowedInChannel(invoked_in, "Command was invoked in a blacklisted channel.")
        return True
    return commands.check(predicate)
def is_whitelisted_channel(whitelist):
    """Command check: allow invocation only inside channels named in *whitelist*."""
    async def predicate(ctx):
        invoked_in = ctx.message.channel
        server = bot.get_guild(SERVER_ID)
        allowed = [discord.utils.get(server.text_channels, name=name) for name in whitelist]
        if invoked_in in allowed:
            return True
        raise CommandNotAllowedInChannel(invoked_in, "Command was invoked in a non-whitelisted channel.")
    return commands.check(predicate)
##############
# CONSTANTS
##############
PI_BOT_IDS = [
723767075427844106,
743254543952904197,
637519324072116247
]
RULES_CHANNEL_ID = 737087680269123606
WELCOME_CHANNEL_ID = 743253216921387088
DISCORD_INVITE_ENDINGS = ["9Z5zKtV", "C9PGV6h", "s4kBmas", "ftPTxhC", "gh3aXbq", "skGQXd4", "RnkqUbK", "scioly"]
##############
# VARIABLES
##############
fish_now = 0
can_post = False
do_hourly_sync = False
CENSORED_WORDS = []
CENSORED_EMOJIS = []
EVENT_INFO = 0
REPORT_IDS = []
PING_INFO = []
TOURNEY_REPORT_IDS = []
COACH_REPORT_IDS = []
SHELLS_OPEN = []
CRON_LIST = []
RECENT_MESSAGES = []
STEALFISH_BAN = []
TOURNAMENT_INFO = []
REQUESTED_TOURNAMENTS = []
TAGS = []
STOPNUKE = False
##############
# FUNCTIONS TO BE REMOVED
##############
bot.remove_command("help")
##############
# ASYNC WRAPPERS
##############
aiowikip = aioify(obj=wikip)
##############
# FUNCTIONS
##############
@bot.event
async def on_ready():
    """Called when the bot is enabled and ready to be run.

    Each startup step is isolated in its own try block so one failure does
    not prevent the remaining tasks from starting.
    """
    print(f'{bot.user} has connected!')
    try:
        await pull_prev_info()
    except Exception as e:
        print("Error in starting function with pulling previous information:")
        print(e)
    try:
        await update_tournament_list()
    except Exception as e:
        print("Error in starting function with updating tournament list:")
        print(e)
    try:
        refresh_sheet.start()
    except Exception as e:
        # Fixed copy-pasted message: this failure concerns the sheet
        # refresh task, not the tournament list.
        print("Error in starting function with starting the sheet refresh task:")
        print(e)
    # Background task loops; .start() schedules each on the event loop.
    post_something.start()
    cron.start()
    go_stylist.start()
    manage_welcome.start()
    store_variables.start()
    change_bot_status.start()
    update_member_count.start()
@tasks.loop(minutes=5)
async def update_member_count():
    """Updates the member count shown on hidden VC"""
    guild = bot.get_guild(SERVER_ID)
    # The stats voice channel is located by name prefix, not by ID.
    channel_prefix = "Members"
    vc = discord.utils.find(lambda c: channel_prefix in c.name, guild.voice_channels)
    mem_count = guild.member_count
    joined_today = len([m for m in guild.members if m.joined_at.date() == datetime.datetime.today().date()])
    left_channel = discord.utils.get(guild.text_channels, name=CHANNEL_LEAVE)
    # Leaves are reconstructed from the leave-log channel's last 200
    # messages; NOTE(review): days with more than 200 leaves undercount.
    left_messages = await left_channel.history(limit=200).flatten()
    left_today = len([m for m in left_messages if m.created_at.date() == datetime.datetime.today().date()])
    await vc.edit(name=f"{mem_count} Members (+{joined_today}/-{left_today})")
    print("Refreshed member count.")
@tasks.loop(seconds=30.0)
async def refresh_sheet():
    """Refreshes the censor list and stores variable backups."""
    # Each step is isolated so one failure doesn't block the other.
    try:
        await refresh_algorithm()
    except Exception as e:
        print("Error when completing the refresh algorithm when refreshing the sheet:")
        print(e)
    try:
        await prepare_for_sending()
    except Exception as e:
        print("Error when sending variables to log sheet:")
        print(e)
    print("Attempted to refresh/store data from/to sheet.")
@tasks.loop(hours=10)
async def store_variables():
    # Periodic full backup of bot state to the log sheet ("store" mode).
    await prepare_for_sending("store")
@tasks.loop(hours=24)
async def go_stylist():
    # Daily wiki template cleanup pass.
    await prettify_templates()
@tasks.loop(minutes=10)
async def manage_welcome():
    # Welcome-channel cleanup is currently disabled: only the server and
    # timestamp are fetched; the original deletion logic is kept commented
    # out below for reference.
    server = bot.get_guild(SERVER_ID)
    now = datetime.datetime.now()
    # Channel message deleting is currently disabled
    # if now.hour < ((0 - TZ_OFFSET) % 24) or now.hour > ((11 - TZ_OFFSET) % 24):
    #     print(f"Cleaning #{CHANNEL_WELCOME}.")
    #     # if between 12AM EST and 11AM EST do not do the following:
    #     channel = discord.utils.get(server.text_channels, name=CHANNEL_WELCOME)
    #     async for message in channel.history(limit=None):
    #         # if message is over 3 hours old
    #         author = message.author
    #         user_no_delete = await is_launcher_no_ctx(message.author)
    #         num_of_roles = len(author.roles)
    #         if num_of_roles > 4 and (now - author.joined_at).seconds // 60 > 1 and not user_no_delete:
    #             await _confirm([author])
    #         if (now - message.created_at).seconds // 3600 > 3 and not message.pinned:
    #             # delete it
    #             await message.delete()
    # else:
    #     print(f"Skipping #{CHANNEL_WELCOME} clean because it is outside suitable time ranges.")
@tasks.loop(minutes=1)
async def cron():
    """Every minute, fires any scheduled cron tasks whose time has passed.

    Fired tasks are removed from CRON_LIST and dispatched to handle_cron().
    """
    print("Executed cron.")
    global CRON_LIST
    # Iterate over a snapshot: removing from CRON_LIST while iterating it
    # directly would skip the element immediately following each removal,
    # delaying some due tasks by a full cycle.
    for c in list(CRON_LIST):
        if datetime.datetime.now() > c['date']:
            # The date has passed, now do
            CRON_LIST.remove(c)
            await handle_cron(c['do'])
async def handle_cron(string):
    """Executes one persisted cron task described by *string* (e.g. "unban 1234").

    Unknown task strings are reported; any exception during handling is
    reported with the original task string attached.
    """
    try:
        # Order matters only in that "unstealfishban" does not contain
        # "unban" or "unmute", so the broad substring checks are safe.
        if "unban" in string:
            user_id = int(string.split(" ")[1])
            guild = bot.get_guild(SERVER_ID)
            target = await bot.fetch_user(int(user_id))
            await guild.unban(target)
            print(f"Unbanned user ID: {user_id}")
        elif "unmute" in string:
            user_id = int(string.split(" ")[1])
            guild = bot.get_guild(SERVER_ID)
            target = guild.get_member(int(user_id))
            muted_role = discord.utils.get(guild.roles, name=ROLE_MUTED)
            selfmute_role = discord.utils.get(guild.roles, name=ROLE_SELFMUTE)
            await target.remove_roles(muted_role, selfmute_role)
            print(f"Unmuted user ID: {user_id}")
        elif "unstealfishban" in string:
            user_id = int(string.split(" ")[1])
            STEALFISH_BAN.remove(user_id)
            print(f"Un-stealfished user ID: {user_id}")
        else:
            print("ERROR:")
            await auto_report("Error with a cron task", "red", f"Error: `{string}`")
    except Exception as e:
        await auto_report("Error with a cron task", "red", f"Error: `{e}`\nOriginal task: `{string}`")
@tasks.loop(hours=1)
async def change_bot_status():
    """Hourly task that sets the bot's presence to a random themed status."""
    statuses = [
        {"type": "playing", "message": "Game On"},
        {"type": "listening", "message": "my SoM instrument"},
        {"type": "playing", "message": "with Pi-Bot Beta"},
        {"type": "playing", "message": "with my gravity vehicle"},
        {"type": "watching", "message": "the WS trials"},
        {"type": "watching", "message": "birbs"},
        {"type": "watching", "message": "2018 Nationals again"},
        {"type": "watching", "message": "the sparkly stars"},
        {"type": "watching", "message": "over the week"},
        {"type": "watching", "message": "for tourney results"},
        {"type": "listening", "message": "birb sounds"},
        {"type": "playing", "message": "with proteins"},
        {"type": "playing", "message": "with my detector"},
        {"type": "playing", "message": "Minecraft"},
        {"type": "playing", "message": "with circuits"},
        {"type": "watching", "message": "my PPP fall"},
        {"type": "playing", "message": "a major scale"},
        {"type": "listening", "message": "clinking medals"},
        {"type": "watching", "message": "the world learn"},
        {"type": "watching", "message": "SciOly grow"},
        {"type": "watching", "message": "tutorials"},
        {"type": "playing", "message": "with wiki templates"},
        {"type": "playing", "message": "the flute"},
        {"type": "watching", "message": "bear eat users"},
        {"type": "watching", "message": "xkcd"},
        {"type": "playing", "message": "with wiki templates"},  # NOTE(review): duplicate entry (doubles its odds) — confirm intentional
        {"type": "watching", "message": "Jmol tutorials"},
    ]
    # random.choice replaces the old math.floor(random.random() * len(...))
    # indexing — same uniform distribution, clearer intent.
    bot_status = random.choice(statuses)
    if bot_status["type"] == "playing":
        await bot.change_presence(activity=discord.Game(name=bot_status["message"]))
    elif bot_status["type"] == "listening":
        await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=bot_status["message"]))
    elif bot_status["type"] == "watching":
        await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=bot_status["message"]))
    print("Changed the bot's status.")
@tasks.loop(hours=28)
async def post_something():
    """Allows Pi-Bot to post markov-generated statements to the forums."""
    # NOTE: the docstring above was previously placed *after* the `global`
    # statement, where it was a no-op string expression, not a docstring.
    global can_post
    if can_post:
        print("Attempting to post something.")
        await open_browser()
    else:
        # Skip the first run after startup; enable posting for later runs.
        can_post = True
async def refresh_algorithm():
    """Pulls data from the administrative sheet.

    Refreshes the censor lists, the event list, and the tags. Each refresh
    is attempted independently so one failure does not block the others.
    Always returns True (failures are only printed).
    """
    global CENSORED_WORDS, CENSORED_EMOJIS, EVENT_INFO, TAGS
    try:
        censor_data = await get_censor()
        CENSORED_WORDS = censor_data[0]
        CENSORED_EMOJIS = censor_data[1]
    except Exception as err:
        print("Could not refresh censor in refresh_algorithm:")
        print(err)
    try:
        EVENT_INFO = await get_events()
    except Exception as err:
        print("Could not refresh event list in refresh_algorithm:")
        print(err)
    try:
        TAGS = await get_tags()
    except Exception as err:
        print("Could not refresh tags in refresh_algorithm:")
        print(err)
    print("Refreshed data from sheet.")
    return True
async def prepare_for_sending(type="variable"):
    """Sends local variables to the administrative sheet as a backup.

    Each global is JSON-serialized into its own single-cell row; CRON_LIST
    needs the datetime_converter hook for its datetime entries.
    """
    serialized = [
        json.dumps(REPORT_IDS),
        json.dumps(PING_INFO),
        json.dumps(TOURNEY_REPORT_IDS),
        json.dumps(COACH_REPORT_IDS),
        json.dumps(CRON_LIST, default=datetime_converter),
        json.dumps(REQUESTED_TOURNAMENTS),
    ]
    await send_variables([[row] for row in serialized], type)
    print("Stored variables in sheet.")
def datetime_converter(o):
    """json.dumps `default` hook used when serializing CRON_LIST.

    Emits datetimes in the exact "%Y-%m-%d %H:%M:%S.%f" shape that
    pull_prev_info() parses back. str(datetime) drops the ".%f" part when
    microsecond == 0, which made whole-second entries unparseable on
    restore, so the fractional part is now always included.

    Non-datetime values fall through and return None, which json encodes
    as null rather than raising (preserves prior best-effort behavior).
    """
    if isinstance(o, datetime.datetime):
        return o.strftime("%Y-%m-%d %H:%M:%S.%f")
async def pull_prev_info():
    """Restores backed-up state variables from the administrative sheet.

    Cron dates are stored as strings and parsed back into datetimes;
    entries that fail to parse are kept as strings and the error printed.
    """
    global PING_INFO, REPORT_IDS, TOURNEY_REPORT_IDS, COACH_REPORT_IDS, CRON_LIST, REQUESTED_TOURNAMENTS
    data = await get_variables()
    REPORT_IDS = data[0][0]
    PING_INFO = data[1][0]
    TOURNEY_REPORT_IDS = data[2][0]
    COACH_REPORT_IDS = data[3][0]
    restored_cron = data[4][0]
    for task in restored_cron:
        try:
            task['date'] = datetime.datetime.strptime(task['date'], "%Y-%m-%d %H:%M:%S.%f")
        except Exception as err:
            print("ERROR WITH CRON TASK: ", err)
    CRON_LIST = restored_cron
    REQUESTED_TOURNAMENTS = data[5][0]
    print("Fetched previous variables.")
@bot.command(aliases=["tc", "tourney", "tournaments"])
async def tournament(ctx, *args):
    """Toggles the caller's tournament channel roles, or votes for new channels.

    For each argument: "all" toggles the All Tournaments role; a known
    tournament (from TOURNAMENT_INFO) toggles that tournament's role; an
    unknown name registers/increments a channel request vote in
    REQUESTED_TOURNAMENTS (one vote per user per tournament).
    """
    member = ctx.message.author
    new_args = list(args)
    # Overly broad words are stripped before matching.
    ignore_terms = ["invitational", "invy", "tournament", "regional", "invite"]
    for term in ignore_terms:
        if term in new_args:
            new_args.remove(term)
            await ctx.send(f"Ignoring `{term}` because it is too broad of a term. *(If you need help with this command, please type `!help tournament`)*")
    if len(args) == 0:
        return await ctx.send("Please specify the tournaments you would like to be added/removed from!")
    for arg in new_args:
        # Stop users from possibly adding the channel hash in front of arg
        arg = arg.replace("#", "")
        arg = arg.lower()
        found = False
        if arg == "all":
            role = discord.utils.get(member.guild.roles, name=ROLE_AT)
            if role in member.roles:
                await ctx.send(f"Removed your `All Tournaments` role.")
                await member.remove_roles(role)
            else:
                await ctx.send(f"Added your `All Tournaments` role.")
                await member.add_roles(role)
            continue
        # Known tournaments: t[1] is the channel name, t[0] the role name.
        for t in TOURNAMENT_INFO:
            if arg == t[1]:
                found = True
                role = discord.utils.get(member.guild.roles, name=t[0])
                if role == None:
                    return await ctx.send(f"Apologies! The `{t[0]}` channel is currently not available.")
                if role in member.roles:
                    await ctx.send(f"Removed you from the `{t[0]}` channel.")
                    await member.remove_roles(role)
                else:
                    await ctx.send(f"Added you to the `{t[0]}` channel.")
                    await member.add_roles(role)
                break
        if not found:
            # Unknown tournament name: treat as a vote for a new channel.
            uid = member.id
            found2 = False
            votes = 1
            for t in REQUESTED_TOURNAMENTS:
                if arg == t['iden']:
                    found2 = True
                    if uid in t['users']:
                        return await ctx.send("Sorry, but you can only vote once for a specific tournament!")
                    t['count'] += 1
                    t['users'].append(uid)
                    votes = t['count']
                    break
            if not found2:
                # First request for this name: staff must add it via !tla.
                await auto_report("New Tournament Channel Requested", "orange", f"User ID {uid} requested tournament channel `#{arg}`.\n\nTo add this channel to the voting list for the first time, use `!tla {arg} {uid}`.\nIf the channel has already been requested in the list and this was a user mistake, use `!tla [actual name] {uid}`.")
                return await ctx.send(f"Made request for a `#{arg}` channel. Please note your submission may not instantly appear.")
            await ctx.send(f"Added a vote for `{arg}`. There " + ("are" if votes != 1 else "is") + f" now `{votes}` " + (f"votes" if votes != 1 else f"vote") + " for this channel.")
    await update_tournament_list()
@bot.command()
@commands.check(is_staff)
async def tla(ctx, iden, uid):
    """Staff: registers a vote for tournament channel `iden` from user `uid`.

    Creates the request entry if it does not exist yet. The voter's ID is
    recorded in the entry's `users` list so the one-vote-per-user guard in
    `!tournament` also covers votes registered through this command
    (previously only the count was bumped, allowing a double vote).
    """
    global REQUESTED_TOURNAMENTS
    # Command arguments arrive as strings; !tournament stores int member IDs,
    # so normalize for the membership check. NOTE(review): assumes staff pass
    # a numeric ID, as instructed by the auto_report message — confirm.
    voter_id = int(uid)
    for t in REQUESTED_TOURNAMENTS:
        if t['iden'] == iden:
            t['count'] += 1
            t['users'].append(voter_id)
            await ctx.send(f"Added a vote for {iden} from {uid}. Now has `{t['count']}` votes.")
            return await update_tournament_list()
    REQUESTED_TOURNAMENTS.append({'iden': iden, 'count': 1, 'users': [voter_id]})
    await update_tournament_list()
    return await ctx.send(f"Added a vote for {iden} from {uid}. Now has `1` vote.")
@bot.command()
@commands.check(is_staff)
async def tlr(ctx, iden):
    """Staff: removes a requested tournament channel from the voting list."""
    global REQUESTED_TOURNAMENTS
    matches = [t for t in REQUESTED_TOURNAMENTS if t['iden'] == iden]
    if matches:
        # Remove only the first match, as the original loop did.
        REQUESTED_TOURNAMENTS.remove(matches[0])
        await update_tournament_list()
        return await ctx.send(f"Removed `#{iden}` from the tournament list.")
async def update_tournament_list():
    """Reconciles tournament channels/roles with the sheet and rebuilds the listing.

    Pulls tournament rows (t[0]=role/display name, t[1]=channel name,
    t[2]=emoji/prefix, t[4]=date, t[5]=days open before, t[6]=days open
    after), creates channels entering their open window, flags out-of-range
    channels for archiving, then rewrites the embeds in the tournaments
    listing channel (editing existing messages where possible).
    """
    tl = await get_tournament_channels()
    tl.sort(key=lambda x: x[0])
    global TOURNAMENT_INFO
    global REQUESTED_TOURNAMENTS
    TOURNAMENT_INFO = tl
    server = bot.get_guild(SERVER_ID)
    tourney_channel = discord.utils.get(server.text_channels, name=CHANNEL_TOURNAMENTS)
    tournament_category = discord.utils.get(server.categories, name=CATEGORY_TOURNAMENTS)
    bot_spam_channel = discord.utils.get(server.text_channels, name=CHANNEL_BOTSPAM)
    server_support_channel = discord.utils.get(server.text_channels, name=CHANNEL_SUPPORT)
    gm = discord.utils.get(server.roles, name=ROLE_GM)
    a = discord.utils.get(server.roles, name=ROLE_AD)
    all_tournaments_role = discord.utils.get(server.roles, name=ROLE_AT)
    # string_lists holds the "available channels" text, paginated so each
    # page fits within Discord's 2048-character embed description limit.
    string_lists = []
    string_lists.append("")
    open_soon_list = ""
    channels_requested_list = ""
    now = datetime.datetime.now()
    for t in tl: # For each tournament in the sheet
        # Check if the channel needs to be made / deleted
        ch = discord.utils.get(server.text_channels, name=t[1])
        r = discord.utils.get(server.roles, name=t[0])
        tourney_date = t[4]
        before_days = int(t[5])
        after_days = int(t[6])
        tourney_date_datetime = datetime.datetime.strptime(tourney_date, "%Y-%m-%d")
        # day_diff > 0: tournament is in the future; < 0: already passed.
        day_diff = (tourney_date_datetime - now).days
        print(f"Tournament List: Handling {t[0]} (Day diff: {day_diff} days)")
        if (day_diff < (-1 * after_days)) and ch != None:
            # If past tournament date, now out of range
            if ch.category.name != CATEGORY_ARCHIVE:
                await auto_report("Tournament Channel & Role Needs to be Archived", "orange", f"The {ch.mention} channel and {r.mention} role need to be archived, as it is after the tournament date.")
        elif (day_diff <= before_days) and ch == None:
            # If before tournament and in range
            new_role = await server.create_role(name=t[0])
            new_channel = await server.create_text_channel(t[1], category=tournament_category)
            await new_channel.edit(topic=f"{t[2]} - Discussion around the {t[0]} occurring on {t[4]}.", sync_permissions=True)
            await new_channel.set_permissions(new_role, read_messages=True)
            await new_channel.set_permissions(all_tournaments_role, read_messages=True)
            await new_channel.set_permissions(server.default_role, read_messages=False)
            string_to_add = (t[2] + " **" + t[0] + "** - `!tournament " + t[1] + "`\n")
            while len(string_lists[-1] + string_to_add) > 2048:
                string_lists.append("")
            string_lists[-1] += string_to_add
        elif ch != None:
            # Channel already exists and is in range: just list it.
            string_to_add = (t[2] + " **" + t[0] + "** - `!tournament " + t[1] + "`\n")
            while len(string_lists[-1] + string_to_add) > 2048:
                string_lists.append("")
            string_lists[-1] += string_to_add
        elif (day_diff > before_days):
            # Not yet within the opening window.
            open_soon_list += (t[2] + " **" + t[0] + f"** - Opens in `{day_diff - before_days}` days.\n")
    # Requested channels: most votes first, ties broken alphabetically;
    # names are space-padded so the vote counts line up.
    REQUESTED_TOURNAMENTS.sort(key=lambda x: (-x['count'], x['iden']))
    spacing_needed = max([len(t['iden']) for t in REQUESTED_TOURNAMENTS]) if len(REQUESTED_TOURNAMENTS) > 0 else 0
    for t in REQUESTED_TOURNAMENTS:
        spaces = " " * (spacing_needed - len(t['iden']))
        channels_requested_list += f"`!tournament {t['iden']}{spaces}` · **{t['count']} votes**\n"
    embeds = []
    embeds.append(assemble_embed(
        title=":medal: Tournament Channels Listing",
        desc=(
            "Below is a list of **tournament channels**. Some are available right now, some will be available soon, and others have been requested, but have not received 10 votes to be considered for a channel." +
            f"\n\n* To join an available tournament channel, head to {bot_spam_channel.mention} and type `!tournament [name]`." +
            f"\n\n* To make a new request for a tournament channel, head to {bot_spam_channel.mention} and type `!tournament [name]`, where `[name]` is the name of the tournament channel you would like to have created." +
            f"\n\n* Need help? Ping a {gm.mention} or {a.mention}, or ask in {server_support_channel.mention}"
        )
    ))
    for i, s in enumerate(string_lists):
        embeds.append(assemble_embed(
            title=f"Currently Available Channels (Page {i + 1}/{len(string_lists)})",
            desc=s if len(s) > 0 else "No channels are available currently."
        ))
    embeds.append(assemble_embed(
        title="Channels Opening Soon",
        desc=open_soon_list if len(open_soon_list) > 0 else "No channels are opening soon currently.",
    ))
    embeds.append(assemble_embed(
        title="Channels Requested",
        desc=("Vote with the command associated with the tournament channel.\n\n" + channels_requested_list) if len(channels_requested_list) > 0 else f"No channels have been requested currently. To make a request for a tournament channel, head to {bot_spam_channel.mention} and type `!tournament [name]`, with the name of the tournament."
    ))
    hist = await tourney_channel.history(oldest_first=True).flatten()
    if len(hist) != 0:
        # When the tourney channel already has embeds
        if len(embeds) < len(hist):
            # Fewer embeds than messages: delete the surplus messages.
            messages = await tourney_channel.history(oldest_first=True).flatten()
            for m in messages[len(embeds):]:
                await m.delete()
        count = 0
        # Edit the remaining messages in place, oldest first.
        async for m in tourney_channel.history(oldest_first=True):
            await m.edit(embed=embeds[count])
            count += 1
        if len(embeds) > len(hist):
            # More embeds than messages: append the extras.
            for e in embeds[len(hist):]:
                await tourney_channel.send(embed=e)
    else:
        # If the tournament channel is being initialized for the first time
        past_messages = await tourney_channel.history(limit=100).flatten()
        await tourney_channel.delete_messages(past_messages)
        for e in embeds:
            await tourney_channel.send(embed=e)
@bot.command()
@commands.check(is_staff)
async def vc(ctx):
    """Staff: toggles a voice channel paired with the current text channel.

    Supported in tournament channels, state channels (with overflow handling
    when a category hits Discord's 50-channel cap), and #games. Creating a
    VC opens it only to the channel's associated role(s); invoking again
    deletes the VC.
    """
    server = ctx.message.guild
    if ctx.message.channel.category.name == CATEGORY_TOURNAMENTS:
        test_vc = discord.utils.get(server.voice_channels, name=ctx.message.channel.name)
        if test_vc == None:
            # Voice channel needs to be opened
            new_vc = await server.create_voice_channel(ctx.message.channel.name, category=ctx.message.channel.category)
            await new_vc.edit(sync_permissions=True)
            # Make the channel invisible to normal members
            await new_vc.set_permissions(server.default_role, view_channel=False)
            at = discord.utils.get(server.roles, name=ROLE_AT)
            # Grant visibility to the tournament's own role (t[1] is the
            # channel name, t[0] the role name).
            for t in TOURNAMENT_INFO:
                if ctx.message.channel.name == t[1]:
                    tourney_role = discord.utils.get(server.roles, name=t[0])
                    await new_vc.set_permissions(tourney_role, view_channel=True)
                    break
            await new_vc.set_permissions(at, view_channel=True)
            return await ctx.send("Created a voice channel. **Please remember to follow the rules! No doxxing or cursing is allowed.**")
        else:
            # Voice channel needs to be closed
            await test_vc.delete()
            return await ctx.send("Closed the voice channel.")
    elif ctx.message.channel.category.name == CATEGORY_STATES:
        test_vc = discord.utils.get(server.voice_channels, name=ctx.message.channel.name)
        if test_vc == None:
            # Voice channel does not currently exist
            if len(ctx.message.channel.category.channels) == 50:
                # Too many voice channels in the state category
                # Let's move one state to the next category
                new_cat = filter(lambda x: x.name == "states", server.categories)
                new_cat = list(new_cat)
                if len(new_cat) < 2:
                    return await ctx.send("Could not find alternate states channel to move overflowed channels to.")
                else:
                    # Success, we found the other category
                    current_cat = ctx.message.channel.category
                    await current_cat.channels[-1].edit(category = new_cat[1], position = 0)
            new_vc = await server.create_voice_channel(ctx.message.channel.name, category=ctx.message.channel.category)
            await new_vc.edit(sync_permissions=True)
            await new_vc.set_permissions(server.default_role, view_channel=False)
            muted_role = discord.utils.get(server.roles, name=ROLE_MUTED)
            all_states_role = discord.utils.get(server.roles, name=ROLE_ALL_STATES)
            self_muted_role = discord.utils.get(server.roles, name=ROLE_SELFMUTE)
            quarantine_role = discord.utils.get(server.roles, name=ROLE_QUARANTINE)
            # State role name derived from the channel name ("new-york" -> "new york").
            state_role_name = await lookup_role(ctx.message.channel.name.replace("-", " "))
            state_role = discord.utils.get(server.roles, name = state_role_name)
            # Muted/quarantined members may see but not connect.
            await new_vc.set_permissions(muted_role, connect=False)
            await new_vc.set_permissions(self_muted_role, connect=False)
            await new_vc.set_permissions(quarantine_role, connect=False)
            await new_vc.set_permissions(state_role, view_channel = True, connect=True)
            await new_vc.set_permissions(all_states_role, view_channel = True, connect=True)
            current_pos = ctx.message.channel.position  # NOTE(review): unused — confirm whether intended for positioning the new VC
            return await ctx.send("Created a voice channel. **Please remember to follow the rules! No doxxing or cursing is allowed.**")
        else:
            await test_vc.delete()
            if len(ctx.message.channel.category.channels) == 49:
                # If we had to move a channel out of category to make room, move it back
                # Let's move one state to the next category
                new_cat = filter(lambda x: x.name == "states", server.categories)
                new_cat = list(new_cat)
                if len(new_cat) < 2:
                    return await ctx.send("Could not find alternate states channel to move overflowed channels to.")
                else:
                    # Success, we found the other category
                    current_cat = ctx.message.channel.category
                    await new_cat[1].channels[0].edit(category = current_cat, position = 1000)
            return await ctx.send("Closed the voice channel.")
    elif ctx.message.channel.name == "games":
        # Support for opening a voice channel for #games
        test_vc = discord.utils.get(server.voice_channels, name="games")
        if test_vc == None:
            # Voice channel needs to be opened/doesn't exist already
            new_vc = await server.create_voice_channel("games", category=ctx.message.channel.category)
            await new_vc.edit(sync_permissions=True)
            await new_vc.set_permissions(server.default_role, view_channel=False)
            games_role = discord.utils.get(server.roles, name=ROLE_GAMES)
            member_role = discord.utils.get(server.roles, name=ROLE_MR)
            await new_vc.set_permissions(games_role, view_channel=True)
            await new_vc.set_permissions(member_role, view_channel=False)
            return await ctx.send("Created a voice channel. **Please remember to follow the rules! No doxxing or cursing is allowed.**")
        else:
            # Voice channel needs to be closed
            await test_vc.delete()
            return await ctx.send("Closed the voice channel.")
    else:
        return await ctx.send("Apologies... voice channels can currently be opened for tournament channels and the games channel.")
@bot.command()
@commands.check(is_staff)
async def getVariable(ctx, var):
    """Fetches a local (module-level) variable by name; staff channel only."""
    if ctx.message.channel.name != "staff":
        await ctx.send("You can only fetch variables from the staff channel.")
    else:
        await ctx.send("Attempting to find variable.")
        try:
            variable = globals()[var]
            await ctx.send(f"Variable value: `{variable}`")
        except KeyError:
            # Only the globals() lookup is expected to fail; the previous
            # bare `except:` also swallowed unrelated errors (e.g. a send
            # failing because the value was too long) and mislabeled them.
            await ctx.send("Can't find that variable!")
@bot.command(aliases=["eats", "beareats"])
async def eat(ctx, user):
    """Allows bear to eat users >:D

    Fetches the message first, then deletes the invoking command before
    posting the result.
    """
    bear_line = await get_bear_message(user)
    await ctx.message.delete()
    await ctx.send(bear_line)
@bot.command()
@commands.check(is_staff)
async def refresh(ctx):
    """Staff: refreshes the tournament list and sheet data on demand."""
    await update_tournament_list()
    res = await refresh_algorithm()
    # refresh_algorithm returns True on completion; plain truthiness
    # replaces the `res == True` comparison.
    if res:
        await ctx.send("Successfully refreshed data from sheet.")
    else:
        await ctx.send(":warning: Unsuccessfully refreshed data from sheet.")
@bot.command(aliases=["gci", "cid", "channelid"])
async def getchannelid(ctx):
    """Gets the channel ID of the current channel."""
    await ctx.send(f"Hey <@{ctx.message.author.id}>! The channel ID is `{ctx.message.channel.id}`. :)")
@bot.command(aliases=["gei", "eid"])
async def getemojiid(ctx, emoji: discord.Emoji):
    """Shows the given emoji next to its raw (escaped) form."""
    return await ctx.send("{0} - `{0}`".format(emoji))
@bot.command(aliases=["rid"])
async def getroleid(ctx, name):
    """Sends the escaped mention string of the role with the given name."""
    matched_role = discord.utils.get(ctx.message.author.guild.roles, name=name)
    return await ctx.send(f"`{matched_role.mention}`")
@bot.command(aliases=["gui", "ui", "userid"])
async def getuserid(ctx, user=None):
    """Gets the user ID of the caller or another user.

    `user` may be omitted (caller's ID), a raw username, or a mention.
    Discord sends mentions as either `<@id>` or `<@!id>` (nickname form);
    the previous check only recognized `<@!`, so plain `<@id>` mentions
    fell into the username lookup and crashed.
    """
    if user is None:
        await ctx.send(f"Your user ID is `{ctx.message.author.id}`.")
    elif user.startswith("<@"):
        # Mention: strip the markup and echo the embedded ID.
        user = user.replace("<@!", "").replace("<@", "").replace(">", "")
        await ctx.send(f"The user ID of <@{user}> is `{user}`.")
    else:
        member = ctx.message.guild.get_member_named(user)
        await ctx.send(f"The user ID of {user} is: `{member.id}`")
@bot.command(aliases=["ufi"])
@commands.check(is_staff)
async def userfromid(ctx, iden: int):
    """Mentions the user that has the given ID."""
    target = bot.get_user(iden)
    await ctx.send(target.mention)
@bot.command(aliases=["hi"])
async def hello(ctx):
    """Greets the caller; handy as a quick liveness check for the bot."""
    await ctx.send("Well, hello there.")
@bot.command(aliases=["what"])
async def about(ctx):
    """Sends the bot's about/info text."""
    return await ctx.send(get_about())
@bot.command(aliases=["server", "link", "invitelink"])
async def invite(ctx):
    """Sends the server invite link."""
    await ctx.send("https://discord.gg/scioly")
@bot.command()
async def forums(ctx):
    """Sends a link to the Scioly.org forums."""
    await ctx.send("<https://scioly.org/forums>")
@bot.command()
async def obb(ctx):
    """Sends a link to the Scioly.org OBB page."""
    await ctx.send("<https://scioly.org/obb>")
@bot.command(aliases=["tests", "testexchange"])
async def exchange(ctx):
    """Sends a link to the Scioly.org test exchange."""
    await ctx.send("<https://scioly.org/tests>")
@bot.command()
async def gallery(ctx):
    """Sends a link to the Scioly.org image gallery."""
    await ctx.send("<https://scioly.org/gallery>")
@bot.command(aliases=["random"])
async def rand(ctx, a: int = 1, b: int = 10):
    """Sends a random integer between `a` and `b` inclusive (defaults 1-10).

    The `int` annotations make discord.py convert user-supplied arguments;
    previously they arrived as strings and `b + 1` raised a TypeError for
    any explicit bounds. Reversed bounds are accepted instead of raising.
    """
    if a > b:
        a, b = b, a
    r = random.randint(a, b)
    await ctx.send(f"Random number between `{a}` and `{b}`: `{r}`")
@bot.command()
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def magic8ball(ctx):
    """Replies with a random Magic 8-Ball saying after a dramatic pause."""
    msg = await ctx.send("Swishing the magic 8 ball...")
    await ctx.channel.trigger_typing()
    await asyncio.sleep(3)
    await msg.delete()
    sayings = [
        "Yes.",
        "Ask again later.",
        "Not looking good.",
        "Cannot predict now.",
        "It is certain.",
        "Try again.",
        "Without a doubt.",
        "Don't rely on it.",
        "Outlook good.",
        "My reply is no.",
        "Don't count on it.",
        "Yes - definitely.",
        "Signs point to yes.",
        "I believe so.",
        "Nope.",
        "Concentrate and ask later.",
        "Try asking again.",
        "For sure not.",
        "Definitely no."
    ]
    # random.choice replaces the manual math.floor(random.random() * len(...))
    # indexing — same uniform pick, clearer intent.
    response = random.choice(sayings)
    await ctx.message.reply(f"**{response}**")
@bot.command()
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def xkcd(ctx, num = None):
    """Sends a link to xkcd comic `num`, or to a random comic when omitted."""
    max_num = await xkcd_module.get_max()
    if num is None:
        # randint is inclusive on both ends; the previous
        # randrange(1, max_num) could never select the newest comic.
        return await xkcd(ctx, str(random.randint(1, int(max_num))))
    if num.isdigit() and 1 <= int(num) <= int(max_num):
        return await ctx.send(f"https://xkcd.com/{num}")
    else:
        return await ctx.send("Invalid attempted number for xkcd.")
@bot.command()
async def rule(ctx, num):
    """Gets a specified rule by its 1-based number."""
    # Derive the valid range from RULES so both the check and the help
    # message stay correct if the rule list changes length (was a
    # hard-coded 13).
    rule_count = len(RULES)
    if not num.isdigit() or not 1 <= int(num) <= rule_count:
        return await ctx.send(f"Please use a valid rule number, from 1 through {rule_count}. (Ex: `!rule 7`)")
    rule = RULES[int(num) - 1]
    return await ctx.send(f"**Rule {num}:**\n> {rule}")
@bot.command()
async def coach(ctx):
    """Points the caller to the application form for the Coach role."""
    await ctx.send("If you would like to apply for the `Coach` role, please fill out the form here: <https://forms.gle/UBKpWgqCr9Hjw9sa6>.")
@bot.command(aliases=["slow", "sm"])
@commands.check(is_staff)
async def slowmode(ctx, arg: int = None):
    """Staff: sets, toggles, or clears slowmode in the current channel.

    With no argument, toggles between a 10 second slowmode and none;
    with an integer, sets that delay (0 removes slowmode).
    """
    if arg is None:
        if ctx.channel.slowmode_delay == 0:
            await ctx.channel.edit(slowmode_delay=10)
            await ctx.send("Enabled a 10 second slowmode.")
        else:
            await ctx.channel.edit(slowmode_delay=0)
            await ctx.send("Removed slowmode.")
        return
    await ctx.channel.edit(slowmode_delay=arg)
    if arg != 0:
        await ctx.send(f"Enabled a {arg} second slowmode.")
    else:
        await ctx.send("Removed slowmode.")
@bot.command(aliases=["state"])
async def states(ctx, *args):
    """Assigns someone with specific states.

    Toggles state roles for the caller. Multi-word state names may be given
    as separate words; California must be disambiguated (norcal/socal).
    Matching proceeds from most-specific to least: California aliases, then
    three-word states, two-word states, and finally single-word lookups.
    """
    new_args = [str(arg).lower() for arg in args]
    # Fix commas as possible separator
    if len(new_args) == 1:
        new_args = new_args[0].split(",")
    new_args = [re.sub("[;,]", "", arg) for arg in new_args]
    member = ctx.message.author
    states = await get_state_list()
    # Strip the trailing " (XX)" abbreviation from each sheet entry.
    states = [s[:s.rfind(" (")] for s in states]
    triple_word_states = [s for s in states if len(s.split(" ")) > 2]
    double_word_states = [s for s in states if len(s.split(" ")) > 1]
    removed_roles = []
    added_roles = []
    # NOTE(review): this ambiguity check scans the raw `args`, not the
    # comma-split `new_args` — a single "ca,tx" argument bypasses it; confirm.
    for term in ["california", "ca", "cali"]:
        if term in [arg.lower() for arg in args]:
            return await ctx.send("Which California, North or South? Try `!state norcal` or `!state socal`.")
    if len(new_args) < 1:
        return await ctx.send("Sorry, but you need to specify a state (or multiple states) to add/remove.")
    elif len(new_args) > 10:
        return await ctx.send("Sorry, you are attempting to add/remove too many states at once.")
    # Handle the many accepted spellings of North/South California first.
    for string in ["South", "North"]:
        california_list = [f"California ({string})", f"California-{string}", f"California {string}", f"{string}ern California", f"{string} California", f"{string} Cali", f"Cali {string}", f"{string} CA", f"CA {string}"]
        if string == "North":
            california_list.append("NorCal")
        else:
            california_list.append("SoCal")
        for listing in california_list:
            words = listing.split(" ")
            # A listing matches when every one of its words appears in the args.
            all_here = sum(1 for word in words if word.lower() in new_args)
            if all_here == len(words):
                role = discord.utils.get(member.guild.roles, name=f"California ({string})")
                if role in member.roles:
                    await member.remove_roles(role)
                    removed_roles.append(f"California ({string})")
                else:
                    await member.add_roles(role)
                    added_roles.append(f"California ({string})")
                # Consume the matched words so they are not re-matched below.
                for word in words:
                    new_args.remove(word.lower())
    for triple in triple_word_states:
        words = triple.split(" ")
        all_here = 0
        all_here = sum(1 for word in words if word.lower() in new_args)
        if all_here == 3:
            # Word is in args
            role = discord.utils.get(member.guild.roles, name=triple)
            if role in member.roles:
                await member.remove_roles(role)
                removed_roles.append(triple)
            else:
                await member.add_roles(role)
                added_roles.append(triple)
            for word in words:
                new_args.remove(word.lower())
    for double in double_word_states:
        words = double.split(" ")
        all_here = 0
        all_here = sum(1 for word in words if word.lower() in new_args)
        if all_here == 2:
            # Word is in args
            role = discord.utils.get(member.guild.roles, name=double)
            if role in member.roles:
                await member.remove_roles(role)
                removed_roles.append(double)
            else:
                await member.add_roles(role)
                added_roles.append(double)
            for word in words:
                new_args.remove(word.lower())
    # Anything left is treated as a single-word state name or abbreviation.
    for arg in new_args:
        role_name = await lookup_role(arg)
        if role_name == False:
            return await ctx.send(f"Sorry, the {arg} state could not be found. Try again.")
        role = discord.utils.get(member.guild.roles, name=role_name)
        if role in member.roles:
            await member.remove_roles(role)
            removed_roles.append(role_name)
        else:
            await member.add_roles(role)
            added_roles.append(role_name)
    if len(added_roles) > 0 and len(removed_roles) == 0:
        state_res = "Added states " + (' '.join([f'`{arg}`' for arg in added_roles])) + "."
    elif len(removed_roles) > 0 and len(added_roles) == 0:
        state_res = "Removed states " + (' '.join([f'`{arg}`' for arg in removed_roles])) + "."
    else:
        state_res = "Added states " + (' '.join([f'`{arg}`' for arg in added_roles])) + ", and removed states " + (' '.join([f'`{arg}`' for arg in removed_roles])) + "."
    await ctx.send(state_res)
@bot.command()
async def games(ctx):
    """Removes or adds someone to the games channel."""
    member = ctx.message.author
    games_channel = discord.utils.get(member.guild.text_channels, name=CHANNEL_GAMES)
    role = discord.utils.get(member.guild.roles, name=ROLE_GAMES)
    if role not in member.roles:
        await member.add_roles(role)
        await ctx.send(f"You are now in the channel. Come and have fun in {games_channel.mention}! :tada:")
        await games_channel.send(f"Please welcome {member.mention} to the party!!")
    else:
        await member.remove_roles(role)
        await ctx.send("Removed you from the games club... feel free to come back anytime!")
        await games_channel.send(f"{member.mention} left the party.")
@bot.command(aliases=["tags", "t"])
async def tag(ctx, name):
    """Sends the stored tag `name` if the caller's roles permit it.

    Staff can use any tag; other tags are gated by their launch-helper /
    member flags. The invoking message is deleted before the tag is sent.
    """
    member = ctx.message.author
    if len(TAGS) == 0:
        return await ctx.send("Apologies, tags do not appear to be working at the moment. Please try again in one minute.")
    staff = await is_staff(ctx)
    lh_role = discord.utils.get(member.guild.roles, name=ROLE_LH)
    member_role = discord.utils.get(member.guild.roles, name=ROLE_MR)
    matching = [t for t in TAGS if t['name'] == name]
    if not matching:
        return await ctx.send("Tag not found.")
    t = matching[0]
    allowed = staff or (t['launch_helpers'] and lh_role in member.roles) or (t['members'] and member_role in member.roles)
    if not allowed:
        return await ctx.send("Unfortunately, you do not have the permissions for this tag.")
    await ctx.message.delete()
    return await ctx.send(t['text'])
@bot.command()
@commands.check(is_staff)
async def lock(ctx):
    """Locks a channel to Member access.

    Members lose send/react permission; wiki mods, global mods, admins,
    and the bot role keep full access.
    """
    guild = ctx.message.author.guild
    channel = ctx.message.channel
    if channel.category.name in ["beta", "staff", "Pi-Bot"]:
        return await ctx.send("This command is not suitable for this channel because of its category.")
    member_role = discord.utils.get(guild.roles, name=ROLE_MR)
    if channel.category.name == CATEGORY_STATES:
        await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False)
    else:
        await ctx.channel.set_permissions(member_role, add_reactions=False, send_messages=False, read_messages=True)
    # Privileged roles retain full access to the locked channel.
    for role_name in (ROLE_WM, ROLE_GM, ROLE_AD, ROLE_BT):
        privileged_role = discord.utils.get(guild.roles, name=role_name)
        await ctx.channel.set_permissions(privileged_role, add_reactions=True, send_messages=True, read_messages=True)
    await ctx.send("Locked the channel to Member access.")
@bot.command()
@commands.check(is_staff)
async def unlock(ctx):
    """Unlocks a channel to Member access.

    SO/general category channels simply re-sync with their category;
    elsewhere, Member permissions are restored explicitly and privileged
    roles re-granted full access.
    """
    guild = ctx.message.author.guild
    channel = ctx.message.channel
    if channel.category.name in ["beta", "staff", "Pi-Bot"]:
        return await ctx.send("This command is not suitable for this channel because of its category.")
    if channel.category.name == CATEGORY_SO or channel.category.name == CATEGORY_GENERAL:
        await ctx.send("Synced permissions with channel category.")
        return await channel.edit(sync_permissions=True)
    member_role = discord.utils.get(guild.roles, name=ROLE_MR)
    if channel.category.name == CATEGORY_STATES:
        await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True)
    else:
        await ctx.channel.set_permissions(member_role, add_reactions=True, send_messages=True, read_messages=True)
    for role_name in (ROLE_WM, ROLE_GM, ROLE_AD, ROLE_BT):
        privileged_role = discord.utils.get(guild.roles, name=role_name)
        await ctx.channel.set_permissions(privileged_role, add_reactions=True, send_messages=True, read_messages=True)
    await ctx.send("Unlocked the channel to Member access. Please check if permissions need to be synced.")
@bot.command()
async def info(ctx):
    """Gets information about the Discord server.

    Builds an embed of general server statistics; staff members invoking it
    from a staff-category channel receive additional admin-only fields.
    """
    server = ctx.message.guild
    # Basic server metadata.
    name = server.name
    owner = server.owner
    creation_date = server.created_at
    emoji_count = len(server.emojis)
    icon = server.icon_url_as(format=None, static_format='jpeg')
    animated_icon = server.is_icon_animated()
    iden = server.id
    banner = server.banner_url
    desc = server.description
    mfa_level = server.mfa_level
    verification_level = server.verification_level
    content_filter = server.explicit_content_filter
    default_notifs = server.default_notifications
    features = server.features
    splash = server.splash_url
    # Nitro boost information.
    premium_level = server.premium_tier
    boosts = server.premium_subscription_count
    # Channel statistics.
    channel_count = len(server.channels)
    text_channel_count = len(server.text_channels)
    voice_channel_count = len(server.voice_channels)
    category_count = len(server.categories)
    system_channel = server.system_channel
    # Special channels may be None; render as a mention only when they exist.
    if type(system_channel) == discord.TextChannel: system_channel = system_channel.mention
    rules_channel = server.rules_channel
    if type(rules_channel) == discord.TextChannel: rules_channel = rules_channel.mention
    public_updates_channel = server.public_updates_channel
    if type(public_updates_channel) == discord.TextChannel: public_updates_channel = public_updates_channel.mention
    emoji_limit = server.emoji_limit
    bitrate_limit = server.bitrate_limit
    filesize_limit = round(server.filesize_limit/1000000, 3)  # bytes -> MB
    boosters = server.premium_subscribers
    for i, b in enumerate(boosters):
        # convert user objects to mentions
        boosters[i] = b.mention
    boosters = ", ".join(boosters)
    print(boosters)  # NOTE(review): looks like a leftover debug print -- consider removing
    role_count = len(server.roles)
    member_count = len(server.members)
    max_members = server.max_members
    discovery_splash_url = server.discovery_splash_url
    # How much of each hard Discord limit is currently in use (500 channels,
    # 250 roles are platform constants).
    member_percentage = round(member_count/max_members * 100, 3)
    emoji_percentage = round(emoji_count/emoji_limit * 100, 3)
    channel_percentage = round(channel_count/500 * 100, 3)
    role_percenatege = round(role_count/250 * 100, 3)
    staff_member = await is_staff(ctx)
    fields = [
        {
            "name": "Basic Information",
            "value": (
                f"**Creation Date:** {creation_date}\n" +
                f"**ID:** {iden}\n" +
                f"**Animated Icon:** {animated_icon}\n" +
                f"**Banner URL:** {banner}\n" +
                f"**Splash URL:** {splash}\n" +
                f"**Discovery Splash URL:** {discovery_splash_url}"
            ),
            "inline": False
        },
        {
            "name": "Nitro Information",
            "value": (
                f"**Nitro Level:** {premium_level} ({boosts} individual boosts)\n" +
                f"**Boosters:** {boosters}"
            ),
            "inline": False
        }
    ]
    # Sensitive/administrative details only for staff in a staff channel.
    if staff_member and ctx.channel.category.name == CATEGORY_STAFF:
        fields.extend(
            [{
                "name": "Staff Information",
                "value": (
                    f"**Owner:** {owner}\n" +
                    f"**MFA Level:** {mfa_level}\n" +
                    f"**Verification Level:** {verification_level}\n" +
                    f"**Content Filter:** {content_filter}\n" +
                    f"**Default Notifications:** {default_notifs}\n" +
                    f"**Features:** {features}\n" +
                    f"**Bitrate Limit:** {bitrate_limit}\n" +
                    f"**Filesize Limit:** {filesize_limit} MB"
                ),
                "inline": False
            },
            {
                "name": "Channels",
                "value": (
                    f"**Public Updates Channel:** {public_updates_channel}\n" +
                    f"**System Channel:** {system_channel}\n" +
                    f"**Rules Channel:** {rules_channel}\n" +
                    f"**Text Channel Count:** {text_channel_count}\n" +
                    f"**Voice Channel Count:** {voice_channel_count}\n" +
                    f"**Category Count:** {category_count}\n"
                ),
                "inline": False
            },
            {
                "name": "Limits",
                "value": (
                    f"**Channels:** *{channel_percentage}%* ({channel_count}/500 channels)\n" +
                    f"**Members:** *{member_percentage}%* ({member_count}/{max_members} members)\n" +
                    f"**Emoji:** *{emoji_percentage}%* ({emoji_count}/{emoji_limit} emojis)\n" +
                    f"**Roles:** *{role_percenatege}%* ({role_count}/250 roles)"
                ),
                "inline": False
            }
        ])
    embed = assemble_embed(
        title=f"Information for `{name}`",
        desc=f"**Description:** {desc}",
        thumbnailUrl=icon,
        fields=fields
    )
    await ctx.send(embed=embed)
@bot.command(aliases=["r"])
async def report(ctx, *args):
    """Creates a report that is sent to staff members.

    The report text is taken from the command arguments; staff resolve the
    report via the check/cross reactions added to the posted embed.
    """
    # Fix: `args[0]` raised IndexError when the command was invoked with no
    # message at all; respond with usage guidance instead of crashing.
    if len(args) == 0:
        return await ctx.send("Please include a message with your report, like `!report <message>`.")
    server = bot.get_guild(SERVER_ID)
    reports_channel = discord.utils.get(server.text_channels, name=CHANNEL_REPORTS)
    # Joining covers both the single-argument and multi-argument cases.
    message = ' '.join(args)
    poster = str(ctx.message.author)
    embed = assemble_embed(
        title=f"Report Received (using `!report`)",
        webcolor="red",
        authorName = poster,
        authorIcon = ctx.message.author.avatar_url_as(format="jpg"),
        fields = [{
            "name": "Message",
            "value": message,
            "inline": False
        }]
    )
    message = await reports_channel.send(embed=embed)
    REPORT_IDS.append(message.id)
    # ✅ / ❌ reactions for staff to approve or dismiss the report.
    await message.add_reaction("\U00002705")
    await message.add_reaction("\U0000274C")
    await ctx.send("Thanks, report created.")
# Meant for Pi-Bot only
async def auto_report(reason, color, message):
    """Allows Pi-Bot to generate a report by himself."""
    server = bot.get_guild(SERVER_ID)
    reports_channel = discord.utils.get(server.text_channels, name=CHANNEL_REPORTS)
    report_embed = assemble_embed(
        title=f"{reason} (message from Pi-Bot)",
        webcolor=color,
        fields = [{
            "name": "Message",
            "value": message,
            "inline": False
        }]
    )
    sent = await reports_channel.send(embed=report_embed)
    REPORT_IDS.append(sent.id)
    # Check mark / cross mark reactions for staff to resolve the report.
    for emoji in ("\U00002705", "\U0000274C"):
        await sent.add_reaction(emoji)
@bot.command()
async def graphpage(ctx, title, temp_format, table_index, div, place_col=0):
    """Graphs the point totals from a wiki results page.

    :param title: wiki page title to read
    :param temp_format: "y"/"yes"/"true" when the page stores results in a
        "State results box" template rather than a plain table
    :param table_index: which template/table on the page to use (0-based)
    :param div: division letter, used only for labeling the output
    :param place_col: column holding point values (plain-table mode only)
    """
    temp = temp_format.lower() in ["y", "yes", "true"]
    # Echo the parsed inputs back so the user can spot mistakes.
    await ctx.send(
        "*Inputs read:*\n" +
        f"Page title: `{title}`\n" +
        f"Template: `{temp}`\n" +
        f"Table index (staring at 0): `{table_index}`\n" +
        f"Division: `{div}`\n" +
        (f"Column with point values: `{place_col}`" if not temp else "")
    )
    points = []
    table_index = int(table_index)
    place_col = int(place_col)
    if temp:
        template = await get_page_tables(title, True)
        template = [t for t in template if t.normal_name() == "State results box"]
        template = template[table_index]
        ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4]) # Thanks https://codegolf.stackexchange.com/questions/4707/outputting-ordinal-numbers-1st-2nd-3rd#answer-4712
        # Collect "1st_points", "2nd_points", ... template arguments in order.
        for i in range(100):
            if template.has_arg(ordinal(i) + "_points"):
                points.append(template.get_arg(ordinal(i) + "_points").value.replace("\n", ""))
    else:
        tables = await get_page_tables(title, False)
        tables = tables[table_index]
        data = tables.data()
        points = [r[place_col] for r in data]
        del points[0]  # drop the header row
    points = [int(p) for p in points]
    await _graph(points, title + " - Division " + div, title + "Div" + div + ".svg")
    with open(title + "Div" + div + ".svg") as f:
        pic = discord.File(f)
        await ctx.send(file=pic)
    return await ctx.send("Attempted to graph.")
@bot.command()
async def graphscilympiad(ctx, url, title):
    """Graphs the placement points from a Scilympiad results page."""
    points = await get_points(url)
    filename = "graph1.svg"
    await _graph(points, title, filename)
    # Send the rendered SVG back to the channel.
    with open(filename) as svg_file:
        await ctx.send(file=discord.File(svg_file))
    return await ctx.send("Attempted to graph.")
async def _graph(points, graph_title, title):
    """Plots `points` by place with a linear trend line and saves to `title`.

    :param points: point totals, one per place (1-indexed on the x axis)
    :param graph_title: chart title text
    :param title: output filename for the saved figure
    """
    places = range(1, len(points) + 1)
    plt.plot(places, points, marker='o', color='#2E66B6')
    # Fit a degree-1 polynomial and draw it as a light-gray dashed trend line.
    trend = np.poly1d(np.polyfit(places, points, 1))
    plt.plot(places, trend(places), "--", color='#CCCCCC')
    plt.xlabel("Place")
    plt.ylabel("Points")
    plt.title(graph_title)
    plt.savefig(title)
    plt.close()
    # Brief pause so the file is flushed before callers read it back.
    await asyncio.sleep(2)
@bot.command()
async def resultstemplate(ctx, url):
    """Generates a wiki results template from a Scilympiad results URL."""
    if "scilympiad.com" not in url:
        return await ctx.send("The URL must be a Scilympiad results link.")
    await ctx.send("**Warning:** Because Scilympiad is constantly evolving, this command may break. Please preview the template on the wiki before saving! If this command breaks, please DM pepperonipi or open an issue on GitHub. Thanks!")
    template_text = await make_results_template(url)
    # Write the generated template to disk, then attach it to the reply.
    with open("resultstemplate.txt", "w+") as template_file:
        template_file.write(template_text)
    attachment = discord.File("resultstemplate.txt", filename="resultstemplate.txt")
    await ctx.send(file=attachment)
@bot.command()
async def ping(ctx, command=None, *args):
    """Controls Pi-Bot's ping interface.

    Subcommands: add/new (word pings), addregex/newregex/addregexp/newregexp
    (raw RegExp pings), delete/remove, list/all, and test/try.
    """
    if command is None:
        return await ctx.send("Uh, I need a command you want to run.")
    member = ctx.message.author.id
    if len(args) > 8:
        return await ctx.send("You are giving me too many pings at once! Please separate your requests over multiple commands.")
    # These subcommands all require at least one word/expression to act on.
    if command.lower() in ["add", "new", "addregex", "newregex", "addregexp", "newregexp", "delete", "remove", "test", "try"] and len(args) < 1:
        return await ctx.send(f"In order to {command} a ping, you must supply a regular expression or word.")
    if command.lower() in ["add", "new", "addregex", "newregex", "addregexp", "newregexp"]:
        # Check to see if author in ping info already
        ignored_list = []
        if any([True for u in PING_INFO if u['id'] == member]):
            # yes -- append to the author's existing ping list
            user = next((u for u in PING_INFO if u['id'] == member), None)
            pings = user['pings']
            for arg in args:
                try:
                    # Validate the expression by compiling/running it once.
                    re.findall(arg, "test phrase")
                except:
                    await ctx.send(f"Ignoring adding the `{arg}` ping because it uses illegal characters.")
                    ignored_list.append(arg)
                    continue
                # Skip duplicates in raw, grouped, or word-boundary form.
                if f"({arg})" in pings or f"\\b({arg})\\b" in pings or arg in pings:
                    await ctx.send(f"Ignoring adding the `{arg}` ping because you already have a ping currently set as that.")
                    ignored_list.append(arg)
                else:
                    if command.lower() in ["add", "new"]:
                        # Word pings are escaped and wrapped in word boundaries.
                        print(f"adding word: {re.escape(arg)}")
                        pings.append(fr"\b({re.escape(arg)})\b")
                    else:
                        # RegExp pings are stored as-is, just wrapped in a group.
                        print(f"adding regexp: {arg}")
                        pings.append(fr"({arg})")
        else:
            # nope -- create a fresh record for this author
            # NOTE(review): this branch skips the validity/duplicate checks
            # performed above -- confirm whether that is intentional.
            if command.lower() in ["add", "new"]:
                PING_INFO.append({
                    "id": member,
                    "pings": [fr"\b({re.escape(arg)})\b" for arg in args]
                })
            else:
                PING_INFO.append({
                    "id": member,
                    "pings": [fr"({arg})" for arg in args]
                })
        return await ctx.send(f"Alrighty... I've got you all set up for the following pings: " + (" ".join([f"`{arg}`" for arg in args if arg not in ignored_list])))
    elif command.lower() in ["delete", "remove"]:
        user = next((u for u in PING_INFO if u['id'] == member), None)
        if user == None or len(user['pings']) == 0:
            return await ctx.send("You have no registered pings.")
        for arg in args:
            if arg == "all":
                user['pings'] = []
                return await ctx.send("I removed all of your pings.")
            # Try the raw stored form, then the word form, then the grouped form.
            if arg in user['pings']:
                user['pings'].remove(arg)
                await ctx.send(f"I removed the `{arg}` RegExp ping you were referencing.")
            elif f"\\b({arg})\\b" in user['pings']:
                user['pings'].remove(f"\\b({arg})\\b")
                await ctx.send(f"I removed the `{arg}` word ping you were referencing.")
            elif f"({arg})" in user['pings']:
                user['pings'].remove(f"({arg})")
                await ctx.send(f"I removed the `{arg}` RegExp ping you were referencing.")
            else:
                return await ctx.send(f"I can't find my phone or the **`{arg}`** ping you are referencing, sorry. Try another ping, or see all of your pings with `!ping list`.")
        return await ctx.send("I removed all pings you requested.")
    elif command.lower() in ["list", "all"]:
        user = next((u for u in PING_INFO if u['id'] == member), None)
        if user == None or len(user['pings']) == 0:
            return await ctx.send("You have no registered pings.")
        else:
            pings = user['pings']
            regex_pings = []
            word_pings = []
            # Word pings begin with the \b boundary marker; all others are RegExp.
            for ping in pings:
                if ping[:2] == "\\b":
                    word_pings.append(ping)
                else:
                    regex_pings.append(ping)
            if len(regex_pings) > 0:
                await ctx.send("Your RegEx pings are: " + ", ".join([f"`{regex}`" for regex in regex_pings]))
            if len(word_pings) > 0:
                # Slice off the surrounding \b( ... )\b wrapper for display.
                await ctx.send("Your word pings are: " + ", ".join([f"`{word[3:-3]}`" for word in word_pings]))
    elif command.lower() in ["test", "try"]:
        user = next((u for u in PING_INFO if u['id'] == member), None)
        user_pings = user['pings']
        matched = False
        # Run every stored ping against every supplied test phrase.
        for arg in args:
            for ping in user_pings:
                if len(re.findall(ping, arg, re.I)) > 0:
                    await ctx.send(f"Your ping `{ping}` matches `{arg}`.")
                    matched = True
        if not matched:
            await ctx.send("Your test matched no pings of yours.")
    else:
        return await ctx.send("Sorry, I can't find that command.")
@bot.command(aliases=["donotdisturb"])
async def dnd(ctx):
    """Toggles Do-Not-Disturb mode for the author's ping notifications."""
    author_id = ctx.message.author.id
    user = next((u for u in PING_INFO if u['id'] == author_id), None)
    if user is None:
        # DND is meaningless without any registered pings.
        return await ctx.send("You can't enter DND mode without any pings!")
    # Flip the flag; a missing key is treated the same as "disabled".
    if user.get('dnd', False):
        user['dnd'] = False
        return await ctx.send("Disabled DND mode for pings.")
    user['dnd'] = True
    return await ctx.send("Enabled DND mode for pings.")
async def ping_pm(user_id, pinger, ping_exp, channel, content, jump_url):
    """Allows Pi-Bot to PM a user about a ping.

    :param user_id: Discord ID of the user to notify
    :param pinger: display name of whoever triggered the ping
    :param ping_exp: the stored ping expression that matched
    :param channel: name of the channel the match occurred in
    :param content: the matching message's content
    :param jump_url: deep link to the matching message
    """
    user_to_send = bot.get_user(user_id)
    try:
        # Bold every occurrence of the matched expression in the quoted content.
        content = re.sub(rf'{ping_exp}', r'**\1**', content, flags=re.I)
    except Exception as e:
        # Some user-supplied expressions can't be re-used in re.sub; send unbolded.
        print(f"Could not bold ping due to unfavored RegEx. Error: {e}")
    # Strip the \b( ... )\b wrapper so the user sees the bare term.
    ping_exp = ping_exp.replace(r"\b(", "").replace(r")\b", "")
    warning = f"\n\nIf you don't want this ping anymore, in `#bot-spam` on the server, send `!ping remove {ping_exp}`"
    embed = assemble_embed(
        title=":bellhop: Ping Alert!",
        desc=(f"Looks like `{pinger}` pinged a ping expression of yours in the Scioly.org Discord Server!" + warning),
        fields=[
            {"name": "Expression Matched", "value": f" `{ping_exp}`", "inline": "True"},
            {"name": "Jump To Message", "value": f"[Click here!]({jump_url})", "inline": "True"},
            {"name": "Channel", "value": f"`#{channel}`", "inline": "True"},
            {"name": "Content", "value": content, "inline": "False"}
        ],
        hexcolor="#2E66B6"
    )
    await user_to_send.send(embed=embed)
@bot.command(aliases=["doggobomb"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def dogbomb(ctx, member:str=False):
    """Dog bombs someone!"""
    # Consistency/safety fix: the sibling commands (!trout, !cookie, !hersheybar,
    # !icecream) all reject @everyone/@here/role mentions smuggled through the
    # echoed `member` text; this command was missing the same guard.
    if await sanitize_mention(member) == False:
        return await ctx.send("Woah... looks like you're trying to be a little sneaky with what you're telling me to do. You can't ping roles or everyone.")
    if member == False:
        return await ctx.send("Tell me who you want to dog bomb!! :dog:")
    doggo = await get_doggo()
    await ctx.send(doggo)
    await ctx.send(f"{member}, <@{ctx.message.author.id}> dog bombed you!!")
@bot.command()
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def shibabomb(ctx, member:str=False):
    """Shiba bombs a user!"""
    # Consistency/safety fix: apply the same mention-sanitizing guard used by
    # !trout, !cookie, !hersheybar, and !icecream, since `member` is echoed back.
    if await sanitize_mention(member) == False:
        return await ctx.send("Woah... looks like you're trying to be a little sneaky with what you're telling me to do. You can't ping roles or everyone.")
    if member == False:
        return await ctx.send("Tell me who you want to shiba bomb!! :dog:")
    doggo = await get_shiba()
    await ctx.send(doggo)
    await ctx.send(f"{member}, <@{ctx.message.author.id}> shiba bombed you!!")
@bot.command()
async def me(ctx, *args):
    """Replaces the good ol' /me"""
    # Delete the invoking message so only the italicized action remains.
    await ctx.message.delete()
    if args:
        await ctx.send(f"*{ctx.message.author.mention} " + " ".join(args) + "*")
    else:
        await ctx.send(f"*{ctx.message.author.mention} " + "is cool!*")
@bot.command(aliases=["list"])
async def list_command(ctx, cmd:str=False):
    """Lists all of the commands a user may access.

    `!list` alone sends the quick list; `!list all`/`commands` sends the full
    paginated list; `!list states` and `!list events` send those catalogs.
    """
    if cmd == False: # for quick list of commands
        ls = await get_quick_list(ctx)
        # Fix: return explicitly instead of relying on the later checks failing.
        return await ctx.send(embed=ls)
    if cmd == "all" or cmd == "commands":
        ls = await get_list(ctx.message.author, 1)
        sent_list = await ctx.send(embed=ls)
        # Pagination controls for the full command list.
        await sent_list.add_reaction(EMOJI_FAST_REVERSE)
        await sent_list.add_reaction(EMOJI_LEFT_ARROW)
        await sent_list.add_reaction(EMOJI_RIGHT_ARROW)
        await sent_list.add_reaction(EMOJI_FAST_FORWARD)
    elif cmd == "states":
        states_list = await get_state_list()
        # Fix: renamed local from `list`, which shadowed the builtin.
        states_embed = assemble_embed(
            title="List of all states",
            desc="\n".join([f"`{state}`" for state in states_list])
        )
        await ctx.send(embed=states_embed)
    elif cmd == "events":
        events_list = [r['eventName'] for r in EVENT_INFO]
        events_embed = assemble_embed(
            title="List of all events",
            desc="\n".join([f"`{name}`" for name in events_list])
        )
        await ctx.send(embed=events_embed)
@bot.command()
async def school(ctx, title, state):
    """Looks up schools matching `title` in `state` and returns their wikicode.

    Caps the response at 20 matches to avoid flooding the channel.
    """
    lists = await get_school_listing(title, state)
    fields = []
    if len(lists) > 20:
        return await ctx.send(f"Woah! Your query returned `{len(lists)}` schools, which is too much to send at once. Try narrowing your query!")
    for l in lists:
        fields.append({'name': l['name'], 'value': f"```{l['wikicode']}```", 'inline': "False"})
    embed = assemble_embed(
        title="School Data",
        # Fix: corrected typo "contribtuing" -> "contributing" in the message.
        desc=f"Your query for `{title}` in `{state}` returned `{len(lists)}` results. Thanks for contributing to the wiki!",
        fields=fields,
        hexcolor="#2E66B6"
    )
    await ctx.send(embed=embed)
async def censor(message):
    """Constructs Pi-Bot's censor.

    Deletes nothing itself; it reposts a cleaned copy of `message` through a
    temporary webhook so the text appears under the original author's name
    and avatar, then removes the webhook.
    """
    channel = message.channel
    ava = message.author.avatar_url
    wh = await channel.create_webhook(name="Censor (Automated)")
    content = message.content
    # Replace censored words (whole-word match) and censored emojis.
    for word in CENSORED_WORDS:
        content = re.sub(fr'\b({word})\b', "<censored>", content, flags=re.IGNORECASE)
    for word in CENSORED_EMOJIS:
        content = re.sub(fr"{word}", "<censored>", content, flags=re.I)
    # Prefer the server nickname; fall back to the account name.
    author = message.author.nick
    if author == None:
        author = message.author.name
    # Make sure pinging through @everyone, @here, or any role can not happen
    mention_perms = discord.AllowedMentions(everyone=False, users=True, roles=False)
    await wh.send(content, username=(author + " (auto-censor)"), avatar_url=ava, allowed_mentions=mention_perms)
    await wh.delete()
@bot.command()
@commands.check(is_staff)
async def kick(ctx, user:discord.Member, reason:str=False):
    """Kicks a user for the specified reason.

    Staff-only; a reason is mandatory, and the bot refuses to kick itself.
    """
    if reason == False:
        return await ctx.send("Please specify a reason why you want to kick this user!")
    # Self-preservation guard.
    if user.id in PI_BOT_IDS:
        return await ctx.send("Hey! You can't kick me!!")
    await user.kick(reason=reason)
    await ctx.send("The user was kicked.")
@bot.command()
@commands.check(is_staff)
async def met(ctx):
    """Runs Pi-Bot's Most Edits Table.

    Charts the top 10 wiki editors of the past week as a bar graph and posts
    it with a leaderboard embed.
    """
    msg1 = await ctx.send("Attemping to run the Most Edits Table.")
    res = await run_table()
    print(res)  # NOTE(review): appears to be a debug print -- consider removing
    names = [v['name'] for v in res]
    data = [v['increase'] for v in res]
    # Only the top 10 editors are charted.
    names = names[:10]
    data = data[:10]
    fig = plt.figure()
    plt.bar(names, data, color="#2E66B6")
    plt.xlabel("Usernames")
    plt.xticks(rotation=90)
    plt.ylabel("Edits past week")
    plt.title("Top wiki editors for the past week!")
    plt.tight_layout()
    plt.savefig("met.png")
    plt.close()
    await msg1.delete()
    msg2 = await ctx.send("Generating graph...")
    # Brief pause so the image file is fully written before it is attached.
    await asyncio.sleep(3)
    await msg2.delete()
    file = discord.File("met.png", filename="met.png")
    embed = assemble_embed(
        title="**Top wiki editors for the past week!**",
        desc=("Check out the past week's top wiki editors! Thank you all for your contributions to the wiki! :heart:\n\n" +
        f"`1st` - **{names[0]}** ({data[0]} edits)\n" +
        f"`2nd` - **{names[1]}** ({data[1]} edits)\n" +
        f"`3rd` - **{names[2]}** ({data[2]} edits)\n" +
        f"`4th` - **{names[3]}** ({data[3]} edits)\n" +
        f"`5th` - **{names[4]}** ({data[4]} edits)"),
        imageUrl="attachment://met.png",
    )
    await ctx.send(file=file, embed=embed)
@bot.command()
@commands.check(is_staff)
async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):
    """Helps to create an embed to be sent to a channel.

    `jsonInput` is a JSON object whose optional keys map onto assemble_embed
    parameters; missing keys default to "" (hexColor defaults to the brand color).
    """
    jso = json.loads(jsonInput)
    title = jso.get('title', "")
    desc = jso.get('description', "")
    titleUrl = jso.get('titleUrl', "")
    hexcolor = jso.get('hexColor', "#2E66B6")
    webcolor = jso.get('webColor', "")
    thumbnailUrl = jso.get('thumbnailUrl', "")
    authorName = jso.get('authorName', "")
    authorUrl = jso.get('authorUrl', "")
    authorIcon = jso.get('authorIcon', "")
    if 'author' in jso:
        # An explicit `author` key overrides with the invoker's identity.
        authorName = ctx.message.author.name
        authorIcon = ctx.message.author.avatar_url_as(format="jpg")
    fields = jso.get('fields', "")
    footerText = jso.get('footerText', "")
    footerUrl = jso.get('footerUrl', "")
    imageUrl = jso.get('imageUrl', "")
    embed = assemble_embed(
        title=title,
        desc=desc,
        titleUrl=titleUrl,
        hexcolor=hexcolor,
        webcolor=webcolor,
        thumbnailUrl=thumbnailUrl,
        authorName=authorName,
        authorUrl=authorUrl,
        authorIcon=authorIcon,
        fields=fields,
        footerText=footerText,
        footerUrl=footerUrl,
        imageUrl=imageUrl
    )
    await channel.send(embed=embed)
@bot.command(aliases=["event"])
async def events(ctx, *args):
    """Adds or removes event roles from a user.

    Accepts event names (including multi-word names) and abbreviations,
    space- or comma-separated; each matched event role is toggled.
    """
    if len(args) < 1:
        return await ctx.send("You need to specify at least one event to add/remove!")
    elif len(args) > 10:
        return await ctx.send("Woah, that's a lot for me to handle at once. Please separate your requests over multiple commands.")
    member = ctx.message.author
    new_args = [str(arg).lower() for arg in args]
    # Fix commas as possible separator
    if len(new_args) == 1:
        new_args = new_args[0].split(",")
    new_args = [re.sub("[;,]", "", arg) for arg in new_args]
    event_info = EVENT_INFO
    event_names = []
    removed_roles = []
    added_roles = []
    could_not_handle = []
    multi_word_events = []
    if type(EVENT_INFO) == int:
        # When the bot starts up, EVENT_INFO is initialized to 0 before receiving the data from the sheet a few seconds later. This lets the user know this.
        return await ctx.send("Apologies... refreshing data currently. Try again in a few seconds.")
    # First pass: match multi-word event names longest-first, consuming their
    # words from new_args so the single-word pass below doesn't re-match them.
    for i in range(7, 1, -1):
        # Supports adding 7-word to 2-word long events
        multi_word_events += [e['eventName'] for e in event_info if len(e['eventName'].split(" ")) == i]
        for event in multi_word_events:
            words = event.split(" ")
            all_here = 0
            all_here = sum(1 for word in words if word.lower() in new_args)
            # Only a full match of every word (for this length i) toggles the role.
            if all_here == i:
                # Word is in args
                role = discord.utils.get(member.guild.roles, name=event)
                if role in member.roles:
                    await member.remove_roles(role)
                    removed_roles.append(event)
                else:
                    await member.add_roles(role)
                    added_roles.append(event)
                for word in words:
                    new_args.remove(word.lower())
    # Second pass: resolve remaining single tokens via abbreviations or exact names.
    for arg in new_args:
        found_event = False
        for event in event_info:
            aliases = [abbr.lower() for abbr in event['event_abbreviations']]
            if arg.lower() in aliases or arg.lower() == event['eventName'].lower():
                event_names.append(event['eventName'])
                found_event = True
                break
        if not found_event:
            could_not_handle.append(arg)
    for event in event_names:
        role = discord.utils.get(member.guild.roles, name=event)
        if role in member.roles:
            await member.remove_roles(role)
            removed_roles.append(event)
        else:
            await member.add_roles(role)
            added_roles.append(event)
    # Summarize what was added, removed, and unrecognized.
    if len(added_roles) > 0 and len(removed_roles) == 0:
        event_res = "Added events " + (' '.join([f'`{arg}`' for arg in added_roles])) + ((", and could not handle: " + " ".join([f"`{arg}`" for arg in could_not_handle])) if len(could_not_handle) else "") + "."
    elif len(removed_roles) > 0 and len(added_roles) == 0:
        event_res = "Removed events " + (' '.join([f'`{arg}`' for arg in removed_roles])) + ((", and could not handle: " + " ".join([f"`{arg}`" for arg in could_not_handle])) if len(could_not_handle) else "") + "."
    else:
        event_res = "Added events " + (' '.join([f'`{arg}`' for arg in added_roles])) + ", " + ("and " if not len(could_not_handle) else "") + "removed events " + (' '.join([f'`{arg}`' for arg in removed_roles])) + ((", and could not handle: " + " ".join([f"`{arg}`" for arg in could_not_handle])) if len(could_not_handle) else "") + "."
    await ctx.send(event_res)
async def get_words():
    """Gets the censor list.

    Refreshes the module-level CENSORED_WORDS list from the censor data source.
    """
    global CENSORED_WORDS
    CENSORED_WORDS = get_censor()
@bot.command(aliases=["man"])
async def help(ctx, command:str=None):
    """Allows a user to request help for a command."""
    if command is not None:
        # Specific command requested: delegate to the help builder.
        help_embed = await get_help(ctx, command)
        return await ctx.send(embed=help_embed)
    # No command given: send the generic introduction embed.
    intro = assemble_embed(
        title="Looking for help?",
        desc=("Hey there, I'm a resident bot of Scioly.org!\n\n" +
        "On Discord, you can send me commands using `!` before the command name, and I will process it to help you! " +
        "For example, `!states`, `!events`, and `!fish` are all valid commands that can be used!\n\n" +
        "If you want to see some commands that you can use on me, just type `!list`! " +
        "If you need more help, please feel free to reach out to a staff member!")
    )
    return await ctx.send(embed=intro)
@bot.command(aliases=["feedbear"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def fish(ctx):
    """Gives a fish to bear.

    Random outcomes: ~10% super fish (+10), ~80% normal (+1), ~8% no fish,
    ~2% square-root disaster. A running gag skips the value 69 entirely.
    """
    global fish_now
    r = random.random()
    # Cap the counter: once it exceeds 1500 digits, square-root it back down.
    if len(str(fish_now)) > 1500:
        fish_now = round(pow(fish_now, 0.5))
        if fish_now == 69: fish_now = 70
        return await ctx.send("Woah! Bear's fish is a little too high, so it unfortunately has to be square rooted.")
    if r > 0.9:
        fish_now += 10
        if fish_now == 69: fish_now = 70
        return await ctx.send(f"Wow, you gave bear a super fish! Added 10 fish! Bear now has {fish_now} fish!")
    if r > 0.1:
        fish_now += 1
        if fish_now == 69:
            # Landing exactly on 69 bumps to 70, so the net gain is two fish.
            fish_now = 70
            return await ctx.send(f"You feed bear two fish. Bear now has {fish_now} fish!")
        else:
            return await ctx.send(f"You feed bear one fish. Bear now has {fish_now} fish!")
    if r > 0.02:
        fish_now += 0
        return await ctx.send(f"You can't find any fish... and thus can't feed bear. Bear still has {fish_now} fish.")
    else:
        # ~2% disaster: the fish count gets square rooted.
        fish_now = round(pow(fish_now, 0.5))
        if fish_now == 69: fish_now = 70
        return await ctx.send(f":sob:\n:sob:\n:sob:\nAww, bear's fish was accidentally square root'ed. Bear now has {fish_now} fish. \n:sob:\n:sob:\n:sob:")
@bot.command(aliases=["badbear"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def stealfish(ctx):
    """Attempts to steal fish from bear, with random rewards or punishments."""
    global fish_now
    member = ctx.message.author
    r = random.random()
    if member.id in STEALFISH_BAN:
        return await ctx.send("Hey! You've been banned from stealing fish for now.")
    if r >= 0.75:
        # ~25% chance: successfully steal between 25% and 50% of bear's fish.
        ratio = r - 0.5
        fish_now = round(fish_now * (1 - ratio))
        per = round(ratio * 100)
        return await ctx.send(f"You stole {per}% of bear's fish!")
    if r >= 0.416:
        # ~33% chance: 1-hour ban, lifted by a scheduled cron entry.
        parsed = dateparser.parse("1 hour", settings={"PREFER_DATES_FROM": "future"})
        STEALFISH_BAN.append(member.id)
        CRON_LIST.append({"date": parsed, "do": f"unstealfishban {member.id}"})
        return await ctx.send(f"Sorry {member.mention}, but it looks like you're going to be banned from using this command for 1 hour!")
    if r >= 0.25:
        # ~17% chance: 1-day ban.
        parsed = dateparser.parse("1 day", settings={"PREFER_DATES_FROM": "future"})
        STEALFISH_BAN.append(member.id)
        CRON_LIST.append({"date": parsed, "do": f"unstealfishban {member.id}"})
        return await ctx.send(f"Sorry {member.mention}, but it looks like you're going to be banned from using this command for 1 day!")
    if r >= 0.01:
        return await ctx.send("Hmm, nothing happened. *crickets*")
    else:
        # ~1% chance: permanent ban (no cron entry, so it lasts until restart/release).
        STEALFISH_BAN.append(member.id)
        return await ctx.send("You are banned from using `!stealfish` until the next version of Pi-Bot is released.")
@bot.command(aliases=["slap", "trouts", "slaps", "troutslaps"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def trout(ctx, member:str=False):
    """Slaps the given user (or the author) with a giant trout."""
    # Reject smuggled @everyone/@here/role mentions before echoing `member`.
    if await sanitize_mention(member) == False:
        return await ctx.send("Woah... looks like you're trying to be a little sneaky with what you're telling me to do. Not so fast!")
    if member != False:
        await ctx.send(f"{ctx.message.author.mention} slaps {member} with a giant trout!")
    else:
        await ctx.send(f"{ctx.message.author.mention} trout slaps themselves!")
    await ctx.send("http://gph.is/1URFXN9")
@bot.command(aliases=["givecookie"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def cookie(ctx, member:str=False):
    """Gives a cookie to the given user (or the author)."""
    # Reject smuggled @everyone/@here/role mentions before echoing `member`.
    if await sanitize_mention(member) == False:
        return await ctx.send("Woah... looks like you're trying to be a little sneaky with what you're telling me to do. You can't ping roles or everyone.")
    if member != False:
        await ctx.send(f"{ctx.message.author.mention} gives {member} a cookie!")
    else:
        await ctx.send(f"{ctx.message.author.mention} gives themselves a cookie.")
    await ctx.send("http://gph.is/1UOaITh")
@bot.command()
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def treat(ctx):
    """Gives bernard one treat."""
    for reply in ("You give bernard one treat!", "http://gph.is/11nJAH5"):
        await ctx.send(reply)
@bot.command(aliases=["givehershey", "hershey"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def hersheybar(ctx, member:str=False):
    """Gives a Hershey bar to the given user (or the author)."""
    # Reject smuggled @everyone/@here/role mentions before echoing `member`.
    if await sanitize_mention(member) == False:
        return await ctx.send("Woah... looks like you're trying to be a little sneaky with what you're telling me to do. You can't ping roles or everyone.")
    if member != False:
        await ctx.send(f"{ctx.message.author.mention} gives {member} a Hershey bar!")
    else:
        await ctx.send(f"{ctx.message.author.mention} gives themselves a Hershey bar.")
    await ctx.send("http://gph.is/2rt64CX")
@bot.command(aliases=["giveicecream"])
@not_blacklisted_channel(blacklist=[CHANNEL_WELCOME])
async def icecream(ctx, member:str=False):
    """Gives ice cream to the given user (or the author)."""
    # Reject smuggled @everyone/@here/role mentions before echoing `member`.
    if await sanitize_mention(member) == False:
        return await ctx.send("Woah... looks like you're trying to be a little sneaky with what you're telling me to do. You can't ping roles or everyone.")
    if member != False:
        await ctx.send(f"{ctx.message.author.mention} gives {member} ice cream!")
    else:
        await ctx.send(f"{ctx.message.author.mention} gives themselves some ice cream.")
    await ctx.send("http://gph.is/YZLMMs")
async def sanitize_mention(member):
    """Returns False when `member` would mention everyone/here or a role; True otherwise."""
    if member == False:
        # No target supplied at all -- nothing to sanitize.
        return True
    if member in ("@everyone", "@here"):
        return False
    if member.startswith("<@&"):
        # Raw role-mention syntax.
        return False
    return True
@bot.command(aliases=["div"])
async def division(ctx, div):
    """Assigns the author a division role, or clears all division roles."""
    choice = div.lower()
    letter_to_role = {"a": "Division A", "b": "Division B", "c": "Division C"}
    if choice in letter_to_role:
        role_name = letter_to_role[choice]
        await assign_div(ctx, role_name)
        return await ctx.send(f"Assigned you the {role_name} role, and removed all other division/alumni roles.")
    if choice == "d":
        return await ctx.send("This server does not have a Division D role. Instead, use the `!alumni` command!")
    if choice in ["remove", "clear", "none", "x"]:
        member = ctx.message.author
        # Strip every division role.
        division_roles = [
            discord.utils.get(member.guild.roles, name=role_name)
            for role_name in (ROLE_DIV_A, ROLE_DIV_B, ROLE_DIV_C)
        ]
        await member.remove_roles(*division_roles)
        return await ctx.send("Removed all of your division/alumni roles.")
    return await ctx.send("Sorry, I don't seem to see that division. Try `!division c` to assign the Division C role, or `!division d` to assign the Division D role.")
async def assign_div(ctx, div):
    """Assigns a user a div.

    Clears all division roles plus alumni, then grants the role named `div`.
    """
    member = ctx.message.author
    guild_roles = member.guild.roles
    # Division/alumni roles are mutually exclusive, so remove the whole set first.
    conflicting = [
        discord.utils.get(guild_roles, name=role_name)
        for role_name in (ROLE_DIV_A, ROLE_DIV_B, ROLE_DIV_C, ROLE_ALUMNI)
    ]
    await member.remove_roles(*conflicting)
    await member.add_roles(discord.utils.get(guild_roles, name=div))
    return True
@bot.command()
async def alumni(ctx):
    """Removes or adds the alumni role from a user."""
    member = ctx.message.author
    guild_roles = member.guild.roles
    # Alumni status is mutually exclusive with division roles; clear those first.
    division_roles = [
        discord.utils.get(guild_roles, name=role_name)
        for role_name in (ROLE_DIV_A, ROLE_DIV_B, ROLE_DIV_C)
    ]
    await member.remove_roles(*division_roles)
    alumni_role = discord.utils.get(guild_roles, name=ROLE_ALUMNI)
    if alumni_role in member.roles:
        await member.remove_roles(alumni_role)
        await ctx.send("Removed your alumni status.")
    else:
        await member.add_roles(alumni_role)
        await ctx.send(f"Added the alumni role, and removed all other division roles.")
@bot.command()
async def wiki(ctx, command:str=False, *args):
    """Interfaces with the Scioly.org wiki: page links, summaries, and search.

    `command` is "summary", "search", or treated as (part of) a page title to
    link. A `-multiple` flag processes each remaining argument separately.
    """
    # Check to make sure not too much at once
    if not command:
        return await ctx.send("<https://scioly.org/wiki>")
    if len(args) > 7:
        # Fix: the old message claimed a limit of 12 while the check enforces 7.
        return await ctx.send("Slow down there buster. Please keep the command to 7 or less arguments at once.")
    multiple = False
    for arg in args:
        if arg[:1] == "-":
            multiple = arg.lower() == "-multiple"
    if command in ["summary"]:
        if multiple:
            for arg in [arg for arg in args if arg[:1] != "-"]:
                text = await implement_command("summary", arg)
                if text == False:
                    await ctx.send(f"The `{arg}` page does not exist!")
                else:
                    await ctx.send(" ".join(text))
        else:
            string_sum = " ".join([arg for arg in args if arg[:1] != "-"])
            text = await implement_command("summary", string_sum)
            if text == False:
                # Fix: previously referenced the stale loop variable `arg`
                # (undefined when args is empty) instead of the queried title.
                await ctx.send(f"The `{string_sum}` page does not exist!")
            else:
                await ctx.send(" ".join(text))
    elif command in ["search"]:
        if multiple:
            return await ctx.send("Ope! No multiple searches at once yet!")
        searches = await implement_command("search", " ".join([arg for arg in args]))
        await ctx.send("\n".join([f"`{s}`" for s in searches]))
    else:
        # Assume link
        if multiple:
            new_args = [command] + list(args)
            for arg in [arg for arg in new_args if arg[:1] != "-"]:
                url = await implement_command("link", arg)
                if url == False:
                    await ctx.send(f"The `{arg}` page does not exist!")
                else:
                    # Fix: previously fell through and called wiki_url_fix(False),
                    # raising AttributeError after reporting a missing page.
                    await ctx.send(f"<{wiki_url_fix(url)}>")
        else:
            string_sum = " ".join([arg for arg in args if arg[:1] != "-"])
            # Fold the leading `command` token back into the page title unless
            # it was the literal word "link".
            if len(args) > 0 and command.rstrip() != "link":
                string_sum = f"{command} {string_sum}"
            elif command.rstrip() != "link":
                string_sum = command
            url = await implement_command("link", string_sum)
            if url == False:
                await ctx.send(f"The `{string_sum}` page does not exist!")
            else:
                await ctx.send(f"<{wiki_url_fix(url)}>")
def wiki_url_fix(url):
    """Decode percent-encoded ':' (%3A) and '/' (%2F) sequences in a wiki URL."""
    decoded = url.replace("%3A", ":")
    decoded = decoded.replace(r"%2F", "/")
    return decoded
@bot.command(aliases=["wp"])
async def wikipedia(ctx, request:str=False, *args):
    """
    Interfaces with Wikipedia: `search` lists results, `summary` posts a short
    summary + link, and any other first word is folded into the page title to link.
    """
    term = " ".join(args)
    if request == False:
        return await ctx.send("You must specifiy a command and keyword, such as `!wikipedia search \"Science Olympiad\"`")
    if request == "search":
        # Top 5 search results, one per line in code formatting.
        return await ctx.send("\n".join([f"`{result}`" for result in aiowikip.search(term, results=5)]))
    elif request == "summary":
        try:
            term = term.title()
            page = await aiowikip.page(term)
            return await ctx.send(aiowikip.summary(term, sentences=3) + f"\n\nRead more on Wikipedia here: <{page.url}>!")
        except wikip.exceptions.DisambiguationError as e:
            return await ctx.send(f"Sorry, the `{term}` term could refer to multiple pages, try again using one of these terms:" + "\n".join([f"`{o}`" for o in e.options]))
        except wikip.exceptions.PageError as e:
            return await ctx.send(f"Sorry, but the `{term}` page doesn't exist! Try another term!")
    else:
        # Any other first word is treated as part of the page title to link.
        try:
            term = f"{request} {term}".strip()
            term = term.title()
            page = await aiowikip.page(term)
            return await ctx.send(f"Sure, here's the link: <{page.url}>")
        except wikip.exceptions.PageError as e:
            return await ctx.send(f"Sorry, but the `{term}` page doesn't exist! Try another term!")
        except wikip.exceptions.DisambiguationError as e:
            return await ctx.send(f"Sorry, but the `{term}` page is a disambiguation page. Please try again!")
@bot.command()
async def profile(ctx, name:str=False):
    """Links the Scioly.org forum and wiki profiles for the given (or invoking) user."""
    if name == False:
        # No argument: use the invoker's nickname, falling back to their username.
        member = ctx.message.author
        name = member.nick
        if name == None:
            name = member.name
    elif name.find("<@") != -1:
        # Argument is a mention: resolve the mentioned member's nick/username.
        iden = await harvest_id(name)
        member = ctx.message.author.guild.get_member(int(iden))
        name = member.nick
        if name == None:
            name = member.name
    embed = assemble_embed(
        title=f"Scioly.org Information for {name}",
        desc=(f"[`Forums`](https://scioly.org/forums/memberlist.php?mode=viewprofile&un={name}) | [`Wiki`](https://scioly.org/wiki/index.php?title=User:{name})"),
        hexcolor="#2E66B6"
    )
    await ctx.send(embed=embed)
@bot.command()
async def latex(ctx, *args):
    """Renders the given LaTeX expression as an image via the codecogs service."""
    expression = " ".join(args)
    # codecogs URLs require spaces to be encoded as &space;
    # FIX: removed leftover debug print() statements.
    expression = expression.replace(" ", r"&space;")
    await ctx.send(r"https://latex.codecogs.com/png.latex?\dpi{150}{\color{Gray}" + expression + "}")
@bot.command(aliases=["membercount"])
async def count(ctx):
    """Reports the server's current member count."""
    guild = ctx.message.author.guild
    member_total = len(guild.members)
    await ctx.send(f"Currently, there are `{member_total}` members in the server.")
@bot.command()
@commands.check(is_staff)
async def exalt(ctx, user):
    """Exalts a user (grants the Exalted Member role)."""
    author = ctx.message.author
    em_role = discord.utils.get(author.guild.roles, name=ROLE_EM)
    target_id = await harvest_id(user)
    target = author.guild.get_member(int(target_id))
    await target.add_roles(em_role)
    await ctx.send(f"Successfully exalted. Congratulations {user}! :tada: :tada:")
@bot.command()
@commands.check(is_staff)
async def unexalt(ctx, user):
    """Unexalts a user (removes the Exalted Member role)."""
    author = ctx.message.author
    em_role = discord.utils.get(author.guild.roles, name=ROLE_EM)
    target_id = await harvest_id(user)
    target = author.guild.get_member(int(target_id))
    await target.remove_roles(em_role)
    await ctx.send(f"Successfully unexalted.")
@bot.command()
@commands.check(is_staff)
async def mute(ctx, user:discord.Member, *args):
    """
    Mutes a user for the given length of time.

    :param user: User to be muted.
    :type user: discord.Member
    :param *args: The time to mute the user for.
    :type *args: str
    """
    duration = " ".join(args)
    await _mute(ctx, user, duration, self=False)
@bot.command()
async def selfmute(ctx, *args):
    """
    Self-mutes the invoking user for the given length of time.

    :param *args: The time to mute the user for.
    :type *args: str
    """
    invoker = ctx.message.author
    if await is_staff(ctx):
        return await ctx.send("Staff members can't self mute.")
    duration = " ".join(args)
    await _mute(ctx, invoker, duration, self=True)
async def _mute(ctx, user:discord.Member, time: str, self: bool):
    """
    Helper function for muting commands.

    :param user: User to be muted.
    :type user: discord.Member
    :param time: The time to mute the user for ("indef" for indefinite).
    :type time: str
    :param self: Whether this is a self-mute (uses the self-mute role).
    :type self: bool
    """
    if user.id in PI_BOT_IDS:
        return await ctx.send("Hey! You can't mute me!!")
    # FIX: callers pass "" (from " ".join(args)), never None, so test falsiness.
    if not time:
        return await ctx.send("You need to specify a length that this used will be muted. Examples are: `1 day`, `2 months, 1 day`, or `indef` (aka, forever).")
    role = None
    if self:
        role = discord.utils.get(user.guild.roles, name=ROLE_SELFMUTE)
    else:
        role = discord.utils.get(user.guild.roles, name=ROLE_MUTED)
    parsed = "indef"
    if time != "indef":
        parsed = dateparser.parse(time, settings={"PREFER_DATES_FROM": "future"})
        if parsed == None:
            return await ctx.send("Sorry, but I don't understand that length of time.")
        # Schedule the automatic unmute.
        CRON_LIST.append({"date": parsed, "do": f"unmute {user.id}"})
    await user.add_roles(role)
    if parsed == "indef":
        # FIX: previously eastern.localize("indef") raised for indefinite mutes.
        await ctx.send(f"Successfully muted {user.mention} indefinitely.")
    else:
        eastern = pytz.timezone("US/Eastern")
        await ctx.send(f"Successfully muted {user.mention} until `{str(eastern.localize(parsed))} EST`.")
@bot.command()
@commands.check(is_staff)
async def unmute(ctx, user):
    """Unmutes a user (removes the muted role)."""
    author = ctx.message.author
    muted_role = discord.utils.get(author.guild.roles, name=ROLE_MUTED)
    target_id = await harvest_id(user)
    target = author.guild.get_member(int(target_id))
    await target.remove_roles(muted_role)
    await ctx.send(f"Successfully unmuted {user}.")
@bot.command()
@commands.check(is_staff)
async def ban(ctx, member:discord.User=None, reason=None, *args):
    """Bans a user for the given reason and length of time ("indef" for indefinite)."""
    time = " ".join(args)
    if member == None or member == ctx.message.author:
        return await ctx.channel.send("You cannot ban yourself! >:(")
    if reason == None:
        return await ctx.send("You need to give a reason for you banning this user.")
    # FIX: " ".join(args) yields "" (not None) when no length was given, so test falsiness.
    if not time:
        return await ctx.send("You need to specify a length that this used will be banned. Examples are: `1 day`, `2 months, 1 day`, or `indef` (aka, forever).")
    if member.id in PI_BOT_IDS:
        return await ctx.send("Hey! You can't ban me!!")
    message = f"You have been banned from the Scioly.org Discord server for {reason}."
    parsed = "indef"
    if time != "indef":
        parsed = dateparser.parse(time, settings={"PREFER_DATES_FROM": "future"})
        if parsed == None:
            return await ctx.send(f"Sorry, but I don't understand the length of time: `{time}`.")
        # Schedule the automatic unban.
        CRON_LIST.append({"date": parsed, "do": f"unban {member.id}"})
    await member.send(message)
    await ctx.guild.ban(member, reason=reason)
    if parsed == "indef":
        # FIX: previously eastern.localize("indef") raised for indefinite bans.
        await ctx.channel.send(f"**{member}** is banned indefinitely.")
    else:
        eastern = pytz.timezone("US/Eastern")
        await ctx.channel.send(f"**{member}** is banned until `{str(eastern.localize(parsed))} EST`.")
@bot.command()
@commands.check(is_staff)
async def unban(ctx, member:discord.User=None):
    """Unbans a user. They must re-join the server themselves afterwards."""
    if member == None:
        return await ctx.channel.send("Please give either a user ID or mention a user.")
    await ctx.guild.unban(member)
    await ctx.channel.send(f"Inverse ban hammer applied, user unbanned. Please remember that I cannot force them to re-join the server, they must join themselves.")
@bot.command()
@commands.check(is_staff)
async def archive(ctx):
    """Archives the current tournament channel: makes it read-only and moves it to the archive category."""
    tournament = [t for t in TOURNAMENT_INFO if t[1] == ctx.channel.name]
    bot_spam = discord.utils.get(ctx.guild.text_channels, name = CHANNEL_BOTSPAM)
    archive_cat = discord.utils.get(ctx.guild.categories, name = CATEGORY_ARCHIVE)
    # FIX: bail out when this channel is not a known tournament channel; previously
    # the role lookups below ran with name=None and set_permissions(None, ...) crashed.
    if len(tournament) == 0:
        return await ctx.send("Sorry, this channel doesn't appear to be a tournament channel, so I can't archive it.")
    tournament_name = tournament[0][1]
    tournament_formal = tournament[0][0]
    tournament_role = discord.utils.get(ctx.guild.roles, name = tournament_formal)
    all_tourney_role = discord.utils.get(ctx.guild.roles, name = ROLE_AT)
    embed = assemble_embed(
        title = 'This channel is now archived.',
        desc = (f'Thank you all for your discussion around the {tournament_formal}. Now that we are well past the tournament date, we are going to close this channel to help keep tournament discussions relevant and on-topic.\n\n' +
        f'If you have more questions/comments related to this tournament, you are welcome to bring them up in {ctx.channel.mention}. This channel is now read-only.\n\n' +
        f'If you would like to no longer view this channel, you are welcome to type `!tournament {tournament_name}` into {bot_spam}, and the channel will disappear for you. Members with the `All Tournaments` role will continue to see the channel.'),
        webcolor='red'
    )
    # Make the channel read-only for both role audiences, then move it to the archive.
    await ctx.channel.set_permissions(tournament_role, send_messages = False, view_channel = True)
    await ctx.channel.set_permissions(all_tourney_role, send_messages = False, view_channel = True)
    await ctx.channel.edit(category = archive_cat, position = 1000)
    await ctx.channel.send(embed = embed)
    await ctx.message.delete()
@bot.command()
async def pronouns(ctx, *args):
    """Assigns or removes pronoun roles from a user."""
    member = ctx.message.author
    if len(args) < 1:
        # FIX: return after the usage message instead of falling through.
        return await ctx.send(f"{member.mention}, please specify a pronoun to add/remove. Current options include `!pronouns he`, `!pronouns she`, and `!pronouns they`.")
    he_role = discord.utils.get(member.guild.roles, name=ROLE_PRONOUN_HE)
    she_role = discord.utils.get(member.guild.roles, name=ROLE_PRONOUN_SHE)
    they_role = discord.utils.get(member.guild.roles, name=ROLE_PRONOUN_THEY)
    # Each argument toggles (adds/removes) the matching pronoun role.
    for arg in args:
        if arg.lower() in ["he", "him", "his", "he / him / his"]:
            if he_role in member.roles:
                await ctx.send("Oh, looks like you already have the He / Him / His role. Removing it.")
                await member.remove_roles(he_role)
            else:
                await member.add_roles(he_role)
                await ctx.send("Added the He / Him / His role.")
        elif arg.lower() in ["she", "her", "hers", "she / her / hers"]:
            if she_role in member.roles:
                await ctx.send("Oh, looks like you already have the She / Her / Hers role. Removing it.")
                await member.remove_roles(she_role)
            else:
                await member.add_roles(she_role)
                await ctx.send("Added the She / Her / Hers role.")
        elif arg.lower() in ["they", "them", "their", "they / them / their"]:
            if they_role in member.roles:
                await ctx.send("Oh, looks like you already have the They / Them / Theirs role. Removing it.")
                await member.remove_roles(they_role)
            else:
                await member.add_roles(they_role)
                await ctx.send("Added the They / Them / Theirs role.")
        elif arg.lower() in ["remove", "clear", "delete", "nuke"]:
            await member.remove_roles(he_role, she_role, they_role)
            return await ctx.send("Alrighty, your pronouns have been removed.")
        elif arg.lower() in ["help", "what"]:
            return await ctx.send("For help with pronouns, please use `!help pronouns`.")
        else:
            return await ctx.send(f"Sorry, I don't recognize the `{arg}` pronoun. The pronoun roles we currently have are:\n" +
            "> `!pronouns he ` (which gives you *He / Him / His*)\n" +
            "> `!pronouns she ` (which gives you *She / Her / Hers*)\n" +
            "> `!pronouns they` (which gives you *They / Them / Theirs*)\n" +
            "To remove pronouns, use `!pronouns remove`.\n" +
            "Feel free to request alternate pronouns, by opening a report, or reaching out a staff member.")
@bot.command()
@commands.check(is_launcher)
async def confirm(ctx, *args: discord.Member):
    """Allows a staff member to confirm a user."""
    members = list(args)
    await _confirm(members)
async def _confirm(members):
    """
    Confirms each member (swaps Unconfirmed role for Member role), announces it in
    the welcome channel, then scrubs the welcome channel of messages related to them.
    """
    server = bot.get_guild(SERVER_ID)
    channel = discord.utils.get(server.text_channels, name=CHANNEL_WELCOME)
    for member in members:
        role1 = discord.utils.get(member.guild.roles, name=ROLE_UC)
        role2 = discord.utils.get(member.guild.roles, name=ROLE_MR)
        await member.remove_roles(role1)
        await member.add_roles(role2)
        # Temporary confirmation notice, deleted after a few seconds.
        message = await channel.send(f"Alrighty, confirmed {member.mention}. Welcome to the server! :tada:")
        await asyncio.sleep(3)
        await message.delete()
        # Walk the welcome channel oldest-first, tracking the previous message so
        # that bot replies to this member's messages can also be removed.
        before_message = None
        f = 0
        async for message in channel.history(oldest_first=True):
            # Delete any messages sent by Pi-Bot where message before is by member
            if f > 0:
                if message.author.id in PI_BOT_IDS and before_message.author == member and len(message.embeds) == 0:
                    await message.delete()
            # Delete any messages by user
            if message.author == member and len(message.embeds) == 0:
                await message.delete()
            if member in message.mentions:
                await message.delete()
            before_message = message
            f += 1
@bot.command()
async def nuke(ctx, count):
    """
    Nukes (deletes) a specified amount of messages.

    A negative count means "keep that many of the most recent messages" — it is
    converted into a positive delete count based on the channel's history size.
    Typing `!stopnuke` during the countdown aborts the nuke.
    """
    global STOPNUKE
    launcher = await is_launcher(ctx)
    staff = await is_staff(ctx)
    if not (staff or (launcher and ctx.message.channel.name == "welcome")):
        return await ctx.send("APOLOGIES. INSUFFICIENT RANK FOR NUKE.")
    if STOPNUKE:
        return await ctx.send("TRANSMISSION FAILED. ALL NUKES ARE CURRENTLY PAUSED. TRY AGAIN LATER.")
    if int(count) > 100:
        return await ctx.send("Chill. No more than deleting 100 messages at a time.")
    channel = ctx.message.channel
    if int(count) < 0:
        # Negative count: delete everything except the last |count| messages.
        history = await channel.history(limit=105).flatten()
        message_count = len(history)
        print(message_count)
        if message_count > 100:
            count = 100
        else:
            count = message_count + int(count) - 1
        if count <= 0:
            return await ctx.send("Sorry, you can not delete a negative amount of messages. This is likely because you are asking to save more messages than there are in the channel.")
    await ctx.send("=====\nINCOMING TRANSMISSION.\n=====")
    await ctx.send("PREPARE FOR IMPACT.")
    for i in range(10, 0, -1):
        await ctx.send(f"NUKING {count} MESSAGES IN {i}... TYPE `!stopnuke` AT ANY TIME TO STOP ALL TRANSMISSION.")
        await asyncio.sleep(1)
        if STOPNUKE:
            return await ctx.send("A COMMANDER HAS PAUSED ALL NUKES FOR 20 SECONDS. NUKE CANCELLED.")
    if not STOPNUKE:
        # +13 presumably accounts for the countdown/transmission messages just sent — TODO confirm
        async for m in channel.history(limit=(int(count) + 13)):
            if not m.pinned and not STOPNUKE:
                await m.delete()
        msg = await ctx.send("https://media.giphy.com/media/XUFPGrX5Zis6Y/giphy.gif")
        await asyncio.sleep(5)
        await msg.delete()
@bot.command()
async def stopnuke(ctx):
    """Pauses all current and pending nukes for a short cooldown period."""
    global STOPNUKE
    launcher = await is_launcher(ctx)
    staff = await is_staff(ctx)
    allowed = staff or (launcher and ctx.message.channel.name == CHANNEL_WELCOME)
    if not allowed:
        return await ctx.send("APOLOGIES. INSUFFICIENT RANK FOR STOPPING NUKE.")
    STOPNUKE = True
    await ctx.send("TRANSMISSION RECEIVED. STOPPED ALL CURRENT NUKES.")
    await asyncio.sleep(15)
    for remaining in range(5, 0, -1):
        await ctx.send(f"NUKING WILL BE ALLOWED IN {remaining}. BE WARNED COMMANDER.")
        await asyncio.sleep(1)
    STOPNUKE = False
@bot.command()
@commands.check(is_staff)
async def clrreact(ctx, msg: discord.Message, *args: discord.Member):
    """
    Clears reactions from a given message.

    :param msg: the message containing the reactions
    :type msg: discord.Message
    :param *args: list of users to clear reactions of; clears everything when omitted
    :type *args: List[discord.Member], optional
    """
    users = args
    if users:
        # Only strip the listed users' reactions, one reaction object at a time.
        for target in users:
            for reaction in msg.reactions:
                await reaction.remove(target)
        await ctx.send(f"Cleared reactions on message from {len(users)} user(s).")
    else:
        await msg.clear_reactions()
        await ctx.send("Cleared all reactions on message.")
@bot.event
async def on_message_edit(before, after):
    """Logs message edits and re-runs the censor (words, emojis, invite links) on the new content."""
    # FIX: Message.created_at is a naive UTC timestamp, so compare against
    # utcnow(), not the local-time now(), or the 2-second window is off by the UTC offset.
    if (datetime.datetime.utcnow() - after.created_at).total_seconds() < 2:
        # no need to log edit events for messages just created
        return
    print('Message from {0.author} edited to: {0.content}, from: {1.content}'.format(after, before))
    for word in CENSORED_WORDS:
        if len(re.findall(fr"\b({word})\b", after.content, re.I)):
            print(f"Censoring message by {after.author} because of the word: `{word}`")
            # FIX: stop after deleting; continuing would try to delete the same message again.
            return await after.delete()
    for word in CENSORED_EMOJIS:
        if len(re.findall(fr"{word}", after.content)):
            print(f"Censoring message by {after.author} because of the emoji: `{word}`")
            return await after.delete()
    if not any(ending for ending in DISCORD_INVITE_ENDINGS if ending in after.content) and (len(re.findall("discord.gg", after.content, re.I)) > 0 or len(re.findall("discord.com/invite", after.content, re.I)) > 0):
        print(f"Censoring message by {after.author} because of the it mentioned a Discord invite link.")
        await after.delete()
async def send_to_dm_log(message):
    """Mirrors a received DM into the server's DM-log channel as an embed."""
    server = bot.get_guild(SERVER_ID)
    log_channel = discord.utils.get(server.text_channels, name=CHANNEL_DMLOG)
    # Pre-render the variable-length sections, substituting "None" when empty.
    attachment_text = " | ".join([f"**{a.filename}**: [Link]({a.url})" for a in message.attachments]) if len(message.attachments) > 0 else "None"
    content_text = message.content if len(message.content) > 0 else "None"
    embed_text = "\n".join([str(e.to_dict()) for e in message.embeds]) if len(message.embeds) > 0 else "None"
    fields = [
        {"name": "Author", "value": message.author, "inline": "True"},
        {"name": "Message ID", "value": message.id, "inline": "True"},
        {"name": "Created At (UTC)", "value": message.created_at, "inline": "True"},
        {"name": "Attachments", "value": attachment_text, "inline": "False"},
        {"name": "Content", "value": content_text, "inline": "False"},
        {"name": "Embed", "value": embed_text, "inline": "False"},
    ]
    embed = assemble_embed(title=":speech_balloon: New DM", fields=fields)
    await log_channel.send(embed=embed)
@bot.event
async def on_message(message):
    """
    Central message pipeline: DM logging, bot-spam channel enforcement, censoring
    (words / emojis / invite links), keyword-ping notifications, spam & caps
    auto-muting, and finally command dispatch.
    """
    # Log DMs
    if type(message.channel) == discord.DMChannel:
        await send_to_dm_log(message)
    else:
        # Print to output
        if not (message.author.id in PI_BOT_IDS and message.channel.name in [CHANNEL_EDITEDM, CHANNEL_DELETEDM, CHANNEL_DMLOG]):
            # avoid sending logs for messages in log channels
            print(f'Message from {message.author} in #{message.channel}: {message.content}')
    # Prevent command usage in channels outside of #bot-spam
    author = message.author
    # author.roles[-1] is the member's top role — only plain Members are restricted.
    if type(message.channel) != discord.DMChannel and message.content.startswith(BOT_PREFIX) and author.roles[-1] == discord.utils.get(author.guild.roles, name=ROLE_MR):
        if message.channel.name != CHANNEL_BOTSPAM:
            allowedCommands = ["about", "dogbomb", "exchange", "gallery", "invite", "me", "magic8ball", "latex", "obb", "profile", "r", "report", "rule", "shibabomb", "tag", "wiki", "wikipedia", "wp"]
            allowed = False
            for c in allowedCommands:
                if message.content.find(BOT_PREFIX + c) != -1: allowed = True
            if not allowed:
                botspam_channel = discord.utils.get(message.guild.text_channels, name=CHANNEL_BOTSPAM)
                clarify_message = await message.channel.send(f"{author.mention}, please use bot commands only in {botspam_channel.mention}. If you have more questions, you can ping a global moderator.")
                await asyncio.sleep(10)
                await clarify_message.delete()
                return await message.delete()
    if message.author.id in PI_BOT_IDS: return
    content = message.content
    # Censor: banned words, banned emojis, then external Discord invite links.
    for word in CENSORED_WORDS:
        if len(re.findall(fr"\b({word})\b", content, re.I)):
            print(f"Censoring message by {message.author} because of the word: `{word}`")
            await message.delete()
            await censor(message)
    for word in CENSORED_EMOJIS:
        if len(re.findall(fr"{word}", content)):
            print(f"Censoring message by {message.author} because of the emoji: `{word}`")
            await message.delete()
            await censor(message)
    if not any(ending for ending in DISCORD_INVITE_ENDINGS if ending in message.content) and (len(re.findall("discord.gg", content, re.I)) > 0 or len(re.findall("discord.com/invite", content, re.I)) > 0):
        print(f"Censoring message by {message.author} because of the it mentioned a Discord invite link.")
        await message.delete()
        ssChannel = discord.utils.get(message.author.guild.text_channels, name=CHANNEL_SUPPORT)
        await message.channel.send(f"*Links to external Discord servers can not be sent in accordance with rule 12. If you have questions, please ask in {ssChannel.mention}.*")
    # Keyword pings: skip messages that look like bot commands or come from bot-spam.
    pingable = True
    if message.content[:1] == "!" or message.content[:1] == "?" or message.content[:2] == "pb" or message.content[:2] == "bp":
        pingable = False
    if message.channel.id == 724125653212987454:
        # If the message is coming from #bot-spam
        pingable = False
    if pingable:
        for user in PING_INFO:
            if user['id'] == message.author.id:
                continue
            pings = user['pings']
            for ping in pings:
                if len(re.findall(ping, content, re.I)) > 0 and message.author.discriminator != "0000":
                    # Do not send a ping if the user is mentioned
                    user_is_mentioned = user['id'] in [m.id for m in message.mentions]
                    if user['id'] in [m.id for m in message.channel.members] and ('dnd' not in user or user['dnd'] != True) and not user_is_mentioned:
                        # Check that the user can actually see the message
                        name = message.author.nick
                        if name == None:
                            name = message.author.name
                        await ping_pm(user['id'], name, ping, message.channel.name, message.content, message.jump_url)
    # SPAM TESTING
    global RECENT_MESSAGES
    # A message counts as "caps" when uppercase letters outnumber lowercase by more than 3.
    caps = False
    u = sum(1 for c in message.content if c.isupper())
    l = sum(1 for c in message.content if c.islower())
    if u > (l + 3): caps = True
    # Keep a rolling window of the 21 most recent messages.
    RECENT_MESSAGES = [{"author": message.author.id,"content": message.content.lower(), "caps": caps}] + RECENT_MESSAGES[:20]
    # Spam checker
    if RECENT_MESSAGES.count({"author": message.author.id, "content": message.content.lower()}) >= 6:
        muted_role = discord.utils.get(message.author.guild.roles, name=ROLE_MUTED)
        parsed = dateparser.parse("1 hour", settings={"PREFER_DATES_FROM": "future"})
        CRON_LIST.append({"date": parsed, "do": f"unmute {message.author.id}"})
        await message.author.add_roles(muted_role)
        await message.channel.send(f"Successfully muted {message.author.mention} for 1 hour.")
        await auto_report("User was auto-muted (spam)", "red", f"A user ({str(message.author)}) was auto muted in {message.channel.mention} because of repeated spamming.")
    elif RECENT_MESSAGES.count({"author": message.author.id, "content": message.content.lower()}) >= 3:
        await message.channel.send(f"{message.author.mention}, please watch the spam. You will be muted if you do not stop.")
    # Caps checker
    elif sum(1 for m in RECENT_MESSAGES if m['author'] == message.author.id and m['caps']) > 8 and caps:
        muted_role = discord.utils.get(message.author.guild.roles, name=ROLE_MUTED)
        parsed = dateparser.parse("1 hour", settings={"PREFER_DATES_FROM": "future"})
        CRON_LIST.append({"date": parsed, "do": f"unmute {message.author.id}"})
        await message.author.add_roles(muted_role)
        await message.channel.send(f"Successfully muted {message.author.mention} for 1 hour.")
        await auto_report("User was auto-muted (caps)", "red", f"A user ({str(message.author)}) was auto muted in {message.channel.mention} because of repeated caps.")
    elif sum(1 for m in RECENT_MESSAGES if m['author'] == message.author.id and m['caps']) > 3 and caps:
        await message.channel.send(f"{message.author.mention}, please watch the caps, or else I will lay down the mute hammer!")
    # Do not treat messages with only exclamations as command
    if message.content.count(BOT_PREFIX) != len(message.content):
        await bot.process_commands(message)
@bot.event
async def on_raw_reaction_add(payload):
    """
    Handles raw reaction events: un-self-mute requests (reaction in the
    un-self-mute channel) and staff resolution of report messages (:x: /
    :white_check_mark: reactions).
    """
    if payload.user_id not in PI_BOT_IDS:
        guild = bot.get_guild(payload.guild_id)
        reports_channel = discord.utils.get(guild.text_channels, name=CHANNEL_REPORTS)
        if payload.emoji.name == EMOJI_UNSELFMUTE:
            guild = bot.get_guild(payload.guild_id)
            self_muted_role = discord.utils.get(guild.roles, name=ROLE_SELFMUTE)
            un_self_mute_channel = discord.utils.get(guild.text_channels, name=CHANNEL_UNSELFMUTE)
            member = payload.member
            message = await un_self_mute_channel.fetch_message(payload.message_id)
            if self_muted_role in member.roles:
                await member.remove_roles(self_muted_role)
                await message.clear_reactions()
                await message.add_reaction(EMOJI_FULL_UNSELFMUTE)
                # Cancel the pending scheduled unmute — iterate a copy while removing.
                for obj in CRON_LIST[:]:
                    if obj['do'] == f'unmute {payload.user_id}':
                        CRON_LIST.remove(obj)
        if payload.message_id in REPORT_IDS:
            messageObj = await reports_channel.fetch_message(payload.message_id)
            if payload.emoji.name == "\U0000274C": # :x:
                print("Report cleared with no action.")
                await messageObj.delete()
            if payload.emoji.name == "\U00002705": # :white_check_mark:
                print("Report handled.")
                await messageObj.delete()
    return
@bot.event
async def on_reaction_add(reaction, user):
    """Pages the "List of Commands" embed when a user reacts with the navigation arrows."""
    msg = reaction.message
    if len(msg.embeds) > 0:
        if msg.embeds[0].title.startswith("List of Commands") and user.id not in PI_BOT_IDS:
            # The current page number is embedded in the title, e.g. "... 2/5".
            currentPage = int(re.findall(r'(\d+)(?=\/)', msg.embeds[0].title)[0])
            print(currentPage)
            ls = False
            if reaction.emoji == EMOJI_FAST_REVERSE:
                ls = await get_list(user, 1)
            elif reaction.emoji == EMOJI_LEFT_ARROW:
                ls = await get_list(user, currentPage - 1)
            elif reaction.emoji == EMOJI_RIGHT_ARROW:
                ls = await get_list(user, currentPage + 1)
            elif reaction.emoji == EMOJI_FAST_FORWARD:
                # 100 is clamped to the last page by get_list — presumably; TODO confirm
                ls = await get_list(user, 100)
            if ls != False:
                await reaction.message.edit(embed=ls)
            # Remove the user's navigation reaction so the buttons stay reusable.
            await reaction.remove(user)
@bot.event
async def on_member_join(member):
    """Greets new members, assigns the Unconfirmed role, and reports censor-matching usernames."""
    role = discord.utils.get(member.guild.roles, name=ROLE_UC)
    join_channel = discord.utils.get(member.guild.text_channels, name=CHANNEL_WELCOME)
    await member.add_roles(role)
    name = member.name
    # Flag usernames that match the censor list for staff review.
    for word in CENSORED_WORDS:
        if len(re.findall(fr"\b({word})\b", name, re.I)):
            await auto_report("Innapropriate Username Detected", "red", f"A new member ({str(member)}) has joined the server, and I have detected that their username is innapropriate.")
    await join_channel.send(f"{member.mention}, welcome to the Scioly.org Discord Server! " +
        "You can add roles here, using the commands shown at the top of this channel. " +
        "If you have any questions, please just ask here, and a helper or moderator will answer you ASAP." +
        "\n\n" +
        "**Please add roles by typing the commands above into the text box, and if you have a question, please type it here. After adding roles, a moderator will give you access to the rest of the server to chat with other members!**")
    # Celebrate every 100th member.
    member_count = len(member.guild.members)
    lounge_channel = discord.utils.get(member.guild.text_channels, name=CHANNEL_LOUNGE)
    if member_count % 100 == 0:
        await lounge_channel.send(f"Wow! There are now `{member_count}` members in the server!")
@bot.event
async def on_member_remove(member):
    """Logs member departures and scrubs the welcome channel of messages mentioning them."""
    leave_channel = discord.utils.get(member.guild.text_channels, name=CHANNEL_LEAVE)
    unconfirmed_role = discord.utils.get(member.guild.roles, name=ROLE_UC)
    # The checkmark here means "yes, the member was still unconfirmed".
    if unconfirmed_role in member.roles:
        unconfirmed_statement = "Unconfirmed: :white_check_mark:"
    else:
        unconfirmed_statement = "Unconfirmed: :x:"
    joined_at = f"Joined at: `{str(member.joined_at)}`"
    if member.nick != None:
        await leave_channel.send(f"**{member}** (nicknamed `{member.nick}`) has left the server (or was removed).\n{unconfirmed_statement}\n{joined_at}")
    else:
        await leave_channel.send(f"**{member}** has left the server (or was removed).\n{unconfirmed_statement}\n{joined_at}")
    welcome_channel = discord.utils.get(member.guild.text_channels, name=CHANNEL_WELCOME)
    # when user leaves, determine if they are mentioned in any messages in #welcome, delete if so
    async for message in welcome_channel.history(oldest_first=True):
        if not message.pinned:
            if member in message.mentions:
                await message.delete()
@bot.event
async def on_member_update(before, after):
    """Reports censor-matching nicknames whenever a member's profile changes."""
    nickname = after.nick
    if nickname == None:
        return
    for word in CENSORED_WORDS:
        if len(re.findall(fr"\b({word})\b", nickname, re.I)):
            await auto_report("Innapropriate Username Detected", "red", f"A member ({str(after)}) has updated their nickname to **{nickname}**, which the censor caught as innapropriate.")
@bot.event
async def on_user_update(before, after):
    """Reports users whose new global username matches the censor list."""
    for word in CENSORED_WORDS:
        if len(re.findall(fr"\b({word})\b", after.name, re.I)):
            # FIX: previously referenced the undefined name `member` (NameError on match); use `after`.
            await auto_report("Innapropriate Username Detected", "red", f"A member ({str(after)}) has updated their nickname to **{after.name}**, which the censor caught as innapropriate.")
@bot.event
async def on_raw_message_edit(payload):
    """Logs edited messages (including uncached ones) into the edited-message log channel."""
    channel = bot.get_channel(payload.channel_id)
    # DMs have no guild; fall back to the main server so the log channel can be found.
    guild = bot.get_guild(SERVER_ID) if channel.type == discord.ChannelType.private else channel.guild
    edited_channel = discord.utils.get(guild.text_channels, name=CHANNEL_EDITEDM)
    # Never log edits inside the log channels themselves (would create a feedback loop).
    if channel.type != discord.ChannelType.private and channel.name in [CHANNEL_EDITEDM, CHANNEL_DELETEDM, CHANNEL_DMLOG]:
        return
    try:
        # Rich path: the pre-edit message was still in the bot's cache.
        message = payload.cached_message
        # NOTE(review): created_at is naive UTC while datetime.now() is local time —
        # this 2-second window may be off by the UTC offset; confirm intended behavior.
        if (datetime.datetime.now() - message.created_at).total_seconds() < 2:
            # no need to log events because message was created
            return
        message_now = await channel.fetch_message(message.id)
        channel_name = f"{message.author.mention}'s DM" if channel.type == discord.ChannelType.private else message.channel.mention
        embed = assemble_embed(
            title=":pencil: Edited Message",
            fields=[
                {
                    "name": "Author",
                    "value": message.author,
                    "inline": "True"
                },
                {
                    "name": "Channel",
                    "value": channel_name,
                    "inline": "True"
                },
                {
                    "name": "Message ID",
                    "value": message.id,
                    "inline": "True"
                },
                {
                    "name": "Created At (UTC)",
                    "value": message.created_at,
                    "inline": "True"
                },
                {
                    "name": "Edited At (UTC)",
                    "value": message_now.edited_at,
                    "inline": "True"
                },
                {
                    "name": "Attachments",
                    "value": " | ".join([f"**{a.filename}**: [Link]({a.url})" for a in message.attachments]) if len(message.attachments) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Past Content",
                    "value": message.content[:1024] if len(message.content) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "New Content",
                    "value": message_now.content[:1024] if len(message_now.content) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Embed",
                    "value": "\n".join([str(e.to_dict()) for e in message.embeds]) if len(message.embeds) > 0 else "None",
                    "inline": "False"
                }
            ]
        )
        await edited_channel.send(embed=embed)
    except Exception as e:
        # Fallback path: the original message was not cached (e.g. sent before the
        # bot started); log what can still be fetched plus the raw event payload.
        message_now = await channel.fetch_message(payload.message_id)
        embed = assemble_embed(
            title=":pencil: Edited Message",
            fields=[
                {
                    "name": "Channel",
                    "value": bot.get_channel(payload.channel_id).mention,
                    "inline": "True"
                },
                {
                    "name": "Message ID",
                    "value": payload.message_id,
                    "inline": "True"
                },
                {
                    "name": "Author",
                    "value": message_now.author,
                    "inline": "True"
                },
                {
                    "name": "Created At (UTC)",
                    "value": message_now.created_at,
                    "inline": "True"
                },
                {
                    "name": "Edited At (UTC)",
                    "value": message_now.edited_at,
                    "inline": "True"
                },
                {
                    "name": "New Content",
                    "value": message_now.content[:1024] if len(message_now.content) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Raw Payload",
                    "value": str(payload.data)[:1024] if len(payload.data) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Current Attachments",
                    "value": " | ".join([f"**{a.filename}**: [Link]({a.url})" for a in message_now.attachments]) if len(message_now.attachments) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Current Embed",
                    "value": "\n".join([str(e.to_dict()) for e in message_now.embeds])[:1024] if len(message_now.embeds) > 0 else "None",
                    "inline": "False"
                }
            ]
        )
        await edited_channel.send(embed=embed)
@bot.event
async def on_raw_message_delete(payload):
    """Logs deleted messages (including uncached ones) into the deleted-message log channel."""
    channel = bot.get_channel(payload.channel_id)
    # DMs have no guild; fall back to the main server so the log channel can be found.
    guild = bot.get_guild(SERVER_ID) if channel.type == discord.ChannelType.private else channel.guild
    # Skip reports (cleared intentionally) and the deletion log itself (feedback loop).
    if channel.type != discord.ChannelType.private and channel.name in [CHANNEL_REPORTS, CHANNEL_DELETEDM]:
        print("Ignoring deletion event because of the channel it's from.")
        return
    deleted_channel = discord.utils.get(guild.text_channels, name=CHANNEL_DELETEDM)
    try:
        # Rich path: the deleted message was still in the bot's cache.
        message = payload.cached_message
        channel_name = f"{message.author.mention}'s DM" if channel.type == discord.ChannelType.private else message.channel.mention
        embed = assemble_embed(
            title=":fire: Deleted Message",
            fields=[
                {
                    "name": "Author",
                    "value": message.author,
                    "inline": "True"
                },
                {
                    "name": "Channel",
                    "value": channel_name,
                    "inline": "True"
                },
                {
                    "name": "Message ID",
                    "value": message.id,
                    "inline": "True"
                },
                {
                    "name": "Created At (UTC)",
                    "value": message.created_at,
                    "inline": "True"
                },
                {
                    "name": "Edited At (UTC)",
                    "value": message.edited_at,
                    "inline": "True"
                },
                {
                    "name": "Attachments",
                    "value": " | ".join([f"**{a.filename}**: [Link]({a.url})" for a in message.attachments]) if len(message.attachments) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Content",
                    "value": str(message.content)[:1024] if len(message.content) > 0 else "None",
                    "inline": "False"
                },
                {
                    "name": "Embed",
                    "value": "\n".join([str(e.to_dict()) for e in message.embeds])[:1024] if len(message.embeds) > 0 else "None",
                    "inline": "False"
                }
            ]
        )
        await deleted_channel.send(embed=embed)
    except Exception as e:
        # Fallback path: the message was not cached, so only IDs can be logged.
        print(e)
        embed = assemble_embed(
            title=":fire: Deleted Message",
            fields=[
                {
                    "name": "Channel",
                    "value": bot.get_channel(payload.channel_id).mention,
                    "inline": "True"
                },
                {
                    "name": "Message ID",
                    "value": payload.message_id,
                    "inline": "True"
                }
            ]
        )
        await deleted_channel.send(embed=embed)
@bot.event
async def on_command_error(ctx, error):
    """Reply with a friendly message matching the raised command error."""
    print("Command Error:")
    print(error)
    cmds = discord.ext.commands
    # Ordered (exception types, reply) pairs -- checked first to last so more
    # specific exception classes are matched before their base classes.
    parse_and_check_replies = (
        ((cmds.UnexpectedQuoteError, cmds.InvalidEndOfQuotedStringError),
         "Sorry, it appears that your quotation marks are misaligned, and I can't read your query."),
        ((cmds.ExpectedClosingQuoteError,),
         "Oh. I was expecting you were going to close out your command with a quote somewhere, but never found it!"),
        ((cmds.MissingRequiredArgument,),
         "Oops, you are missing a required argument in the command."),
        ((cmds.ArgumentParsingError,),
         "Sorry, I had trouble parsing one of your arguments."),
        ((cmds.TooManyArguments,),
         "Woahhh!! Too many arguments for this command!"),
        ((cmds.BadArgument, cmds.BadUnionArgument),
         "Sorry, I'm having trouble reading one of the arguments you just used. Try again!"),
        ((cmds.CheckAnyFailure,),
         "It looks like you aren't able to run this command, sorry."),
        ((cmds.PrivateMessageOnly,),
         "Pssttt. You're going to have to DM me to run this command!"),
        ((cmds.NoPrivateMessage,),
         "Ope. You can't run this command in the DM's!"),
        ((cmds.NotOwner,),
         "Oof. You have to be the bot's master to run that command!"),
        ((cmds.MissingPermissions, cmds.BotMissingPermissions),
         "Er, you don't have the permissions to run this command."),
        ((cmds.MissingRole, cmds.BotMissingRole),
         "Oh no... you don't have the required role to run this command."),
        ((cmds.MissingAnyRole, cmds.BotMissingAnyRole),
         "Oh no... you don't have the required role to run this command."),
        ((cmds.NSFWChannelRequired,),
         "Uh... this channel can only be run in a NSFW channel... sorry to disappoint."),
    )
    for exc_types, reply in parse_and_check_replies:
        if isinstance(error, exc_types):
            return await ctx.send(reply)
    # Custom channel-restriction error needs the offending channel's mention.
    if isinstance(error, CommandNotAllowedInChannel):
        return await ctx.send(f"You are not allowed to use this command in {error.channel.mention}.")
    command_replies = (
        ((cmds.ConversionError,),
         "Oops, there was a bot error here, sorry about that."),
        ((cmds.UserInputError,),
         "Hmmm... I'm having trouble reading what you're trying to tell me."),
        ((cmds.CommandNotFound,),
         "Sorry, I couldn't find that command."),
        ((cmds.CheckFailure,),
         "Sorry, but I don't think you can run that command."),
        ((cmds.DisabledCommand,),
         "Sorry, but this command is disabled."),
        ((cmds.CommandInvokeError,),
         "Sorry, but an error incurred when the command was invoked."),
        ((cmds.CommandOnCooldown,),
         "Slow down buster! This command's on cooldown."),
        ((cmds.MaxConcurrencyReached,),
         "Uh oh. This command has reached MAXIMUM CONCURRENCY. *lightning flash*. Try again later."),
        ((cmds.ExtensionError,),
         "Oh no. There's an extension error. Please ping a developer about this one."),
        ((cmds.CommandRegistrationError,),
         "Oh boy. Command registration error. Please ping a developer about this."),
        ((cmds.CommandError,),
         "Oops, there was a command error. Try again."),
    )
    for exc_types, reply in command_replies:
        if isinstance(error, exc_types):
            return await ctx.send(reply)
    return
@bot.event
async def on_error(event, *args, **kwargs):
    """Log any unhandled exception raised inside an event handler."""
    # Same output as two separate print calls: header line, then traceback.
    print("Code Error:", traceback.format_exc(), sep="\n")
# Maps every accepted alias (in the title-cased form produced by
# lookup_role) to its canonical role name. Hyphenated channel-style aliases
# such as "california-north" are stored title-cased ("California-North"):
# the original elif chain compared them in lowercase AFTER title-casing the
# input, so those branches could never match -- and the two California
# hyphen aliases were additionally swapped (north mapped to South).
_ROLE_ALIASES = {
    "Al": "Alabama",
    "All": "All States",
    "Ak": "Alaska",
    "Ar": "Arkansas",
    "Az": "Arizona",
    "Cas": "California (South)", "Ca-S": "California (South)",
    "Socal": "California (South)", "California South": "California (South)",
    "California-South": "California (South)",
    "Can": "California (North)", "Ca-N": "California (North)",
    "Nocal": "California (North)", "California North": "California (North)",
    "California-North": "California (North)",
    "Co": "Colorado",
    "Ct": "Connecticut",
    "Dc": "District of Columbia",
    "District Of Columbia": "District of Columbia",
    "District-Of-Columbia": "District of Columbia",
    "De": "Delaware",
    "Fl": "Florida",
    "Ga": "Georgia",
    "Hi": "Hawaii",
    "Id": "Idaho",
    "Il": "Illinois",
    "In": "Indiana",
    "Ia": "Iowa",
    "Ks": "Kansas",
    "Ky": "Kentucky",
    "La": "Louisiana",
    "Me": "Maine",
    "Md": "Maryland",
    "Ma": "Massachusetts",
    "Mi": "Michigan",
    "Mn": "Minnesota",
    "Ms": "Mississippi",
    "Mo": "Missouri",
    "Mt": "Montana",
    "Ne": "Nebraska",
    "Nv": "Nevada",
    "Nh": "New Hampshire",
    "Nj": "New Jersey",
    "Nm": "New Mexico",
    "Ny": "New York",
    "Nc": "North Carolina",
    "Nd": "North Dakota",
    "Oh": "Ohio",
    "Ok": "Oklahoma",
    "Or": "Oregon",
    "Pa": "Pennsylvania",
    "Ri": "Rhode Island",
    "Sc": "South Carolina",
    "Sd": "South Dakota",
    "Tn": "Tennessee",
    "Tx": "Texas",
    "Ut": "Utah",
    "Vt": "Vermont",
    "Va": "Virginia",
    "Wa": "Washington",
    "Wv": "West Virginia",
    "Wi": "Wisconsin",
    "Wy": "Wyoming",
}
# Every full canonical name is also accepted as-is ("Texas" -> "Texas"),
# matching the `name == "Texas"` halves of the original elif chain.
_ROLE_ALIASES.update({role: role for role in set(_ROLE_ALIASES.values())})


async def lookup_role(name):
    """Resolve a state abbreviation or alias to its canonical role name.

    Matching is case-insensitive (input is title-cased first). Returns
    ``False`` when the name is not recognized -- the same contract as the
    original elif chain.
    """
    return _ROLE_ALIASES.get(name.title(), False)
async def harvest_id(user):
    """Strip Discord mention markup (``<@!`` and ``>``) from *user*."""
    cleaned = user
    for token in ("<@!", ">"):
        cleaned = cleaned.replace(token, "")
    return cleaned
# Entry point: run with the development token when dev_mode is set,
# otherwise with the production token. bot.run() blocks until shutdown.
if dev_mode:
    bot.run(DEV_TOKEN)
else:
    bot.run(TOKEN)
|
21,247 | 50789e9e7ce144a5e42918521e497d881ea8df86 | import v20
from Oanda.Config.config import Config
"""
A class for placing trades
"""
class OrderHandler:
    """Places and inspects EUR_USD trades through the Oanda v20 REST API."""

    def __init__(self, pips_to_risk):
        # pips_to_risk is forwarded verbatim as the stop-loss distance.
        self.currency_pair = 'EUR_USD'
        self.pips_to_risk = pips_to_risk

    def _create_api_context(self):
        """Build a v20 API context from the shared Config.

        Extracted helper: this construction was previously duplicated in
        every public method.
        """
        return v20.Context(
            Config.get_host_name(),
            Config.get_port(),
            Config.get_ssl(),
            application="sample_code",
            token=Config.get_api_token(),
            datetime_format=Config.get_date_format()
        )

    def place_market_order(self, order_type, n_units, profit_price, trailing_stop_flag):
        """Submit a fill-or-kill market order with take-profit and stop-loss.

        ``order_type`` is 'buy' or anything else for sell; ``n_units`` is the
        position size; ``profit_price`` sets the take-profit level;
        ``trailing_stop_flag`` additionally attaches a trailing stop.
        """
        kwargs = {
            'type': 'MARKET',
            'instrument': self.currency_pair,
            # Negative units means a sell order in the v20 API.
            'units': str(n_units) if order_type == 'buy' else str(-n_units),
            'timeInForce': 'FOK',
            'positionFill': 'DEFAULT',
            'takeProfitOnFill': {'price': str(profit_price), 'timeInForce': 'GTC'},
            'stopLossOnFill': {'distance': str(self.pips_to_risk), 'timeInForce': 'GTC'},
        }
        if trailing_stop_flag:
            kwargs['trailingStopLossOnFill'] = {'distance': str(self.pips_to_risk),
                                                'timeInForce': 'GTC'}
        response = self._create_api_context().order.market(Config.get_account(), **kwargs)
        print("Response: {} ({})\n".format(response.status, response.reason))

    def get_open_trades(self):
        """Return ``(trades, None)`` on success or ``(None, error_message)``."""
        response = self._create_api_context().trade.list_open(Config.get_account())
        if response.status != 200:
            return None, str(response) + '\n' + str(response.body)
        return response.body['trades'], None
|
21,248 | 5272c579e2f3c045c00d66bdc14e57db4e1ec7eb | from django.urls import path
from . import views
# Route /<user_id> to the view listing all bookings for that user.
urlpatterns = [
    path('<int:user_id>', views.show_all_bookings)
]
|
21,249 | 77aa70ad394b7ec7571562691abd24851e0dfd9c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import rospy
from geometry_msgs.msg import Twist
from util import Vector2
DEBUG = False
def get_agent_velocity(agent):
    """Return agent velocity as Vector2 instance."""
    velocity = Vector2()
    linear = agent.twist.twist.linear
    velocity.x = linear.x
    velocity.y = linear.y
    return velocity
def get_agent_position(agent):
    """Return agent position as Vector2 instance."""
    position = Vector2()
    point = agent.pose.pose.position
    position.x = point.x
    position.y = point.y
    return position
def get_obst_position(obst):
    """Return obstacle position as Vector2 instance."""
    position = Vector2()
    position.x = obst.position.x
    position.y = obst.position.y
    return position
class Boid():
    """
    An implementation of Craig Reynolds' flocking rules and boid objects.

    Each boid (bird-oid object) maneuvers based on the positions and velocities
    of its nearby flockmates. Computation is based on three components:
        1) alignment: steer towards the average heading of local flockmates
        2) cohesion: steer to move toward the average position of local flockmates
        3) separation: steer to avoid crowding local flockmates.
    Additionally, 4th component, avoid, is implemented where boids steer away
    from obstacles in their search radius.

    Each component yields a force on boid. Total force then gives the
    acceleration which is multiplied by time and added to boid's velocity.
    Force and velocity are limited to specified amount.

    Attributes:
        position (Vector2): Boid's position
        velocity (Vector2): Boid's velocity
        mass (Vector2): Boid's mass
        alignment_factor (double): Weight for alignment component
        cohesion_factor (double): Weight for cohesion component
        separation_factor (double): Weight for separation component
        avoid_factor (double): Weight for obstacle avoiding component
        max_speed (double): Velocity upper limit
        max_force (double): Force upper limit
        friction (double): Constant friction force
        crowd_radius (double): Radius to avoid crowding
        search_radius (double): Boid's sensing radius

    Methods:
        update_parameters(self): Save parameters in class variables
        compute_alignment(self, nearest_agents): Return alignment component
        compute_cohesion(self, nearest_agents): Return cohesion component
        compute_separation(self, nearest_agents): Return separation component
        compute_avoids(self, avoids): Return avoid component
        compute_velocity(self, my_agent, nearest_agents, avoids):
            Compute total velocity based on all components
    """
    def __init__(self):
        """Create an empty boid and update parameters."""
        self.position = Vector2()
        self.velocity = Vector2()
        self.update_parameters()
        self.mass = 0.18  # Mass of Sphero robot in kilograms

    def update_parameters(self):
        """Save Reynolds controller parameters in class variables."""
        self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')
        self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')
        self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')
        self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')
        self.max_speed = rospy.get_param('/dyn_reconf/max_speed')
        self.max_force = rospy.get_param('/dyn_reconf/max_force')
        self.friction = rospy.get_param('/dyn_reconf/friction')
        self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')
        self.search_radius = rospy.get_param('/dyn_reconf/search_radius')
        rospy.loginfo(rospy.get_caller_id() + " -> Parameters updated")
        if DEBUG:
            print('alignment_factor: ', self.alignment_factor)
            print('cohesion_factor: ', self.cohesion_factor)
            print('separation_factor: ', self.separation_factor)
            print('avoid_factor: ', self.avoid_factor)
            print('max_speed: ', self.max_speed)
            print('max_force: ', self.max_force)
            print('friction: ', self.friction)
            print('crowd_radius: ', self.crowd_radius)
            print('search_radius: ', self.search_radius)

    def compute_alignment(self, nearest_agents):
        """Return alignment component."""
        mean_velocity = Vector2()
        steer = Vector2()
        # Find mean velocity of neighboring agents
        for agent in nearest_agents:
            mean_velocity += get_agent_velocity(agent)
        # Steer toward calculated mean velocity
        if nearest_agents:
            mean_velocity.set_mag(self.max_speed)
            steer = mean_velocity - self.velocity
            steer.limit(self.max_force)
        return steer

    def compute_cohesion(self, nearest_agents):
        """Return cohesion component."""
        mean_position = Vector2()
        steer = Vector2()
        # Find mean position of neighboring agents
        for agent in nearest_agents:
            mean_position += get_agent_position(agent)
        # Guard against empty neighborhood: the original divided by
        # len(nearest_agents) unconditionally and raised ZeroDivisionError.
        if nearest_agents:
            direction = mean_position / len(nearest_agents)
            # Steer toward calculated mean position
            # Force is proportional to agents distance from mean
            d = direction.norm()
            if d > 0:
                direction.set_mag(self.max_speed * (d / self.search_radius))
                steer = direction - self.velocity
                steer.limit(self.max_force)
        return steer

    def compute_separation(self, nearest_agents):
        """Return separation component."""
        direction = Vector2()
        steer = Vector2()
        count = 0
        # Accumulate repulsion from each agent inside the crowding radius
        for agent in nearest_agents:
            agent_position = get_agent_position(agent)
            d = agent_position.norm()
            # Skip exact overlaps (d == 0): the repulsion direction is
            # undefined and 1/d below would divide by zero.
            if 0 < d < self.crowd_radius:
                count += 1
                agent_position *= -1       # Make vector point away from other agent
                agent_position.normalize() # Normalize to get only direction
                # Vector's magnitude is reciprocal to distance between agents
                agent_position /= d
                direction += agent_position
        # Steer away from calculated mean position (only if the summed
        # repulsion is non-degenerate)
        if count and direction.norm() > 0:
            direction.set_mag(self.max_speed)
            steer = direction - self.velocity
            steer.limit(self.max_force)
        return steer

    def compute_avoids(self, avoids):
        """Return avoid component."""
        direction = Vector2()
        steer = Vector2()
        # Accumulate repulsion from each detected obstacle
        for obst in avoids:
            obst_position = get_obst_position(obst)
            d = obst_position.norm()
            # Skip exact overlaps (d == 0) to avoid division by zero.
            if d > 0:
                obst_position *= -1        # Make vector point away from obstacle
                obst_position.normalize()  # Normalize to get only direction
                # Vector's magnitude is reciprocal to distance between agents
                obst_position /= d
                direction += obst_position
        # Steer away from calculated mean position
        if avoids and direction.norm() > 0:
            direction.set_mag(self.max_speed)
            steer = direction - self.velocity
            steer.limit(self.max_force)
        return steer

    def compute_velocity(self, my_agent, nearest_agents, avoids):
        """Compute total velocity based on all components."""
        force = Vector2()
        self.velocity = get_agent_velocity(my_agent)
        # Compute all the components
        alignment = self.compute_alignment(nearest_agents)
        cohesion = self.compute_cohesion(nearest_agents)
        separation = self.compute_separation(nearest_agents)
        avoid = self.compute_avoids(avoids)
        if DEBUG:
            print("alignment:  ", alignment)
            print("cohesion:   ", cohesion)
            print("separation: ", separation)
            print("avoid:      ", avoid)
        # Add components together and limit the output
        force += alignment * self.alignment_factor
        force += cohesion * self.cohesion_factor
        force += separation * self.separation_factor
        force += avoid * self.avoid_factor
        force.limit(self.max_force)
        # If agent is moving, apply constant friction force
        if self.velocity.norm() > 0:
            force += self.friction * -1 * self.velocity.normalize(ret=True)
        acceleration = force / self.mass
        # Calculate total velocity (delta_velocity = acceleration * delta_time)
        self.velocity += acceleration / 10
        self.velocity.limit(self.max_speed)
        if DEBUG:
            print("force:        ", force)
            print("acceleration: ", acceleration)
            print("velocity:     ", self.velocity)
            print()
        # Return the the velocity as Twist message
        vel = Twist()
        vel.linear.x = self.velocity.x
        vel.linear.y = self.velocity.y
        return vel
|
21,250 | 59686ccc0125502b992f09d326867997f2072f5d | import pandas as pd
import psycopg2
# NOTE(review): credentials are hard-coded here -- consider moving them to
# environment variables or a config file.
conn = psycopg2.connect(
    "dbname='etiya' user='dwh_stg' host='192.168.1.45' password='Stg1220'")
# Staging tables to join, mapped to the suffix pair for pd.DataFrame.join;
# None marks the base table (first join needs no suffixes).
# NOTE(review): 'stg_dce_gnl_st' appears twice, so the second entry
# (rsuffix='gnl_st_id') silently overwrites the first (rsuffix='gnl_std_id')
# -- confirm which mapping was intended.
tables = {'stg_dce_cust': None,
          'stg_dce_party': {'lsuffix': 'party_id', 'rsuffix': 'party_id'},
          'stg_dce_gnl_st': {'lsuffix': 'st_id', 'rsuffix': 'gnl_std_id'},
          'stg_dce_cust_tp': {'lsuffix': 'cust_tp_id', 'rsuffix': 'cust_tp_id'},
          'stg_dce_gnl_tp': {'lsuffix': 'party_tp_id', 'rsuffix': 'gnl_st_id'},
          'stg_dce_gnl_st': {'lsuffix': 'st_id', 'rsuffix': 'gnl_st_id'}}
m = pd.DataFrame()
# Load each table and join it onto the accumulated frame; the first loaded
# table seeds the result (its suffix entry is never read).
for table_name, suffixs in tables.items():
    df = pd.read_sql_query("select * from {}".format(table_name), conn)
    m = df if m.empty else m.join(
        df, lsuffix=suffixs['lsuffix'], rsuffix=suffixs['rsuffix'])
21,251 | d7bb8874b63c8615fc5f6c7dfa16c4404d402a6a | import metadata
import yaml
from pykwalify.core import Core
import os
import shutil
from pushd import Dir
import exectools
import sys
VALID_UPDATES = {
'mode': metadata.CONFIG_MODES,
}
# Used in oit.py to print out valid update options
# in --help output
def valid_updates():
    """Build the --help text listing updatable keys and their allowed values."""
    res = '\n\tKey\tValid Options\n\n'
    # .items() works on both Python 2 and 3; .iteritems() is Python-2-only
    # and raises AttributeError under Python 3.
    for k, v in VALID_UPDATES.items():
        opts = ""
        if v:
            v = [str(i) for i in v]
            opts = ':\t{}'.format(','.join(v))
        res += '\t{}{}\n\n'.format(k, opts)
    return res
class MetaDataConfig(object):
    """
    Holds common functions for managing the MetaData configs
    Mostly is a class to hold runtime
    """
    def __init__(self, runtime):
        self.runtime = runtime
        # config:* subcommands persist session state under working_dir, so a
        # temporary working dir (wiped on exit) would lose that state.
        if self.runtime.remove_tmp_working_dir:
            print('config:* options require a non-temporary working space. Must run with --working-dir')
            sys.exit(1)

    def _load_config_log(self):
        """
        <working_dir>/.config file holds details of the current
        config management session
        Load that file into a dict
        """
        config_path = os.path.join(self.runtime.working_dir, '.config')
        if not os.path.isfile(config_path):
            return {}
        with open(config_path, 'r') as f:
            # safe_load: the session log is plain data; yaml.load() without a
            # Loader is deprecated and can instantiate arbitrary objects.
            data = yaml.safe_load(f)
        return data

    def _save_config_log(self, data):
        """
        <working_dir>/.config file holds details of the current
        config management session
        Save that file
        """
        config_path = os.path.join(self.runtime.working_dir, '.config')
        with open(config_path, 'w') as f:
            yaml.safe_dump(data, f, default_flow_style=False)

    def _do_update(self, meta, k, v):
        """
        Convenience function for setting meta keys
        """
        self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))
        meta.config[k] = v
        meta.save()

    def update(self, key, val):
        """
        Update [key] to [val] in all given image/rpm metas
        VALID_UPDATES is used to lock out what can be updated
        Right now only [mode] is valid, but that may change
        """
        if key not in VALID_UPDATES:
            raise ValueError('{} is not a valid update key. See --help'.format(key))
        if VALID_UPDATES[key]:
            if val not in VALID_UPDATES[key]:
                msg = '{} is not a valid value for {}. Use one of: {}'.format(val, key, ','.join(VALID_UPDATES[key]))
                raise ValueError(msg)
        for img in self.runtime.image_metas():
            self._do_update(img, key, val)
        for rpm in self.runtime.rpm_metas():
            self._do_update(rpm, key, val)

    def config_print(self, key=None, name_only=False):
        """
        Print name, sub-key, or entire config
        """
        def _do_print(meta, k):
            if name_only:
                print(meta.in_group_config_path)
            else:
                if k:
                    val = meta.config.get(k, None)
                else:
                    val = meta.config.primitive()
                val = yaml.safe_dump(val, default_flow_style=False)
                print("*****" + meta.in_group_config_path + "*****")
                print(val)
                print('')

        image_metas = self.runtime.image_metas()
        rpm_metas = self.runtime.rpm_metas()
        if image_metas:
            print('')
            print('********* Images *********')
            for img in image_metas:
                _do_print(img, key)
        if rpm_metas:
            print('')
            print('********* RPMs *********')
            for rpm in rpm_metas:
                _do_print(rpm, key)

    def commit(self, msg):
        """
        Commit outstanding metadata config changes
        """
        self.runtime.logger.info('Commit config: {}'.format(msg))
        with Dir(self.runtime.metadata_dir):
            exectools.cmd_assert(["git", "add", "."])
            # --allow-empty: a session may have produced no file changes.
            exectools.cmd_assert(["git", "commit", "--allow-empty", "-m", msg])

    def push(self):
        """
        Push changes back to config repo.
        Will of course fail if user does not have write access.
        """
        self.runtime.logger.info('Pushing config...')
        with Dir(self.runtime.metadata_dir):
            exectools.cmd_assert(["git", "push"])

    def new(self, new_type, name):
        """
        Given type and name, copy template config into correct place
        and report that new config file path for editing.
        """
        valid_types = ['image', 'rpm']
        new_type = new_type.lower()
        if new_type not in valid_types:
            raise ValueError('Type must be one of {}'.format(','.join(valid_types)))
        new_type = new_type + 's'
        template = os.path.join(self.runtime.metadata_dir, 'example', new_type, 'template.yml')
        new_config = os.path.join(self.runtime.group_dir, new_type, '{}.yml'.format(name))
        if os.path.exists(new_config):
            raise ValueError('{} already exists!'.format(new_config))
        shutil.copyfile(template, new_config)
        # Track the new file so sanitize_new_config() can strip its template
        # comments later.
        config_log = self._load_config_log()
        config_log.setdefault('new', []).append(new_config)
        self._save_config_log(config_log)
        self.runtime.logger.info("New config template created: \n{}".format(new_config))

    def sanitize_new_config(self):
        """
        Configs created with new() will be filled with template comments.
        We do not want those cluttering the final configs, so remove them
        by parsing and rewriting the file.
        """
        config_log = self._load_config_log()
        if 'new' in config_log:
            for cfg in config_log['new']:
                with open(cfg, 'r+') as f:
                    # Round-tripping through the YAML parser drops comments.
                    data = yaml.safe_load(f)
                    f.seek(0)
                    yaml.safe_dump(data, f, default_flow_style=False)
                    f.truncate()
            del config_log['new']
        self._save_config_log(config_log)
|
21,252 | b2428dba756948d12b51fbb9cdb80714f1a08863 | import numpy as np
class Molecule(object):
    """A molecule with a position, an orientation, a dipole moment ``mu``
    and an accumulating flat list of Omega coefficients."""

    def __init__(self):
        self.orientation = np.array([0.0, 0.0, 0.0])  # (rho, theta, phi)
        self.position = np.array([0.0, 0.0, 0.0])     # cartesian (x, y, z)
        self.mu = []                                  # dipole moment (set via setMu)
        self.Omega_list = []                          # flattened Omega values

    def setPosition(self, x, y, z):
        """Set the cartesian position."""
        self.position = np.array([x, y, z], dtype=float)

    def getPosition(self):
        """Return the cartesian position as a float ndarray."""
        return self.position

    def setOrientation(self, rho, theta, phi):
        """Set the spherical orientation (rho, theta, phi)."""
        self.orientation = np.array([rho, theta, phi], dtype=float)

    def getOrientation(self):
        """Return the orientation as a float ndarray."""
        return self.orientation

    def setMu(self, new_mu):
        """Set the dipole moment vector."""
        self.mu = np.array(new_mu, dtype=float)

    def getMu(self):
        """Return the dipole moment vector."""
        return self.mu

    def addOmega(self, new_Omegas):
        """Append the flattened values of *new_Omegas* to the Omega list."""
        # extend() replaces the original list-comprehension-for-side-effects
        # idiom, which built and discarded a throwaway list of Nones.
        self.Omega_list.extend(np.array(new_Omegas, dtype=float).flatten())

    def getOmegas(self):
        """Return all accumulated Omega values as a float ndarray."""
        return np.array(self.Omega_list, dtype=float)
21,253 | 951124f1bd6d532cb819c87d6ddcdd3b74cad7b7 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import Marcacao
from datetime import datetime, timedelta
from django.utils import timezone
from .forms import MeuForm
from .models import Marcacao
from .models import MarcacoesPorDia
from django.contrib.auth.models import User
from django.http import HttpResponse
import xlwt
from django.db.models import Q
@login_required
def marcarPonto(request):
    """Render the clock-in/clock-out page."""
    template_name = 'marcarPonto.html'
    return render(request, template_name)
@login_required
def marcacaoRealizada(request):
    """Persist a time-clock punch for the logged-in user and confirm it."""
    punch = Marcacao()
    punch.nome_usuario = request.user
    punch.id_usuario = request.user.id
    punch.data_marcacao = timezone.now()
    punch.save()
    return render(request, 'marcacaoComSucesso.html')
@login_required
def confirmarMarcacao(request):
    """Show the confirmation page with the current server timestamp."""
    return render(request, 'confirmarMarcacao.html',
                  {'dataHoraAtual': timezone.now()})
@login_required
def visualizarMarcacoes(request):
    """Show the punch card (punches grouped by day) for a date range.

    The special user 'bpa' may pick any employee via the form; everyone
    else only sees their own punches.
    """
    dataInicio = ""
    dataFim = ""
    idFuncionario = ""
    cartaoPonto = []
    if request.method == 'POST':
        try:
            userName = request.user.username
            dataInicio = request.POST['dataInicio']
            dataFim = request.POST['dataFim']
            if (userName == 'bpa'):
                idFuncionario = request.POST['Funcionarios']
            else:
                idFuncionario = request.user.id
            # Make the end date inclusive by filtering up to the next midnight.
            dataFimDate = datetime.strptime(dataFim, '%Y-%m-%d').date()
            dataFimDate = dataFimDate + timedelta(days=1)
            marcacoes = Marcacao.objects.filter(Q(id_usuario=idFuncionario), data_marcacao__gte=dataInicio,
                                                data_marcacao__lte=dataFimDate)
            marcacoesAux = marcacoes
            marcacoesDia = MarcacoesPorDia()
            # i walks the punches; x looks one punch ahead so that consecutive
            # punches on the same calendar day are merged into one card.
            i = 0
            x = 1
            sizeLista = len(marcacoes)
            while i < len(marcacoes):
                dataStr = marcacoes[i].data_marcacao.strftime('%d/%m/%Y')
                if (x < sizeLista):
                    dataAuxStr = marcacoes[x].data_marcacao.strftime('%d/%m/%Y')
                else:
                    dataAuxStr = ""
                horaStr = marcacoes[i].data_marcacao.strftime('%H:%M')
                if (dataStr != ""):
                    if (dataStr == dataAuxStr):
                        # Next punch is on the same day: keep accumulating times.
                        marcacoesDia.marcacoes += (horaStr,)
                        i = i + 1
                        x = x + 1
                    else:
                        # Day closes here: store the finished card and start a new one.
                        marcacoesDia.marcacoes += (horaStr,)
                        marcacoesDia.dia_marcacoes = dataStr
                        cartaoPonto.append(marcacoesDia)
                        marcacoesDia = MarcacoesPorDia()
                        i = i + 1
                        x = x + 1
        except:
            # NOTE(review): bare except silently hides missing POST keys and
            # bad dates; the page then renders with an empty card -- confirm
            # this best-effort behavior is intended.
            pass
    userName = request.user.username
    if (userName == "bpa"):
        contexto = {
            'meu_form': MeuForm(),
            'cartaoPonto': cartaoPonto
        }
    else:
        contexto = {
            'cartaoPonto': cartaoPonto
        }
    return render(request, 'visualizarMarcacoes.html', contexto)
@login_required
def pontoManual(request):
    """Render the manual punch page.

    Only the special 'bpa' account gets the employee-selector form.

    Bug fix: the original only defined ``contexto`` inside the 'bpa' branch,
    so any other logged-in user hit an UnboundLocalError on the render call.
    Non-'bpa' users now get the page with an empty context instead.
    """
    if request.user.username == "bpa":
        contexto = {
            'meu_form': MeuForm()
        }
    else:
        contexto = {}
    return render(request, 'pontoManual.html', contexto)
@login_required
def marcacaoManual(request):
    """Persist a manually entered punch (date + time chosen on the form).

    NOTE(review): ``contexto`` is only defined when the user is 'bpa'; for
    anyone else the later reference inside the try block raises NameError,
    which the bare except swallows, so the success page renders without
    saving anything -- confirm whether non-'bpa' access should be rejected
    explicitly.
    """
    userName = request.user.username
    if (userName == "bpa"):
        contexto = {
            'meu_form': MeuForm()
        }
    horarioMarcacao = ""
    dataMarcacao = ""
    marcacao = Marcacao()
    marcacao.nome_usuario = request.user
    try:
        if request.method == 'POST':
            idFuncionario = request.POST['Funcionarios']
            print("Id funcionario .................. ", idFuncionario)
            marcacao.id_usuario = idFuncionario
            horarioMarcacao = request.POST['horarioMarcacao']
            dataMarcacao = request.POST['dataMarcacao']
            if (horarioMarcacao == "" or dataMarcacao == "" or idFuncionario == ""):
                # Missing fields: re-render the form instead of saving.
                return render(request, 'pontoManual.html', contexto)
            marcacao.data_marcacao = dataMarcacao + " " + horarioMarcacao
            marcacao.save()
    except:
        # NOTE(review): bare except hides validation and save errors.
        pass
    return render(request, 'marcacaoComSucesso.html')
def export_xls(request, cartaoPontoExcel, dataInicio, dataFim):
    """Stream the collected punch cards as an .xls attachment.

    Falls through to the export page when no date range was supplied.
    """
    if(dataInicio != "" or dataFim != "" ):
        response = HttpResponse(content_type='application/ms-excel')
        response['Content-Disposition'] = 'attachment; filename="cartaoPonto.xls"'
        wb = xlwt.Workbook(encoding='utf-8')
        ws = wb.add_sheet('Marcacoes')
        # Sheet header, first row
        row_num = 0
        font_style = xlwt.XFStyle()
        font_style.font.bold = True
        columns = ['NOME', 'DATA', 'MARCACOES', ]
        for col_num in range(len(columns)):
            ws.write(row_num, col_num, columns[col_num], font_style)
        # Sheet body, remaining rows
        font_style = xlwt.XFStyle()
        i = 0
        linha = 1
        coluna = 0
        colunaMarcacao = 2  # punch times start in the third column
        while i < len(cartaoPontoExcel):
            # One row per (employee, day) card: name, date, then the punches.
            ws.write(linha, coluna, cartaoPontoExcel[i].nome_funcionario)
            ws.write(linha, coluna + 1, cartaoPontoExcel[i].dia_marcacoes)
            x = 0
            while (x < len(cartaoPontoExcel[i].marcacoes)):
                ws.write(linha, colunaMarcacao, cartaoPontoExcel[i].marcacoes[x])
                colunaMarcacao = colunaMarcacao + 1
                x = x + 1
            i = i + 1
            colunaMarcacao = 2
            linha = linha + 1
        wb.save(response)
        return response
    return render(request, 'exportarMarcacoesXLS.html')
def pageExportExcel(request):
    """Render the XLS export form page."""
    template_name = 'exportarMarcacoesXLS.html'
    return render(request, template_name)
def filtro_export_xls(request):
    """Collect per-day punch cards for every user in the requested date
    range and delegate rendering to export_xls.

    Bug fix: the original iterated users with ``while cont <= len(funcionarios)``,
    so the final pass always raised IndexError -- silently swallowed by the
    bare except, which also masked any real query error. The loop now
    iterates the queryset directly and only catches Exception.
    """
    cartaoPontoExcel = []
    dataInicio = ""
    dataFim = ""
    if request.method == 'POST':
        try:
            dataInicio = request.POST['dataInicio']
            dataFim = request.POST['dataFim']
            # Make the end date inclusive by filtering up to the next midnight.
            dataFimDate = datetime.strptime(dataFim, '%Y-%m-%d').date()
            dataFimDate = dataFimDate + timedelta(days=1)
            for funcionario in User.objects.all():
                marcacoes = Marcacao.objects.filter(
                    Q(id_usuario=funcionario.id),
                    data_marcacao__gte=dataInicio,
                    data_marcacao__lte=dataFimDate)
                marcacoesDia = MarcacoesPorDia()
                # i walks the punches; x looks one punch ahead so consecutive
                # punches on the same calendar day are merged into one card.
                i = 0
                x = 1
                sizeLista = len(marcacoes)
                while i < sizeLista:
                    dataStr = marcacoes[i].data_marcacao.strftime('%d/%m/%Y')
                    if (x < sizeLista):
                        dataAuxStr = marcacoes[x].data_marcacao.strftime('%d/%m/%Y')
                    else:
                        dataAuxStr = ""
                    horaStr = marcacoes[i].data_marcacao.strftime('%H:%M')
                    if (dataStr == dataAuxStr):
                        # Next punch is on the same day: keep accumulating.
                        marcacoesDia.marcacoes += (horaStr,)
                    else:
                        # Day closes here: store the finished card.
                        marcacoesDia.nome_funcionario = funcionario.username
                        marcacoesDia.marcacoes += (horaStr,)
                        marcacoesDia.dia_marcacoes = dataStr
                        cartaoPontoExcel.append(marcacoesDia)
                        marcacoesDia = MarcacoesPorDia()
                    i = i + 1
                    x = x + 1
        except Exception:
            # Preserve the original best-effort behavior: a malformed POST
            # or query failure exports whatever was collected so far.
            pass
    return export_xls(request, cartaoPontoExcel, dataInicio, dataFim)
21,254 | b67ebda9076c70aedf9a1213dd6dc52c2fca2e68 | # 方法1, 比较好理解
if __name__ == '__main__':
while True:
try:
n = int(input())
line = list(map(int, input().split()))
K, d = map(int, input().split())
value=0
# 存储到每个元素的最大值
fm=[[0 for j in range(K)] for i in range(n)]
# 存储到每个元素的最小值
fn=[[0 for j in range(K)] for i in range(n)]
for i in range(n):
fm[i][0] = fn[i][0] = line[i]
for k in range(1, K):
for j in range(max(0, i-d), i):
fm[i][k] = max(fm[i][k], max(fm[j][k-1]*line[i],fn[j][k-1]*line[i]))
fn[i][k] = min(fn[i][k], min(fm[j][k-1]*line[i],fn[j][k-1]*line[i]))
value = max(value,fm[i][K-1])
print(value)
except:
break
# 方法2
# if __name__ == '__main__':
# while True:
# try:
# n = int(input())
# values = list(map(int, input().split()))
# k, d = map(int, input().split())
# table1 = [value for value in values]
# table2 = [value for value in values]
# for i in range(1, k):
# for j in range(n-1, i-1, -1):
# zmax = max(table1[max(0,j-d):j])
# fmin = min(table2[max(0,j-d):j])
# if values[j] >= 0:
# table1[j] = zmax * values[j]
# table2[j] = fmin * values[j]
# else:
# table1[j] = fmin * values[j]
# table2[j] = zmax * values[j]
# print(max(table1))
# except:
# break |
21,255 | ebd29517cfcca0e75b9379be4f03c0a2b2e372c6 | currentUsers = ["santi123","libardozene","coxato","edsel","admin","blanquito"]
newUsers = ["angelo","croco","santi123","libardozene","coxato"]
for user in newUsers:
if user.lowepr() not in currentUsers:
currentUsers.append( user )
print("The user " + user + " is now a currentUser :D")
else:
print("The user " + user + " is a repetead user")
print("The currentUsers are :", currentUsers)
|
21,256 | b6a78176ce78ae0b39c954c5a58ddcf43a9ccea7 | from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.utils import simplejson
from knights.models import UserProfile
def registeruser(request):
    """Create a new User (and matching UserProfile) from request data.

    Expects keys 'user_name', 'email' and 'password' in the JSON POST
    body (or query parameters).  Responds with 'registered' on success,
    'emailused' if the email is taken, or '' if a field is missing.
    """
    if request.method == "POST":
        data = simplejson.loads(request.body.decode('utf-8'))
    else:
        data = request.REQUEST
    try:
        userName = data['user_name']
        email = data['email']
        password = data['password']
        try:
            # Reject registration when the email is already in use.
            User.objects.get(email=email)
            resp = HttpResponse(content='emailused')
        except User.DoesNotExist:
            newuser = User.objects.create_user(userName,
                                               email=email,
                                               password=password)
            newuser.save()
            # Bug fix: the profile was instantiated but never persisted;
            # save() is required or the UserProfile row is silently lost.
            profile = UserProfile(user=newuser)
            profile.save()
            resp = HttpResponse(content='registered')
    except KeyError:
        resp = HttpResponse(content='')
    return resp
def updateuser(request, user_name):
    """Update a user's profile: set the coin count and/or add a friend.

    Accepts JSON POST data (or query parameters) with optional keys
    'coins' and 'friend' (a friend's username).  Returns a plain-text
    summary of the actions performed, or an error message.
    """
    if request.method == "POST":
        data = simplejson.loads(request.body.decode('utf-8'))
    else:
        data = request.REQUEST
    content = ''
    username = user_name
    try:
        # Bug fix: was 'userName', an undefined name (NameError).
        user = User.objects.get(username=username)
        userP = UserProfile.objects.get(user=user)
    except User.DoesNotExist:
        return HttpResponse('user doesnt exist')
    if 'coins' in data:
        coins = data['coins']
        # To DO : auth token
        userP.coins = coins
        userP.save()
        # Bug fix: "resp += 'cons submitted')" was a syntax error and
        # HttpResponse does not support +=; accumulate plain text and
        # wrap it in one HttpResponse at the end instead.
        content += 'cons submitted'
    if 'friend' in data:
        friendName = data['friend']
        try:
            friend = User.objects.get(username=friendName)
            friendP = UserProfile.objects.get(user=friend)
            # Bug fix: 'friedns'/'friednP' were typos (AttributeError /
            # NameError).  Assumes the m2m field is named 'friends' --
            # confirm against the UserProfile model.
            userP.friends.add(friendP)
            userP.save()
            content += 'friend added'
        except User.DoesNotExist:
            return HttpResponse(content='user not found')
    return HttpResponse(content=content)
def loginuser(request):
    """Authenticate a user and open a session.

    Expects 'user_name' and 'password'.  Responds 'login' on success,
    'invalidlogin' on bad credentials or inactive account, '' when a
    field is missing.
    """
    # print(request.user)
    if request.method == "POST":
        data = simplejson.loads(request.body.decode('utf-8'))
    else:
        data = request.REQUEST
    resp = HttpResponse(content='invalidlogin')
    try:
        username = data['user_name']
        password = data['password']
        user = authenticate(username=username, password=password)
        # Bug fix: login() was called unconditionally before the None
        # check; authenticate() returns None on bad credentials and
        # login(None) raises.  Only log in verified, active users.
        if user is not None:
            if user.is_active:
                login(request, user)
                resp = HttpResponse(content='login')
    except KeyError:
        resp = HttpResponse(content='')
    return resp
def returnspriteinfo(request, user_name):
    """Return a JSON list of sprite placement data for a user's profile.

    Each entry carries spriteID, xx, yy and canInteract.
    """
    username = user_name
    try:
        user = User.objects.get(username=username)
        userP = UserProfile.objects.get(user=user)
    except User.DoesNotExist:
        # Bug fix: 'UserNotFoundError' does not exist (Django raises
        # User.DoesNotExist), and the error response was built but never
        # returned, leading to a NameError on 'userP' below.
        return HttpResponse(content='user not found')
    returnData = []
    for sprite in userP.sprites.all():
        spriteDict = {}
        spriteDict["spriteID"] = sprite.spriteID
        spriteDict["xx"] = sprite.xx
        spriteDict["yy"] = sprite.yy
        spriteDict["canInteract"] = sprite.canInteract
        returnData.append(spriteDict)
    return HttpResponse(simplejson.dumps(returnData))
def highscores(request):
    """Return every profile's display name and coin count as JSON."""
    scores = [
        {'playersName': str(profile), 'coins': str(profile.coins)}
        for profile in UserProfile.objects.all()
    ]
    return HttpResponse(simplejson.dumps(scores))
def friends(request, user_name):
    """Return a JSON list with the names of a user's friends."""
    username = user_name
    try:
        user = User.objects.get(username=username)
        userP = UserProfile.objects.get(user=user)
    except User.DoesNotExist:
        # Bug fix: 'UserNotFoundError' does not exist, and the error
        # response was never returned.
        return HttpResponse(content='user not found')
    returnData = []
    # Bug fix: 'UserProfile.friends.objects.all()' is not a valid query;
    # iterate the looked-up profile's own friends relation instead.
    # (assumes the related field is named 'friends' -- confirm in models)
    for friendP in userP.friends.all():
        returnData.append({'playersName': str(friendP)})
    # Bug fix: the view previously fell off the end and returned None.
    return HttpResponse(simplejson.dumps(returnData))
|
21,257 | 7f84d9a554a7cf47a9b210351fd4ccd8beb88197 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 05 08:39:17 2017
@author: michael
data_handler:
Module for specifying classes for collecting, cleaning,
storing, and delivering data to the strategy class.
"""
from datetime import date, datetime, timedelta
from pandas_datareader import DataReader
import os
import platform
import sqlite3
import traceback
import quandl as qd
import pandas as pd
import holidays
API_KEY = '...'
qd.ApiConfig.api_key = API_KEY
us_holidays = holidays.UnitedStates()
class WebToDatabase():
def __init__(self, db_path=None):
if not db_path:
if platform.system() == 'Windows': root = 'C:\Users'
if platform.system() == 'Linux': root = '/home'
db_path = os.path.join(root, 'michael', 'Documents', 'databases', 'securities_master.db')
# Instantiate database
self.db_path = db_path
if not os.path.exists(db_path):
self.build_database(db_path)
# Get a list of symbols and last symbol date from db
with sqlite3.connect(self.db_path) as con:
cur = con.cursor()
rows = cur.execute(
"SELECT P.Symbol, P.Date "
"FROM Prices P "
"INNER JOIN ( "
"SELECT Symbol, max(Date) as MaxDate "
"FROM Prices "
"GROUP BY Symbol"
") MX "
"ON P.Symbol = MX.Symbol "
"AND "
"P.Date = MX.MaxDate;")
self.symbols = dict(rows)
def build_database(self, db_path):
with sqlite3.connect(db_path) as con:
cur = con.cursor()
cur.execute("CREATE TABLE Prices("
"Symbol TEXT,"
"Date DATE,"
"Open FLOAT,"
"High FLOAT,"
"Low FLOAT,"
"Close FLOAT,"
"Volume INT,"
"Source TEXT"
");")
def get_web_data(self, symbol, src, start_date, end_date=None):
try:
stk_df = DataReader(symbol, src, start=start_date, end=end_date)
stk_df.rename(columns={'Adj. Close': 'Close'}, inplace=True)
stk_df.drop(['Adj Close'], axis=1, inplace=True)
stk_df['Source'] = src
stk_df['Symbol'] = symbol
with sqlite3.connect(self.db_path) as con:
stk_df.to_sql('Prices', con, if_exists='append')
return stk_df
except Exception as e:
raise Exception, '[+] get_web_data Error - {}'.format(e)
def get_Quandl_data(self, symbol, start_date, end_date=None):
try:
qd_df = qd.get('WIKI/{}'.format(symbol), start_date=start_date, end_date=end_date)
drop_cols = ['Open', 'High', 'Low', 'Close', 'Volume', 'Ex-Dividend', 'Split Ratio']
qd_df.drop(drop_cols, axis=1, inplace=True)
qd_df.rename(columns={'Adj. Open': 'Open',
'Adj. High': 'High',
'Adj. Low': 'Low',
'Adj. Close': 'Close',
'Adj. Volume': 'Volume'},
inplace=True)
qd_df['Source'] = 'quandl'
qd_df['Symbol'] = symbol
with sqlite3.connect(self.db_path) as con:
qd_df.to_sql('Prices', con, if_exists='append')
return qd_df
except Exception as e:
raise Exception, '[+] get_Quandl_data Error - {}'.format(e)
def get_prices(self, symbol, start_date):
try:
with sqlite3.connect(self.db_path) as con:
stk_df = pd.read_sql_query(
"SELECT * FROM Prices WHERE Symbol = ? AND Date > ?",
con=con, parse_dates=['Date'], params=(symbol, start_date,),
)
# Unnecessary, god willing
stk_df.drop_duplicates(['Date', 'Symbol'], inplace=True)
stk_df.set_index('Date', inplace=True)
stk_df.sort_index(inplace=True)
return stk_df
except Exception as e:
raise Exception, '[+] get_prices Error - {}'.format(e)
def get_DataFrame(self, symbol, start_date, verbose=False):
try:
# Rewind end date to last actual trading date
last_trade_date = date.today()
if last_trade_date in us_holidays:
last_trade_date -= timedelta(1)
last_day_of_week = last_trade_date.isoweekday()
if last_day_of_week == 6:
last_trade_date -= timedelta(1)
if last_day_of_week == 7:
last_trade_date -= timedelta(2)
if symbol in self.symbols.keys():
if verbose: print 'Symbol [{}] exists in database'.format(symbol)
end_db_date = str(self.symbols[symbol]).split()[0]
end_db_date = datetime.strptime(end_db_date, '%Y-%m-%d')
if end_db_date.date() < last_trade_date:
# We're missing some recent data
# Start with day after last db date
if verbose: print 'Updating [{}] symbol ending dates'.format(symbol)
update_date = end_db_date + timedelta(1)
try:
self.get_Quandl_data(symbol, update_date)
except Exception as e:
try:
self.get_web_data(symbol, 'yahoo', update_date)
except Exception as e:
try:
self.get_web_data(symbol, 'google', update_date)
except Exception as e:
raise Exception, 'Could not locate end data for [{}]\n{}'.format(e, traceback.format_exc())
# Update self.symbols dict
self.symbols[symbol] = last_trade_date
else:
if verbose: print 'Adding new symbol data [{}]'.format(symbol)
try:
self.get_Quandl_data(symbol, None)
except Exception as e:
try:
self.get_web_data(symbol, 'yahoo', None)
except Exception as e:
try:
self.get_web_data(symbol, 'google', None)
except Exception as e:
raise Exception, 'Could not locate new data for [{}]\n{}'.format(e, traceback.format_exc())
self.symbols[symbol] = last_trade_date
return self.get_prices(symbol, start_date)
except Exception as e:
if verbose:
raise Exception, '[+] get_DataFrame Error - {}\n{}'.format(e, traceback.format_exc())
else:
raise Exception, '[+] get_DataFrame Error - {}'.format(e)
if __name__ == '__main__':
# Testing
data = WebToDatabase()
chs = data.get_DataFrame('CHS', '2011-02-01')
rvsb = data.get_DataFrame('RVSB', '2014-01-01')
print chs.head()
print chs.tail()
print rvsb.head()
print rvsb.tail()
|
21,258 | 27c63bd9066ede69c63894f5b5f3569d2852b404 | import sqlite3
import json
from flask import Flask, g, request, send_from_directory
from flask.ext.cors import CORS
import os
import time
import numpy as np
from PIL import Image
from collections import defaultdict
import util
import database
import hulop
app = Flask(__name__)
CORS(app)
# example response: {location:{t: [], R: [[],[],[]]}}
@app.route("/localize", methods=['POST'])
def localize():
image = request.files['image']
estimate = hulop.localize_image(image, request.form['user'], request.form['map'])
image.stream.seek(0)
image.save(os.path.join('./images', image.filename))
if estimate:
return json.dumps({'location': estimate})
else:
return json.dumps({'error': 'could not localize'}), 400
@app.route("/nearby", methods=['POST'])
def nearby():
estimate = hulop.localize_image(
request.files['image'],
request.form['user'],
request.form['map']
)
if estimate:
loc = estimate['t']
results = []
radius = request.form['radius']
for h in database.query('select * from hotspots'):
h_loc = (h['x'],h['y'],h['z'])
if util.dist(loc[:2], h_loc[:2]) < radius:
direction = util.clockwise(estimate['t'], estimate['R'], h_loc)
results.append({'description': h['category'], 'direction': direction})
return json.dumps({'location':estimate, 'nearby':results})
else:
return json.dumps({'error': 'could not localize'}), 400
@app.route("/nearbyMessages", methods=['POST'])
def nearby_message():
estimate = hulop.localize_image(
request.files['image'],
request.form['user'],
request.form['map']
)
if estimate:
loc = estimate['t']
results = []
radius = request.form['radius']
#radius = 5
for h in database.query('select * from hotspot_messages'):
h_loc = (h['x'],h['y'],h['z'])
if util.dist(loc[:2], h_loc[:2]) < radius:
direction = util.clockwise(estimate['t'], estimate['R'], h_loc)
results.append({'message': h['message'], 'direction': direction})
return json.dumps({'location':estimate, 'nearby':results})
else:
return json.dumps({'error': 'could not localize'}), 400
@app.route("/createMessage", methods=["POST"])
def create_message():
estimate = hulop.localize_image(
request.files['image'],
request.form['user'],
request.form['map']
)
if estimate:
loc = estimate['t']
new_id = database.insert(
'hotspot_messages',
('message','x','y','z'),
(request.form['message'], loc[0], loc[1], loc[2])
)
hotspot = database.query('select * from hotspot_messages where id=?', [new_id], one=True)
return json.dumps(hotspot), 201
else:
return json.dumps({'error': 'could not localize'}), 400
@app.route("/hotspotLayout", methods=['POST', 'GET'])
def hotspot_loyout():
if request.method == "GET":
session = request.args['session']
#session = str(3846)
return send_from_directory('buttons', session)
elif request.method == "POST":
session = os.path.splitext(request.files['image'].filename)[0]
im = Image.open(request.files['image'])
width, height = im.size
w_scale, h_scale = util.screen_scale(width, height)
request.files['image'].seek(0)
buttons = []
points_db = database.query("select * from answer_to_3d_point")
answer_ids = [p['answer_id'] for p in points_db]
points_3d = [[p['x'],p['y'],p['z']] for p in points_db]
points_2d = hulop.project_3d_to_2d(
request.files['image'],
request.form['user'],
request.form['map'],
points_3d
)
if points_2d == None:
return json.dumps({'error': 'could not localize'}), 400
points_by_a = defaultdict(list)
for i,a in enumerate(answer_ids):
points_by_a[a].append(points_2d[i])
for k,v in points_by_a.iteritems():
if v:
p = np.array(v)
bbox = util.get_bounding(p)
a = database.query("select * from answers_label where id = ?", [k], one=True)
clipped = util.clip_bbox(bbox, width, height)
if clipped is not None:
clipped += 0.0001
buttons.append([a['category']] + [str(c) for c in np.nditer(clipped)])
buttons.insert(0,[str(w_scale*width), str(h_scale*height), str(len(buttons))])
with open(os.path.join('buttons', session), 'w') as outfile:
json.dump(buttons, outfile)
return "",201
@app.route("/hotspots", methods=['GET'])
def hotspots():
return json.dumps({'hotspots':database.query('select * from hotspots')})
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
21,259 | 236d51c97fda873e16bb3642206ad505f2f584e9 | import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import FarbancaItem
from itemloaders.processors import TakeFirst
class FarbancaSpider(scrapy.Spider):
    """Crawl the farbanca.it news listing and scrape each article.

    `parse` follows every article link on a listing page and then the
    pagination link; `parse_post` extracts one article into a
    FarbancaItem with title, description and date fields.
    """
    name = 'farbanca'
    start_urls = ['https://www.farbanca.it/media/news/']
    def parse(self, response):
        # Follow every article link found on the listing page.
        post_links = response.xpath('//div[@class="sn_listing_list_i_in"]/a/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)
        # Recurse into the next listing page, if present.
        # NOTE(review): the selector's class value contains a trailing
        # space ('next ') -- presumably matching the site's markup; verify.
        next_page = response.xpath('//a[@class="next "]/@href').getall()
        yield from response.follow_all(next_page, self.parse)
    def parse_post(self, response):
        # Title and description may span several text nodes: strip each
        # fragment and join them into one whitespace-normalized string.
        title = response.xpath('//h1/text()').getall()
        title = [p.strip() for p in title]
        title = ' '.join(title).strip()
        description = response.xpath('//div[@class="col-12 col-md-9 col-xl-7 offset-md-1 offset-xl-2 mt-50"]//text()[normalize-space()]').getall()
        description = [p.strip() for p in description]
        description = ' '.join(description).strip()
        date = response.xpath('//div[@class="sn_info_icon _s my-5 mr-15"]/text()[normalize-space()]').get()
        # TakeFirst collapses each field's value list to its first entry.
        item = ItemLoader(item=FarbancaItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
|
21,260 | 9c05ea223f37acf1c64dc975f4edaae9dd2b47f0 | """
len(), lower(), upper(), split(), strip()
"""
var = " Coding is great "
print(len(var))
print(var.strip())
print(var.lower())
print(var.upper())
print(var.split(" "))
ip_addr = "127.0.0.1"
print(ip_addr.split('.'))
l1 = [1,2,'3',"4.423",54,56,"2w3r"]
print(l1[2:])
|
21,261 | cb828b79f9b227ba13a0994621214d546c00dee1 | # -*- coding: utf-8 -*-
import logging
import os
from lxml import etree
from odoo.loglevels import ustr
from odoo.tools import misc, view_validation
from odoo.modules.module import get_resource_path
_logger = logging.getLogger(__name__)
_gantt_validator = None
@view_validation.validate('gantt')
def schema_gantt(arch, **kwargs):
    """Validate a gantt view ``arch`` against the module's RelaxNG schema.

    Returns True when the arch is valid; otherwise logs each schema
    error and returns False.  The compiled validator is lazily built
    once and cached in the module-level ``_gantt_validator``.
    """
    global _gantt_validator
    if _gantt_validator is None:
        with misc.file_open(os.path.join('web_gantt_view', 'views', 'gantt.rng')) as f:
            # gantt.rng needs to include common.rng from the `base/rng/` directory. The idea
            # here is to set the base url of lxml lib in order to load relative file from the
            # `base/rng` directory.
            base_url = os.path.join(get_resource_path('base', 'rng'), '')
            _gantt_validator = etree.RelaxNG(etree.parse(f, base_url=base_url))
    if _gantt_validator.validate(arch):
        return True
    for error in _gantt_validator.error_log:
        _logger.error(ustr(error))
    return False
|
21,262 | afb10ae29cc124d58c2e2d639b6472b2fdb4b4fb | Q1 = '''SELECT `Actor`.`id`,fname,lname,gender FROM Actor INNER JOIN Cast on `Cast`.`pid` = `Actor`.`id` INNER JOIN Movie on `Movie`.`id` = `Cast`.`mid` WHERE name LIKE 'Annie%';'''
Q2 = '''SELECT `Movie`.`id`, name, rank, year FROM Movie INNER JOIN MovieDirector on `MovieDirector`.`mid` = `Movie`.`id` INNER JOIN Director on `Director`.`id` = `MovieDirector`.`did` WHERE fname = "Biff" AND lname = "Malibu" AND year IN (1999, 1994, 2003) ORDER BY rank DESC, year ASC;'''
Q3 = '''SELECT year, COUNT(id) AS no_of_movies FROM Movie GROUP BY year HAVING AVG(rank) > (SELECT AVG(rank) FROM Movie) ORDER BY year ASC;'''
Q4 = '''SELECT id, name, year, rank FROM Movie WHERE year = 2001 AND rank < (SELECT AVG(rank) FROM Movie) ORDER BY rank DESC LIMIT 10;'''
Q5 = '''SELECT m.id, (SELECT COUNT(gender) FROM Actor INNER JOIN Cast on `Cast`.`pid` = `Actor`.`id` WHERE m.id = `Cast`.`mid` AND gender = 'F') AS no_of_female_actors, (SELECT COUNT(gender) FROM Actor INNER JOIN Cast on `Cast`.`pid` = `Actor`.`id` WHERE m.id = `Cast`.`mid` AND gender = 'M') AS no_of_male_actors FROM Movie AS m ORDER BY m.id ASC LIMIT 100;'''
Q6 = '''SELECT DISTINCT(pid) FROM Cast INNER JOIN Movie on `Movie`.`id` = `Cast`.`mid` GROUP BY mid,pid HAVING COUNT(DISTINCT(role)) > 1 ORDER BY pid ASC LIMIT 100;'''
Q7 = '''SELECT DISTINCT(fname), COUNT(fname) AS count FROM Director GROUP BY fname HAVING count > 1;'''
Q8 = '''SELECT `D`.`id`, fname, lname FROM Director AS D WHERE EXISTS (SELECT did FROM MovieDirector INNER JOIN Cast on `Cast`.`mid` = `MovieDirector`.`mid` WHERE `D`.`id` = `MovieDirector`.`did` GROUP BY `MovieDirector`.`mid` HAVING COUNT(DISTINCT pid) >= 100) AND NOT EXISTS (SELECT did FROM MovieDirector INNER JOIN Cast on `Cast`.`mid` = `MovieDirector`.`mid` WHERE `D`.`id` = `MovieDirector`.`did` GROUP BY `MovieDirector`.`mid` HAVING COUNT(DISTINCT pid) < 100);'''
#Q5 = '''SELECT m.id, (SELECT COUNT(gender) FROM Actor INNER JOIN Cast on `Cast`.`pid` = `Actor`.`id` WHERE m.id = `Cast`.`mid` AND gender = 'F') AS no_of_female_actors, (SELECT COUNT(gender) FROM Actor INNER JOIN Cast on `Cast`.`pid` = `Actor`.`id` WHERE m.id = `Cast`.`mid` AND gender = 'M') AS no_of_male_actors FROM Movie AS m ORDER BY m.id ASC LIMIT 100;''' |
21,263 | 3c291e923b113895a1648fd39da6ef6939ddf324 | import openpyxl, os, sys, time, shutil
from FunctionLibrery.globalVariables import GlobalVariables as globVal
from TestScripts import *
dispatcher = {'TC_001_Login':TC_001_Login.TC_001_Login,'TC_002_Signup':TC_002_Signup.TC_002_Signup,
'TC_004_WebTable':TC_004_WebTable.TC_004_WebTable,
'TC_MC_001_LoginVerification':TC_MC_001_LoginVerification.TC_MC_001_LoginVerification}
def makeBatchResultDirectory():
    """Create a timestamped batch-results directory.

    Stores the path in globVal.batchResultPath; re-raises any creation
    failure after printing the offending path.
    """
    globVal.batchResultPath = "../Results/BatchResults/BatchResults_" + str(time.strftime("%Y%m%d-%H%M%S"))
    try:
        os.makedirs(globVal.batchResultPath)
    except Exception as e:
        # Bug fix: the failure message previously printed
        # globVal.testResultFolderPath, not the path that failed.
        print("Creation of the directory %s failed" % globVal.batchResultPath)
        raise Exception(e)
def copyFolder(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
if str(item).lower() == str(globVal.testResultFolderName).lower():
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
break
#else:
#shutil.copy2(s, d)
def getScriptNamesToRunFromDataSheet():
try:
wb = openpyxl.load_workbook(filename=globVal.batchFlow_ExcelPath, read_only=True)
sh = wb[globVal.batchFlow_TestCases_SheetName]
scriptNamesToRun = []
maxRow = sh.max_row
maxColumn = sh.max_column
scrColumNum = 0
runStatusColumNum = 0
for col in range(1, maxColumn + 1):
if str(sh.cell(1, col).value).lower() == 'scriptname':
scrColumNum = col
if scrColumNum != 0 and runStatusColumNum != 0:
break
elif str(sh.cell(1, col).value).lower() == 'runstatus':
runStatusColumNum = col
if scrColumNum != 0 and runStatusColumNum != 0:
break
else:
if scrColumNum == 0 and runStatusColumNum == 0:
print('Both "ScriptName" and "RunStatus" columns are not present under sheet('+globVal.batchFlow_TestCases_SheetName+') in "'+str(os.path.abspath(globVal.batchFlow_ExcelPath))+'"')
sys.exit(1)
elif scrColumNum == 0 :
print('"ScriptName" column is not present under sheet('+globVal.batchFlow_TestCases_SheetName+') in "'+str(os.path.abspath(globVal.batchFlow_ExcelPath))+'"')
sys.exit(1)
elif runStatusColumNum == 0:
print('"RunStatus" column is not present under sheet(' + globVal.batchFlow_TestCases_SheetName + ') in "' + str(os.path.abspath(globVal.batchFlow_ExcelPath)) + '"')
sys.exit(1)
for r in range(2, maxRow +1):
if str(sh.cell(r,runStatusColumNum).value).lower() == 'yes':
scriptName = str(sh.cell(r,scrColumNum).value)
scriptNamesToRun.append([r,scriptName])
wb._archive.close()
return scriptNamesToRun
except Exception as e:
print('Got Exception in "getScriptNamesToRunFromDataSheet" function. Exception is :' + str(e))
raise Exception(e)
def writeResultToDataSheet(filePath, scriptRowNum, testResultStatus):
try:
wb = openpyxl.load_workbook(filename=filePath)
sh = wb[globVal.batchFlow_TestCases_SheetName]
maxRow = sh.max_row
maxColumn = sh.max_column
resStatusColumNum = 0
resLinkColumNum = 0
for col in range(1, maxColumn + 1):
val = str(sh.cell(1, col).value).lower()
if val == 'resultstatus':
resStatusColumNum = col
if resStatusColumNum != 0 and resLinkColumNum != 0:
break
elif val == 'resultlink':
resLinkColumNum = col
if resStatusColumNum != 0 and resLinkColumNum != 0:
break
else:
if resStatusColumNum == 0 and resLinkColumNum == 0:
print('Both "testResultStatus" and "ResultLink" columns are not present under sheet('+globVal.batchFlow_TestCases_SheetName+') in "'+str(os.path.abspath(globVal.batchFlow_ExcelPath))+'"')
sys.exit(1)
elif resStatusColumNum == 0 :
print('"testResultStatus" column is not present under sheet('+globVal.batchFlow_TestCases_SheetName+') in "'+str(os.path.abspath(globVal.batchFlow_ExcelPath))+'"')
sys.exit(1)
elif resLinkColumNum == 0:
print('"ResultLink" column is not present under sheet(' + globVal.batchFlow_TestCases_SheetName + ') in "' + str(os.path.abspath(globVal.batchFlow_ExcelPath)) + '"')
sys.exit(1)
sh.cell(int(scriptRowNum),resStatusColumNum).value = testResultStatus
sh.cell(int(scriptRowNum),resLinkColumNum).value = '=HYPERLINK("{}", "{}")'.format(os.path.abspath(str(globVal.testResultFolderPath)) + "/Results.html", "Result.html")
wb.save(os.path.abspath(filePath))
except Exception as excp:
print('Got Excepiton in "writeResultToDataSheet" function. Exception is :' +str(excp))
raise Exception(excp)
def removeAllResultStatusesFromDataSheet(filePath):
try:
wb = openpyxl.load_workbook(filename=filePath)
sh = wb[globVal.batchFlow_TestCases_SheetName]
maxRow = sh.max_row
maxColumn = sh.max_column
runStatusColumNum = 0
resStatusColumNum = 0
resLinkColumNum = 0
for col in range(1, maxColumn + 1):
if str(sh.cell(1, col).value).lower() == 'runstatus':
runStatusColumNum = col
if runStatusColumNum != 0 and resStatusColumNum != 0 and resLinkColumNum != 0:
break
elif str(sh.cell(1, col).value).lower() == 'resultstatus':
resStatusColumNum = col
if runStatusColumNum != 0 and resStatusColumNum != 0 and resLinkColumNum != 0:
break
elif str(sh.cell(1, col).value).lower() == 'resultlink':
resLinkColumNum = col
if runStatusColumNum != 0 and resStatusColumNum != 0 and resLinkColumNum != 0:
break
else:
if runStatusColumNum == 0 and resStatusColumNum == 0 and resLinkColumNum == 0:
print("While removing the result status from batchflow sheet",'Both "RunStatus", "ResultStatus" and "ResultLink" columns are not present under sheet('+globVal.batchFlow_TestCases_SheetName+') in "'+str(os.path.abspath(globVal.batchFlow_ExcelPath))+'"')
elif runStatusColumNum == 0:
print("While removing the result status from batchflow sheet",'"RunStatus" column is not present under sheet(' + globVal.batchFlow_TestCases_SheetName + ') in "' + str(os.path.abspath(globVal.batchFlow_ExcelPath)) + '"')
elif resStatusColumNum == 0:
print("While removing the result status from batchflow sheet",'"ResultStatus" column is not present under sheet(' + globVal.batchFlow_TestCases_SheetName + ') in "' + str(os.path.abspath(globVal.batchFlow_ExcelPath)) + '"')
elif resLinkColumNum == 0:
print("While removing the result status from batchflow sheet",'"ResultLink" column is not present under sheet(' + globVal.batchFlow_TestCases_SheetName + ') in "' + str(os.path.abspath(globVal.batchFlow_ExcelPath)) + '"')
for r in range(2, maxRow + 1):
if str(sh.cell(r, runStatusColumNum).value).lower() == 'yes':
sh.cell(r, resStatusColumNum).value = ''
sh.cell(r, resLinkColumNum).value = ''
wb.save(os.path.abspath(filePath))
except Exception as e:
print('Got Exception in funtion(removeAllResultStatusesFromDataSheet)', 'Exception is : '+str(e))
def executeBatch():
try:
scriptNamesToRun = getScriptNamesToRunFromDataSheet()
makeBatchResultDirectory()
globVal.batchResultExcelPath = shutil.copy(str(globVal.batchFlow_ExcelPath), str(os.path.abspath(globVal.batchResultPath))+'/BatchResults.xlsx')
totalScriptsSelected = len(scriptNamesToRun)
totalScriptsExecuted = 0
totalScriptsPassed = 0
totalScriptsFailed = 0
totalScriptsWarning = 0
totalScriptsStopped = 0
totalScriptsWithUnknowStatus = 0
print()
print('****************************************************************')
print('Batch Execution Started, Find below batch execution details:')
print('Total Number Of Scripts Selected = '+ str(totalScriptsSelected))
print('Names Of Selected Scripts = ', end = '')
for i in range(len(scriptNamesToRun)):
if i != len(scriptNamesToRun)-1:
print(str(scriptNamesToRun[i][1]), end=', ')
else:
print(str(scriptNamesToRun[i][1]))
print('****************************************************************')
for a in scriptNamesToRun:
scriptRowNum = int(a[0])
scriptFunction = dispatcher[a[1]]
print('----------------------------------------------')
print('Started Executing Script: "' + str(a[1])+'",', 'Started Time: "'+ str(time.strftime("%Y%m%d-%H%M%S"))+'"')
scriptFunction()
totalScriptsExecuted += 1
ResultStatus=''
if globVal.totalFailed >0:
ResultStatus = 'Failed'
totalScriptsFailed += 1
elif globVal.totalWarning >0:
ResultStatus = 'Warning'
totalScriptsWarning += 1
elif str(globVal.scriptCompletelyExecuted).lower() != 'yes':
ResultStatus = 'Stopped'
totalScriptsStopped += 1
elif str(globVal.scriptCompletelyExecuted).lower() == 'yes':
ResultStatus = 'Passed'
totalScriptsPassed += 1
else:
ResultStatus = 'Something Went Wrong'
totalScriptsWithUnknowStatus += 1
print('Execution Completed. Test Status: "' + str(ResultStatus)+'",', 'Test EndTime: "'+ str(time.strftime("%Y%m%d-%H%M%S"))+'"')
print('----------------------------------------------')
writeResultToDataSheet(globVal.batchFlow_ExcelPath,scriptRowNum,ResultStatus)
writeResultToDataSheet(globVal.batchResultExcelPath,scriptRowNum,ResultStatus)
sourceFolder = "../Results/TestResults/"
destinationFolder = str(os.path.abspath(str(globVal.batchResultPath)))
copyFolder(sourceFolder, destinationFolder)
globVal.resetGlobalVals()
removeAllResultStatusesFromDataSheet(globVal.batchFlow_ExcelPath)
print('****************************************************************')
print("Batch Execution Is Completed. Below are the batch details:")
print("Total Scripts Selected: " + str(totalScriptsSelected))
print("Total Scripts Executed: " + str(totalScriptsExecuted))
print("Total Scripts Passed: "+ str(totalScriptsPassed))
print("Total Scripts Failed: "+ str(totalScriptsFailed))
print("Total Scripts Warnings: " + str(totalScriptsWarning))
print("Total Scripts Incompletely Executed: " + str(totalScriptsStopped))
if totalScriptsWithUnknowStatus > 0:
print("Total Scripts With Unknown Status: '" +str(totalScriptsWithUnknowStatus)+"'")
print('****************************************************************')
except Exception as e:
print('Got exception. Error is : "' + str(e) + '"')
raise Exception('Got exception. Error is : "' + str(e) + '"')
if __name__ == "__main__":
executeBatch()
|
21,264 | d70e8e575e6c42e965518f29ff8933a74864f5b9 | from tensorflow.python.ops.rnn_cell import RNNCell
import tensorflow as tf
class mygru( RNNCell ):
def __init__( self, state_dim):
self.state_dim = state_dim
self.scope = None
@property
def state_size(self):
return self.state_dim
@property
def output_size(self):
return self.state_dim
def __call__( self, inputs, state):
input_shape = inputs.get_shape().as_list()
with tf.variable_scope('gru') as scope:
if self.scope == None:
wx_shape = [input_shape[1], self.state_dim]
wh_shape = [self.state_dim, self.state_dim]
b_shape = [self.state_dim]
self.Wxr = tf.get_variable('wxr', shape = wx_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.Wxz = tf.get_variable('wxz', shape = wx_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.Wxh = tf.get_variable('wxh', shape = wx_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.Whr = tf.get_variable('whr', shape = wh_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.Whz = tf.get_variable('whz', shape = wh_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.Whh = tf.get_variable('whh', shape = wh_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.br = tf.get_variable('br', shape = b_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.bz = tf.get_variable('bz', shape = b_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.bh = tf.get_variable('bh', shape = b_shape, initializer = tf.contrib.layers.variance_scaling_initializer())
self.scope = 'gru'
else:
scope.reuse_variables()
self.Wxr = tf.get_variable('wxr')
self.Wxz = tf.get_variable('wxz')
self.Wxh = tf.get_variable('wxh')
self.Whr = tf.get_variable('whr')
self.Whz = tf.get_variable('whz')
self.Whh = tf.get_variable('whh')
self.br = tf.get_variable('br')
self.bz = tf.get_variable('bz')
self.bh = tf.get_variable('bh')
r = tf.nn.sigmoid(tf.matmul(inputs, self.Wxr) + tf.matmul(state, self.Whr) + self.br)
z = tf.nn.sigmoid(tf.matmul(inputs, self.Wxz) + tf.matmul(state, self.Whz) + self.bz)
htild = tf.nn.tanh(tf.matmul(inputs, self.Wxh) + tf.matmul(tf.multiply(r,state), self.Whh) + self.bh)
h = tf.multiply(z,state) + tf.multiply((1-z), htild)
return h,h
|
21,265 | b521762138f0d4ce0e3bbbeeb175a8b3536d0b59 | """
select {} on columns,
[Geschlecht].[Geschlecht].[Level 01].ALLMEMBERS on rows
from [fused]
"""
result={'Axes': {'Axis': [{'Tuples': u'', '_name': u'Axis0'},
{'Tuples': {'Tuple': [{'Member': {'Caption': u'weiblich',
'DisplayInfo': u'0',
'LName': u'[Geschlecht].[Geschlecht].[Level 01]',
'LNum': u'1',
'UName': u'[Geschlecht].[Geschlecht].[weiblich]',
'_Hierarchy': u'[Geschlecht].[Geschlecht]'}},
{'Member': {'Caption': u'm\xe4nnlich',
'DisplayInfo': u'131072',
'LName': u'[Geschlecht].[Geschlecht].[Level 01]',
'LNum': u'1',
'UName': u'[Geschlecht].[Geschlecht].[m\xe4nnlich]',
'_Hierarchy': u'[Geschlecht].[Geschlecht]'}}]},
'_name': u'Axis1'},
{'Tuples': {'Tuple': {'Member': [{'Caption': u'Mitarbeiteranzahl',
'DisplayInfo': u'0',
'LName': u'[Measures].[MeasuresLevel]',
'LNum': u'0',
'UName': u'[Measures].[Mitarbeiteranzahl]',
'_Hierarchy': u'[Measures]'},
{'Caption': u'Alle',
'DisplayInfo': u'10',
'LName': u'[Zeit].[Monate].[(All)]',
'LNum': u'0',
'UName': u'[Zeit].[Monate].[Alle]',
'_Hierarchy': u'[Zeit].[Monate]'},
{'Caption': u'Zeit Gesamt',
'DisplayInfo': u'10',
'LName': u'[Zeit].[Zeit].[(All)]',
'LNum': u'0',
'UName': u'[Zeit].[Zeit].[Zeit Gesamt]',
'_Hierarchy': u'[Zeit].[Zeit]'},
{'Caption': u'Alle Vertragsbefristungen',
'DisplayInfo': u'2',
'LName': u'[Vertragsbefristung].[Vertragsbefristung].[(All)]',
'LNum': u'0',
'UName': u'[Vertragsbefristung].[Vertragsbefristung].[Alle Vertragsbefristungen]',
'_Hierarchy': u'[Vertragsbefristung].[Vertragsbefristung]'},
{'Caption': u'Alle Mitarbeitergruppen',
'DisplayInfo': u'5',
'LName': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[(All)]',
'LNum': u'0',
'UName': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[Alle Mitarbeitergruppen]',
'_Hierarchy': u'[Mitarbeitergruppe].[Mitarbeitergruppe]'},
{'Caption': u'Alle',
'DisplayInfo': u'7',
'LName': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[(All)]',
'LNum': u'0',
'UName': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[Alle]',
'_Hierarchy': u'[Funktionsbezeichnung].[Funktionsbezeichnung]'},
{'Caption': u'jedes Alter',
'DisplayInfo': u'9',
'LName': u'[Alter].[Alter].[(All)]',
'LNum': u'0',
'UName': u'[Alter].[Alter].[jedes Alter]',
'_Hierarchy': u'[Alter].[Alter]'},
{'Caption': u'Alle Vertragsarten',
'DisplayInfo': u'46',
'LName': u'[Vertragsart].[Vertragsart].[(All)]',
'LNum': u'0',
'UName': u'[Vertragsart].[Vertragsart].[Alle Vertragsarten]',
'_Hierarchy': u'[Vertragsart].[Vertragsart]'},
{'Caption': u'Alle Betriebszugeh\xf6rigkeiten',
'DisplayInfo': u'10',
'LName': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[(All)]',
'LNum': u'0',
'UName': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[Alle Betriebszugeh\xf6rigkeiten]',
'_Hierarchy': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit]'},
{'Caption': u'Alle Standorte',
'DisplayInfo': u'3',
'LName': u'[Standort].[Standort].[(All)]',
'LNum': u'0',
'UName': u'[Standort].[Standort].[Alle Standorte]',
'_Hierarchy': u'[Standort].[Standort]'},
{'Caption': u'AP',
'DisplayInfo': u'0',
'LName': u'[Zeitbetrachtung].[Zeitbetrachtung].[Zeitbetrachtung]',
'LNum': u'0',
'UName': u'[Zeitbetrachtung].[Zeitbetrachtung].[AP]',
'_Hierarchy': u'[Zeitbetrachtung].[Zeitbetrachtung]'},
{'Caption': u'Kieback&Peter GmbH & Co.KG',
'DisplayInfo': u'6',
'LName': u'[Kostenstelle].[Kostenstelle].[Level 01]',
'LNum': u'0',
'UName': u'[Kostenstelle].[Kostenstelle].[Kieback&Peter GmbH & Co.KG]',
'_Hierarchy': u'[Kostenstelle].[Kostenstelle]'},
{'Caption': u'All',
'DisplayInfo': u'1883',
'LName': u'[Kostenstelle].[PNR].[(All)]',
'LNum': u'0',
'UName': u'[Kostenstelle].[PNR].[All]',
'_Hierarchy': u'[Kostenstelle].[PNR]'},
{'Caption': u'All',
'DisplayInfo': u'385',
'LName': u'[Kostenstelle].[Eintritt].[(All)]',
'LNum': u'0',
'UName': u'[Kostenstelle].[Eintritt].[All]',
'_Hierarchy': u'[Kostenstelle].[Eintritt]'},
{'Caption': u'All',
'DisplayInfo': u'385',
'LName': u'[Kostenstelle].[Eintritt2].[(All)]',
'LNum': u'0',
'UName': u'[Kostenstelle].[Eintritt2].[All]',
'_Hierarchy': u'[Kostenstelle].[Eintritt2]'},
{'Caption': u'KST Gesamt',
'DisplayInfo': u'720',
'LName': u'[KST].[KST].[(All)]',
'LNum': u'0',
'UName': u'[KST].[KST].[KST Gesamt]',
'_Hierarchy': u'[KST].[KST]'},
{'Caption': u'All',
'DisplayInfo': u'1883',
'LName': u'[Person].[Person].[(All)]',
'LNum': u'0',
'UName': u'[Person].[Person].[All]',
'_Hierarchy': u'[Person].[Person]'},
{'Caption': u'Alle Personen',
'DisplayInfo': u'178',
'LName': u'[Person].[Person Austritt].[(All)]',
'LNum': u'0',
'UName': u'[Person].[Person Austritt].[Alle Personen]',
'_Hierarchy': u'[Person].[Person Austritt]'},
{'Caption': u'Alle Personen',
'DisplayInfo': u'3',
'LName': u'[Person].[Person aktiv].[(All)]',
'LNum': u'0',
'UName': u'[Person].[Person aktiv].[Alle Personen]',
'_Hierarchy': u'[Person].[Person aktiv]'},
{'Caption': u'Alle Kostenarten',
'DisplayInfo': u'5',
'LName': u'[Kostenart].[Kostenart].[(All)]',
'LNum': u'0',
'UName': u'[Kostenart].[Kostenart].[Alle Kostenarten]',
'_Hierarchy': u'[Kostenart].[Kostenart]'},
{'Caption': u'Alle Kostenarten',
'DisplayInfo': u'40',
'LName': u'[Kostenart].[Level 02 Bezeichnung].[(All)]',
'LNum': u'0',
'UName': u'[Kostenart].[Level 02 Bezeichnung].[Alle Kostenarten]',
'_Hierarchy': u'[Kostenart].[Level 02 Bezeichnung]'},
{'Caption': u'All',
'DisplayInfo': u'2',
'LName': u'[UserAccount].[UserAccount].[(All)]',
'LNum': u'0',
'UName': u'[UserAccount].[UserAccount].[All]',
'_Hierarchy': u'[UserAccount].[UserAccount]'},
{'Caption': u'All',
'DisplayInfo': u'1',
'LName': u'[UserAccount].[Level 01 username].[(All)]',
'LNum': u'0',
'UName': u'[UserAccount].[Level 01 username].[All]',
'_Hierarchy': u'[UserAccount].[Level 01 username]'}]}},
'_name': u'SlicerAxis'}]},
'CellData': u'',
'OlapInfo': {'AxesInfo': {'AxisInfo': [{'_name': u'Axis0'},
{'HierarchyInfo': {'Caption': {'_name': u'[Geschlecht].[Geschlecht].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Geschlecht].[Geschlecht].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Geschlecht].[Geschlecht].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Geschlecht].[Geschlecht].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Geschlecht].[Geschlecht].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Geschlecht].[Geschlecht]'},
'_name': u'Axis1'},
{'HierarchyInfo': [{'Caption': {'_name': u'[Measures].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Measures].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Measures].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Measures].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Measures].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Measures]'},
{'Caption': {'_name': u'[Zeit].[Monate].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Zeit].[Monate].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Zeit].[Monate].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Zeit].[Monate].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Zeit].[Monate].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Zeit].[Monate]'},
{'Caption': {'_name': u'[Zeit].[Zeit].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Zeit].[Zeit].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Zeit].[Zeit].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Zeit].[Zeit].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Zeit].[Zeit].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Zeit].[Zeit]'},
{'Caption': {'_name': u'[Vertragsbefristung].[Vertragsbefristung].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Vertragsbefristung].[Vertragsbefristung].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Vertragsbefristung].[Vertragsbefristung].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Vertragsbefristung].[Vertragsbefristung].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Vertragsbefristung].[Vertragsbefristung].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Vertragsbefristung].[Vertragsbefristung]'},
{'Caption': {'_name': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Mitarbeitergruppe].[Mitarbeitergruppe].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Mitarbeitergruppe].[Mitarbeitergruppe]'},
{'Caption': {'_name': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Funktionsbezeichnung].[Funktionsbezeichnung].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Funktionsbezeichnung].[Funktionsbezeichnung]'},
{'Caption': {'_name': u'[Alter].[Alter].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Alter].[Alter].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Alter].[Alter].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Alter].[Alter].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Alter].[Alter].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Alter].[Alter]'},
{'Caption': {'_name': u'[Vertragsart].[Vertragsart].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Vertragsart].[Vertragsart].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Vertragsart].[Vertragsart].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Vertragsart].[Vertragsart].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Vertragsart].[Vertragsart].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Vertragsart].[Vertragsart]'},
{'Caption': {'_name': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Betriebszugeh\xf6rigkeit].[Betriebszugeh\xf6rigkeit]'},
{'Caption': {'_name': u'[Standort].[Standort].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Standort].[Standort].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Standort].[Standort].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Standort].[Standort].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Standort].[Standort].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Standort].[Standort]'},
{'Caption': {'_name': u'[Zeitbetrachtung].[Zeitbetrachtung].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Zeitbetrachtung].[Zeitbetrachtung].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Zeitbetrachtung].[Zeitbetrachtung].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Zeitbetrachtung].[Zeitbetrachtung].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Zeitbetrachtung].[Zeitbetrachtung].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Zeitbetrachtung].[Zeitbetrachtung]'},
{'Caption': {'_name': u'[Kostenstelle].[Kostenstelle].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Kostenstelle].[Kostenstelle].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Kostenstelle].[Kostenstelle].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Kostenstelle].[Kostenstelle].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Kostenstelle].[Kostenstelle].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Kostenstelle].[Kostenstelle]'},
{'Caption': {'_name': u'[Kostenstelle].[PNR].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Kostenstelle].[PNR].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Kostenstelle].[PNR].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Kostenstelle].[PNR].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Kostenstelle].[PNR].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Kostenstelle].[PNR]'},
{'Caption': {'_name': u'[Kostenstelle].[Eintritt].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Kostenstelle].[Eintritt].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Kostenstelle].[Eintritt].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Kostenstelle].[Eintritt].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Kostenstelle].[Eintritt].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Kostenstelle].[Eintritt]'},
{'Caption': {'_name': u'[Kostenstelle].[Eintritt2].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Kostenstelle].[Eintritt2].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Kostenstelle].[Eintritt2].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Kostenstelle].[Eintritt2].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Kostenstelle].[Eintritt2].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Kostenstelle].[Eintritt2]'},
{'Caption': {'_name': u'[KST].[KST].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[KST].[KST].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[KST].[KST].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[KST].[KST].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[KST].[KST].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[KST].[KST]'},
{'Caption': {'_name': u'[Person].[Person].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Person].[Person].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Person].[Person].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Person].[Person].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Person].[Person].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Person].[Person]'},
{'Caption': {'_name': u'[Person].[Person Austritt].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Person].[Person Austritt].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Person].[Person Austritt].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Person].[Person Austritt].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Person].[Person Austritt].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Person].[Person Austritt]'},
{'Caption': {'_name': u'[Person].[Person aktiv].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Person].[Person aktiv].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Person].[Person aktiv].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Person].[Person aktiv].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Person].[Person aktiv].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Person].[Person aktiv]'},
{'Caption': {'_name': u'[Kostenart].[Kostenart].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Kostenart].[Kostenart].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Kostenart].[Kostenart].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Kostenart].[Kostenart].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Kostenart].[Kostenart].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Kostenart].[Kostenart]'},
{'Caption': {'_name': u'[Kostenart].[Level 02 Bezeichnung].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[Kostenart].[Level 02 Bezeichnung].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[Kostenart].[Level 02 Bezeichnung].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[Kostenart].[Level 02 Bezeichnung].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[Kostenart].[Level 02 Bezeichnung].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[Kostenart].[Level 02 Bezeichnung]'},
{'Caption': {'_name': u'[UserAccount].[UserAccount].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[UserAccount].[UserAccount].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[UserAccount].[UserAccount].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[UserAccount].[UserAccount].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[UserAccount].[UserAccount].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[UserAccount].[UserAccount]'},
{'Caption': {'_name': u'[UserAccount].[Level 01 username].[MEMBER_CAPTION]',
'_type': u'xsd:string'},
'DisplayInfo': {'_name': u'[UserAccount].[Level 01 username].[DISPLAY_INFO]',
'_type': u'xsd:unsignedInt'},
'LName': {'_name': u'[UserAccount].[Level 01 username].[LEVEL_UNIQUE_NAME]',
'_type': u'xsd:string'},
'LNum': {'_name': u'[UserAccount].[Level 01 username].[LEVEL_NUMBER]',
'_type': u'xsd:int'},
'UName': {'_name': u'[UserAccount].[Level 01 username].[MEMBER_UNIQUE_NAME]',
'_type': u'xsd:string'},
'_name': u'[UserAccount].[Level 01 username]'}],
'_name': u'SlicerAxis'}]},
'CellInfo': {'CellOrdinal': {'_name': u'CELL_ORDINAL',
'_type': u'xsd:unsignedInt'},
'FmtValue': {'_name': u'FORMATTED_VALUE',
'_type': u'xsd:string'},
'Value': {'_name': u'VALUE'}},
'CubeInfo': {'Cube': {'CubeName': u'fused',
'LastDataUpdate': u'2012-11-16T07:47:31.283333',
'LastSchemaUpdate': u'2012-11-16T07:47:28.106667'}}},
'schema': {'_elementFormDefault': u'qualified',
'_targetNamespace': u'urn:schemas-microsoft-com:xml-analysis:mddataset',
'complexType': [{'_name': u'MemberType',
'attribute': {'_name': u'Hierarchy',
'_type': u'xs:string'},
'sequence': {'any': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_namespace': u'##targetNamespace',
'_processContents': u'skip'}}},
{'_name': u'PropType',
'attribute': [{'_name': u'name',
'_type': u'xs:string',
'_use': u'required'},
{'_name': u'type',
'_type': u'xs:QName'}],
'sequence': {'element': {'_minOccurs': u'0',
'_name': u'Default'}}},
{'_name': u'TupleType',
'sequence': {'element': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_name': u'Member',
'_type': u'MemberType'}}},
{'_name': u'MembersType',
'attribute': {'_name': u'Hierarchy',
'_type': u'xs:string',
'_use': u'required'},
'sequence': {'element': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_name': u'Member',
'_type': u'MemberType'}}},
{'_name': u'TuplesType',
'sequence': {'element': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_name': u'Tuple',
'_type': u'TupleType'}}},
{'_name': u'SetListType',
'attribute': {'_name': u'Size',
'_type': u'xs:unsignedInt'},
'group': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_ref': u'SetType'}},
{'_name': u'OlapInfo',
'sequence': {'element': [{'_name': u'CubeInfo',
'complexType': {'sequence': {'element': {'_maxOccurs': u'unbounded',
'_name': u'Cube',
'complexType': {'sequence': {'element': [{'_name': u'CubeName',
'_type': u'xs:string'},
{'_minOccurs': u'0',
'_name': u'LastDataUpdate',
'_type': u'xs:dateTime'},
{'_minOccurs': u'0',
'_name': u'LastSchemaUpdate',
'_type': u'xs:dateTime'}]}}}}}},
{'_name': u'AxesInfo',
'complexType': {'sequence': {'element': {'_maxOccurs': u'unbounded',
'_name': u'AxisInfo',
'complexType': {'attribute': {'_name': u'name',
'_type': u'xs:string'},
'sequence': {'element': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_name': u'HierarchyInfo',
'complexType': {'attribute': {'_name': u'name',
'_type': u'xs:string',
'_use': u'required'},
'sequence': {'any': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_namespace': u'##targetNamespace',
'_processContents': u'skip'}}}}}}}}}},
{'_name': u'CellInfo',
'complexType': {'choice': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'any': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_namespace': u'##targetNamespace',
'_processContents': u'skip'}}}}]}},
{'_name': u'Axes',
'sequence': {'element': {'_maxOccurs': u'unbounded',
'_name': u'Axis',
'complexType': {'attribute': {'_name': u'name',
'_type': u'xs:string'},
'group': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_ref': u'SetType'}}}}},
{'_name': u'CellData',
'sequence': {'element': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_name': u'Cell',
'complexType': {'attribute': {'_name': u'CellOrdinal',
'_type': u'xs:unsignedInt',
'_use': u'required'},
'sequence': {'any': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_namespace': u'##targetNamespace',
'_processContents': u'skip'}}}}}}],
'element': {'_name': u'root',
'complexType': {'sequence': {'any': {'_minOccurs': u'0',
'_namespace': u'http://www.w3.org/2001/XMLSchema',
'_processContents': u'strict'},
'element': [{'_minOccurs': u'0',
'_name': u'OlapInfo',
'_type': u'OlapInfo'},
{'_minOccurs': u'0',
'_name': u'Axes',
'_type': u'Axes'},
{'_minOccurs': u'0',
'_name': u'CellData',
'_type': u'CellData'}]}}},
'group': {'_name': u'SetType',
'choice': {'element': [{'_name': u'Members',
'_type': u'MembersType'},
{'_name': u'Tuples',
'_type': u'TuplesType'},
{'_name': u'CrossProduct',
'_type': u'SetListType'},
{'_ref': u'msxmla:NormTupleSet'},
{'_name': u'Union',
'complexType': {'group': {'_maxOccurs': u'unbounded',
'_minOccurs': u'0',
'_ref': u'SetType'}}}]}},
'import': {'_namespace': u'http://schemas.microsoft.com/analysisservices/2003/xmla'}}}
|
21,266 | deb2de3089c73ea1ae25be45818c99eee8168822 | from collections import Counter
from typing import Iterator, List, Tuple, Counter as CounterType, Optional, Dict, Set
from ranked_vote.analysis.pairwise_stat import PairwiseStat
from ranked_vote.ballot import Ballot, Candidate
class PreferenceMatrix:
    """Head-to-head preference tallies between candidates.

    For every ordered pair (a, b) counts how many ballots rank a above b.
    Candidates omitted from a ballot are treated as ranked below every
    candidate that does appear on it.
    """
    _candidates: List[Candidate]
    _preferences: CounterType[Tuple[Candidate, Candidate]]

    def __init__(self, candidates: List[Candidate], ballots: Iterator[Ballot]):
        self._candidates = candidates
        self._preferences = Counter()
        everyone = set(candidates)
        for ballot in ballots:
            ranked = set()
            for cand in ballot.candidate_rank:
                # Every candidate already seen on this ballot outranks `cand`.
                for above in ranked:
                    self._preferences[(above, cand)] += 1
                ranked.add(cand)
            # Ranked candidates outrank everyone left off the ballot.
            unranked = everyone - ranked
            for above in ranked:
                for below in unranked:
                    self._preferences[(above, below)] += 1

    def preferred(self, c1, c2):
        """True iff strictly more ballots rank c1 over c2 than c2 over c1."""
        wins = self._preferences[(c1, c2)]
        losses = self._preferences[(c2, c1)]
        return wins > losses

    @property
    def pairwise(self) -> Iterator[PairwiseStat]:
        """Yield a PairwiseStat for every ordered pair of distinct candidates."""
        for first in self._candidates:
            for second in self._candidates:
                if first == second:
                    continue
                over = self._preferences[(first, second)]
                under = self._preferences[(second, first)]
                yield PairwiseStat(first, second, over, over + under)

    def to_dict_list(self) -> List[Dict]:
        """Serialize every pairwise stat to a plain dict."""
        return [stat.to_dict() for stat in self.pairwise]

    @property
    def graph(self) -> Dict[Candidate, Set[Candidate]]:
        """Map each candidate to the set of candidates strictly preferred over it."""
        return {
            cand: {rival for rival in self._candidates if self.preferred(rival, cand)}
            for cand in self._candidates
        }

    @property
    def smith_set(self) -> Set[Candidate]:
        """Dominant set reached by iterating the 'beaten by' graph.

        Starts from all candidates and repeatedly replaces the set with the
        union of its members' dominators until it stabilizes or would empty.
        """
        beaten_by = self.graph
        current = set(beaten_by)
        while True:
            shrunk = set.union(*(beaten_by[cand] for cand in current))
            if shrunk == set() or shrunk == current:
                return current
            current = shrunk

    @property
    def condorcet_winner(self) -> Optional[Candidate]:
        """The sole Smith-set member, or None when no Condorcet winner exists."""
        smith = self.smith_set
        return next(iter(smith)) if len(smith) == 1 else None

    @property
    def preferences(self) -> CounterType[Tuple[Candidate, Candidate]]:
        return self._preferences

    @property
    def candidates(self) -> List[Candidate]:
        return self._candidates
|
21,267 | 97d9ef6608520e232700c18474f28cad95d139b6 | import requests
from db_connect import Item
#fp=open("out.txt","w")
#i=10
#for(
def n_sanity_check(number):
    """Clamp a requested result count into the API-safe range [1, 99].

    (The old docstring said "greater than 100", but the actual upper
    clamp has always been 99 -- preserved here.)
    """
    return max(1, min(99, number))
def get_times_yahoo(search):
    """Query the Yahoo Answers question-search endpoint for `search`."""
    return requests.get(
        "http://answers.yahooapis.com/answersservice/v1/questionsearch",
        params={'query': search})
def get_reddit_search(search, number):
    """Search reddit for `search`, requesting one more than `number` results."""
    capped = n_sanity_check(number)
    query = {'q': search, 'limit': capped + 1}
    return reddit_request('http://www.reddit.com/search.json', query)
def get_reddit_searchurls(search, number):
number = n_sanity_check(number)
r=get_reddit_search("cat",number) # Is this an example you've hard coded?
data=r.json()
i=0
while (i<number):
#fp.write(data["data"]["children"][i]["data"]["url"])
print data["data"]["children"][i]["data"]["url"]
#fp.write('\n')
i+=1
def get_reddit_top(keyword, number):
    """Fetch up to `number` front-page posts of subreddit `keyword` as Items.

    Bug fixes:
    - `items` was never initialized, so the first append raised NameError.
    - the raw requests Response was indexed like a dict; decode with .json()
      first (matching the r.json() usage elsewhere in this module).

    NOTE(review): the loop still *stops* at the first NSFW post (original
    `break` behavior preserved) -- `continue` may have been intended.
    """
    number = n_sanity_check(number)
    payload = {'limit': number + 1}
    response = reddit_request('http://www.reddit.com/r/' + keyword + '.json', payload)
    data = response.json()
    items = []
    for i in xrange(0, number):
        item_response = data["data"]["children"][i]["data"]
        if item_response["over_18"]:  # Get that shit outta here
            break
        item = Item()
        item.duration = ""
        item.name = item_response["title"]
        item.add_tag(item_response["subreddit"])
        item.url = item_response["url"]
        items.append(item)
    return items
def reddit_request(url, params):
    """requests.get wrapper that guarantees a 'user-agent' query param.

    Mutates the caller's dict (same as the original implementation).
    """
    params.setdefault('user-agent', 'downtime')
    return requests.get(url, params=params)
# def getreddittopurls(search, number):
# number=min(99,number);
# number=max(1,number);
# r=get_reddit_top("cat",number)
# data=r.json()
# i=0
# while (i<number):
# fp.write(data["data"]["children"][i]["data"]["url"])
# fp.write('\n')
# i+=1
def get_reddit_top_urls(keyword, number):
    """Top-post Items for `keyword`; "world" is aliased to r/worldevents."""
    subreddit = "worldevents" if keyword.lower() == "world" else keyword
    return get_reddit_top(subreddit, n_sanity_check(number))
def reddit_best_urls(keyword_list, number_each):
    """Map each keyword to its top `number_each` posts.

    Bug fix: the function built `url_dict` but returned the undefined name
    `url_list`, which raised NameError on every call.
    """
    url_dict = {}
    for keyword in keyword_list:
        url_dict[keyword] = get_reddit_top_urls(keyword, number_each)
    return url_dict
#data=r.json()
#fp=open("out.txt","w")
#i=10
#for(
#fp.write()
#getreddittop("monkey",6)
#getreddittopurls("monkey",6)
if __name__=='__main__':
    # Manual smoke test (Python 2 print statement -- this module predates py3).
    print reddit_best_urls(["world","cat"],2)
|
21,268 | 2c775db210671864ec763a8fd175a301c3f362ee | from django import forms
from django.forms import widgets
from .models import Dealer, Bdm, Contact, Outlet, Inventory, City
# from easy_select2 import select2
class BdmForm(forms.ModelForm):
    """Create/edit form for a Bdm (business development manager)."""
    class Meta:
        model = Bdm
        fields = [
            'name',
            'city',
            'contact_no',
            'alt_contact_no',
            'email'
        ]
class DealerForm(forms.ModelForm):
    """Creation form for a Dealer; exposes `manager` (the edit form does not)."""
    # Override the model field so status is limited to these three choices.
    CATEGORIES=(
        ('Active', 'Active'),
        ('Inactive', 'In-Active'),
        ('Expired', 'Expired'),
    )
    status = forms.ChoiceField(choices=CATEGORIES)
    # brand = forms.CharField(widget=forms.TextInput(attrs={'class': 'brand', 'placeholder':'Brand'}))
    # status = forms.CharField(widget=forms.TextInput(attrs={'class': 'status'}))
    # dealer_company = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    # dealership_name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    # address = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    # city = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    # sales_outlet = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    # pincode = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    # bdm = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control-dealer'}))
    class Meta:
        model = Dealer
        fields = [
            'brand',
            'dealer_company',
            'dealership_name',
            'status',
            'address',
            'city',
            'sales_outlet',
            'pincode',
            # 'latitude',
            # 'longitude',
            # 'bdm'
            'manager'
        ]
class DealerEditForm(forms.ModelForm):
    """Edit form for a Dealer; same fields as creation minus `manager`."""
    # Override the model field so status is limited to these three choices.
    CATEGORIES=(
        ('Active', 'Active'),
        ('Inactive', 'In-Active'),
        ('Expired', 'Expired'),
    )
    status = forms.ChoiceField(choices=CATEGORIES)
    class Meta:
        model = Dealer
        fields = [
            'brand',
            'dealer_company',
            'dealership_name',
            'status',
            'address',
            'city',
            'sales_outlet',
            'pincode',
            # 'latitude',
            # 'longitude',
            # 'bdm'
        ]
class ContactForm(forms.ModelForm):
    """Create form for a dealer-level Contact (includes the `dealer` FK)."""
    # dealer = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    # type = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    class Meta:
        model = Contact
        fields = [
            'name',
            'designation',
            'email',
            'is_primary_contact',
            'contact_no_1',
            'contact_no_2',
            'active',
            'type',
            #'image',
            'dealer'
        ]
class ContactEditForm(forms.ModelForm):
    """Edit form for a dealer-level Contact; the `dealer` FK is not editable."""
    # dealer = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    # type = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    class Meta:
        model = Contact
        fields = [
            'name',
            'designation',
            'email',
            'is_primary_contact',
            'contact_no_1',
            'contact_no_2',
            'active',
            'type',
            #'image',
            # 'dealer'
        ]
class ContactFormOutlet(forms.ModelForm):
    """Create form for an outlet-level Contact (includes the `outlet` FK)."""
    # outlet = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    # type = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    class Meta:
        model = Contact
        fields = [
            'name',
            'designation',
            'email',
            'is_primary_contact',
            'contact_no_1',
            'contact_no_2',
            'active',
            'type',
            #'image',
            'outlet'
        ]
class ContactFormOutletEdit(forms.ModelForm):
    """Edit form for an outlet-level Contact; the `outlet` FK is not editable."""
    # outlet = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    # type = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    class Meta:
        model = Contact
        fields = [
            'name',
            'designation',
            'email',
            'is_primary_contact',
            'contact_no_1',
            'contact_no_2',
            'active',
            'type',
            #'image',
            # 'outlet'
        ]
class OutletForm(forms.ModelForm):
    """Create form for an Outlet (includes the `dealer` FK)."""
    # Override so status is limited to Active / In-Active (no 'Expired' here).
    CATEGORIES=(
        ('Active', 'Active'),
        ('Inactive', 'In-Active'),
    )
    status = forms.ChoiceField(choices=CATEGORIES)
    # dealer = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    class Meta:
        model = Outlet
        fields = [
            'address',
            'area',
            'city',
            'pincode',
            'status',
            'dealer',
        ]
class OutletEditForm(forms.ModelForm):
    """Edit form for an Outlet; the `dealer` FK is not editable."""
    # Override so status is limited to Active / In-Active (no 'Expired' here).
    CATEGORIES=(
        ('Active', 'Active'),
        ('Inactive', 'In-Active'),
    )
    status = forms.ChoiceField(choices=CATEGORIES)
    # dealer = forms.CharField(
    #     widget=forms.TextInput(attrs={ 'readonly':'True' })
    # )
    class Meta:
        model = Outlet
        fields = [
            'address',
            'area',
            'city',
            'pincode',
            'status',
            # 'dealer',
        ]
class PriceUploadDealerForm(forms.Form):
    """Upload a price file for a specific dealer in a specific city."""
    dealer_name = forms.ModelChoiceField(queryset= Dealer.objects.all(), widget=forms.Select(attrs={'class': 'select-dealer'}))
    city_name = forms.ModelChoiceField(queryset=City.objects.all(), widget=forms.Select(attrs={'class': 'select-city'}))
    file_name = forms.FileField(widget=forms.FileInput(attrs={'class': 'file-upload-button'}))
class PriceUploadForm(forms.Form):
    """Upload a city-wide price file (no dealer selection)."""
    city_name = forms.ModelChoiceField(queryset=City.objects.all(), widget=forms.Select(attrs={'class': 'select-city'}))
    file_name = forms.FileField(widget=forms.FileInput(attrs={'class': 'file-upload-button'}))
class InventoryForm(forms.ModelForm):
    """Edit form for an Inventory row (variant and stock count only)."""
    class Meta:
        model = Inventory
        fields = [
            'variant',
            'count',
        ]
|
21,269 | 7254436c8cc3456237702d4496858e1b122da23d | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class AccountMoveReversal(models.TransientModel):
    """Peru (CPE) localization of Odoo's move-reversal wizard.

    Adds the SUNAT credit-note reason code and propagates it, together with
    the configured credit-note journal and its invoice code, onto the
    reversal entries.
    """
    _inherit = "account.move.reversal"

    #pe_debit_note_code = fields.Selection(selection="_get_pe_debit_note_type", string="Dedit Note Code")
    # SUNAT catalog 9 code: the reason the credit note is issued.
    pe_credit_note_code = fields.Selection(
        selection="_get_pe_crebit_note_type", string="Credit Note Code")

    @api.model
    def _get_pe_crebit_note_type(self):
        # NOTE(review): "crebit" is a typo, but the name is referenced as a
        # string by the Selection field above -- rename both or neither.
        return self.env['pe.datas'].get_selection("PE.CPE.CATALOG9")

    @api.model
    def _get_pe_debit_note_type(self):
        # SUNAT catalog 10 (debit-note types); currently unused -- see the
        # commented-out pe_debit_note_code field above.
        return self.env['pe.datas'].get_selection("PE.CPE.CATALOG10")

    def _prepare_default_reversal(self, move):
        """Route the reversal to the journal's dedicated credit-note journal
        (when one is configured) and stamp the Peruvian codes on the values
        used to create the reversal move."""
        res = super()._prepare_default_reversal(move)
        # Fall back to the journal already chosen by the base wizard.
        journal_id = move.journal_id.credit_note_id.id or res.get('journal_id')
        journal = self.env['account.journal'].browse(journal_id)
        res.update({
            'journal_id': journal.id,
            'pe_credit_note_code': self.pe_credit_note_code,
            'pe_invoice_code': journal.pe_invoice_code,
        })
        return res

    def reverse_moves(self):
        """When invoked as a debit note (context flag), drop the first leaf of
        the returned action's domain -- presumably a move-type filter that
        would hide debit notes. TODO(review): confirm intent."""
        res = super(AccountMoveReversal, self).reverse_moves()
        if self.env.context.get("is_pe_debit_note", False):
            invoice_domain = res['domain']
            if invoice_domain:
                del invoice_domain[0]
            res['domain'] = invoice_domain
        return res
|
21,270 | e6e0012f7072ab6f42f920284bbc13a9440a0196 | from django.conf import settings
import numpy as np
import json
import urllib3
from urllib3.util import Retry
from urllib3 import PoolManager, ProxyManager, Timeout
from urllib3.exceptions import MaxRetryError, TimeoutError
urllib3.disable_warnings()
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
def get_connection_pool():
    """Build an HTTP PoolManager with retry and timeout policies applied."""
    # Retry up to 5 times with a small backoff on 405-500 responses.
    retries = Retry(total=5, backoff_factor=0.1,
                    status_forcelist=list(range(405, 501)))
    timeouts = Timeout(read=10, connect=5)
    return PoolManager(retries=retries, timeout=timeouts)
def segundos_a_pesos(segs):
    """Convert printing seconds into a price in pesos.

    Piecewise-linear rate: a 20/hour base plus four 5/hour surcharges that
    stop accumulating after 5, 15, 25 and 35 hours respectively.

    :param segs: printing time in seconds
    :return: price (float)
    """
    horas_de_impresion = segs / 3600
    # Fix: the previous version read settings.PRECIO_POR_HORA_DE_IMPRESION
    # into an unused local ('hora_base'); the dead read has been removed.
    precio = (20 * horas_de_impresion
              + 5 * min(horas_de_impresion, 5)
              + 5 * min(horas_de_impresion, 15)
              + 5 * min(horas_de_impresion, 25)
              + 5 * min(horas_de_impresion, 35))
    # Volume discount (precio_c_descuento = a*(precio_sin_descuento)^b) is
    # documented but intentionally not applied here.
    return precio
def get_shipping_price(compra):
    """Quote the shipping price for a purchase via the ShipNow API.

    Falls back to a flat price of 200 when the API call fails or returns an
    unexpected payload.

    :param compra: purchase with a delivery_address (postal code is resolved
        on demand when missing)
    :return: quoted price, or 200 on any failure
    """
    if compra.delivery_address.postal_code is None:
        compra.delivery_address.update_long_address_and_postal_code()
    try:
        http = get_connection_pool()
        url = settings.SHIPNOW_API_URL.format(zip=compra.delivery_address.postal_code)
        r = json.loads(http.request('GET', url).data.decode('utf-8'))
        return r['price']
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; keep the deliberate best-effort flat fallback.
        logger.exception("ShipNow shipping quote failed; using flat fallback price")
        return 200
def get_order_price(compra):
    """Total price for all customized objects in a purchase.

    Printing time per object is either the stored default (scale == 1) or
    estimated from each STL file's time-vs-scale polynomial; time is then
    converted to pesos and multiplied by quantity and the object's discount.
    """
    total_price = 0
    print(compra)  # debug output
    for objeto_personalizado in compra.purchased_objects.all():
        object_total_seconds = 0
        if objeto_personalizado.scale == 1:
            object_total_seconds = objeto_personalizado.object_id.printing_time_default_total()
        else:
            # Use the fitted polynomial to estimate printing time of each file
            # at the requested scale.
            for archivostl in objeto_personalizado.object_id.files.all():
                p = np.poly1d(archivostl.time_as_a_function_of_scale.coefficients_list())
                print(archivostl.time_as_a_function_of_scale.coefficients_list())  # debug output
                object_total_seconds += p(objeto_personalizado.scale)
        # NOTE(review): 'discount' multiplies the price directly, so it looks
        # like a factor (e.g. 0.9), not a percentage — confirm with the model.
        total_price += segundos_a_pesos(object_total_seconds) * objeto_personalizado.quantity * objeto_personalizado.object_id.discount
    # Round down to the nearest multiple of 5.
    return total_price - total_price % 5
def obtener_parametros_de_precios():
    """Expose the pricing parameters configured in Django settings."""
    parametros = {
        'price_per_hour': settings.PRECIO_POR_HORA_DE_IMPRESION,
        'discount_parameter_a': settings.PRECIO_DESCUENTO_VOLUMEN_A,
        'discount_parameter_b': settings.PRECIO_DESCUENTO_VOLUMEN_B,
    }
    return parametros
|
21,271 | 5589c315a9fbf9c8fe516b22892ae52bb40f40b7 | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
    """Per-user profile holding the optional profile picture."""
    # One profile per Django auth user; deleted together with the user.
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE,
        blank=True,
        default=None,
        null=True
    )
    profilePicture = models.ImageField(upload_to="images/", blank=True, default=None, null=True)

    def __str__(self):
        return self.user.username
class Lecture(models.Model):
    """A lecture posted by a profile; other profiles can save it."""
    title = models.CharField(max_length=200,null=True)
    subject = models.CharField(max_length=200,null=True)
    description = models.CharField(max_length=2000,null=True)
    # Author profile; userSaved tracks profiles that bookmarked the lecture.
    author = models.ForeignKey(Profile, related_name='author',on_delete=models.CASCADE,blank=True,null=True)
    userSaved = models.ManyToManyField(Profile)

    def __str__(self):
        return self.title
class Lecture_img(models.Model):
    """An image attached to a Lecture (one lecture can hold many images)."""
    LectureKey = models.ForeignKey(Lecture, related_name='Lecture_img',on_delete=models.CASCADE,blank=True,null=True)
    image = models.ImageField(upload_to='lecture_image',blank=True)

    def __str__(self):
        return self.image.name
|
21,272 | f9bf80500c3ef13299f6be2d3b73e2c0298caf31 | import cv2
import numpy as np
# Default callback that does nothing here; createTrackbar requires one.
def eventoCallback(x):
    pass

# Load the sample image.
imagem = cv2.imread('../../imagens/urso.jpg')
# Create a window called "Imagem".
cv2.namedWindow('Imagem')
# Add a trackbar named "Valor" to the "Imagem" window, with range 0..255.
cv2.createTrackbar('Valor','Imagem',0,255, eventoCallback)

while(1):
    # Read the current value of the "Valor" trackbar.
    valor = cv2.getTrackbarPos('Valor','Imagem')
    # Apply it to the image as a binary threshold.
    ret, thresh1 = cv2.threshold(imagem, valor, 255, cv2.THRESH_BINARY)
    # Show the thresholding result.
    cv2.imshow('Imagem',thresh1)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # Esc key exits the loop
        break
cv2.destroyAllWindows() |
21,273 | 2e256bd0a244b7e69751935477a2c91994b750df | from django.shortcuts import render
from django.http import HttpResponse
def top(request):
    """Return a plain 'Hello World' response for the site root."""
    response = HttpResponse(b"Hello World")
    return response
|
21,274 | 06f71806dc6fd6dd691e59a660327abfc8158c84 | S = str(input())
def classify_yymm(code):
    """Classify a 4-digit string as a YY/MM concatenation.

    A valid month is 01..12. Returns:
      'AMBIGUOUS' - both halves are valid months
      'MMYY'      - only the first half is a valid month
      'YYMM'      - only the second half is a valid month
      'NA'        - neither half is a valid month
    """
    # Fixes from the original script: the unused S_list was removed, and the
    # dead `or front == 0` condition in the elif branch was eliminated.
    front_is_month = 1 <= int(code[:2]) <= 12
    back_is_month = 1 <= int(code[2:]) <= 12
    if front_is_month and back_is_month:
        return "AMBIGUOUS"
    if front_is_month:
        return "MMYY"
    if back_is_month:
        return "YYMM"
    return "NA"

if __name__ == "__main__":
    # S is read from stdin at the top of the script.
    print(classify_yymm(S))
21,275 | bb332b9d4b8530b1d403252d04d99834e7efa7bd | from django.db import models
from login.models import User
# Create your models here.
class RawImg(models.Model):
    """An uploaded raw image identified by a short code, owned by a user."""
    code = models.CharField(max_length=8,default='code')
    author = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
    # img_01 = models.ImageField(upload_to='upload',default='')
    # img_02 = models.ImageField(upload_to='out',default='')

    def __str__(self):
        return self.code
# Create your models here.
#
# class Post_img(models.Model):
# code = models.CharField(max_length=200, default='code')
#
# cover = models.ImageField(upload_to="images/", default=None, blank=True)
# def __str__(self):
# return self.code
#
# class Operation(models.Model):
# created = models.DateTimeField(auto_now_add=True)
# type = models.CharField(max_length=200)
# post = models.ForeignKey(Post_img, on_delete=models.CASCADE, default=None)
#
|
21,276 | 1ea71743282e1db2a8c9c991c011dbfc5dbd7b8a | import logging
import numpy as np
from aqp_spn.aqp_leaves import Categorical
from aqp_spn.aqp_leaves import IdentityNumericLeaf
from sklearn.cluster import KMeans
from spn.algorithms.splitting.Base import preproc, split_data_by_clusters
from spn.algorithms.splitting.RDC import getIndependentRDCGroups_py
from spn.structure.StatisticalTypes import MetaType
logger = logging.getLogger(__name__)

# Numeric leaves with more unique values than this are compressed into
# histogram-bin representatives (see create_custom_leaf).
MAX_UNIQUE_LEAF_VALUES = 10000
def learn_mspn(
    data,
    ds_context,
    cols="rdc",
    rows="kmeans",
    min_instances_slice=200,
    threshold=0.3,
    max_sampling_threshold_cols=10000,
    max_sampling_threshold_rows=100000,
    bloom_filters=False,
    ohe=False,
    leaves=None,
    memory=None,
    rand_gen=None,
    cpus=-1,
):
    """
    Adapts normal learn_mspn to use custom identity leafs and use sampling for structure learning.

    :param bloom_filters: forwarded to learn_structure (build bloom filters)
    :param max_sampling_threshold_rows: row count above which row clustering fits on a sample
    :param max_sampling_threshold_cols: row count above which column splitting (RDC) runs on a sample
    :param data: training data, one row per instance
    :param ds_context: SPFlow dataset context (meta types, domains, null values)
    :param cols: column splitting strategy name ('rdc'/'poisson') or a callable
    :param rows: row splitting strategy name ('kmeans'/'rdc'/'tsne'/'gmm') or a callable
    :param min_instances_slice: stop splitting slices below this many instances
    :param threshold: RDC independence threshold
    :param ohe: one-hot-encode categoricals before splitting
    :param leaves: leaf factory; defaults to create_custom_leaf
    :param memory: optional cache object with a .cache(fn) wrapper (joblib.Memory-like)
    :param rand_gen: np.random.RandomState; defaults to seed 17 for reproducibility
    :param cpus: worker count forwarded to the splitting functions
    :return: root node of the learned SPN
    """
    if leaves is None:
        leaves = create_custom_leaf
    if rand_gen is None:
        rand_gen = np.random.RandomState(17)

    # Imported inside the function, presumably to avoid an import cycle at
    # module load time — confirm before moving to the top of the file.
    from aqp_spn.custom_spflow.custom_structure_learning import get_next_operation, learn_structure

    def l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe):
        split_cols, split_rows = get_splitting_functions(max_sampling_threshold_rows, max_sampling_threshold_cols, cols,
                                                         rows, ohe, threshold, rand_gen, cpus)
        nextop = get_next_operation(min_instances_slice)
        node = learn_structure(bloom_filters, data, ds_context, split_rows, split_cols, leaves, next_operation=nextop)
        return node

    # Optionally memoize the whole learning call.
    if memory:
        l_mspn = memory.cache(l_mspn)

    spn = l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe)
    return spn
def create_custom_leaf(data, ds_context, scope):
    """
    Adapted leafs for cardinality SPN. Either categorical or identityNumeric leafs.

    REAL columns become IdentityNumericLeaf nodes (optionally compressed into
    histogram-bin representatives when there are too many unique values);
    DISCRETE columns become Categorical nodes. scope must hold one variable.
    """
    idx = scope[0]
    meta_type = ds_context.meta_types[idx]

    if meta_type == MetaType.REAL:
        assert len(scope) == 1, "scope for more than one variable?"
        unique_vals, counts = np.unique(data[:, 0], return_counts=True)

        if hasattr(ds_context, 'no_compression_scopes') and idx not in ds_context.no_compression_scopes and \
                len(unique_vals) > MAX_UNIQUE_LEAF_VALUES:
            # if there are too many unique values build identity leaf with histogram representatives
            hist, bin_edges = np.histogram(data[:, 0], bins=MAX_UNIQUE_LEAF_VALUES, density=False)
            logger.debug(f"\t\tDue to histograms leaf size was reduced "
                         f"by {(1 - float(MAX_UNIQUE_LEAF_VALUES) / len(unique_vals)) * 100:.2f}%")
            # Left bin edges stand in for the actual values.
            unique_vals = bin_edges[:-1]
            probs = hist / data.shape[0]
            lidx = len(probs) - 1  # NOTE(review): lidx is assigned but never used
            assert len(probs) == len(unique_vals)
        else:
            probs = np.array(counts, np.float64) / len(data[:, 0])
            lidx = len(probs) - 1  # NOTE(review): lidx is assigned but never used

        # cumulative sum to make inference faster
        prob_sum = np.concatenate([[0], np.cumsum(probs)])

        null_value = ds_context.null_values[idx]
        zero_in_dataset = data.shape[0] != np.count_nonzero(data[:, 0])
        not_null_indexes = np.where(data[:, 0] != null_value)[0]
        # This version also removes 0 (for inverted (square) mean)
        # not_null_indexes = np.where((data[:, 0] != null_value) & (data[:, 0] != 0.0))[0]
        null_value_prob = 1 - len(not_null_indexes) / len(data[:, 0])

        # all values NAN
        if len(not_null_indexes) == 0:
            mean = 0
            inverted_mean = np.nan

            # for variance computation
            square_mean = 0
            inverted_square_mean = np.nan

        # some values nan
        else:
            mean = np.mean(data[not_null_indexes, 0])
            # Inverted means are undefined when any zero appears in the column.
            if zero_in_dataset:
                inverted_mean = np.nan
            else:
                inverted_mean = np.mean(1 / data[not_null_indexes, 0])

            # for variance computation
            square_mean = np.mean(np.square(data[not_null_indexes, 0]))
            if zero_in_dataset:
                inverted_square_mean = np.nan
            else:
                inverted_square_mean = np.mean(1 / np.square(data[not_null_indexes, 0]))

        leaf = IdentityNumericLeaf(unique_vals, mean, inverted_mean, square_mean, inverted_square_mean, prob_sum,
                                   null_value_prob, scope=scope)
        from aqp_spn.custom_spflow.custom_validity import is_valid_prob_sum
        leaf.cardinality = data.shape[0]
        # Sanity-check that the cumulative probabilities are consistent.
        ok, err = is_valid_prob_sum(prob_sum, unique_vals, leaf.cardinality)
        assert ok, err
        return leaf

    elif meta_type == MetaType.DISCRETE:
        unique, counts = np.unique(data[:, 0], return_counts=True)
        # +1 because of potential 0 value that might not occur
        sorted_counts = np.zeros(len(ds_context.domains[idx]) + 1, dtype=np.float64)
        for i, x in enumerate(unique):
            sorted_counts[int(x)] = counts[i]
        p = sorted_counts / data.shape[0]
        node = Categorical(p, scope)
        node.cardinality = data.shape[0]
        return node
def get_splitting_functions(max_sampling_threshold_rows, max_sampling_threshold_cols, cols, rows, ohe, threshold,
                            rand_gen, n_jobs):
    """Resolve splitting strategies by name, or pass callables straight through.

    Raises AssertionError for an unknown strategy name.
    """
    from spn.algorithms.splitting.Clustering import get_split_rows_TSNE, get_split_rows_GMM
    from spn.algorithms.splitting.PoissonStabilityTest import get_split_cols_poisson_py
    from spn.algorithms.splitting.RDC import get_split_rows_RDC_py
    if isinstance(cols, str):
        if cols == "rdc":
            # Local sampling-aware variant defined below in this module.
            split_cols = get_split_cols_RDC_py(max_sampling_threshold_cols=max_sampling_threshold_cols,
                                               threshold=threshold,
                                               rand_gen=rand_gen, ohe=ohe, n_jobs=n_jobs)
        elif cols == "poisson":
            split_cols = get_split_cols_poisson_py(threshold, n_jobs=n_jobs)
        else:
            raise AssertionError("unknown columns splitting strategy type %s" % str(cols))
    else:
        split_cols = cols
    if isinstance(rows, str):
        if rows == "rdc":
            split_rows = get_split_rows_RDC_py(rand_gen=rand_gen, ohe=ohe, n_jobs=n_jobs)
        elif rows == "kmeans":
            # Local sampling-aware variant defined below in this module.
            split_rows = get_split_rows_KMeans(max_sampling_threshold_rows=max_sampling_threshold_rows)
        elif rows == "tsne":
            split_rows = get_split_rows_TSNE()
        elif rows == "gmm":
            split_rows = get_split_rows_GMM()
        else:
            raise AssertionError("unknown rows splitting strategy type %s" % str(rows))
    else:
        split_rows = rows
    return split_cols, split_rows
# noinspection PyPep8Naming
def get_split_rows_KMeans(max_sampling_threshold_rows, n_clusters=2, pre_proc=None, ohe=False, seed=17):
    """Row-splitting factory: KMeans clustering with optional subsampling.

    When a slice has more rows than max_sampling_threshold_rows, the KMeans
    model is fitted on a random sample and then used to assign all rows.
    """
    # noinspection PyPep8Naming
    def split_rows_KMeans(local_data, ds_context, scope):
        data = preproc(local_data, ds_context, pre_proc, ohe)

        if data.shape[0] > max_sampling_threshold_rows:
            # Fit on a sample, then predict membership for every row.
            data_sample = data[np.random.randint(data.shape[0], size=max_sampling_threshold_rows), :]

            kmeans = KMeans(n_clusters=n_clusters, random_state=seed)
            clusters = kmeans.fit(data_sample).predict(data)
        else:
            kmeans = KMeans(n_clusters=n_clusters, random_state=seed)
            clusters = kmeans.fit_predict(data)

        cluster_centers = kmeans.cluster_centers_
        result = split_data_by_clusters(local_data, clusters, scope, rows=True)

        return result, cluster_centers.tolist()

    return split_rows_KMeans
# noinspection PyPep8Naming
def get_split_cols_RDC_py(max_sampling_threshold_cols=10000, threshold=0.3, ohe=True, k=10, s=1 / 6,
                          non_linearity=np.sin,
                          n_jobs=-2, rand_gen=None):
    """Column-splitting factory: RDC independence groups with subsampling.

    When a slice has more rows than max_sampling_threshold_cols, the RDC
    dependency estimation runs on a random row sample; the resulting column
    clustering is applied to the full data either way.
    """
    from spn.algorithms.splitting.RDC import split_data_by_clusters

    def split_cols_RDC_py(local_data, ds_context, scope):
        meta_types = ds_context.get_meta_types_by_scope(scope)
        domains = ds_context.get_domains_by_scope(scope)

        # Refactor: the original duplicated the entire getIndependentRDCGroups_py
        # call in both branches; only the input data differs, so compute it once.
        if local_data.shape[0] > max_sampling_threshold_cols:
            rdc_data = local_data[np.random.randint(local_data.shape[0], size=max_sampling_threshold_cols), :]
        else:
            rdc_data = local_data

        clusters = getIndependentRDCGroups_py(
            rdc_data,
            threshold,
            meta_types,
            domains,
            k=k,
            s=s,
            # ohe=True,
            non_linearity=non_linearity,
            n_jobs=n_jobs,
            rand_gen=rand_gen,
        )
        return split_data_by_clusters(local_data, clusters, scope, rows=False)

    return split_cols_RDC_py
|
21,277 | 9dc60f0a775cbff3690febdbb3d87425e6bf74b6 | import glob
from PIL import Image, ImageDraw, ImageFont
# applyText to a large map (according to grid)
# or to a single keymap image
### CONSTANTS ###
letterImageSize = (80, 50)  # (width, height) in px of each rendered text label
textColor = (150, 150, 150)  # mid-grey RGB label color
textSize = 40  # font size for the label text
### AUX FUNCTIONS - TEXT###
def getSegmentText(xIndex, yIndex):
    """Build a grid label like 'A1': row letter (yIndex) + 1-based column."""
    row_letter = chr(ord('A') + yIndex)
    column_number = xIndex + 1
    return f"{row_letter}{column_number}"
def createTextImage(text=''):
    """Render *text* onto a small transparent RGBA image and return it."""
    canvas = Image.new("RGBA", letterImageSize, (255, 255, 255, 0))
    label_font = ImageFont.truetype("arial.ttf", textSize)
    ImageDraw.Draw(canvas).text((0, 0), text, fill=textColor, font=label_font)
    return canvas
def getMainImage(filename):
    """Open *filename* as a PIL image; exit the program if it cannot be read.

    :param filename: path of the image to open
    :return: the opened PIL Image
    """
    try:
        mainImage = Image.open(filename)
        return mainImage
    except OSError:
        # Narrowed from a bare `except:`; PIL raises FileNotFoundError /
        # UnidentifiedImageError, both OSError subclasses, for unreadable files.
        print("Image not found.")
        exit(1)
def pasteText(mainImage, textImage, pasteLocation):
    # Paste using the text image as its own alpha mask so transparent
    # pixels do not overwrite the underlying map.
    mainImage.paste(textImage, pasteLocation, mask=textImage)
def applyTextToKeymap(mainImage, text):
    """Stamp *text* near the bottom-left corner of a single key-map image."""
    # Fix: only the image height is needed; the unused width local was removed.
    _, yDim = mainImage.size
    textImage = createTextImage(text)
    # 5 px in from the left edge, 5 px up from the bottom edge.
    pasteLocation = (5, yDim - letterImageSize[1] - 5)
    pasteText(mainImage, textImage, pasteLocation)
def applyTextToFullMap(mainImage, segmentSize=500):
    """Stamp each grid segment's label (A1, B3, ...) at the segment center.

    Generalized: the segment edge length used to be hard-coded to 500 px;
    it is now a parameter defaulting to 500, so existing callers behave
    exactly as before.
    """
    xDim, yDim = mainImage.size
    xSegments = xDim // segmentSize
    ySegments = yDim // segmentSize
    for xIndex in range(xSegments):
        for yIndex in range(ySegments):
            segmentText = getSegmentText(xIndex, yIndex)
            textImage = createTextImage(segmentText)
            # Center of the segment, shifted up by half the label height.
            xPasteLocation = xIndex * segmentSize + segmentSize // 2
            yPasteLocation = yIndex * segmentSize + segmentSize // 2 - letterImageSize[1] // 2
            pasteLocation = (xPasteLocation, yPasteLocation)
            pasteText(mainImage, textImage, pasteLocation)
def saveMainImage(mainImage, filename):
    """Save the image at high quality; exit the program on failure."""
    try:
        mainImage.save(filename, quality=100, optimize=True, progressive=True)
        # mainImage.show()
    except (OSError, ValueError):
        # Narrowed from a bare `except:`; PIL raises ValueError for an
        # undetermined output format and OSError for write failures.
        print("Failed to save image.")
        exit(1)
def processImage(input_filename, output_filename="", method="keymap", keymapText = ""):
    """Load an image, stamp text onto it, and save it.

    method: 'keymap' stamps keymapText in the corner of a single key map;
            'fullmap' stamps every grid segment's label across the image.
    When output_filename is empty the input file is overwritten in place.
    """
    # get main image
    mainImage = getMainImage(input_filename)
    # applyText to mainImage
    if method == "keymap":
        applyTextToKeymap(mainImage, keymapText)
    elif method == "fullmap":
        applyTextToFullMap(mainImage)
    # save or show
    if not output_filename:
        output_filename = input_filename
    saveMainImage(mainImage, output_filename)
### AUX FUNCTION - GLOB ###
def getFileNames():
    """Return the paths of all key-map images matching images/map_*.png."""
    pattern = "images/map_*.png"
    return glob.glob(pattern)
def getCoords(filenames):
    """Yield the (row, column) integer pair embedded in each map filename.

    Filenames look like 'images/map_(3,7).png'; each yields a tuple (3, 7).
    """
    for name in filenames:
        raw = name.split(".")[0].split("_")[1].lstrip("(").rstrip(")")
        left, right = raw.split(",")
        yield int(left), int(right)
def processAllKeyMaps():
    """Stamp its grid label onto every images/map_(row,col).png key map."""
    print("Attaching text to key maps.")
    filenames = getFileNames()
    numsInt = getCoords(filenames)
    fileNamesWithCoords = zip(filenames, numsInt)
    for filename, coords in fileNamesWithCoords:
        # Filename coordinates are (row, column) -> (yIndex, xIndex).
        yIndex = coords[0]
        xIndex = coords[1]
        segmentText = getSegmentText(xIndex, yIndex)
        processImage(filename, method="keymap", keymapText=segmentText)
def processFullMap():
    """Stamp all grid-segment labels onto the full gridded map image."""
    print("Attaching text to full map.")
    mapFull_filename = "images/mapFullGrid.jpeg"
    processImage(mapFull_filename, method = "fullmap")
# Script entry: label every key map, then the full map.
# Attach text to keymaps
processAllKeyMaps()
# Attach text to mapFull
processFullMap()

### MAIN ###
# processImage("pic2.png", method = "keymap")
# processImage("largemap.jpeg", "largemapKeys.jpeg", method = "fullmap")
|
def dijkstra(a,disk,n,v):
    """Dijkstra shortest paths from vertex 1 (matrix row/col 0); -1 = no edge.

    a    : n x n adjacency matrix, a[i][j] is the weight of edge i->j.
    disk : distances from vertex 1 to vertices 2..n; -1 means unknown.
    n    : number of vertices.
    v    : visited flags (1 = finalized).
    Prints the final distance list and the 1-based comma-separated paths.
    """
    v[0]=1
    for i in range(1,n-1):
        # NOTE(review): initializes only disk[0..n-3]; the last entry keeps
        # its caller-provided value — looks like this should be range(1, n).
        disk[i-1]=a[0][i]
    u=0
    # NOTE(review): hard-coded 5 assumes n == 6; should likely be n - 1.
    path=['']*5
    # Record paths: every vertex directly reachable from 1 starts with path
    # "1,<vertex>" and is updated later during relaxation.
    for i in range(1,n):
        if a[0][i]!=-1:
            path[i-1]=str(1)+','+str(i+1)
    for j in range(n-1):
        # NOTE(review): shadows the builtin min(); harmless here since the
        # builtin is never called in this function.
        min=9999
        # Pick the unvisited vertex with the smallest known distance.
        for i in range(n-1):
            if disk[i]!=-1 and disk[i]<min and v[i+1]==0:
                min=disk[i]
                u=i+1
        v[u]=1
        # Relax edges out of u; -1 marks a still-unknown distance.
        for i in range(1,n):
            if v[i]==0 and a[u][i]!=-1:
                if disk[i-1]>disk[u-1]+a[u][i] or disk[i-1]==-1:
                    disk[i-1]=disk[u-1]+a[u][i]
                    path[i-1]=str(path[u-1])+","+str(i+1)
    print(disk)
    print(path)
if __name__ == '__main__':
    # 6-vertex example graph; -1 marks "no edge".
    a=[[-1,2,12,-1,3,-1],
       [-1,-1,3,6,-1,-1],
       [-1,-1,-1,-1,5,-1],
       [-1,-1,4,-1,-1,15],
       [-1,-1,-1,2,-1,4],
       [-1,-1,-1,-1,-1,-1]]
    disk=[-1]*5  # distances from vertex 1 to vertices 2..6; -1 = unknown
    n=6
    v=[0]*6  # visited flags
    dijkstra(a,disk,n,v)
21,279 | e43d801cfbe746768c84824865b5de7e8eeac048 | #coding:utf-8
import jieba
# Jieba offers full mode and precise mode; precise mode (cut_all=False)
# is the default.
seg_list=jieba.cut('刚好遇见你,留下足迹多美丽',cut_all=False)
print("/".join(seg_list))
# A custom dictionary can add words missing from the built-in lexicon;
# see the project page for details:
#https://github.com/fxsjy/jieba
|
21,280 | 50b5d7e8f2cff3de84b88fc98daec28b490cfe15 | import sys
import ast
import io
import os
import csv
from comgen.constants import docstring_header, ast_header
class ASTDataExtractor(ast.NodeVisitor):
    """Walk a Python source file and append (docstring, AST-token) rows to CSV.

    For every function definition that carries an ASCII docstring, the
    function's AST is flattened into a space-separated token string and
    written, together with the docstring, to ``docstring_ast_file_path``.
    Visiting starts when the caller invokes .visit(self.ast_object).
    """

    def __init__(self, python_file_path, docstring_ast_file_path):
        # Fix: read the source through a context manager instead of leaking
        # the file handle from `open(path).read()`.
        with open(python_file_path) as python_file:
            self.ast_object = ast.parse(python_file.read())
        self.docstring_ast_file_path = docstring_ast_file_path
        self.single_function_ast_str = ''
        self.single_function_docstring = ''
        # Write the CSV header row once per extractor instance.
        with open(self.docstring_ast_file_path, 'a+') as docstring_ast_file:
            csv_writer = csv.writer(docstring_ast_file, delimiter=',')
            csv_writer.writerow([docstring_header, ast_header])

    def visit_FunctionDef(self, node):
        """Record one (docstring, AST string) row for this function, if any."""
        try:
            # only want docstrings that are in ascii so I can read + simplifies project
            temp_docstring = ast.get_docstring(node)
            if temp_docstring:
                self.single_function_docstring = temp_docstring.encode(
                    'ascii').decode('utf-8')
            # for training set, only want functions that have docstring since it's the training label
            if len(self.single_function_docstring):
                self.node_visit(node)
                self.single_function_ast_str = self.single_function_ast_str.encode(
                    'ascii').decode('utf-8')
                if self.single_function_ast_str:
                    self.save_data()
        except (UnicodeDecodeError, UnicodeEncodeError):
            # Non-ASCII docstring or AST text: skip this function entirely.
            pass
        finally:
            # Fix: always clear per-function state. Previously a Unicode error
            # skipped the resets, so stale text leaked into the record of the
            # next visited function.
            self.single_function_ast_str = ''
            self.single_function_docstring = ''

    def args_to_str(self, args):
        # Encode only the arity, e.g. 'args2' for two arguments.
        return f'args{len(args)}'

    def assign_to_str(self, node):
        # Type name of the assigned value node (e.g. 'Call', 'Constant').
        return type(node.value).__name__

    def expr_to_str(self, node):
        return node.value.__class__.__name__

    def constant_to_str(self, node):
        # Python type of the constant's value (e.g. 'str', 'int').
        return f'{type(node.value).__name__}'

    def node_to_str(self, node):
        """Return a short token for *node*, or '' when nothing is emitted.

        Non-AST values are represented via repr(). Note: the original
        version carried an unused 'ast_set' tuple which contained
        missing-comma string-concatenation bugs; it has been removed.
        """
        if isinstance(node, ast.AST):
            fields_list = []
            if node.__class__.__name__ == "FunctionDef":
                fields_list.append(node.__class__.__name__)
                if node.args.args:
                    fields_list.append(self.args_to_str(node.args.args))
            elif node.__class__.__name__ in ("Assign", "AugAssign"):
                # Collapse augmented assignment into plain 'Assign'.
                fields_list.append("Assign")
            elif node.__class__.__name__ in ("Yield", "YieldFrom"):
                fields_list.append("Yield")
            elif node.__class__.__name__ == "Expr":
                fields_list.append(self.expr_to_str(node))
            elif node.__class__.__name__ == "Constant":
                fields_list.append(self.constant_to_str(node))
            elif node.__class__.__name__ == "Call":
                # Calls are encoded by their positional-argument arity.
                fields_list.append(self.args_to_str(node.args))
            else:
                fields_list.append(node.__class__.__name__)
            return f"{' '.join(fields_list)}" if fields_list else ""
        else:
            return repr(node)

    def node_visit(self, node):
        """Depth-first walk accumulating tokens into single_function_ast_str."""
        node_str = self.node_to_str(node).strip()
        if node_str:
            self.single_function_ast_str += node_str + " "
        for field, value in ast.iter_fields(node):
            if isinstance(value, list):
                for value_item in value:
                    if isinstance(value_item, ast.AST):
                        self.node_visit(value_item)
            elif isinstance(value, ast.AST):
                self.node_visit(value)

    def save_data(self):
        """Append the current (docstring, AST string) pair to the CSV file."""
        with open(self.docstring_ast_file_path, 'a+') as docstring_ast_file:
            csv_writer = csv.writer(docstring_ast_file, delimiter=',')
            csv_writer.writerow(
                [self.single_function_docstring, self.single_function_ast_str])
|
21,281 | 055b3c85c3714603368ba6d18de6c609e87ea9c1 | # 画像識別(ResNet50)
# coding:utf-8
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
# Load model (ResNet50 with ImageNet weights)
model = ResNet50(weights='imagenet')

# Load the image at the network's expected input size (224x224)
img_path = 'test.jpg'
img = image.load_img(img_path, target_size=(224, 224))

# Preprocess: to array, add batch axis, apply ResNet50 input normalization
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

# Predict
preds = model.predict(x)

# Show the top-3 class labels with confidence percentages
for name, description, score in decode_predictions(preds, top=3)[0]:
    print(description + ": " + str(int(score * 100)) + "%")
|
21,282 | 18e03ddc2ae6261b5ba02409a10c5e4686f5b6dc | import uuid
import django_filters
import graphene
import graphql_geojson
from django.apps import apps
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from graphene import ID, ObjectType, relay, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql import GraphQLError
from graphql_geojson.filters import DistanceFilter
from categories.models import Category
from features import models
from features.enums import HarborMooringType, OverrideFieldType, Visibility, Weekday
from utils.graphene import LanguageEnum, StringListFilter
# Graphene enums generated from the Django enums; the lambda supplies each
# member's human-readable label as its description.
HarborMooringTypeEnum = graphene.Enum.from_enum(
    HarborMooringType, description=lambda e: e.label if e else ""
)
WeekdayEnum = graphene.Enum.from_enum(
    Weekday, description=lambda e: e.label if e else ""
)
class Address(ObjectType):
    """Postal address (street, postal code, municipality) of a feature."""
    street_address = graphene.String()
    postal_code = graphene.String()
    municipality = graphene.String()
class ContactInfo(DjangoObjectType):
    """Contact information for the given feature."""

    class Meta:
        model = models.ContactInfo
        fields = ("email", "phone_number")

    # Address fields live flat on the model; expose them as a nested object.
    address = graphene.Field(Address)

    def resolve_address(self: models.ContactInfo, info, **kwargs):
        # A plain dict suffices: Address resolves its fields by key.
        return {
            "street_address": self.street_address,
            "postal_code": self.postal_code,
            "municipality": self.municipality,
        }
class ExternalLink(DjangoObjectType):
    """Link to an external system.

    Link can be e.g. to an online store, a berth rental or to ferry information.
    """

    class Meta:
        model = models.Link
        fields = (
            "type",
            "url",
        )
class FeatureSource(ObjectType):
    """Source system information for a feature."""

    system = graphene.String(
        required=True,
        description=_(
            "Name of the source system (e.g. 'myhelsinki', 'ahti', "
            "'ulkoliikuntakartta', 'digitransit')"
        ),
    )
    type = graphene.String(
        required=True,
        description=_(
            "Type of the feature in the source system, if applicable (e.g. 'place', "
            "'activity', 'event', 'route')"
        ),
    )
    id = graphene.String(
        required=True, description="ID of the current feature in source system"
    )
class PriceTag(DjangoObjectType):
    """An item displayed in a price list."""

    class Meta:
        model = models.PriceTag
        fields = ("price",)

    item = graphene.String(required=True, description=_("Name of the item"))
    price = graphene.Decimal(required=True, description=_("Price of the item in EUR"))
    unit = graphene.String(
        description=_(
            "Unit of the price (e.g. 'hour', 'day', 'piece', 'person', 'child', "
            "'one way')"
        ),
    )
class Teaser(DjangoObjectType):
    """Simple content element (e.g. something special about a feature)."""

    class Meta:
        model = models.FeatureTeaser
        fields = ()  # Don't include any fields from the model automatically

    header = graphene.String(
        description=_("An opening, e.g. 'Starting' from 'Starting from 7€/day.'")
    )
    main = graphene.String(description=_("The meat of the deal, '7€/day' part"))
class FeatureTranslations(DjangoObjectType):
    """Values in other languages for the feature attributes that can have translations."""

    language_code = LanguageEnum(required=True)

    class Meta:
        # The translation model is generated by the translation framework,
        # so it is looked up through the app registry rather than imported.
        model = apps.get_model("features", "FeatureTranslation")
        exclude = ("id", "master")
class Image(DjangoObjectType):
    """Image attached to a feature, with its copyright owner and license."""

    class Meta:
        model = models.Image
        fields = (
            "url",
            "copyright_owner",
            "license",
        )
class License(DjangoObjectType):
    """A license, exposed with its id and display name."""

    class Meta:
        model = models.License
        fields = ("id",)

    name = graphene.String(required=True, description=_("Display name of the license"))
class Tag(DjangoObjectType):
    """Tags are associated with things (like features)."""

    class Meta:
        model = models.Tag
        fields = ("id", "features")

    name = graphene.String(required=True, description=_("Display name of the tag"))
class OpeningHoursPeriod(DjangoObjectType):
    """A period during which certain opening hours are valid."""

    class Meta:
        model = models.OpeningHoursPeriod
        fields = (
            "valid_from",
            "valid_to",
            "opening_hours",
        )

    comment = graphene.String(
        description=_(
            "Comment for this opening hour period (e.g. 'Exceptional opening hours "
            "during Midsummer')"
        ),
    )
class OpeningHours(DjangoObjectType):
    """The daily opening hours / hours of operation of something."""

    class Meta:
        model = models.OpeningHours
        fields = (
            "opens",
            "closes",
            "all_day",
        )

    day = WeekdayEnum(required=True, description=_("Day of week"))
class Depth(ObjectType):
    """The depth of something, in meters.

    Can be a single value (min and max are equal) or a range.
    (Consider: harbor/lake/pool/mineshaft)."
    """

    min = graphene.Float(
        required=True,
        description=_(
            "An approximation of the minimum depth (or lower end of the range)"
        ),
    )
    max = graphene.Float(
        required=True,
        description=_(
            "An approximation of the maximum depth (or deeper end of the range)"
        ),
    )
class HarborDetails(ObjectType):
    """Information specific to harbors (and piers)."""

    moorings = graphene.List(
        graphene.NonNull(HarborMooringTypeEnum),
        description=_("Mooring types available in the harbor"),
    )
    depth = graphene.Field(
        Depth, description=_("Approximate depth of the harbor, in meters")
    )

    def resolve_moorings(self: models.FeatureDetails, info, **kwargs):
        # Direct index: assumes harbor details data always contains
        # 'berth_moorings' (a missing key would raise KeyError).
        return self.data["berth_moorings"]

    def resolve_depth(self: models.FeatureDetails, info, **kwargs):
        """Minimum depth is mandatory, maximum is included for a range."""
        min = self.data.get("berth_min_depth")
        max = self.data.get("berth_max_depth")
        if min is None:
            return None
        return {
            "min": min,
            "max": max,
        }
class FeatureDetails(ObjectType):
    """Detailed information a feature might have."""

    harbor = graphene.Field(HarborDetails, description=_("Details of a harbor"))
    price_list = graphene.List(
        "features.schema.PriceTag",
        required=True,
        description=_("Price list related to a feature"),
    )
class FeatureFilter(django_filters.FilterSet):
    """Contains the filters to use when retrieving features."""

    class Meta:
        model = models.Feature
        fields = [
            "distance_lte",
            "updated_since",
            "tagged_with_any",
            "tagged_with_all",
            "category",
        ]

    distance_lte = DistanceFilter(
        field_name="geometry",
        lookup_expr="distance_lte",
        label=_("Fetch features within a given distance from the given geometry"),
    )
    updated_since = django_filters.IsoDateTimeFilter(
        method="filter_updated_since",
        label=_("Fetch features that have changed since specified timestamp"),
    )
    tagged_with_any = StringListFilter(
        method="filter_tagged_with_any",
        label=_("Fetch features tagged with any of the specified tags (ids)"),
    )
    tagged_with_all = StringListFilter(
        method="filter_tagged_with_all",
        label=_("Fetch features tagged with all of the specified tags (ids)"),
    )
    category = StringListFilter(
        method="filter_category", label=_("Fetch features from included categories")
    )

    def filter_updated_since(self, queryset, name, value):
        # A feature counts as updated when either the source data or any
        # manual override changed after the given timestamp.
        return queryset.filter(
            Q(overrides__modified_at__gt=value) | Q(source_modified_at__gt=value)
        ).distinct()  # Distinct because filtering on ForeignKey relation.

    def filter_tagged_with_any(self, queryset, name, value):
        return queryset.filter(
            tags__in=value
        ).distinct()  # Distinct because filtering on ForeignKey relation.

    def filter_tagged_with_all(self, queryset, name, value):
        # Chain one filter per tag so every tag must be present (AND).
        for v in value:
            queryset = queryset.filter(tags=v)
        return queryset

    def filter_category(self, queryset, name, value):
        return queryset.filter(category__in=value)
class Feature(graphql_geojson.GeoJSONType):
"""Features in Ahti are structured according to GeoJSON specification.
All Ahti specific attributes are contained within attribute `properties`.
**Note!** `Feature.type` always has the value `Feature`.
"""
class Meta:
fields = (
"id",
"category",
"created_at",
"contact_info",
"teaser",
"details",
"geometry",
"images",
"links",
"opening_hours_periods",
"tags",
"translations",
)
filterset_class = FeatureFilter
model = models.Feature
geojson_field = "geometry"
interfaces = (relay.Node,)
ahti_id = graphene.String(
required=True,
description=_(
"Human readable ID. Format examples: "
"'ahti:feature:12C4' or 'myhelsinki:place:5678'"
),
)
source = graphene.Field(
FeatureSource, required=True, description=_("Source of the feature")
)
name = graphene.String(required=True, description=_("Name of the feature"))
one_liner = graphene.String(
required=True, description=_("Short introductory text or a tagline")
)
description = graphene.String(description=_("Description of the feature"))
details = graphene.Field(
FeatureDetails, description=_("Detailed information a feature might have")
)
url = graphene.String(description=_("URL for more information about this feature"))
modified_at = graphene.DateTime(required=True)
parents = graphene.List(
"features.schema.Feature",
required=True,
description=_("Parents of this feature"),
)
children = graphene.List(
"features.schema.Feature",
required=True,
description=_(
"Children of this feature (ex. stops along a route, piers of a harbor etc.)"
),
)
def resolve_source(self: models.Feature, info, **kwargs):
return {
"system": self.source_type.system,
"type": self.source_type.type,
"id": self.source_id,
}
def resolve_name(self: models.Feature, info, **kwargs):
name_override = self.overrides.filter(field=OverrideFieldType.NAME).first()
if name_override:
return name_override.value
return self.name
def resolve_modified_at(self: models.Feature, info, **kwargs):
latest_override = self.overrides.order_by("-modified_at").first()
return (
max(self.source_modified_at, latest_override.modified_at)
if latest_override
else self.source_modified_at
)
def resolve_details(self: models.Feature, info, **kwargs):
details = {}
for detail in self.details.all():
# Default dict resolver will resolve this for FeatureDetails
details[detail.type.lower()] = detail
# PriceTags have a relation to Feature model, so we resolve it separately
details["price_list"] = self.price_tags.all()
return details if details else None
    def resolve_parents(self: models.Feature, info, **kwargs):
        # "parents" is in get_queryset's prefetch list, so this hits the cache.
        return self.parents.all()
    def resolve_children(self: models.Feature, info, **kwargs):
        # Children, e.g. stops along a route or piers of a harbor.
        return self.children.all()
    @classmethod
    def get_queryset(cls, queryset, info):
        """Restrict to VISIBLE features and eagerly load every relation the
        schema can resolve, so a full feature query avoids per-row DB hits.
        The select_related/prefetch_related lists mirror the exposed fields.
        """
        return (
            queryset.filter(visibility=Visibility.VISIBLE)
            .select_related("source_type", "category", "teaser")
            .prefetch_related(
                "category__translations",
                "contact_info",
                "children",
                "details",
                "price_tags",
                "price_tags__translations",
                "images",
                "images__license",
                "images__license__translations",
                "links",
                "opening_hours_periods",
                "opening_hours_periods__opening_hours",
                "opening_hours_periods__translations",
                "parents",
                "tags",
                "tags__translations",
                "teaser__translations",
                "translations",
            )
        )
class FeatureTranslationsInput(graphene.InputObjectType):
    """Language-specific (translated) fields of a feature, for mutations."""
    language_code = LanguageEnum(required=True)
    name = graphene.String(required=True, description=_("Name of the feature"))
    description = graphene.String(description=_("Description of the feature"))
    url = graphene.String(description=_("URL for more information about this feature"))
    one_liner = graphene.String(description=_("Short introductory text or a tagline"))
class ContactInfoInput(graphene.InputObjectType):
    """Contact information input for a feature; every field is optional."""
    street_address = graphene.String()
    postal_code = graphene.String()
    municipality = graphene.String()
    phone_number = graphene.String()
    email = graphene.String()
class CreateFeatureMutation(relay.ClientIDMutation):
    """Create a new ahti-sourced Feature, left in DRAFT visibility for review."""

    class Input:
        translations = graphene.List(
            graphene.NonNull(FeatureTranslationsInput), required=True
        )
        geometry = graphql_geojson.Geometry(required=True)
        contact_info = ContactInfoInput()
        category_id = graphene.String()
        tag_ids = graphene.List(graphene.String)

    feature = graphene.Field(Feature)

    @classmethod
    def get_source_type(cls):
        """Features created through the API always use the ahti/api source type."""
        st, created = models.SourceType.objects.get_or_create(system="ahti", type="api")
        return st

    @classmethod
    @transaction.atomic
    def mutate_and_get_payload(cls, root, info, **kwargs):
        """Create the feature with translations, contact info and tags.

        Unknown category/tag ids raise DoesNotExist and invalid contact info
        raises ValidationError; transaction.atomic rolls everything back.
        """
        contact_info_values = kwargs.pop("contact_info", None)
        tag_ids = kwargs.pop("tag_ids", None)
        category_id = kwargs.pop("category_id", None)

        now = timezone.now()
        values = {
            "source_type": cls.get_source_type(),
            "source_id": uuid.uuid4(),  # API-created features get a synthetic id
            "source_modified_at": now,
            "mapped_at": now,
            "visibility": Visibility.DRAFT,  # not published until reviewed
        }
        values.update(kwargs)

        if category_id:
            values["category"] = Category.objects.get(id=category_id)
        if tag_ids:
            tags = [models.Tag.objects.get(id=tag_id) for tag_id in tag_ids]
        else:
            tags = []

        feature = models.Feature.objects.create_translatable_object(**values)

        if contact_info_values:
            # Validate before hitting the database. The previous implementation
            # used objects.create() followed by full_clean() + save(), which
            # inserted an unvalidated row first and then wrote it a second time.
            ci = models.ContactInfo(feature=feature, **contact_info_values)
            ci.full_clean()
            ci.save()

        if tags:
            feature.tags.set(tags)

        return CreateFeatureMutation(feature=feature)
class Query(graphene.ObjectType):
    """Root query fields for features and tags."""
    features = DjangoFilterConnectionField(
        Feature, description=_("Retrieve all features matching the given filters")
    )
    feature = graphene.Field(
        Feature,
        id=ID(description=_("The ID of the object")),
        ahti_id=String(description=_("Ahti ID of the object")),
        description=_("Retrieve a single feature"),
    )
    tags = graphene.List(Tag, description=_("Retrieve all tags"))
    def resolve_feature(self, info, id=None, ahti_id=None, **kwargs):
        # The relay global ID takes precedence over the human readable ahti id.
        if id:
            return relay.Node.get_node_from_global_id(info, id, only_type=Feature)
        if ahti_id:
            try:
                # NOTE(review): `ahti_id` is a custom queryset method defined
                # elsewhere; it appears to return one instance or raise
                # Feature.DoesNotExist — confirm against the manager.
                return Feature.get_queryset(models.Feature.objects, info).ahti_id(
                    ahti_id=ahti_id
                )
            except models.Feature.DoesNotExist:
                return None
        raise GraphQLError("You must provide either `id` or `ahtiId`.")
    def resolve_tags(self, info, **kwargs):
        return models.Tag.objects.all()
class Mutation(graphene.ObjectType):
    """Root mutation fields."""
    create_feature = CreateFeatureMutation.Field(
        description=_(
            # Adjacent literals are concatenated; the trailing space is needed —
            # the original text rendered as "...go through areview before...".
            "Create a new feature into the system which will go through a "
            "review before it is published into the API."
        )
    )
|
21,283 | 17ac3fbd7c6a73ddbc00df4c24b4be61d65800af | import sys, os
from source.agent import Agent
from source.data_structures import Dataset, Episode
import numpy as np
import pickle
import copy
import gym
from sklearn.preprocessing import PolynomialFeatures
from tqdm import tqdm
# RANDOM_SEED = 42
env = gym.make('CartPole-v0')
poly = PolynomialFeatures(2, include_bias=False)
def generate_dataset(env, agent, n_rollouts):
    """Roll out `n_rollouts` episodes under the agent's stochastic policy.

    Each state is expanded with the module-level degree-2 PolynomialFeatures
    transform before being handed to the agent or stored.
    """
    dataset = Dataset()
    for _ in tqdm(range(n_rollouts)):
        state = poly.fit_transform(env.reset().reshape(1, -1))
        states, actions, rewards, pb_sas = [], [], [], []
        done = False
        while not done:
            probs = agent.act(state, return_probs=True)
            action = np.random.choice(np.arange(agent.n_actions), 1, p=probs)[0]
            observation, reward, done, _ = env.step(action)
            states.append(state)
            actions.append(action)
            rewards.append(reward)
            pb_sas.append(probs[action])  # behaviour-policy probability of the action
            state = poly.fit_transform(observation.reshape(1, -1))
        # Scale the terminal reward by 10, as in the original implementation.
        rewards[-1] = rewards[-1] * 10
        dataset.episodes.append(Episode(states, actions, rewards, pb_sas))
    return dataset
if __name__ == '__main__':
    # np.random.seed(RANDOM_SEED)
    # Dimensionality of a state after the degree-2 polynomial expansion.
    s = env.reset()
    n_states = poly.fit_transform(s.reshape(1, -1))
    agent = Agent(n_states.shape[1], env.action_space.n, delta=0.1, sigma=0.1, is_tabular=False)
    agent.c = 0  # baseline return the candidate policy must beat
    mean_return = 0
    did_improve = []
    safety_dataset = generate_dataset(env, agent, 1000)
    candidate_dataset = generate_dataset(env, agent, 1000)
    ngen = 1  # attempts since the last accepted policy update
    for epoch in range(1000):
        print(f'Epoch: {epoch}')
        print('---------------')
        # NOTE(review): agent.update presumably proposes a candidate policy on
        # candidate_dataset and tests it on safety_dataset — confirm in Agent.
        did_pass = agent.update(safety_dataset, candidate_dataset, 1, write=False)
        if did_pass:
            # Re-evaluate on fresh rollouts and move the baseline up/down.
            eval_dataset = generate_dataset(env, agent, 1000)
            gt_estimates = agent.expected_discounted_return(eval_dataset)
            next_mean_return = np.mean(gt_estimates)
            print(f'Average discounted reward: {next_mean_return}')
            did_improve.append(next_mean_return > agent.c)
            agent.c = (next_mean_return * 1.0)
            mean_return = next_mean_return
            # Regenerate both datasets under the newly accepted policy.
            safety_dataset = generate_dataset(env, agent, 1000)
            candidate_dataset = generate_dataset(env, agent, 1000)
        else:
            ngen += 1
            # if ngen > 50:
            #     agent.reset_es()
            #     ngen = 1
        print(f'Current success rate: {np.mean(np.array(did_improve).astype(int))}')
        print(f'Current policy iteration: {agent.policy_idx}')
        print()
|
21,284 | 3c03e93c13c194a8246257c138c1ee24ea539c2d | import numpy as np
import scipy as scipy
import lxmls.classifiers.linear_classifier as lc
from lxmls.distributions.gaussian import *
class GaussianNaiveBayes(lc.LinearClassifier):
    """Gaussian naive Bayes with per-class feature means and class priors.

    train() returns the parameters of the equivalent linear classifier:
    row 0 is the per-class bias, rows 1..nr_f are the per-class mean weights
    (the unit-variance form; the shared-variance form is kept commented out).
    """

    def __init__(self):
        lc.LinearClassifier.__init__(self)
        self.trained = False
        self.means = 0
        # self.variances = 0
        self.prior = 0

    def train(self, x, y):
        """Fit means/priors from x (n_samples, n_features) and column vector y.

        Returns a (n_features + 1, n_classes) parameter array and stores the
        fitted means and priors on the instance.
        """
        nr_x, nr_f = x.shape
        classes = np.unique(y)
        nr_c = classes.shape[0]
        prior = np.zeros(nr_c)
        means = np.zeros((nr_c, nr_f))
        variances = np.zeros((nr_c, nr_f))
        # `range` instead of Python-2-only `xrange` (NameError on Python 3).
        for i in range(nr_c):
            idx, _ = np.nonzero(y == classes[i])
            prior[i] = 1.0 * len(idx) / len(y)
            for f in range(nr_f):
                g = estimate_gaussian(x[idx, f])
                means[i, f] = g.mean
                variances[i, f] = g.variance
        # Take the mean of the variances over features for each class.
        variances = np.mean(variances, 1)
        params = np.zeros((nr_f + 1, nr_c))
        for i in range(nr_c):
            # -0.5, not `-1/2`: under Python 2 integer division `-1/2 == -1`,
            # which corrupted the Gaussian discriminant bias term (the intended
            # 1/2 factor is confirmed by the commented-out variance form below).
            params[0, i] = -0.5 * np.dot(means[i, :], means[i, :]) + np.log(prior[i])
            params[1:, i] = means[i].transpose()
            # params[0,i] = -1/(2*variances[i]) * np.dot(means[i,:],means[i,:]) + np.log(prior[i])
            # params[1:,i] = (1/variances[i] * means[i]).transpose()
        self.means = means
        # self.variances = variances
        self.prior = prior
        self.trained = True
        return params
|
21,285 | 1ce41cb06dc09e92ad1210263912cc875c2b7a75 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module '
__author__ = 'clawpo'
from sklearn.cluster import MeanShift
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Two-column, tab-separated point cloud with no header row.
    data = pd.read_csv('data', sep='\t', header=None)
    model = MeanShift()
    model.fit(data)
    label_pred = model.labels_  # cluster label of every point
    centroids = model.cluster_centers_  # coordinates of the cluster centers
    colors = ['red', 'black', 'blue', 'green']
    marks = ['*','x','+','o']
    # NOTE(review): assumes MeanShift finds at most 4 clusters; more clusters
    # would index past colors/marks — confirm for the dataset used.
    fig, ax = plt.subplots()
    for i, s in enumerate(label_pred):
        ax.scatter(data[0].loc[i], data[1].loc[i], c=colors[s],
            marker=marks[s],label=s, alpha=0.5, edgecolors='none')
    # Mark each centroid with an arrow annotation labelled 'center'.
    for i in range(len(centroids)):
        plt.annotate('center', xy=(centroids[i,0],centroids[i,1]),
            xytext=(centroids[i,0]+1,centroids[i,1]+1), arrowprops=dict(facecolor='red'))
    plt.show()
|
21,286 | 10c9b138cd9ee3285860163401557f9218b1271f | import re
import os
import warnings
import subprocess
import numpy as np
import os.path as pa
from astropy.io import fits
from tempfile import mkdtemp
from astropy.table import Table, Column
from sfft.utils.ReadWCS import Read_WCS
from sfft.utils.StampGenerator import Stamp_Generator
from sfft.utils.pyAstroMatic.AMConfigMaker import AMConfig_Maker
from sfft.utils.SymmetricMatch import Symmetric_Match, Sky_Symmetric_Match
# version: Mar 12, 2023
__author__ = "Lei Hu <hulei@pmo.ac.cn>"
__version__ = "v1.4"
class PY_SEx:
@staticmethod
def PS(FITS_obj, PSF_obj=None, FITS_ref=None, SExParam=None, CATALOG_TYPE='FITS_LDAC', \
GAIN_KEY='GAIN', SATUR_KEY='SATURATE', PIXEL_SCALE=1.0, SEEING_FWHM=1.2, BACK_TYPE='AUTO', \
BACK_VALUE=0.0, BACK_SIZE=64, BACK_FILTERSIZE=3, USE_FILT=True, DETECT_THRESH=1.5, ANALYSIS_THRESH=1.5, \
DETECT_MINAREA=5, DETECT_MAXAREA=0, DEBLEND_NTHRESH=32, DEBLEND_MINCONT=0.005, CLEAN='Y', \
BACKPHOTO_TYPE='LOCAL', PHOT_APERTURES=5.0, NegativeCorr=True, CHECKIMAGE_TYPE='NONE', \
VIGNET=None, STAMP_IMGSIZE=None, AddRD=False, ONLY_FLAGS=None, XBoundary=0.0, YBoundary=0.0, \
Coor4Match='XY_', XY_Quest=None, Match_xytol=2.0, RD_Quest=None, Match_rdtol=1.0, \
Preserve_NoMatch=False, MDIR=None, VERBOSE_TYPE='QUIET', VERBOSE_LEVEL=2):
"""
# Inputs & Outputs:
-FITS_obj [] # FITS file path of the input image for photometry
-PSF_obj [None] # PSFEx .psf file path for PSF photometry
-FITS_ref [None] # FITS file path of the input image for detection
# (a) -FITS_ref = None means single image mode:
# SEx detection & SEx photometry on same image -FITS_obj.
# (b) -FITS_ref != None mean dual image mode:
# SEx detection on -FITS_ref & SEx photometry on -FITS_obj
-SExParam [None] # Parameter List (Python list here) of SExtractor output catalog
# one can use command line 'sex -dp' to find all available parameters
# Configurations for SExtractor:
-CATALOG_TYPE ['FITS_LDAC'] # SExtractor Parameter CATALOG_TYPE
# NONE,ASCII,ASCII_HEAD, ASCII_SKYCAT,
# ASCII_VOTABLE, FITS_1.0 or FITS_LDAC
-GAIN_KEY ['GAIN'] # SExtractor Parameter GAIN_KEY
# i.e., keyword of GAIN in FITS image header
-SATUR_KEY ['SATURATE'] # SExtractor Parameter SATUR_KEY
# i.e., keyword of the saturation level in the FITS image header
-PIXEL_SCALE [1.0] # SExtractor Parameter PIXEL_SCALE
# size of pixel in arcsec (0=use FITS WCS info)
# P.S. it only works for surface brightness parameters,
# FWHM_WORLD and star/galaxy separation.
-SEEING_FWHM [1.2] # SExtractor Parameter SEEING_FWHM
# stellar FWHM in arcsec
# P.S. it only works for star/galaxy separation.
-BACK_TYPE ['AUTO'] # SExtractor Parameter BACK_TYPE = [AUTO or MANUAL].
-BACK_VALUE [0.0] # SExtractor Parameter BACK_VALUE (only work for BACK_TYPE='MANUAL')
-BACK_SIZE [64] # SExtractor Parameter BACK_SIZE
-BACK_FILTERSIZE [3] # SExtractor Parameter BACK_FILTERSIZE
-DETECT_THRESH [1.5] # SExtractor Parameter DETECT_THRESH
# <sigmas> or <threshold>,<ZP> in mag.arcsec-2
-ANALYSIS_THRESH [1.5] # SExtractor Parameter ANALYSIS_THRESH
# <sigmas> or <threshold>,<ZP> in mag.arcsec-2
# Threshold at which CLASS STAR and FWHM_ operate.
-DETECT_MINAREA [5] # SExtractor Parameter DETECT_MINAREA
# min. # of pixels above threshold
-DETECT_MAXAREA [0] # SExtractor Parameter DETECT_MAXAREA
# max. # of pixels above threshold (0=unlimited)
-DEBLEND_MINCONT [0.005] # SExtractor Parameter DEBLEND_MINCONT (typically, 0.001 - 0.005)
# Minimum contrast parameter for deblending
-CLEAN ['Y'] # SExtractor Parameter CLEAN
# Clean spurious detections? (Y or N)?
-BACKPHOTO_TYPE ['LOCAL'] # SExtractor Parameter BACKPHOTO_TYPE
# can be GLOBAL or LOCAL
# P.S. I have changed to the default value to more common 'LOCAL'
-PHOT_APERTURES [5.0] # SExtractor Parameter PHOT_APERTURES
# MAG_APER aperture diameter(s) in pixels
# P.S. Here it can be a Python list of apertures
-CHECKIMAGE_TYPE ['NONE'] # SExtractor Parameter CHECKIMAGE_TYPE
# can be NONE, BACKGROUND, BACKGROUND_RMS, MINIBACKGROUND, MINIBACK_RMS,
# -BACKGROUND, FILTERED, OBJECTS, -OBJECTS, SEGMENTATION, or APERTURES
-VERBOSE_TYPE ['QUIET'] # SExtractor Parameter VERBOSE_TYPE
# can be QUIET, NORMAL or FULL
# Other parameters
-NegativeCorr [True] # In SExtractor, MAG_* = 99. and MAGERR_* = 99. for FLUX_* < 0.0
# If -NegativeCorr = True, PYSEx will correct MAG_* and MAGERR_*
# to be a valid values using abs(FLUX_*) and FLUXERR_*.
-VIGNET [None] # VIGNET for generating PSFEx input catalog
# e.g., set -VIGNET = (51, 51), PYSEx will add 'VIGNET(51, 51)' into
# the SExtractor output parameter list.
-STAMP_IMGSIZE [None] # PYSEx allows for making a stamp for each detected source on -FITS_obj,
# the stamps will be saved in a new column named 'Stamp' in the output catalog.
# -STAMP_IMGSIZE is the stamp size, e.g., -STAMP_IMGSIZE = (31, 31)
-AddRD [False] # Add columns for Ra. and Decl. in the output catalog?
# P.S. The columns are X_WORLD, Y_WORLD or XWIN_WORLD, YWIN_WORLD.
# Although SExtractor itself can generate these columns, here we use
# astropy.wcs to convert image coordinates to world coordinates instead.
# (Because I feel like that astropy has better WCS compatibility than SExtractor)
-ONLY_FLAGS [None] # Do you put any constrain on the SExtractor output parameter FLAGS
#
# FLAGS description
# 1 aperture photometry is likely to be biased by neighboring sources
# or by more than 10% of bad pixels in any aperture
# 2 the object has been deblended
# 4 at least one object pixel is saturated
# 8 the isophotal footprint of the detected object is truncated (too close to an image boundary)
# 16 at least one photometric aperture is incomplete or corrupted (hitting buffer or memory limits)
# 32 the isophotal footprint is incomplete or corrupted (hitting buffer or memory limits)
# 64 a memory overflow occurred during deblending
# 128 a memory overflow occurred during extraction
#
# P.S. popular constrain like -ONLY_FLAGS = [0] or -ONLY_FLAGS = [0,2]
-XBoundary [0.0] # The image boundary size for X axis
# Sources detected within the boundary will be rejected in the output catalog
-YBoundary [0.0] # The image boundary size for Y axis
# ~
-Coor4Match ['XY_'] # Can be 'XY_' or 'XYWIN_'
# 'XY_': cross match using coordinates X_IMAGE, Y_IMAGE, X_WORLD, Y_WORLD
# 'XYWIN_': cross match using coordinates XWIN_IMAGE, YWIN_IMAGE, XWIN_WORLD, YWIN_WORLD
-XY_Quest [None] # The image coordinates you would like to cross match with photometry SExtractor catalog.
# P.S. it is a Python array with shape (2, NUM_SOURCE), a collection of (x, y)
-Match_xytol [2.0] # The cross match tolerance (pix)
-RD_Quest [None] # The world coordinates you would like to cross match with SExtractor photometry catalog.
# P.S. it is a Python array with shape (2, NUM_SOURCE), a collection of (ra, dec)
-Match_rdtol [1.0] # The cross match tolerance (arcsec)
-Preserve_NoMatch [False] # Preserve the detected sources in SExtractor photometry catalog without cross match counterpart
-MDIR [None] # Parent Directory for output files
# PYSEx will generate a child directory with a random name under the paraent directory
# all output files are stored in the child directory
-VERBOSE_LEVEL [2] # The level of verbosity, can be [0, 1, 2]
# 0/1/2: QUIET/NORMAL/FULL mode
# NOTE: it only controls the verbosity out of SExtractor.
# Returns:
AstSEx # astropy Table of SExtractor photometry catalog
PixA_SExCheckLst # List of Pixel arrays for SExtractor check images
# P.S. PixA = fits.getdata(FITS, ext=0).T
FITS_SExCat # File path of SExtractor photometry catalog
# P.S. only for -MDIR is not None
FITS_SExCheckLst # List of file path of SExtractor check images
# P.S. only for -MDIR is not None
# ---------------- MORE DESCRIPTION ON HOW SEXTRACTOR WORK ----------------
#
# * SExtractor Inputs
# ** Array-Inputs:
# SEx works on one image for signal detection and another image for photometry
# @ Individual Mode (Common): Image4detect and Image4phot are the same image (-FITS_obj).
# @ Dual Mode: Image4detect (-FITS_ref) and Image4phot (-FITS_obj) are different images .
# ** PSF-Input:
# SEx can accept given PSF model for PSF-Photometry (-PSF_obj).
# ** Parameter-Inputs:
# a. Basic keywords in FITS header of Image4detect:
# (-GAIN_KEY, -SATUR_KEY).
# b. How to generate Global Background Map:
# (-BACK_TYPE, -BACK_VALUE, -BACK_SIZE, -BACK_FILTERSIZE).
# c. Give the criteria for SExtractor Source Detection
# (-DETECT_THRESH, -DETECT_MINAREA, -DETECT_MAXAREA, -DEBLEND_NTHRESH, -DEBLEND_MINCONT, -CLEAN).
# d. Which photometry method(s) used by SExtractor:
# (parameters in -SExParam, e.g., FLUX_AUTO, FLUX_APER, FLUX_PSF).
# e. Specify output Check-Images:
# (-CHECKIMAGE_TYPE).
# f. Specify output columns in output SExtractor photometry table:
# (-SExParam).
#
# Remarks on Weight-Map:
# SExtractor allows users to feed a weight-map image. It is very useful for mosaic image, which has
# vairable effective GAIN and background noise level across the field due to different Num_Exposure.
# Weight-map would help SExtractor to calculate errors, such as, FLUXERR and MAGERR, more accurately.
#
# WARNING: For current version, PYSEx does not include this feature, and I would recommend users
# to set an average effective GAIN for mosaic image, and keep in mind it may, to some extent,
# cause inaccurate error estimations.
#
# * SExtractor Workflow (Background)
# ** Extract Global_Background_Map (GBMap) and its RMS (GBRMap) from Image4detect & Image4phot
# Control Parameters: BACK_TYPE, BACK_VALUE, BACK_SIZE and BACK_FILTERSIZE.
# @ Produce GBMap
# a. Manual-FLAT (e.g. BACK_TYPE='MANUAL', BACK_VALUE=100.0)
# SEx directly define GBMap as a FLAT image with given constant BACK_VALUE.
# b. Auto (e.g. BACK_TYPE='AUTO', BACK_SIZE=64, BACK_FILTERSIZE=3)
# SEx defines a mesh of a grid that covers the whole frame by [BACK_SIZE].
# i. Convergence-based sigma-clipping method on the flux histogram of each tile.
# More specificly, the flux histogram is clipped iteratively until convergence
# at +/- 3sigma around its median.
# ii. SEx compute local background estimator from the clipped histogram of each tile.
# If sigma is changed by less than 20% during clipping process (the tile is not crowded),
# use the mean of the clipped histogram as estimator
# otherwise (the tile is crowded), mode = 2.5*median - 1.5*mean is employed instead.
# iii. Once the estimate grid is calculated, a median filter [BACK_FILTERSIZE] can be applied
# to suppress possible local overestimations.
# iv. The resulting background map is them simply a bicubic-spline interpolation
# between the meshes of the grid.
#
# @ Generate GBRMap
# only Auto (e.g, BACK_SIZE=64, BACK_FILTERSIZE=3)
# SEx produces the noise map by the same approach of Auto style of GBMap, where the only
# difference is that SEx is [probably] use standard deviation as estimator of the
# clipped flux historgram, other than mean or mode.
#
# NOTE Abbr. GBMap / GBRMap from Image4detect: GBMap_4d / GBRMap_4d
# Abbr. GBMap / GBRMap from Image4phot: GBMap_4p / GBRMap_4p
#
# NOTE WARNING: Dual Mode have to use consistent control parameters for Image4detect & Image4phot,
# as it is not allowed to set some secondary configuration in SEx software framework,
# despite that it is not necessarily reasonable in some cases.
#
# * SExtractor Workflow (Detection)
# ** a. SEx-Detect on Image4detect: SkySubtraction & Filtering & Thresholding & Deblending & AreaConstrain & Clean
# @ SkySubtraction & Filtering Process (e.g. FILTER='Y', FILTER_NAME='default.conv')
# SEx Remove GBMap_4d from Image4detect and then perform a convolution to maximizes detectability.
# NOTE The power-spectrum of the noise and that of the superimposed signal can be significantly different.
# NOTE Although Filtering is a benefit for detection, it distorts profiles and correlates the noise.
# NOTE Filtering is applied 'on the fly' to the image, and directly affects only the following
# Thresholding process and Isophotal parameters.
#
# @ Thresholding Process (e.g. DETECT_THRESH=1.5, THRESH_TYPE='RELATIVE')
# SEx highlights the pixels in Filtered_Image with Threshold_Map = DETECT_THRESH * GBRMap_4d
# We would be bettter to imagine SEx actually make a hovering mask.
#
# @ Deblending Process (e.g. DEBLEND_MINCONT=0.005, DEBLEND_NTHRESH=32)
# SEx triggers a deblending process to dentify signal islands from the hovering
# mask [probably] on Filtered_Image,
# which converts the hovering mask to be a hovering label map.
#
# @ Put AreaConstrain (e.g. DETECT_MINAREA=5, DETECT_MAXAREA=0)
# MinMax AreaConstrain is applied, and cases violating it then lose their hovering labels.
#
# @ Clean Process (e.g. CLEAN='YES')
# SEx will clean the list of objects of artifacts caused by bright objects.
# As a correction process, all cleaned objects are subsequently removed from the hovering label map.
# NOTE Now the hovering label map is the SEGMENTATION check image.
# One may refer to such label island as ISOIsland
# NOTE These labels are consistent with the indices in the ouput photometry table.
# NOTE SEx will report something like this: Objects: detected 514 / sextracted 397
# SET CLEAN='N', you could find detected == sextracted.
#
# ** b. Generate Positional & BasicShape Paramters from isophotal profile
# NOTE In this section 'pixel flux' means values of Filtered_Image.
# i. XMIN, XMAX, YMIN, YMAX define a rectangle which encloses the ISOIsland.
# ii. Barycenter X_IMAGE, Y_IMAGE is the first order moments of the profile, where the pixel flux of
# ISOIsand is the corresponding weight for cacluatuing Barycenter.
# iii. Centered second-order moments X2_IMAGE, Y2_IMAGE, XY2_IMAGE are convenient for
# measuring the spatial spread of a source profile. likewise, pixel fluxs of ISOIsand are weights.
# (if a parameter can be fully expressed by them, we will use $ to indicate).
# iv. Describe ISOIsand as an elliptical shape, centred at Barycenter.
# $A_IMAGE, $B_IMAGE are ellipse semi-major and semi-minor axis lengths, respectively.
# The ellipse is uniquely determined by $CXX_IMAGE, $CYY_IMAGE, $CXY_IMAGE with KRON_RADIUS,
# where KRON_RADIUS is independently calculated in a routine inspired by Kron's 'first moment' algorithm.
# NOTE By-products: $ELONGATION = A / B and $ELLIPTICITY = 1 - B / A
#
# ** c. Generate Positional & BasicShape Paramters from Window
# NOTE In this section 'pixel flux' [likely] means values of Image4phot !
# NOTE It is designed for Refinement, NOT for Photometry, thereby these parameters are irrelevant to Photometry!
#
# METHOD: The computations involved are roughly the same except that the domain is a circular Gaussian window,
# (I don't know what is the window radius) as opposed to the object's isophotal footprint (ISOIsland).
#
# MOTIVATION: Parameters measured within an object's isophotal limit are sensitive to two main factors:
# + Changes in the detection threshold, which create a variable bias
# + Irregularities in the object's isophotal boundaries, which act as
# additional 'noise' in the measurements.
#
# @Positional: This is an iterative process. The computation starts by initializing
# the windowed centroid coordinates to the Barycenter.
# The process will adjust window and finally its centroid converges
# at some point: XWIN_IMAGE, YWIN_IMAGE.
# (If the process is failed then XWIN_IMAGE, YWIN_IMAGE = X_IMAGE, Y_IMAGE)
# It has been verified that for isolated, Gaussian-like PSFs, its accuracy is close to
# the theoretical limit set by image noise.
# NOTE: We preferably use it for point sources, like in transient detection.
# However it may not optimal for extended sources like glaxies.
# NOTE: X_IMAGE & Y_IMAGE seems to be a good compromise choice.
#
# @BasicShape: Windowed second-order moments are computed once the centering process has converged.
# X2WIN_IMAGE, Y2WIN_IMAGE, XY2WIN_IMAGE
# AWIN_IMAGE, BWIN_IMAGE, CXXWIN_IMAGE, CYYWIN_IMAGE, CXYWIN_IMAGE
#
# NOTE: Positional XWIN_IMAGE and YWIN_IMAGE are quite useful to
# provide a refined Object coordinate (of gaussian-like point sources).
# However we seldom use Window version of BasicShape parameters
# to describe the shape of the approaximated ellipse.
#
# ** d. Generate Positional Paramters from PSF Fitting
# NOTE In this section 'pixel flux' is [possibly] means values of skysubtracted-Image4detect !
# METHOD: XPSF_IMAGE and YPSF_IMAGE are fitted from given PSF model.
#
# * SExtractor Workflow (Photometry)
# ** Count Flux with various photometry methods
# NOTE This process always works on Image4phot.
# @ISO: SEx simply count flux according to the hovering label map (ISOIsland).
# @APER: SEx count flux on a Circular Aperture with given PHOT_APERTURES (centred at Barycenter X_IMAGE, Y_IMAGE).
# @AUTO: SEx count flux on a Elliptic Aperture, which is determined by the hovering isophotal ellipse
# (centred at Barycenter X_IMAGE, Y_IMAGE). The leaked light fraction is typically less than 10%.
# @PSF: SEx count flux according to the PSF fitting results (centred at XPSF_IMAGE and YPSF_IMAGE).
# This is optimal for pointsource but fairly wrong for extended objects.
#
# ** Peel background contribution from Counted Flux
# @BACKPHOTO_TYPE='LOCAL': background will take a rectangular annulus into account,
# which has a donnut shape around the object, measured on Image4phot.
# @BACKPHOTO_TYPE='GLOBAL': background will directly use GBMap_4p,
# which can be Manual-Flat or Auto.
#
# ** Noise Estimation
# METHOD: SEx estimate the photometric noise contributed from photon possion distribution and sky background.
# that is, FLUXERR^2 = FLUX / GAIN + Area * skysig^2
# NOTE: Area means the pixel area of flux-count domain for various photometry methods.
# NOTE: skysig^2 is [probably] the sum of GBRMap_4p within the flux-count domain.
# NOTE: Bright Sources are Photon Noise Dominated cases, while Faint Sources are Sky Noise Dominated cases.
#
# * SExtractor Workflow (Check-Image)
# @BACKGROUND : GBMap_4p
# @BACKGROUND_RMS : GBRMap_4d
# @-BACKGROUND : Image4phot - GBMap_4p
# @FILTERED: Filtered_Image
# @SEGMENTATION: ISOIsland Label Map
# @OBJECTS: ISOIslands use flux in -BACKGROUND, zero otherwise
# @APERTURES: -BACKGROUND with brighten apertures to show the isophotal ellipses.
#
# * SExtractor Workflow (Formula)
# a. MAG = -2.5 * log10(FLUX) + MAG_ZEROPOINT
# b. MAGERR = 1.0857 * FLUXERR / FLUX = 1.0857 / SNR
# NOTE: a common crude approxmation SNR ~ 1/MAGERR.
# NOTE: It is [probably] derived from MAGERR = 2.5*np.log10(1.0+1.0/SNR)
# Ref: http://www.ucolick.org/~bolte/AY257/s_n.pdf
# ZTF use a more precise factor 1.085736, whatever, not a big deal.
#
# * Additional Clarification
# a. SExtractor can read GAIN & SATURATION & MAGZERO from FITS header of Image4phot by their keys.
# b. If SExtractor is configured with FITS_LDAC, the FITS header of Image4phot will be delivered
# into output FITS file saved at output-FITS[1].data in table format
# (only one element: a long integrated header text).
# c. If some ISOIsland has saturated pixel value on Image4phot, you can still find it
# on SEGMENTATION / OBJECTS, and it will be marked by FLAGS=4 in output catalog.
# d. Windowed Coordinate has higher priority in the function
# i. Make Stamps ii. Convert to RD iii. Symmetric-Match
# First use XWIN_IMAGE & YWIN_IMAGE (if exist),
# otherwise, employ X_IMAGE & Y_IMAGE instead.
# e. SExtractor allows to submit request for multi-check images
# e.g. CHECKIMAGE_TYPE = "BACKGROUND,SEGMENTATION,..."
# f. Although SExtractor can directly provide sky coordinates in output table,
# We always independently to get them by convert XY using astropy.
# g. If VIGNET is called in SExtractor, stamps will be extracted around the targets,
# centred at their Barycenter X_IMAGE, Y_IMAGE, from Image4phot.
#
# * Additional Tips
# a. SNR_WIN: Window-based Gaussian-weighted SNR estimate
# Although SNR_WIN is empirically tend to slightly underestimate noise,
# this useful parameter is calculated independently from the employed phot-method.
#
# b. If you got a long runtime, it may caused by
# i. Low DETECT_THRESH
# ii. Request XWIN_IMAGE, YWIN_IMAGE, SNR_WIN
# iii. BACKPHOTO_TYPE == 'LOCAL'
#
# c. WARNINGS
# i. You may encounter bug if sethead after fits.getdata
# ii. SExtractor do not support string > 256 as argument in command line.
# Use 'cd dir && sex ...' to avoid segmentation fault.
# iii. FITS_LDAC --- TABLE-HDU 2 | FITS_1.0 --- TABLE-HDU 1
# iv. In Debian operation system, please correct 'sex' as 'sextractor'
#
# d. A coarse look-up table between DETECT_THRESH and MINIMAL SNR_WIN
# DETECT_THRESH = 1.0 ---> minimal SNR_WIN = 3-4
# DETECT_THRESH = 1.2 ---> minimal SNR_WIN = 4-5
# DETECT_THRESH = 1.5 ---> minimal SNR_WIN = 5.5-6.5
# DETECT_THRESH = 2.0 ---> minimal SNR_WIN = 8-9
# DETECT_THRESH = 3.0 ---> minimal SNR_WIN ~ 12
# DETECT_THRESH = 4.0 ---> minimal SNR_WIN ~ 15
# DETECT_THRESH = 5.0 ---> minimal SNR_WIN ~ 20
#
# README
# * 2 models for input image*
# @ Dual-Image Mode: FITS_ref is Image4detect, FITS_obj is Image4phot.
# ** When this mode is called, please read above comments very carefully.
#
# @ Single-Image Mode: FITS_obj is Image4detect & Image4phot.
# ** When sky background has been well subtracted.
# Typically Set: BACK_TYPE='MANUAL', BACK_VALUE=0.0, BACK_SIZE=64, BACK_FILTERSIZE=3, BACKPHOTO_TYPE='LOCAL'
# a. BACK_TYPE & BACK_VALUE: Use Flat-Zero as Global_Background_Map
# b. BACK_SIZE & BACK_FILTERSIZE: Produce RMS of Global_Background_Map [AUTO]
# c. BACKPHOTO_TYPE: Use LOCAL / GLOBAL (zero) background to count sky flux contribution in photometry.
#
# * Additional Remarks on Background
# @ well-subtracted sky just means it probably outperforms SEx GLOBAL-Sky,
# we should not presume the underlying true sky is really subtracted, therefore,
# BACKPHOTO_TYPE = 'LOCAL' is in general necessary !
#
# @ Just like sky subtraction, the sky term in image subtraction can only handle the
# low-spatial-frequency trend of background. The image has flat zero background as an ideal expectation,
# we still need set BACKPHOTO_TYPE = 'LOCAL' on difference photometry.
#
# @ 'LOCAL' method itself can be biased too. We get the conclusion when we try to perform
# aperture photometry on two psf-homogenized DECam images. Recall any error on matched kernel is some
# linear effect for aperture photometry, however, we found a bias in FLUX_APER which is independent
# with the target birghtness. E.g. FLUX_APER_SCI is always smaller than FLUX_APER_REF with
# a nearly constant value, say 10.0 ADU, no matter the target is 19.0 mag or 22.0 mag, as if there is
# some constant leaked light when we measure on SCI. Equivalently, DMAG = MAG_APER_SCI - MAG_APER_REF
# deviate zero-baseline, and it becomes increasingly serious (towards to the faint end).
#
# ------------------------------------
# We guess the problem is caused by the fact: the background value calculated from the annulus
# around target might be a biased (over/under-) estimation. One observation supports our argument:
# the flux bias is much more evident when we increase the aperture size.
# NOTE: As we have found the flux bias is basically a constant, we can do relative-calibration by
# calculating the compensation offset FLUX_APER_REF - FLUX_APER_SCI for a collection of sationary stars.
# It is 'relative' since we have just assumed background estimation of REF is correct,
# which is proper if we are going to derive the variability (light curve).
#
# @ In which cases, Re-Run SExtractor can get the same coordinate list?
# a. Same configurations but only change photometric method, e.g. from AUTO to APER
# b. Same configurations but from Single-Image Mode to Dual-Image Mode
# NOTE: FLAGS is correlated to object image, if we add constraint FLAG=0
# then we fail to get the same coordinate list.
#
"""
# * sex or sextractor?
for cmd in ['sex', 'sextractor']:
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
del p
break
except OSError:
continue
else:
raise FileNotFoundError('Source Extractor command NOT FOUND!')
SEx_KEY = cmd
del cmd
# * Make Directory as workplace
objname = pa.basename(FITS_obj)
TDIR = mkdtemp(suffix=None, prefix='PYSEx_', dir=MDIR)
if VERBOSE_LEVEL in [0, 1, 2]:
_message = 'Run Python Wrapper of SExtractor!'
print('\nMeLOn CheckPoint [%s]: %s' %(objname, _message))
# * Keyword Configurations
phr_obj = fits.getheader(FITS_obj, ext=0)
if GAIN_KEY in phr_obj:
GAIN = phr_obj[GAIN_KEY]
if VERBOSE_LEVEL in [1, 2]:
_message = 'SExtractor uses GAIN = [%s] from keyword [%s]!' %(GAIN, GAIN_KEY)
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
else:
GAIN = 0.0 # infinite GAIN, Poission noise ignored
if VERBOSE_LEVEL in [0, 1, 2]:
_warn_message = 'SExtractor has to use default GAIN = 0!'
warnings.warn('MeLOn WARNING [%s]: %s' %(objname, _warn_message))
if SATUR_KEY in phr_obj:
SATURATION = phr_obj[SATUR_KEY]
if VERBOSE_LEVEL in [1, 2]:
_message = 'SExtractor uses SATURATION = [%s] from keyword [%s]!' %(SATURATION, SATUR_KEY)
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
else:
SATURATION = 50000.0
if VERBOSE_LEVEL in [0, 1, 2]:
_warn_message = 'SExtractor has to use default SATURATION = 50000.0!'
warnings.warn('MeLOn WARNING [%s]: %s' %(objname, _warn_message))
"""
# A few additional remarks
# [1] MAGERR/FLUXERR/SNR are very sensitive to GAIN value.
# [2] PIXEL_SCALE (unit: arcsec) only works for surface brightness parameters,
# FWHM (FWHM_WORLD) and star/galaxy separation. PIXEL_SCALE=0 uses FITS WCS info.
# [3] SEEING_FWHM (unit: arcsec) is only for star/galaxy separation.
# VIP: You'd better to give a good estimate if star/galaxy separation is needed.
# [4] A common criteria for isolated point sources:
# CLASS_STAR (output parameter) > 0.95 & FLAG = 0
#
"""
# * Main Configurations
ConfigDict = {}
ConfigDict['CATALOG_TYPE'] = CATALOG_TYPE
ConfigDict['VERBOSE_TYPE'] = '%s' %VERBOSE_TYPE
ConfigDict['GAIN_KEY'] = '%s' %GAIN_KEY
ConfigDict['SATUR_KEY'] = '%s' %SATUR_KEY
ConfigDict['MAG_ZEROPOINT'] = '0.0'
ConfigDict['PIXEL_SCALE'] = '%s' %PIXEL_SCALE
ConfigDict['SEEING_FWHM'] = '%s' %SEEING_FWHM
ConfigDict['BACK_TYPE'] = '%s' %BACK_TYPE
ConfigDict['BACK_VALUE'] = '%s' %BACK_VALUE
ConfigDict['BACK_SIZE'] = '%s' %BACK_SIZE
ConfigDict['BACK_FILTERSIZE'] = '%s' %BACK_FILTERSIZE
ConfigDict['DETECT_THRESH'] = '%s' %DETECT_THRESH
ConfigDict['ANALYSIS_THRESH'] = '%s' %ANALYSIS_THRESH
ConfigDict['DETECT_MINAREA'] = '%s' %DETECT_MINAREA
ConfigDict['DETECT_MAXAREA'] = '%s' %DETECT_MAXAREA
ConfigDict['DEBLEND_NTHRESH'] = '%s' %DEBLEND_NTHRESH
ConfigDict['DEBLEND_MINCONT'] = '%s' %DEBLEND_MINCONT
ConfigDict['CLEAN'] = '%s' %CLEAN
ConfigDict['CHECKIMAGE_TYPE'] = '%s' %CHECKIMAGE_TYPE
ConfigDict['BACKPHOTO_TYPE'] = '%s' %BACKPHOTO_TYPE
if not isinstance(PHOT_APERTURES, (int, float)):
ConfigDict['PHOT_APERTURES'] = '%s' %(','.join(np.array(PHOT_APERTURES).astype(str)))
else: ConfigDict['PHOT_APERTURES'] = '%s' %PHOT_APERTURES
if PSF_obj is not None: ConfigDict['PSF_NAME'] = '%s' %PSF_obj
# create configuration file .conv
if USE_FILT:
# see https://github.com/astromatic/sextractor/blob/master/config/default.conv
conv_text = "CONV NORM\n"
conv_text += "# 3x3 ``all-ground'' convolution mask with FWHM = 2 pixels.\n"
conv_text += "1 2 1\n"
conv_text += "2 4 2\n"
conv_text += "1 2 1"
conv_path = ''.join([TDIR, "/PYSEx.conv"])
_cfile = open(conv_path, 'w')
_cfile.write(conv_text)
_cfile.close()
USE_NNW = False
if 'CLASS_STAR' in SExParam:
USE_NNW = True
if USE_NNW:
# see https://github.com/astromatic/sextractor/blob/master/config/default.nnw
nnw_text = r"""
NNW
# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
# inputs: 9 for profile parameters + 1 for seeing.
# outputs: ``Stellarity index'' (0.0 to 1.0)
# Seeing FWHM range: from 0.025 to 5.5'' (images must have 1.5 < FWHM < 5 pixels)
# Optimized for Moffat profiles with 2<= beta <= 4.
3 10 10 1
-1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
-2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
-5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
-8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
-3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
0.00000e+00
1.00000e+00 """
nintent = len(re.split('NNW', re.split('\n', nnw_text)[1])[0])
nnw_text = '\n'.join([line[nintent: ] for line in re.split('\n', nnw_text)[1:]])
nnw_path = ''.join([TDIR, "/PYSEx.nnw"])
_cfile = open(nnw_path, 'w')
_cfile.write(nnw_text)
_cfile.close()
# create configuration file .param
USExParam = SExParam.copy()
if SExParam is None: USExParam = []
if 'X_IMAGE' not in USExParam:
USExParam.append('X_IMAGE')
if 'Y_IMAGE' not in USExParam:
USExParam.append('Y_IMAGE')
Param_path = ''.join([TDIR, "/PYSEx.param"])
if VIGNET is not None: USExParam += ['VIGNET(%d, %d)' %(VIGNET[0], VIGNET[1])]
Param_text = '\n'.join(USExParam)
pfile = open(Param_path, 'w')
pfile.write(Param_text)
pfile.close()
# create configuration file .sex
if not USE_FILT: ConfigDict['FILTER'] = 'N'
if USE_FILT: ConfigDict['FILTER_NAME'] = "%s" %conv_path
if USE_NNW: ConfigDict['STARNNW_NAME'] = "%s" %nnw_path
ConfigDict['PARAMETERS_NAME'] = "%s" %Param_path
FNAME = pa.basename(FITS_obj)
FITS_SExCat = ''.join([TDIR, '/%s_PYSEx_CAT.fits' %FNAME[:-5]])
_cklst = re.split(',', CHECKIMAGE_TYPE)
FITS_SExCheckLst = [''.join([TDIR, '/%s_PYSEx_CHECK_%s.fits' %(FNAME[:-5], ck)]) for ck in _cklst]
sex_config_path = AMConfig_Maker.AMCM(MDIR=TDIR, AstroMatic_KEY=SEx_KEY, \
ConfigDict=ConfigDict, tag='PYSEx')
# * Trigger SExtractor
if FITS_ref is None:
os.system("cd %s && %s %s -c %s -CATALOG_NAME %s -CHECKIMAGE_NAME %s" \
%(pa.dirname(FITS_obj), SEx_KEY, FNAME, sex_config_path, \
FITS_SExCat, ','.join(FITS_SExCheckLst)))
if FITS_ref is not None:
# WARNING: more risky to fail due to the too long string.
os.system("%s %s,%s -c %s -CATALOG_NAME %s -CHECKIMAGE_NAME %s" \
%(SEx_KEY, FITS_ref, FITS_obj, sex_config_path, \
FITS_SExCat, ','.join(FITS_SExCheckLst)))
"""
# Deprecated as it requires WCSTools, and seems not very useful
def record(FITS):
os.system('sethead %s SOURCE=%s' %(FITS, FITS_obj))
os.system('sethead %s RSOURCE=%s' %(FITS, FITS_ref))
for key in ConfigDict:
value = ConfigDict[key]
pack = ' : '.join(['Sex Parameters', key, value])
os.system('sethead %s HISTORY="%s"' %(FITS, pack))
return None
"""
if CATALOG_TYPE == 'FITS_LDAC': tbhdu = 2
if CATALOG_TYPE == 'FITS_1.0': tbhdu = 1
if pa.exists(FITS_SExCat):
#if MDIR is not None: record(FITS_SExCat)
AstSEx = Table.read(FITS_SExCat, hdu=tbhdu)
else: FITS_SExCat, AstSEx = None, None
PixA_SExCheckLst = []
FtmpLst = FITS_SExCheckLst.copy()
for k, FITS_SExCheck in enumerate(FITS_SExCheckLst):
if pa.exists(FITS_SExCheck):
#if MDIR is not None: record(FITS_SExCheck)
PixA_SExCheck = fits.getdata(FITS_SExCheck, ext=0).T
else: PixA_SExCheck, FtmpLst[k] = None, None
PixA_SExCheckLst.append(PixA_SExCheck)
FITS_SExCheckLst = FtmpLst
# * Optional functions
if AstSEx is not None:
Modify_AstSEx = False
if VERBOSE_LEVEL in [1, 2]:
_message = 'SExtractor found [%d] sources!' %(len(AstSEx))
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
# ** a. CORRECT the SExtractor bug on MAG & MAGERR due to negative flux count.
# For such cases, corresponding MAG & MAGERR will turn to be a trivial 99.
# PYSEx will re-calculate them from FLUX & FLUXERR if MAG & MAGERR in PL and NegativeCorr=True.
MAG_TYPES = [p for p in USExParam if p[:4] == 'MAG_']
if len(MAG_TYPES) > 0:
if NegativeCorr:
MAGERR_TYPES = ['MAGERR_' + MAGT[4:] for MAGT in MAG_TYPES]
FLUX_TYPES = ['FLUX_' + MAGT[4:] for MAGT in MAG_TYPES]
FLUXERR_TYPES = ['FLUXERR_' + MAGT[4:] for MAGT in MAG_TYPES]
for i in range(len(MAG_TYPES)):
pcomplete = (MAGERR_TYPES[i] in USExParam) & \
(FLUX_TYPES[i] in USExParam) & \
(FLUXERR_TYPES[i] in USExParam)
if not pcomplete:
_error_message = 'Please use complete FLUX FLUXERR MAG MAGERR '
_error_message += 'in SExParam for Negative Flux Correction!'
raise Exception('MeLOn ERROR [%s]: %s' %(objname, _error_message))
FLUX = np.array(AstSEx[FLUX_TYPES[i]])
FLUXERR = np.array(AstSEx[FLUXERR_TYPES[i]])
Mask_NC = FLUX < 0.0
MAG_NC = -2.5*np.log10(np.abs(FLUX[Mask_NC]))
MAGERR_NC = 1.0857 * np.abs(FLUXERR[Mask_NC] / FLUX[Mask_NC])
AstSEx[MAG_TYPES[i]][Mask_NC] = MAG_NC
AstSEx[MAGERR_TYPES[i]][Mask_NC] = MAGERR_NC
Modify_AstSEx = True
# ** b. ADD-COLUMN SEGLABEL if SEGMENTATION requested.
# If some lines are discarded later, corresponding
# SEGLABEL will be lost in the output table.
if 'SEGMENTATION' in CHECKIMAGE_TYPE:
SEGLABEL = 1 + np.arange(len(AstSEx)) # label 0 is background
AstSEx.add_column(Column(SEGLABEL, name='SEGLABEL'), index=0)
Modify_AstSEx = True
# ** c. ADD-COLUMN of Sky-Coordinates by Astropy
# (X_IMAGE, Y_IMAGE) to (X_WORLD, Y_WORLD)
# (XWIN_IMAGE, YWIN_IMAGE) to (XWIN_WORLD, YWIN_WORLD)
if AddRD:
w_obj = Read_WCS.RW(phr_obj, VERBOSE_LEVEL=VERBOSE_LEVEL)
_XY = np.array([AstSEx['X_IMAGE'], AstSEx['Y_IMAGE']]).T
_RD = w_obj.all_pix2world(_XY, 1)
AstSEx.add_column(Column(_RD[:, 0], name='X_WORLD'))
AstSEx.add_column(Column(_RD[:, 1], name='Y_WORLD'))
if 'XWIN_IMAGE' in USExParam:
if 'YWIN_IMAGE' in USExParam:
_XY = np.array([AstSEx['XWIN_IMAGE'], AstSEx['YWIN_IMAGE']]).T
_RD = w_obj.all_pix2world(_XY, 1)
AstSEx.add_column(Column(_RD[:, 0], name='XWIN_WORLD'))
AstSEx.add_column(Column(_RD[:, 1], name='YWIN_WORLD'))
Modify_AstSEx = True
# ** d. Restriction on FLAGS
if ONLY_FLAGS is not None:
if 'FLAGS' not in SExParam:
_error_message = 'FLAGS is required in SExParam to apply restriction on FLAGS!'
raise Exception('MeLOn ERROR [%s]: %s' %(objname, _error_message))
else:
_OLEN = len(AstSEx)
AstSEx = AstSEx[np.in1d(AstSEx['FLAGS'], ONLY_FLAGS)]
Modify_AstSEx = True
if VERBOSE_LEVEL in [2]:
_message = 'PYSEx excludes [%d / %d] sources by FLAGS restriction!' %(_OLEN - len(AstSEx), _OLEN)
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
# ** e. Remove Boundary Sources
if XBoundary != 0.0 or XBoundary != 0.0:
NX, NY = int(phr_obj['NAXIS1']), int(phr_obj['NAXIS2'])
_XY = np.array([AstSEx['X_IMAGE'], AstSEx['Y_IMAGE']]).T
InnerMask = np.logical_and.reduce((_XY[:, 0] > XBoundary + 0.5, \
_XY[:, 0] < NX - XBoundary + 0.5, \
_XY[:, 1] > YBoundary + 0.5, \
_XY[:, 1] < NY - YBoundary + 0.5))
_OLEN = len(AstSEx)
AstSEx = AstSEx[InnerMask]
Modify_AstSEx = True
if VERBOSE_LEVEL in [2]:
_message = 'PYSEx excludes [%d / %d] sources by boundary rejection!' %(_OLEN - len(AstSEx), _OLEN)
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
# ** f. Only preserve the sources matched to the quest coordinates
if Coor4Match == 'XY_':
Xcoln_4Match, Ycoln_4Match = 'X_IMAGE', 'Y_IMAGE'
RAcoln_4Match, DECcoln_4Match = 'X_WORLD', 'Y_WORLD'
if Coor4Match == 'XYWIN_':
assert 'XWIN_IMAGE' in USExParam
assert 'YWIN_IMAGE' in USExParam
Xcoln_4Match, Ycoln_4Match = 'XWIN_IMAGE', 'YWIN_IMAGE'
RAcoln_4Match, DECcoln_4Match = 'XWIN_WORLD', 'YWIN_WORLD'
assert XY_Quest is None or RD_Quest is None
if RD_Quest is not None and not AddRD:
_error_message = 'AddRD is required in SExParam when RD_Quest is given!'
raise Exception('MeLOn ERROR [%s]: %s' %(objname, _error_message))
Symm = None
if XY_Quest is not None:
_XY = np.array([AstSEx[Xcoln_4Match], AstSEx[Ycoln_4Match]]).T
Symm = Symmetric_Match.SM(XY_A=XY_Quest, XY_B=_XY, tol=Match_xytol, return_distance=False)
if RD_Quest is not None:
_RD = np.array([AstSEx[RAcoln_4Match], AstSEx[DECcoln_4Match]]).T
Symm = Sky_Symmetric_Match.SSM(RD_A=RD_Quest, RD_B=_RD, tol=Match_rdtol, return_distance=False)
if Symm is not None:
Modify_AstSEx = True
if Preserve_NoMatch:
QuestMATCH = np.zeros(len(AstSEx)).astype(bool)
QuestMATCH[Symm[:, 1]] = True
AstSEx.add_column(Column(QuestMATCH, name='QuestMATCH'))
QuestINDEX = -1 * np.ones(len(AstSEx)).astype(int)
QuestINDEX[Symm[:, 1]] = Symm[:, 0]
AstSEx.add_column(Column(QuestINDEX, name='QuestINDEX'))
else:
_OLEN = len(AstSEx)
AstSEx = AstSEx[Symm[:, 1]]
QuestMATCH = np.ones(len(AstSEx)).astype(bool)
AstSEx.add_column(Column(QuestMATCH, name='QuestMATCH'))
QuestINDEX = Symm[:, 0]
AstSEx.add_column(Column(QuestINDEX, name='QuestINDEX'))
if VERBOSE_LEVEL in [2]:
_message = 'PYSEx excludes [%d / %d] sources by symmetric matching!' %(_OLEN - len(AstSEx), _OLEN)
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
if VERBOSE_LEVEL in [1, 2]:
_message = 'PYSEx output catalog contains [%d] sources!' %(len(AstSEx))
print('MeLOn CheckPoint [%s]: %s' %(objname, _message))
# ** g. ADD-COLUMN Stamp
if STAMP_IMGSIZE is not None:
_XY = np.array([AstSEx['X_IMAGE'], AstSEx['Y_IMAGE']]).T
PixA_StpLst = Stamp_Generator.SG(FITS_obj=FITS_obj, EXTINDEX=0, \
COORD=_XY, COORD_TYPE='IMAGE', STAMP_IMGSIZE=STAMP_IMGSIZE, \
FILL_VALUE=np.nan, FITS_StpLst=None, VERBOSE_LEVEL=VERBOSE_LEVEL)
AstSEx.add_column(Column(PixA_StpLst, name='Stamp'))
Modify_AstSEx = True
# ** UPDATE the file FITS_SExCat
if MDIR is not None and Modify_AstSEx:
tFITS_SExCat = ''.join([TDIR, '/TMPCAT_%s' %FNAME])
AstSEx.write(tFITS_SExCat, overwrite=True)
hdl = fits.open(FITS_SExCat)
thdl = fits.open(tFITS_SExCat)
if tbhdu == 2: fits.HDUList([hdl[0], hdl[1], thdl[1]]).writeto(FITS_SExCat, overwrite=True)
if tbhdu == 1: fits.HDUList([hdl[0], thdl[1]]).writeto(FITS_SExCat, overwrite=True)
hdl.close()
thdl.close()
os.system('rm %s' %tFITS_SExCat)
# ** REMOVE temporary directory
if MDIR is None:
os.system('rm -rf %s' %TDIR)
return AstSEx, PixA_SExCheckLst, FITS_SExCat, FITS_SExCheckLst
|
21,287 | 0031180698ea8995d1c6659e7926e8e2e4499764 | from django import template
from localManagement.models import Locale
from localManagement.views import LocalList
register = template.Library()
@register.simple_tag
def multiply(var1, var2):
    """Return the product of *var1* and *var2* as a string using a comma
    as the decimal separator (e.g. 1.5 -> "1,5")."""
    product = var1 * var2
    return str(product).replace('.', ',')
@register.simple_tag
def total_price():
    """Compute the grand total of the current order.

    Sums the line totals (quantity * unit price) of the ordered products
    and menus held on LocalList, adds the delivery fee of the last
    selected locale when the order is non-empty, and returns the total
    as a comma-decimal string when it has a fractional part, otherwise
    as an int.
    """
    sum_price = 0.00
    # Product line totals: quantity * unit price.
    for elem in LocalList.prod_ordine:
        sum_price += float(elem['num_obj'] * elem['prodotto'].prezzo)
    # Menu line totals.
    for elem in LocalList.menu_ordine:
        sum_price += float(elem['num_obj'] * elem['menu'].prezzo)
    # Delivery fee applies only when something was actually ordered.
    if sum_price > 0:
        sum_price += Locale.objects.get(cod_locale=LocalList.last_local).prezzo_di_spedizione
    # NOTE(review): float-based currency arithmetic; consider Decimal to
    # avoid rounding surprises. This test detects a non-zero fractional
    # part of the rounded total (in cents).
    if (round(sum_price, 2) - int(sum_price)) * 100 > 0:
        return str(round(sum_price, 2)).replace('.', ',')
    return int(sum_price)
|
21,288 | d038088161a1f7093beeb2b7b3749f345d09f08b | from setuptools import setup, Extension
# Minimal package setup: installs the `pyinpy` console command, which
# dispatches to pyinpy.main:main. No other metadata is passed here --
# presumably it lives in setup.cfg / pyproject.toml (TODO confirm).
setup(
    entry_points={"console_scripts": ["pyinpy=pyinpy.main:main"]}
)
|
class Solution(object):
    """
    Given a string s, find the longest palindromic substring in s.
    You may assume that the maximum length of s is 1000.
    Example:
    Input: "babad"
    Output: "bab"
    Note: "aba" is also a valid answer.
    Example:
    Input: "cbbd"
    Output: "bb"
    """
    def lenExpanded(self, left, right, k):
        """Expand around the center (left, right) of *k* while the
        characters on both sides match.

        :param left: index of the left edge of the center.
        :param right: index of the right edge of the center
                      (== left for odd-length palindromes).
        :param k: the string being searched.
        :return: the widest palindromic substring around that center.
        """
        while left >= 0 and right < len(k) and k[left] == k[right]:
            left -= 1
            right += 1
        # The loop overshoots by one step on each side, so the palindrome
        # is exactly k[left + 1 : right].
        return k[left + 1:right]

    def longestPalindrome(self, s):
        """Return the longest palindromic substring of *s*.

        Classic expand-around-center: O(n^2) time, O(1) extra space.
        Handles the empty string (returns "") and single characters;
        the redundant length-1/length-2 special cases of the original
        implementation were removed -- both center loops below already
        cover them with identical results.

        :type s: str
        :rtype: str
        """
        longest_palindrome = ""
        # Odd-length palindromes: every single character is a center.
        for i in range(len(s)):
            candidate = self.lenExpanded(i, i, s)
            if len(candidate) > len(longest_palindrome):
                longest_palindrome = candidate
        # Even-length palindromes: every equal adjacent pair is a center.
        for i in range(len(s) - 1):
            if s[i] == s[i + 1]:
                candidate = self.lenExpanded(i, i + 1, s)
                if len(candidate) > len(longest_palindrome):
                    longest_palindrome = candidate
        return longest_palindrome
# Quick manual smoke checks for Solution.longestPalindrome.
s = Solution()
for sample in ("babad", "cbbd", "abb", "a", "an", "bb",
               "ccc", "ccd", "abcdasdfghjkldcba"):
    print(s.longestPalindrome(sample))
print("**************")
21,290 | 3ee217b42abd899fb3710f44429d57d353b688be | #!/usr/bin/env python
# Import libraries
import os, glob, argparse, cv2
import pandas as pd
# Defining main function
def main(targetpath, filepath):
    """Compare an image corpus to a target image via 3D color histograms.

    Computes the chi-square distance between the target image's normalized
    RGB histogram and the histogram of every image matched by *filepath*,
    writes all distances to a CSV file, and reports the closest match.

    :param targetpath: path to the target image.
    :param filepath: glob pattern matching the corpus images.
    """
    # Getting the filename of the target image
    target_name = os.path.split(targetpath)[-1]
    # Info for user in terminal
    if targetpath == os.path.join("..", "data", "flowers", "image_0002.jpg"):
        print(f"[INFO] Targetpath not specified - using default: \"{target_name}\"")
    # Empty lists, for appending to
    filenames = []
    distances_to_target = []
    # Load target image, calculate histogram and normalize.
    # NOTE: cv2.normalize writes into its second argument, so target_hist is
    # normalized in place; target_hist_norm refers to the same array.
    target = cv2.imread(targetpath)
    target_hist = cv2.calcHist([target], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
    target_hist_norm = cv2.normalize(target_hist, target_hist, 0, 255, cv2.NORM_MINMAX)
    # Info for user in terminal
    print(f"[INFO] Calculating distances from corpus to \"{target_name}\" ...")
    # For each of the corpus files, get filename and calculate distance to target
    for file in glob.glob(filepath):
        # Get filename and append to list
        filenames.append(os.path.split(file)[-1])
        # Read image, compute and normalize its histogram, then measure the
        # chi-square distance to the normalized target histogram.
        img = cv2.imread(file)
        img_hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
        img_hist_norm = cv2.normalize(img_hist, img_hist, 0, 255, cv2.NORM_MINMAX)
        dist = round(cv2.compareHist(target_hist_norm, img_hist_norm, cv2.HISTCMP_CHISQR), 2)  # Rounded to 2 decimal places
        distances_to_target.append(dist)
    # Info for user in terminal
    print(f"[INFO] Distances between the 3D color histogram of \"{target_name}\" and the corpus in \"{filepath}\" have been calculated using the chi-square method.")
    # Create a df with the information on distances
    df = pd.DataFrame(list(zip(filenames, distances_to_target)),
                      columns = ["filename", "distance"])
    # BUGFIX: chi-square is a *distance* (lower = more similar), so the
    # closest image is the row with the MINIMUM distance. The original
    # used idxmax(), which picked the most dissimilar image.
    closest_image = df.loc[df['distance'].idxmin()]
    # Create outpath for df
    outpath = f"distances_to_{target_name[:-4]}.csv"
    # Save df
    df.to_csv(outpath, index = False)
    # Info for user in terminal - also information on which image is the closest.
    # Label-based access avoids deprecated positional Series indexing.
    print(f"[INFO] A new file with the distances has been created succesfully: \"{outpath}\" \n NOTE: The image \"{closest_image['filename']}\" has the shortest chi-square distance to target with a distance of: {closest_image['distance']}") # Info for user in terminal
# Defining behaviour when called from command line
# Command-line entry point.
if __name__ == "__main__":
    # Build the argument parser.
    arg_parser = argparse.ArgumentParser(
        description = "Calculates rgb-distance from image corpus to a specified target image using the chi-square method")
    # Path to the image corpus (glob pattern); optional, with a default.
    arg_parser.add_argument(
        "-f",
        "--filepath",
        type = str,
        required = False,
        default = os.path.join("..", "data", "flowers", "*.jpg"),
        help = "str - path to image corpus")
    # Path to the target image; optional, with a default.
    arg_parser.add_argument(
        "-t",
        "--targetpath",
        type = str,
        required = False,
        default = os.path.join("..", "data", "flowers", "image_0002.jpg"),
        help = "str - path to target file from which to calculate distance to the other images")
    # Parse and run.
    parsed = arg_parser.parse_args()
    main(parsed.targetpath, parsed.filepath)
21,291 | 9e3c107e0f5a403059d1d16300b4363090d88e06 | """ Receiver control module via HTTP API. """
import os
from datetime import datetime
from enum import Enum
from urllib.parse import quote
from gi.repository import GLib
from .dialogs import show_dialog, DialogType, get_message, get_builder
from .uicommons import Gtk, Gdk, UI_RESOURCES_PATH, Column
from ..commons import run_task, run_with_delay, log, run_idle
from ..connections import HttpAPI, UtfFTP
from ..eparser.ecommons import BqServiceType
class ControlBox(Gtk.HBox):
_TIME_STR = "%Y-%m-%d %H:%M"
    class Tool(Enum):
        """ The currently displayed tool. """
        # Values match the child names of the Gtk.Stack pages
        # (see on_visible_tool, which constructs Tool from
        # stack.get_visible_child_name()).
        REMOTE = "control"
        EPG = "epg"
        TIMERS = "timers"
        TIMER = "timer"
        RECORDINGS = "recordings"
    class EpgRow(Gtk.ListBoxRow):
        """ List row rendering a single EPG event: time span, title and description. """
        def __init__(self, event: dict, **properties):
            super().__init__(**properties)
            # Raw event dict from the receiver's HTTP API (e2event* keys).
            self._event_data = event
            h_box = Gtk.HBox()
            h_box.set_orientation(Gtk.Orientation.VERTICAL)
            self._title = event.get("e2eventtitle", "")
            title_label = Gtk.Label(self._title)
            self._desc = event.get("e2eventdescription", "")
            description = Gtk.Label()
            description.set_markup("<i>{}</i>".format(self._desc))
            description.set_line_wrap(True)
            description.set_max_width_chars(25)
            # Event start is a Unix timestamp (seconds); duration in seconds.
            start = int(event.get("e2eventstart", "0"))
            start_time = datetime.fromtimestamp(start)
            end_time = datetime.fromtimestamp(start + int(event.get("e2eventduration", "0")))
            time_label = Gtk.Label()
            time_label.set_margin_top(5)
            self._time_header = "{} - {}".format(start_time.strftime("%A, %H:%M"), end_time.strftime("%H:%M"))
            time_label.set_markup("<b>{}</b>".format(self._time_header))
            h_box.add(time_label)
            h_box.add(title_label)
            h_box.add(description)
            sep = Gtk.Separator()
            sep.set_margin_top(5)
            h_box.add(sep)
            h_box.set_spacing(5)
            self.add(h_box)
            self.show_all()
        @property
        def event_data(self):
            """ Source event dict; empty dict when unset. """
            return self._event_data or {}
        @property
        def title(self):
            """ Event title; "" when unset. """
            return self._title or ""
        @property
        def desc(self):
            """ Event description; "" when unset. """
            return self._desc or ""
        @property
        def time_header(self):
            """ Formatted "start - end" time string; "" when unset. """
            return self._time_header or ""
    class TimerRow(Gtk.ListBoxRow):
        """ List row rendering a single receiver timer (name, description, service, time span). """
        # Glade UI definition describing the row layout.
        _UI_PATH = UI_RESOURCES_PATH + "timer_row.glade"
        def __init__(self, timer, **properties):
            super().__init__(**properties)
            # Raw timer dict from the receiver's HTTP API (e2* keys).
            self._timer = timer
            builder = get_builder(self._UI_PATH, None, use_str=True)
            row_box = builder.get_object("timer_row_box")
            name_label = builder.get_object("timer_name_label")
            description_label = builder.get_object("timer_description_label")
            service_name_label = builder.get_object("timer_service_name_label")
            time_label = builder.get_object("timer_time_label")
            # The API may return None for missing fields -> fall back to "".
            name_label.set_text(timer.get("e2name", "") or "")
            description_label.set_text(timer.get("e2description", "") or "")
            service_name_label.set_text(timer.get("e2servicename", "") or "")
            # Time: begin/end come as Unix timestamps (seconds).
            start_time = datetime.fromtimestamp(int(timer.get("e2timebegin", "0")))
            end_time = datetime.fromtimestamp(int(timer.get("e2timeend", "0")))
            time_label.set_text("{} - {}".format(start_time.strftime("%A, %H:%M"), end_time.strftime("%H:%M")))
            self.add(row_box)
            self.show()
        @property
        def timer(self):
            """ Source timer dict. """
            return self._timer
    class TimerAction(Enum):
        """ Mode of the timer editing page (see on_timer_add / on_timer_add_from_event / on_timer_edit). """
        ADD = 0     # create a new timer from scratch
        EVENT = 1   # create a timer from an EPG event
        CHANGE = 2  # edit an existing timer
    class RecordingsRow(Gtk.ListBoxRow):
        """ List row rendering a single recording: service, title, description, start time and length. """
        def __init__(self, movie: dict, **properties):
            super().__init__(**properties)
            # Raw movie dict from the receiver's HTTP API (e2* keys).
            self._movie = movie
            h_box = Gtk.HBox()
            h_box.set_orientation(Gtk.Orientation.VERTICAL)
            self._service = movie.get("e2servicename")
            service_label = Gtk.Label()
            service_label.set_markup("<b>{}</b>".format(self._service))
            self._title = movie.get("e2title", "")
            title_label = Gtk.Label(self._title)
            self._desc = movie.get("e2description", "")
            description = Gtk.Label()
            description.set_markup("<i>{}</i>".format(self._desc))
            description.set_line_wrap(True)
            description.set_max_width_chars(25)
            # Recording start time comes as a Unix timestamp (seconds).
            start_time = datetime.fromtimestamp(int(movie.get("e2time", "0")))
            start_time_label = Gtk.Label()
            start_time_label.set_margin_top(5)
            start_time_label.set_markup("<b>{}</b>".format(start_time.strftime("%A, %H:%M")))
            time_label = Gtk.Label()
            time_label.set_margin_top(5)
            time_label.set_markup("<b>{}</b>".format(movie.get("e2length", "0")))
            # Start time on the left, length on the right.
            info_box = Gtk.HBox()
            info_box.set_orientation(Gtk.Orientation.HORIZONTAL)
            info_box.set_spacing(10)
            info_box.pack_start(start_time_label, False, True, 5)
            info_box.pack_end(time_label, False, True, 5)
            h_box.add(service_label)
            h_box.add(title_label)
            h_box.add(description)
            h_box.add(info_box)
            sep = Gtk.Separator()
            sep.set_margin_top(5)
            h_box.add(sep)
            h_box.set_spacing(5)
            # Show the full file path as a tooltip.
            self.set_tooltip_text(movie.get("e2filename", ""))
            self.add(h_box)
            self.show_all()
        @property
        def movie(self):
            """ Source movie dict. """
            return self._movie
        @property
        def service(self):
            """ Service name; "" when unset. """
            return self._service or ""
        @property
        def title(self):
            """ Recording title; "" when unset. """
            return self._title or ""
        @property
        def desc(self):
            """ Recording description; "" when unset. """
            return self._desc or ""
        @property
        def file(self):
            """ File path of the recording on the receiver; "" when unset. """
            return self._movie.get("e2filename", "")
    def __init__(self, app, http_api, settings, *args, **kwargs):
        """Build the control box UI and wire it to the application.

        :param app: the main application (supplies actions and the control revealer).
        :param http_api: HttpAPI instance used to talk to the receiver.
        :param settings: application settings object.
        """
        super().__init__(*args, **kwargs)
        self._http_api = http_api
        self._settings = settings
        # True while the EPG page is visible (polling flag).
        self._update_epg = False
        self._app = app
        self._last_tool = self.Tool.REMOTE
        self._timer_action = self.TimerAction.ADD
        self._current_timer = {}
        # Glade signal handlers.
        handlers = {"on_visible_tool": self.on_visible_tool,
                    "on_volume_changed": self.on_volume_changed,
                    "on_epg_press": self.on_epg_press,
                    "on_epg_filter_changed": self.on_epg_filter_changed,
                    "on_timers_press": self.on_timers_press,
                    "on_timers_drag_data_received": self.on_timers_drag_data_received,
                    "on_recordings_press": self.on_recordings_press,
                    "on_recording_filter_changed": self.on_recording_filter_changed,
                    "on_recordings_dir_changed": self.on_recordings_dir_changed}
        builder = get_builder(UI_RESOURCES_PATH + "control.glade", handlers)
        self.add(builder.get_object("main_box_frame"))
        self._stack = builder.get_object("stack")
        # Screenshot preview; shown only while the check button is active.
        self._screenshot_image = builder.get_object("screenshot_image")
        self._screenshot_button_box = builder.get_object("screenshot_button_box")
        self._screenshot_check_button = builder.get_object("screenshot_check_button")
        self._screenshot_check_button.bind_property("active", self._screenshot_image, "visible")
        # Signal level labels.
        self._snr_value_label = builder.get_object("snr_value_label")
        self._ber_value_label = builder.get_object("ber_value_label")
        self._agc_value_label = builder.get_object("agc_value_label")
        self._volume_button = builder.get_object("volume_button")
        self._epg_list_box = builder.get_object("epg_list_box")
        self._epg_list_box.set_filter_func(self.epg_filter_function)
        self._epg_filter_entry = builder.get_object("epg_filter_entry")
        self._timers_list_box = builder.get_object("timers_list_box")
        self._app._control_revealer.bind_property("visible", self, "visible")
        # Timers
        self._timer_remove_button = builder.get_object("timer_remove_button")
        self._timer_remove_button.bind_property("visible", builder.get_object("timer_edit_button"), "visible")
        # Timer
        self._timer_name_entry = builder.get_object("timer_name_entry")
        self._timer_desc_entry = builder.get_object("timer_desc_entry")
        self._timer_service_entry = builder.get_object("timer_service_entry")
        self._timer_service_ref_entry = builder.get_object("timer_service_ref_entry")
        self._timer_event_id_entry = builder.get_object("timer_event_id_entry")
        self._timer_begins_entry = builder.get_object("timer_begins_entry")
        self._timer_ends_entry = builder.get_object("timer_ends_entry")
        self._timer_begins_calendar = builder.get_object("timer_begins_calendar")
        self._timer_begins_hr_button = builder.get_object("timer_begins_hr_button")
        self._timer_begins_min_button = builder.get_object("timer_begins_min_button")
        self._timer_ends_calendar = builder.get_object("timer_ends_calendar")
        self._timer_ends_hr_button = builder.get_object("timer_ends_hr_button")
        self._timer_ends_min_button = builder.get_object("timer_ends_min_button")
        self._timer_enabled_switch = builder.get_object("timer_enabled_switch")
        self._timer_action_combo_box = builder.get_object("timer_action_combo_box")
        self._timer_after_combo_box = builder.get_object("timer_after_combo_box")
        self._timer_mo_check_button = builder.get_object("timer_mo_check_button")
        self._timer_tu_check_button = builder.get_object("timer_tu_check_button")
        self._timer_we_check_button = builder.get_object("timer_we_check_button")
        self._timer_th_check_button = builder.get_object("timer_th_check_button")
        self._timer_fr_check_button = builder.get_object("timer_fr_check_button")
        self._timer_sa_check_button = builder.get_object("timer_sa_check_button")
        self._timer_su_check_button = builder.get_object("timer_su_check_button")
        self._timer_location_switch = builder.get_object("timer_location_switch")
        self._timer_location_entry = builder.get_object("timer_location_entry")
        self._timer_location_switch.bind_property("active", self._timer_location_entry, "sensitive")
        # Disable DnD for timer entries.
        self._timer_name_entry.drag_dest_unset()
        self._timer_desc_entry.drag_dest_unset()
        self._timer_service_entry.drag_dest_unset()
        # DnD initialization for the timer list.
        self._timers_list_box.drag_dest_set(Gtk.DestDefaults.ALL, [], Gdk.DragAction.DEFAULT | Gdk.DragAction.COPY)
        self._timers_list_box.drag_dest_add_text_targets()
        # Recordings.
        self._recordings_list_box = builder.get_object("recordings_list_box")
        self._recordings_list_box.set_filter_func(self.recording_filter_function)
        self._recordings_filter_entry = builder.get_object("recordings_filter_entry")
        self._recordings_dir_box = builder.get_object("recordings_dir_box")
        self.init_actions(app)
        self.connect("hide", self.on_hide)
        self.show()
    def init_actions(self, app):
        """ Registers application actions for remote control keys, power,
            screenshots, timers and recordings on the given app. """
        # Remote controller actions
        app.set_action("on_up", lambda a, v: self.on_remote_action(HttpAPI.Remote.UP))
        app.set_action("on_down", lambda a, v: self.on_remote_action(HttpAPI.Remote.DOWN))
        app.set_action("on_left", lambda a, v: self.on_remote_action(HttpAPI.Remote.LEFT))
        app.set_action("on_right", lambda a, v: self.on_remote_action(HttpAPI.Remote.RIGHT))
        app.set_action("on_ok", lambda a, v: self.on_remote_action(HttpAPI.Remote.OK))
        app.set_action("on_menu", lambda a, v: self.on_remote_action(HttpAPI.Remote.MENU))
        app.set_action("on_exit", lambda a, v: self.on_remote_action(HttpAPI.Remote.EXIT))
        app.set_action("on_red", lambda a, v: self.on_remote_action(HttpAPI.Remote.RED))
        app.set_action("on_green", lambda a, v: self.on_remote_action(HttpAPI.Remote.GREEN))
        app.set_action("on_yellow", lambda a, v: self.on_remote_action(HttpAPI.Remote.YELLOW))
        app.set_action("on_blue", lambda a, v: self.on_remote_action(HttpAPI.Remote.BLUE))
        # Power
        app.set_action("on_standby", lambda a, v: self.on_power_action(HttpAPI.Power.STANDBY))
        app.set_action("on_wake_up", lambda a, v: self.on_power_action(HttpAPI.Power.WAKEUP))
        app.set_action("on_reboot", lambda a, v: self.on_power_action(HttpAPI.Power.REBOOT))
        app.set_action("on_restart_gui", lambda a, v: self.on_power_action(HttpAPI.Power.RESTART_GUI))
        app.set_action("on_shutdown", lambda a, v: self.on_power_action(HttpAPI.Power.DEEP_STANDBY))
        # Screenshots
        app.set_action("on_screenshot_all", self.on_screenshot_all)
        app.set_action("on_screenshot_video", self.on_screenshot_video)
        app.set_action("on_screenshot_osd", self.on_screenshot_osd)
        # Timers
        app.set_action("on_timer_add", self.on_timer_add)
        app.set_action("on_timer_add_from_event", self.on_timer_add_from_event)
        app.set_action("on_timer_remove", self.on_timer_remove)
        app.set_action("on_timer_edit", self.on_timer_edit)
        app.set_action("on_timer_save", self.on_timer_save)
        app.set_action("on_timer_cancel", self.on_timer_cancel)
        app.set_action("on_timer_begins_set", self.on_timer_begins_set)
        app.set_action("on_timer_ends_set", self.on_timer_ends_set)
        # Recordings
        app.set_action("on_recording_remove", self.on_recording_remove)
@property
def update_epg(self):
""" Whether the EPG page is currently visible and its data should be refreshed. """
return self._update_epg
def on_visible_tool(self, stack, param):
""" Handler for the Gtk.Stack visible-child change: refreshes the page being shown. """
tool = self.Tool(stack.get_visible_child_name())
# Only poll EPG data while the EPG page is on screen.
self._update_epg = tool is self.Tool.EPG
if tool is self.Tool.TIMERS:
self.update_timer_list()
if tool is self.Tool.RECORDINGS:
self.update_recordings_list()
# Remember the last non-editor page so timer save/cancel can return to it.
if tool is not self.Tool.TIMER:
self._last_tool = tool
def on_hide(self, item):
self._update_epg = False
# ***************** Remote controller ********************* #
def on_remote(self, action, state=False):
""" Shows/Hides [R key] remote controller. """
action.set_state(state)
self._remote_revealer.set_visible(state)
self._remote_revealer.set_reveal_child(state)
if state:
self._http_api.send(HttpAPI.Request.VOL, "state", self.update_volume)
def on_remote_action(self, action):
self._http_api.send(HttpAPI.Request.REMOTE, action, self.on_response)
@run_with_delay(0.5)
def on_volume_changed(self, button, value):
self._http_api.send(HttpAPI.Request.VOL, "{:.0f}".format(value), self.on_response)
def update_volume(self, vol):
if "error_code" in vol:
return
GLib.idle_add(self._volume_button.set_value, int(vol.get("e2current", "0")))
def on_response(self, resp):
if "error_code" in resp:
return
if self._screenshot_check_button.get_active():
ref = "mode=all" if self._http_api.is_owif else "d="
self._http_api.send(HttpAPI.Request.GRUB, ref, self.update_screenshot)
@run_task
def update_screenshot(self, data):
if "error_code" in data:
return
data = data.get("img_data", None)
if data:
from gi.repository import GdkPixbuf
loader = GdkPixbuf.PixbufLoader.new_with_type("jpeg")
loader.set_size(280, 165)
try:
loader.write(data)
pix = loader.get_pixbuf()
except GLib.Error:
pass # NOP
else:
GLib.idle_add(self._screenshot_image.set_from_pixbuf, pix)
finally:
loader.close()
def on_screenshot_all(self, action, value=None):
self._http_api.send(HttpAPI.Request.GRUB, "mode=all" if self._http_api.is_owif else "d=",
self.on_screenshot)
def on_screenshot_video(self, action, value=None):
self._http_api.send(HttpAPI.Request.GRUB, "mode=video" if self._http_api.is_owif else "v=",
self.on_screenshot)
def on_screenshot_osd(self, action, value=None):
self._http_api.send(HttpAPI.Request.GRUB, "mode=osd" if self._http_api.is_owif else "o=",
self.on_screenshot)
@run_task
def on_screenshot(self, data):
if "error_code" in data:
return
img = data.get("img_data", None)
if img:
is_darwin = self._settings.is_darwin
GLib.idle_add(self._screenshot_button_box.set_sensitive, is_darwin)
path = os.path.expanduser("~/Desktop") if is_darwin else None
try:
import tempfile
import subprocess
with tempfile.NamedTemporaryFile(mode="wb", suffix=".jpg", dir=path, delete=not is_darwin) as tf:
tf.write(img)
cmd = ["open" if is_darwin else "xdg-open", tf.name]
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
finally:
GLib.idle_add(self._screenshot_button_box.set_sensitive, True)
def on_power_action(self, action):
self._http_api.send(HttpAPI.Request.POWER, action, lambda resp: log("Power status changed..."))
def update_signal(self, sig):
self._snr_value_label.set_text(sig.get("e2snrdb", "0 dB").strip())
self._ber_value_label.set_text(str(sig.get("e2ber", None) or "0").strip())
self._agc_value_label.set_text(sig.get("e2acg", "0 %").strip())
# ************************ EPG **************************** #
def on_service_changed(self, ref):
self._app._wait_dialog.show()
self._http_api.send(HttpAPI.Request.EPG, quote(ref), self.update_epg_data)
@run_idle
def update_epg_data(self, epg):
list(map(self._epg_list_box.remove, (r for r in self._epg_list_box)))
list(map(lambda e: self._epg_list_box.add(self.EpgRow(e)), epg.get("event_list", [])))
self._app._wait_dialog.hide()
def on_epg_press(self, list_box, event):
if event.get_event_type() == Gdk.EventType.DOUBLE_BUTTON_PRESS and len(list_box) > 0:
row = list_box.get_selected_row()
if row:
self.set_timer_from_event_data(row.event_data)
def on_epg_filter_changed(self, entry):
self._epg_list_box.invalidate_filter()
def epg_filter_function(self, row):
    """Gtk.ListBox filter callback for the EPG list.

    A row is kept when the filter entry is empty or when its upper-cased
    text occurs in the row's time header, title or description.
    """
    query = self._epg_filter_entry.get_text().upper()
    if not query:
        return True
    return any(query in field.upper() for field in (row.time_header, row.title, row.desc))
def on_timer_add_from_event(self, action, value=None):
rows = self._epg_list_box.get_selected_rows()
if not rows:
self._app.show_error_dialog("No selected item!")
return
refs = []
for row in rows:
event = row.event_data
ref = "timeraddbyeventid?sRef={}&eventid={}&justplay=0".format(event.get("e2eventservicereference", ""),
event.get("e2eventid", ""))
refs.append(ref)
gen = self.write_timers_list(refs)
GLib.idle_add(lambda: next(gen, False))
def write_timers_list(self, refs):
self._app._wait_dialog.show()
tasks = list(refs)
for ref in refs:
self._http_api.send(HttpAPI.Request.TIMER, ref, lambda x: tasks.pop())
yield True
while tasks:
yield True
self._stack.set_visible_child_name(self.Tool.TIMERS.value)
# *********************** Timers *************************** #
def on_timers_press(self, list_box, event):
if event.get_event_type() == Gdk.EventType.DOUBLE_BUTTON_PRESS and len(list_box) > 0:
self.on_timer_edit()
def update_timer_list(self):
self._app._wait_dialog.show()
self._http_api.send(HttpAPI.Request.TIMER_LIST, "", self.update_timers_data)
@run_idle
def update_timers_data(self, timers):
list(map(self._timers_list_box.remove, (r for r in self._timers_list_box)))
list(map(lambda t: self._timers_list_box.add(self.TimerRow(t)), timers.get("timer_list", [])))
self._timer_remove_button.set_visible(len(self._timers_list_box))
self._app._wait_dialog.hide()
def on_timer_add(self, action=None, value=None):
self._timer_action = self.TimerAction.ADD
date = datetime.now()
self.set_begins_date(date)
self.set_ends_date(date)
self._timer_event_id_entry.set_text("")
self._timer_location_switch.set_active(False)
self.set_repetition_flags(0)
self._stack.set_visible_child_name(self.Tool.TIMER.value)
def on_timer_remove(self, action, value=None):
rows = self._timers_list_box.get_selected_rows()
if not rows or show_dialog(DialogType.QUESTION, self._app._main_window) != Gtk.ResponseType.OK:
return
refs = {}
for row in rows:
timer = row.timer
ref = "timerdelete?sRef={}&begin={}&end={}".format(quote(timer.get("e2servicereference", "")),
timer.get("e2timebegin", ""),
timer.get("e2timeend", ""))
refs[ref] = row
self._app._wait_dialog.show("Deleting data...")
gen = self.remove_timers(refs)
GLib.idle_add(lambda: next(gen, False))
def remove_timers(self, refs):
tasks = list(refs)
removed = set()
for ref in refs:
yield from self.remove_timer(ref, removed, tasks)
while tasks:
yield True
list(map(self._timers_list_box.remove, (refs[ref] for ref in refs if ref in removed)))
self._app._wait_dialog.hide()
self._timer_remove_button.set_visible(len(self._timers_list_box))
yield True
def remove_timer(self, ref, removed, tasks=None):
def callback(resp):
if resp.get("e2state", "") == "True":
log(resp.get("e2statetext", ""))
removed.add(ref)
else:
log(resp.get("e2statetext", None) or "Timer deletion error.")
if tasks:
tasks.pop()
self._http_api.send(HttpAPI.Request.TIMER, ref, callback)
yield True
def on_timer_edit(self, action=None, value=None):
row = self._timers_list_box.get_selected_row()
if row:
self._timer_action = self.TimerAction.CHANGE
timer = row.timer
self._current_timer = timer
self._timer_name_entry.set_text(timer.get("e2name", ""))
self._timer_desc_entry.set_text(timer.get("e2description", "") or "")
self._timer_service_entry.set_text(timer.get("e2servicename", "") or "")
self._timer_service_ref_entry.set_text(timer.get("e2servicereference", ""))
self._timer_event_id_entry.set_text(timer.get("e2eit", ""))
self._timer_enabled_switch.set_active((timer.get("e2disabled", "0") == "0"))
self._timer_action_combo_box.set_active_id(timer.get("e2justplay", "0"))
self._timer_after_combo_box.set_active_id(timer.get("e2afterevent", "0"))
self.set_time_data(int(timer.get("e2timebegin", "0")), int(timer.get("e2timeend", "0")))
location = timer.get("e2location", "")
self._timer_location_entry.set_text("" if location == "None" else location)
# Days
self.set_repetition_flags(int(timer.get("e2repeated", "0")))
self._stack.set_visible_child_name(self.Tool.TIMER.value)
def on_timer_save(self, action, value=None):
""" Builds the OpenWebif timer request from the form widgets and sends it. """
args = []
t_data = self.get_timer_data()
s_ref = quote(t_data.get("sRef", ""))
# Timers created from an EPG event use the short 'timeraddbyeventid' API.
if self._timer_action is self.TimerAction.EVENT:
args.append("timeraddbyeventid?sRef={}".format(s_ref))
args.append("eventid={}".format(t_data.get("eit", "0")))
args.append("justplay={}".format(t_data.get("justplay", "")))
args.append("tags={}".format(""))
else:
# Manual add/edit goes through 'timeradd'/'timerchange' with the full data set.
if self._timer_action is self.TimerAction.ADD:
args.append("timeradd?sRef={}".format(s_ref))
args.append("deleteOldOnSave={}".format(0))
elif self._timer_action is self.TimerAction.CHANGE:
# 'timerchange' needs the old service/begin/end to locate the timer being edited.
args.append("timerchange?sRef={}".format(s_ref))
args.append("channelOld={}".format(s_ref))
args.append("beginOld={}".format(self._current_timer.get("e2timebegin", "0")))
args.append("endOld={}".format(self._current_timer.get("e2timeend", "0")))
args.append("deleteOldOnSave={}".format(1))
args.append("begin={}".format(t_data.get("begin", "")))
args.append("end={}".format(t_data.get("end", "")))
args.append("name={}".format(quote(t_data.get("name", ""))))
args.append("description={}".format(quote(t_data.get("description", ""))))
args.append("tags={}".format(""))
args.append("eit={}".format("0"))
args.append("disabled={}".format(t_data.get("disabled", "1")))
args.append("justplay={}".format(t_data.get("justplay", "1")))
args.append("afterevent={}".format(t_data.get("afterevent", "0")))
args.append("repeated={}".format(self.get_repetition_flags()))
if self._timer_location_switch.get_active():
args.append("dirname={}".format(self._timer_location_entry.get_text()))
self._http_api.send(HttpAPI.Request.TIMER, "&".join(args), self.timer_add_edit_callback)
@run_idle
def timer_add_edit_callback(self, resp):
if "error_code" in resp:
msg = "Error getting timer status.\n{}".format(resp.get("error_code"))
self._app.show_error_dialog(msg)
log(msg)
return
state = resp.get("e2state", None)
if state == "False":
msg = resp.get("e2statetext", "")
self._app.show_error_dialog(msg)
log(msg)
if state == "True":
log(resp.get("e2statetext", ""))
self._stack.set_visible_child_name(self._last_tool.value)
else:
log("Error getting timer status. No response!")
def on_timer_cancel(self, action, value=None):
self._stack.set_visible_child_name(self._last_tool.value)
def on_timer_begins_set(self, action, value=None):
self.set_begins_date(self.get_begins_date())
def on_timer_ends_set(self, action, value=None):
self.set_ends_date(self.get_ends_date())
def get_begins_date(self):
date = self._timer_begins_calendar.get_date()
return datetime(year=date.year, month=date.month + 1, day=date.day,
hour=int(self._timer_begins_hr_button.get_value()),
minute=int(self._timer_begins_min_button.get_value()))
def set_begins_date(self, date):
hour = date.hour
minute = date.minute
self._timer_begins_hr_button.set_value(hour)
self._timer_begins_min_button.set_value(minute)
self._timer_begins_calendar.select_day(date.day)
self._timer_begins_calendar.select_month(date.month - 1, date.year)
self._timer_begins_entry.set_text("{}-{}-{} {}:{:02d}".format(date.year, date.month, date.day, hour, minute))
def get_ends_date(self):
date = self._timer_ends_calendar.get_date()
return datetime(year=date.year, month=date.month + 1, day=date.day,
hour=int(self._timer_ends_hr_button.get_value()),
minute=int(self._timer_ends_min_button.get_value()))
def set_ends_date(self, date):
hour = date.hour
minute = date.minute
self._timer_ends_hr_button.set_value(hour)
self._timer_ends_min_button.set_value(minute)
self._timer_ends_calendar.select_day(date.day)
self._timer_ends_calendar.select_month(date.month - 1, date.year)
self._timer_ends_entry.set_text("{}-{}-{} {}:{:02d}".format(date.year, date.month, date.day, hour, minute))
def set_timer_from_event_data(self, timer):
self._stack.set_visible_child_name(self.Tool.TIMER.value)
self._timer_action = self.TimerAction.EVENT
self._timer_name_entry.set_text(timer.get("e2eventtitle", ""))
self._timer_desc_entry.set_text(timer.get("e2eventdescription", ""))
self._timer_service_entry.set_text(timer.get("e2eventservicename", ""))
self._timer_service_ref_entry.set_text(timer.get("e2eventservicereference", ""))
self._timer_event_id_entry.set_text(timer.get("e2eventid", ""))
self._timer_action_combo_box.set_active_id("1")
self._timer_after_combo_box.set_active_id("3")
start_time = int(timer.get("e2eventstart", "0"))
self.set_time_data(start_time, start_time + int(timer.get("e2eventduration", "0")))
def set_time_data(self, start_time, end_time):
""" Sets values for time widgets from Unix timestamps (seconds, local time). """
ev_time_start = datetime.fromtimestamp(start_time) or datetime.now()
ev_time_end = datetime.fromtimestamp(end_time) or datetime.now()
self._timer_begins_entry.set_text(ev_time_start.strftime(self._TIME_STR))
self._timer_ends_entry.set_text(ev_time_end.strftime(self._TIME_STR))
# Gtk.Calendar months are 0-based, hence the "month - 1" below.
self._timer_begins_calendar.select_day(ev_time_start.day)
self._timer_begins_calendar.select_month(ev_time_start.month - 1, ev_time_start.year)
self._timer_ends_calendar.select_day(ev_time_end.day)
self._timer_ends_calendar.select_month(ev_time_end.month - 1, ev_time_end.year)
self._timer_begins_hr_button.set_value(ev_time_start.hour)
self._timer_begins_min_button.set_value(ev_time_start.minute)
self._timer_ends_hr_button.set_value(ev_time_end.hour)
self._timer_ends_min_button.set_value(ev_time_end.minute)
def get_timer_data(self):
""" Collects the current state of the timer form widgets into a dict of request fields. """
# NOTE: 'disabled' is the inverse of the Enabled switch;
# 'begin'/'end' are Unix timestamps parsed from the entry text.
return {"sRef": self._timer_service_ref_entry.get_text(),
"begin": int(datetime.strptime(self._timer_begins_entry.get_text(), self._TIME_STR).timestamp()),
"end": int(datetime.strptime(self._timer_ends_entry.get_text(), self._TIME_STR).timestamp()),
"name": self._timer_name_entry.get_text(),
"description": self._timer_desc_entry.get_text(),
"dirname": "",
"eit": self._timer_event_id_entry.get_text(),
"disabled": int(not self._timer_enabled_switch.get_active()),
"justplay": self._timer_action_combo_box.get_active_id(),
"afterevent": self._timer_after_combo_box.get_active_id(),
"repeated": self.get_repetition_flags()}
def get_repetition_flags(self):
    """Pack the state of the seven weekday check buttons into a bitmask.

    Bit 0 corresponds to Monday, bit 6 to Sunday.
    """
    day_boxes = (self._timer_mo_check_button,
                 self._timer_tu_check_button,
                 self._timer_we_check_button,
                 self._timer_th_check_button,
                 self._timer_fr_check_button,
                 self._timer_sa_check_button,
                 self._timer_su_check_button)
    return sum(1 << bit for bit, box in enumerate(day_boxes) if box.get_active())
def set_repetition_flags(self, flags):
    """Apply the repetition bitmask to the weekday check buttons (bit 0 = Monday)."""
    day_boxes = (self._timer_mo_check_button,
                 self._timer_tu_check_button,
                 self._timer_we_check_button,
                 self._timer_th_check_button,
                 self._timer_fr_check_button,
                 self._timer_sa_check_button,
                 self._timer_su_check_button)
    for bit, box in enumerate(day_boxes):
        box.set_active(bool(flags >> bit & 1))
# ***************** Drag-and-drop ********************* #
def on_timers_drag_data_received(self, box, context, x, y, data, info, time):
txt = data.get_text()
if txt:
itr_str, sep, source = txt.partition(self._app.DRAG_SEP)
if not source:
return
itrs = itr_str.split(",")
if len(itrs) > 1:
self._app.show_error_dialog("Please, select only one item!")
return
fav_id = None
if source == self._app.FAV_MODEL_NAME:
model = self._app.fav_view.get_model()
fav_id = model.get_value(model.get_iter_from_string(itrs[0]), Column.FAV_ID)
elif source == self._app.SERVICE_MODEL_NAME:
model = self._app.services_view.get_model()
fav_id = model.get_value(model.get_iter_from_string(itrs[0]), Column.SRV_FAV_ID)
service = self._app.current_services.get(fav_id, None)
if service:
if service.service_type == BqServiceType.ALT.name:
msg = "Alternative service.\n\n {}".format(get_message("Not implemented yet!"))
show_dialog(DialogType.ERROR, transient=self._app._main_window, text=msg)
context.finish(False, False, time)
return
self._timer_name_entry.set_text(service.service)
self._timer_service_entry.set_text(service.service)
self._timer_service_ref_entry.set_text(service.picon_id.rstrip(".png").replace("_", ":"))
self.on_timer_add()
context.finish(True, False, time)
# *********************** Recordings *************************** #
def on_recordings_press(self, list_box, event):
if event.get_event_type() == Gdk.EventType.DOUBLE_BUTTON_PRESS and len(list_box) > 0:
row = list_box.get_selected_row()
if row:
self._http_api.send(HttpAPI.Request.STREAM_TS,
row.movie.get("e2filename", ""),
self.on_play_recording)
def on_recording_filter_changed(self, entry):
self._recordings_list_box.invalidate_filter()
def recording_filter_function(self, row):
    """Gtk.ListBox filter callback for the recordings list.

    A row is kept when the filter entry is empty or when its upper-cased
    text occurs in the row's service name, title or description.
    """
    query = self._recordings_filter_entry.get_text().upper()
    if not query:
        return True
    return any(query in field.upper() for field in (row.service, row.title, row.desc))
def on_recording_remove(self, action, value=None):
""" Removes recordings via FTP. """
if show_dialog(DialogType.QUESTION, self._app._main_window) != Gtk.ResponseType.OK:
return
rows = self._recordings_list_box.get_selected_rows()
if rows:
settings = self._app._settings
with UtfFTP(host=settings.host, user=settings.user, passwd=settings.password) as ftp:
ftp.encoding = "utf-8"
for r in rows:
resp = ftp.delete_file(r.file)
if resp.startswith("2"):
GLib.idle_add(self._recordings_list_box.remove, r)
else:
show_dialog(DialogType.ERROR, transient=self._app._main_window, text=resp)
break
def on_recordings_dir_changed(self, box: Gtk.ComboBoxText):
self._http_api.send(HttpAPI.Request.RECORDINGS, quote(box.get_active_id()), self.update_recordings_data)
def update_recordings_list(self):
if not len(self._recordings_dir_box.get_model()):
self._http_api.send(HttpAPI.Request.REC_CURRENT, "", self.update_current_rec_dir)
def update_current_rec_dir(self, current):
cur = current.get("e2location", None)
if cur:
self._recordings_dir_box.append(cur, cur)
self._http_api.send(HttpAPI.Request.REC_DIRS, "", self.update_rec_dirs)
def update_rec_dirs(self, dirs):
for d in dirs.get("rec_dirs", []):
self._recordings_dir_box.append(d, d)
@run_idle
def update_recordings_data(self, recordings):
list(map(self._recordings_list_box.remove, (r for r in self._recordings_list_box)))
list(map(lambda r: self._recordings_list_box.add(self.RecordingsRow(r)), recordings.get("recordings", [])))
def on_play_recording(self, m3u):
url = self._app.get_url_from_m3u(m3u)
if url:
self._app.play(url)
|
21,292 | befce3c4df06041c39ab44753b8bd3bc3dd3116a | from gym_pid.pid_env import PidEnv |
21,293 | 1e0e38243061fd277a780987f56e712cab34fb4f | from keras.backend import cntk_backend as KCN
from keras.backend.cntk_backend import logsumexp
import cntk as C
import numpy as np
def clip(x, min_value, max_value):
    """Element-wise clipping of *x* into ``[min_value, max_value]``.

    ``None`` bounds default to -inf / +inf.  Following the Keras backend
    contract, when ``min_value > max_value`` the effective clipping range
    collapses to ``[min_value, min_value]``.

    # Arguments
        x: Tensor or variable.
        min_value: Tensor, float, int, or None.
        max_value: Tensor, float, int, or None.

    # Returns
        A tensor.
    """
    if min_value is None:
        min_value = -np.inf
    if max_value is None:
        max_value = np.inf
    # Guarantee max_value >= min_value before delegating to CNTK.
    max_value = C.maximum(min_value, max_value)
    return C.clip(x, min_value, max_value)
def moments(x, axes, shift=None, keep_dims=False):
    """Return the (mean, variance) of *x* over *axes*.

    Thin wrapper that delegates to the private Keras CNTK backend helper.
    """
    mean, variance = KCN._moments(x, axes=axes, shift=shift, keep_dims=keep_dims)
    return mean, variance
|
21,294 | f224ce7efc0c945426a0f073d2beda0d0a7db509 | import csv
import pandas as pd
def load_data(train_path, test_path):
    """
    Load the train and test sets from CSV files and print their basic stats.

    :param train_path: path for the train set file
    :param test_path: path for the test set file
    :return: a 'pandas' array for each set
    """
    train_data = pd.read_csv(train_path)
    test_data = pd.read_csv(test_path)
    # Quick sanity report on the loaded data.
    print(f"number of training examples = {train_data.shape[0]}")
    print(f"number of test examples = {test_data.shape[0]}")
    print(f"train shape: {train_data.shape}")
    print(f"test shape: {test_data.shape}")
    return train_data, test_data
def output_submission(test_ids, predictions, id_column, predction_column, file_name):
    """
    Write a submission CSV with one (id, prediction) row per example.

    :param test_ids: vector with test dataset ids
    :param predictions: vector with test dataset predictions
    :param id_column: name of the output id column
    :param predction_column: name of the output prediction column
    :param file_name: string for the output file name
    :return: None; the file is written to ``submissions/<file_name>``
    """
    import os  # local import: only needed for the directory check below

    print('Outputting submission...')
    # Create the target directory instead of failing with FileNotFoundError
    # when 'submissions/' does not exist yet.
    os.makedirs('submissions', exist_ok=True)
    # newline='' is required by the csv module; without it each row is
    # followed by a spurious blank line on Windows.
    with open('submissions/' + file_name, 'w', newline='') as submission:
        writer = csv.writer(submission)
        writer.writerow([id_column, predction_column])
        writer.writerows(zip(test_ids, predictions))
    print('Output complete')
def pre_process_data(df):
    """
    Perform pre-processing on the data set.

    Currently this one-hot encodes every categorical column.

    :param df: pandas data frame
    :return: processed data frame
    """
    return pd.get_dummies(df)
def mini_batches(train_set, train_labels, mini_batch_size):
    """
    Split a data set and its labels into consecutive mini batches.

    The last batch is smaller than ``mini_batch_size`` when the set size
    is not an exact multiple of it.

    :param train_set: data set with the examples
    :param train_labels: data set with the labels
    :param mini_batch_size: mini batch size
    :return: list of (examples, labels) tuples
    """
    set_size = train_set.shape[0]
    # A stepped range covers both the full batches and the remainder:
    # slicing past the end simply yields the shorter final batch.
    return [(train_set[start:start + mini_batch_size],
             train_labels[start:start + mini_batch_size])
            for start in range(0, set_size, mini_batch_size)]
|
21,295 | de8ecd8638791145efbd6cea008a094fb7ef19ea | # 50. データの入手・整形
# News Aggregator Data Setをダウンロードし、以下の要領で学習データ(train.txt),検証データ(valid.txt),評価データ(test.txt)を作成せよ.
# ダウンロードしたzipファイルを解凍し,readme.txtの説明を読む.
# 情報源(publisher)が”Reuters”, “Huffington Post”, “Businessweek”, “Contactmusic.com”, “Daily Mail”の事例(記事)のみを抽出する.
# 抽出された事例をランダムに並び替える.
# 抽出された事例の80%を学習データ,残りの10%ずつを検証データと評価データに分割し,それぞれtrain.txt,valid.txt,test.txtというファイル名で保存する.
# ファイルには,1行に1事例を書き出すこととし,カテゴリ名と記事見出しのタブ区切り形式とせよ(このファイルは後に問題70で再利用する).
# 学習データと評価データを作成したら,各カテゴリの事例数を確認せよ.
from sklearn.model_selection import train_test_split
import pandas as pd
import collections
if __name__ == "__main__":
# Load the corpus (TSV file without a header row).
newsCorpora_path = "newsCorpora.csv"
newsCorpora = pd.read_csv(newsCorpora_path, header=None, sep="\t")
# Assign the column names.
colums_name = ["ID", "TITLE", "URL", "PUBLISHER", "CATEGORY", "STORY", "HOSTNAME", "TIMESTAMP"]
newsCorpora.columns = colums_name
# Keep only articles whose PUBLISHER is "Reuters", "Huffington Post", "Businessweek", "Contactmusic.com" or "Daily Mail".
newsCorpora = newsCorpora[newsCorpora["PUBLISHER"].isin(["Reuters", "Huffington Post", "Businessweek", "Contactmusic.com", "Daily Mail"])]
# Shuffle the extracted examples.
# frac: fraction of rows to sample (1 = all of them)
# random_state: fixed random seed for reproducibility
newsCorpora = newsCorpora.sample(frac=1, random_state=0)
# Predict Y = "CATEGORY" from X = "TITLE".
X = newsCorpora["TITLE"]
Y = newsCorpora["CATEGORY"]
# Target split is train:valid:test = 8:1:1.
# First split the whole set into train:(valid + test) = 8:2,
# then split (valid + test) into valid:test = 5:5.
# stratify: stratified sampling (keep the original category ratios)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, stratify=Y, random_state=0)
X_valid, X_test, Y_valid, Y_test = train_test_split(X_test, Y_test, test_size=0.5, stratify=Y_test, random_state=0)
# Concatenate X and Y column-wise.
# axis: concatenation direction (1 = columns)
XY_train = pd.concat([X_train, Y_train], axis=1)
XY_valid = pd.concat([X_valid, Y_valid], axis=1)
XY_test = pd.concat([X_test, Y_test], axis=1)
# Save the splits as tab-separated text files.
XY_train.to_csv("train.txt", sep="\t", index=False, header=None)
XY_valid.to_csv("valid.txt", sep="\t", index=False, header=None)
XY_test.to_csv("test.txt", sep="\t", index=False, header=None)
# Check the number of examples per category in each split.
print(collections.Counter(Y_train)) # Counter({'b': 4502, 'e': 4223, 't': 1219, 'm': 728})
print(collections.Counter(Y_valid)) # Counter({'b': 562, 'e': 528, 't': 153, 'm': 91})
print(collections.Counter(Y_test)) # Counter({'b': 563, 'e': 528, 't': 152, 'm': 91})
21,296 | 2fdd772a4b7a6def1680fbe52781e160c6d7d89e | from selenium import webdriver
class Gchrometest():
    """Minimal Selenium smoke test: launch Chrome and open Google."""

    def test(self):
        # NOTE(review): requires a chromedriver binary on PATH — confirm setup.
        browser = webdriver.Chrome()
        browser.get('http://google.com')


gc = Gchrometest()
gc.test()
21,297 | bd3b27a13fabffe1ca64771242942e1dd4449d4a | class Solution(object):
def longestConsecutive1(self, nums):
    """
    Return the length of the longest run of consecutive integers in *nums*.

    Runs in O(n): only run starts (values with no predecessor in the set)
    trigger a forward scan, so each value is visited a constant number of times.

    Example: [100, 4, 200, 1, 3, 2] -> 4 (the run 1, 2, 3, 4).

    :type nums: List[int]
    :rtype: int
    """
    values = set(nums)
    best = 0
    for start in values:
        # Skip values that are in the middle of a run.
        if start - 1 in values:
            continue
        end = start + 1
        while end in values:
            end += 1
        best = max(best, end - start)
    return best
def longestConsecutive(self, nums):
    """
    Same task as longestConsecutive1, solved with a dict that stores the
    length of each run at the run's two boundary values.

    :type nums: List[int]
    :rtype: int
    """
    run_len = {}
    best = 0
    for value in nums:
        if value in run_len:
            continue  # duplicates never extend a run
        # Lengths of the runs adjacent on either side, if any.
        left = run_len.get(value - 1, 0)
        right = run_len.get(value + 1, 0)
        total = left + right + 1
        run_len[value] = total
        # Keep the boundary cells up to date; interior cells are never read.
        if left:
            run_len[value - left] = total
        if right:
            run_len[value + right] = total
        best = max(best, total)
    return best
a = Solution()
# Expected output: 4 (the run 1, 2, 3, 4).
print(a.longestConsecutive([100, 4, 200, 1, 3, 2]))
|
21,298 | a5d8e1dc3739e4d413d3ecbbce126e8533f29bc5 | from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
|
21,299 | 664bdb439f7a69804aab5433cd49ef914448ce8b | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
# Auto-generated schema migration: removes model 'Lshmrghvzw' (dropping its
# 'oodpnzzt' field first), adds a nullable one-to-one from 'Avwudusy' to
# 'kakry.Kiurw', and adds a CharField to 'Nhlpe'.
dependencies = [
('kakry', '0013_auto_20150218_1627'),
('gbsaqmaxu', '0013_auto_20150218_1626'),
]
operations = [
# The field must be removed before the model itself can be deleted.
migrations.RemoveField(
model_name='lshmrghvzw',
name='oodpnzzt',
),
migrations.AddField(
model_name='avwudusy',
name='fctsrubu',
field=models.OneToOneField(null=True, related_name='+', to='kakry.Kiurw'),
),
migrations.AddField(
model_name='nhlpe',
name='imicowhnou',
field=models.CharField(default='', max_length=209),
),
migrations.DeleteModel(
name='Lshmrghvzw',
),
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.