hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7956b867b68c4203c61f992050480225252973e4 | 3,485 | py | Python | lux/core/groupby.py | Moh-Yakoub/lux | 127806f653602afeea92d6cb45917401c0ee366e | [
"Apache-2.0"
] | 1 | 2021-04-01T13:57:02.000Z | 2021-04-01T13:57:02.000Z | lux/core/groupby.py | Moh-Yakoub/lux | 127806f653602afeea92d6cb45917401c0ee366e | [
"Apache-2.0"
] | null | null | null | lux/core/groupby.py | Moh-Yakoub/lux | 127806f653602afeea92d6cb45917401c0ee366e | [
"Apache-2.0"
] | 1 | 2020-05-21T03:24:46.000Z | 2020-05-21T03:24:46.000Z | import pandas as pd
class LuxGroupBy(pd.core.groupby.groupby.GroupBy):
    """Base Lux wrapper around ``pandas`` GroupBy objects.

    Every result produced through this class gets the Lux metadata
    attributes listed in ``_metadata`` copied onto it, so that intent,
    recommendations and widget state survive groupby operations.
    """

    # Lux-specific attributes propagated from the groupby object onto
    # every derived result.
    _metadata = [
        "_intent",
        "_inferred_intent",
        "_data_type",
        "unique_values",
        "cardinality",
        "_rec_info",
        "_min_max",
        "_current_vis",
        "_widget",
        "_recommendation",
        "_prev",
        "_history",
        "_saved_export",
        "_sampled",
        "_toggle_pandas_display",
        "_message",
        "_pandas_only",
        "pre_aggregated",
        "_type_override",
    ]

    def __init__(self, *args, **kwargs):
        super(LuxGroupBy, self).__init__(*args, **kwargs)

    def _copy_metadata(self, ret_val):
        """Copy every ``_metadata`` attribute onto *ret_val* and return it.

        Attributes missing on ``self`` default to ``None`` so that
        partially-initialized groupby objects never raise here.
        """
        for attr in self._metadata:
            ret_val.__dict__[attr] = getattr(self, attr, None)
        return ret_val

    def aggregate(self, *args, **kwargs):
        return self._copy_metadata(super(LuxGroupBy, self).aggregate(*args, **kwargs))

    def _agg_general(self, *args, **kwargs):
        return self._copy_metadata(super(LuxGroupBy, self)._agg_general(*args, **kwargs))

    def _cython_agg_general(self, *args, **kwargs):
        return self._copy_metadata(super(LuxGroupBy, self)._cython_agg_general(*args, **kwargs))

    def get_group(self, *args, **kwargs):
        ret_val = self._copy_metadata(super(LuxGroupBy, self).get_group(*args, **kwargs))
        ret_val.pre_aggregated = False  # Returned LuxDataFrame isn't pre_aggregated
        return ret_val

    def filter(self, *args, **kwargs):
        ret_val = self._copy_metadata(super(LuxGroupBy, self).filter(*args, **kwargs))
        ret_val.pre_aggregated = False  # Returned LuxDataFrame isn't pre_aggregated
        return ret_val

    def apply(self, *args, **kwargs):
        # BUGFIX: the original file defined ``apply`` twice; the shadowing
        # duplicate delegated through ``super(LuxDataFrameGroupBy, self)``,
        # which raises TypeError for LuxSeriesGroupBy instances. A single
        # definition delegating through LuxGroupBy is correct for both
        # subclasses.
        ret_val = self._copy_metadata(super(LuxGroupBy, self).apply(*args, **kwargs))
        ret_val.pre_aggregated = False  # Returned LuxDataFrame isn't pre_aggregated
        return ret_val

    def size(self, *args, **kwargs):
        return self._copy_metadata(super(LuxGroupBy, self).size(*args, **kwargs))

    def __getitem__(self, *args, **kwargs):
        return self._copy_metadata(super(LuxGroupBy, self).__getitem__(*args, **kwargs))

    agg = aggregate
class LuxDataFrameGroupBy(LuxGroupBy, pd.core.groupby.generic.DataFrameGroupBy):
    """DataFrame flavour of the Lux groupby wrapper; all metadata
    propagation lives in :class:`LuxGroupBy`."""

    def __init__(self, *args, **kwargs):
        # Nothing Lux-specific to initialize -- defer to pandas entirely.
        super().__init__(*args, **kwargs)
class LuxSeriesGroupBy(LuxGroupBy, pd.core.groupby.generic.SeriesGroupBy):
    """Series flavour of the Lux groupby wrapper; all metadata
    propagation lives in :class:`LuxGroupBy`."""

    def __init__(self, *args, **kwargs):
        # Nothing Lux-specific to initialize -- defer to pandas entirely.
        super().__init__(*args, **kwargs)
| 34.85 | 84 | 0.626973 |
7956b9f26e6efef0c818cf11c3679719db284e90 | 749 | py | Python | pandas/tests/indexes/period/test_scalar_compat.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 5 | 2019-07-26T15:22:41.000Z | 2021-09-28T09:22:17.000Z | pandas/tests/indexes/period/test_scalar_compat.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z | pandas/tests/indexes/period/test_scalar_compat.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | [
"BSD-3-Clause"
] | 9 | 2020-02-05T10:24:12.000Z | 2020-02-10T13:08:50.000Z | """Tests for PeriodIndex behaving like a vectorized Period scalar"""
from pandas import Timedelta, date_range, period_range
import pandas.util.testing as tm
class TestPeriodIndexOps:
    """PeriodIndex behaving like a vectorized Period scalar."""

    def test_start_time(self):
        # start_time of a monthly period is midnight on the first of the month.
        pi = period_range(start="2016-01-01", end="2016-05-31", freq="M")
        month_starts = date_range("2016-01-01", end="2016-05-31", freq="MS")
        tm.assert_index_equal(pi.start_time, month_starts)

    def test_end_time(self):
        # end_time is the last nanosecond before the next period begins.
        pi = period_range(start="2016-01-01", end="2016-05-31", freq="M")
        month_ends = date_range("2016-01-01", end="2016-05-31", freq="M")
        month_ends = month_ends + (Timedelta(1, "D") - Timedelta(1, "ns"))
        tm.assert_index_equal(pi.end_time, month_ends)
| 41.611111 | 78 | 0.687583 |
7956bb1bc2205dfb49ecad3eee05e8e996a6f76c | 7,397 | py | Python | ftpclient.py | ryanshim/cpsc558-minimal-ftp | 076389fc7b49319b98deda776aa26453c52321a9 | [
"MIT"
] | null | null | null | ftpclient.py | ryanshim/cpsc558-minimal-ftp | 076389fc7b49319b98deda776aa26453c52321a9 | [
"MIT"
] | null | null | null | ftpclient.py | ryanshim/cpsc558-minimal-ftp | 076389fc7b49319b98deda776aa26453c52321a9 | [
"MIT"
] | null | null | null | """ Simple implementation of a FTP client program used for pedagogical
purposes. Current commands supported:
get <filename>: retrieve the file specified by filename.
put <filename>: send the file to the server specified by filename.
cd <path>: change the current working directory to the specified path.
ls: list the files in the current working directory in the server.
pwd: get the parent working directory
"""
import argparse
import hashlib
import os
import socket
import subprocess

import protocol
class FTPClient:
    """Minimal FTP-like client.

    One persistent "command" socket is opened to the server at
    construction time; every data transfer (ls / pwd / get / put) is then
    carried over a short-lived ephemeral socket whose port number is
    exchanged on the command channel using the ``protocol`` helpers.

    Supported commands:
        get <filename>: retrieve the file specified by filename.
        put <filename>: send the file to the server specified by filename.
        cd <path>: change the current working directory to the specified path.
        ls: list the files in the current working directory in the server.
        pwd: get the parent working directory.
        exit: close the session.
    """

    def __init__(self, host, port):
        """ Initializes the client socket for command connection and attempts to
        connect to the server specified by the host and port.
        @param host: server ip addr
        @param port: port to communicate on
        """
        self.host = host
        self.port = port
        self.client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect to server and enter the interactive command loop.
        try:
            self.connect((self.host, self.port))
            self.start()
        except socket.error as e:
            print(e)  # use logging later

    def __del__(self):
        # client_sock may never have been assigned if __init__ failed early;
        # guard so interpreter shutdown does not print a spurious error.
        sock = getattr(self, "client_sock", None)
        if sock is not None:
            sock.close()

    def connect(self, server):
        """ Establish a connection with the client socket
        @param server: tuple that contains the host IP and port.
        """
        self.client_sock.connect(server)

    def start(self):
        """ Main driver of the FTP client, which continuously parses any
        user args and calls the necessary member functions.
        """
        while True:
            tokens = self.parse()
            cmd = tokens[0]
            if cmd == 'put' and len(tokens) == 2:
                filename = tokens[1]
                if self.is_valid_file(filename):
                    protocol.send_msg(self.client_sock, cmd.encode())
                    data_port = protocol.recv_msg(self.client_sock).decode()
                    self.send_file(filename, int(data_port))
                else:
                    print("File does not exist")
            elif cmd == 'get' and len(tokens) == 2:
                filename = tokens[1]
                protocol.send_msg(self.client_sock, cmd.encode())
                protocol.send_msg(self.client_sock, filename.encode())
                self.recv_file()
            elif cmd == 'ls' and len(tokens) == 1:
                protocol.send_msg(self.client_sock, cmd.encode())
                self.list_files()
            elif cmd == 'cd' and len(tokens) == 2:
                path = tokens[1]
                protocol.send_msg(self.client_sock, cmd.encode())
                protocol.send_msg(self.client_sock, path.encode())
            elif cmd == 'pwd' and len(tokens) == 1:
                protocol.send_msg(self.client_sock, cmd.encode())
                self.get_pwd()
            elif cmd == 'exit':
                protocol.send_msg(self.client_sock, cmd.encode())
                self.client_sock.close()
                break

    def parse(self):
        """ Asks for user input and parses the command to extract tokens.
        """
        tokens = input(">>> ").split(' ')
        return tokens

    def _accept_data_conn(self):
        """ Open a listening ephemeral socket, advertise its port number on
        the command channel, and block until the server connects back.

        Returns (conn, listener); the caller is responsible for closing
        both sockets.
        """
        ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ephem_sock.bind(('', 0))
        ephem_sock.listen(1)
        # Send the ephemeral port number to server
        ephem_name = ephem_sock.getsockname()
        protocol.send_msg(self.client_sock, str(ephem_name[1]).encode())
        conn, addr = ephem_sock.accept()
        return conn, ephem_sock

    def get_pwd(self):
        """ Receives the output of cwd from the server and prints it.
        """
        conn, ephem_sock = self._accept_data_conn()
        pwd_output = protocol.recv_msg(conn).decode()
        print(pwd_output)
        conn.close()
        ephem_sock.close()

    def list_files(self):
        """ Receives the output of ls in the cwd from the server and prints it.
        """
        conn, ephem_sock = self._accept_data_conn()
        ls_output = protocol.recv_msg(conn).decode()
        print(ls_output)
        conn.close()
        ephem_sock.close()

    def send_file(self, filename, ephem_port):
        """ Connect to the server's ephemeral data port and upload a file.
        An MD5 digest is sent after the payload so the server can verify
        integrity.
        @param filename: path to the file to send.
        @param ephem_port: data port the server is listening on.
        """
        # Read the payload up front; 'with' guarantees the handle is closed
        # (the original version leaked the file object).
        with open(filename, 'rb') as infile:
            data = infile.read()
        ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ephem_sock.connect((self.host, ephem_port))
        print('Sending {} to {}'.format(filename, self.host))
        try:
            protocol.send_msg(ephem_sock, filename.encode())
            protocol.send_msg(ephem_sock, data)
            # send md5 hash
            md5_send = hashlib.md5(data).hexdigest()
            protocol.send_msg(ephem_sock, md5_send.encode())
        except Exception as e:
            print('Error: {}'.format(e))
            print('Unsuccessful transfer of {}'.format(filename))
            ephem_sock.close()
            return
        print('Transfer complete.')
        ephem_sock.close()

    def recv_file(self):
        """ Receive a file through an ephemeral socket from the server and
        store it in the current working directory.
        """
        conn, ephem_sock = self._accept_data_conn()
        try:
            # Receive the file and store in cwd
            filename = protocol.recv_msg(conn).decode()
            if filename == 'NXFILE':
                print('File does not exist.')
                return
            print('Receiving {} from {}'.format(filename, self.host))
            try:
                filedata = protocol.recv_msg(conn).decode()
                # Check file integrity before writing anything to disk
                md5_recv = protocol.recv_msg(conn).decode()
                md5_local = hashlib.md5(filedata.encode()).hexdigest()
                if md5_recv != md5_local:
                    print('Corrupt file data during transfer.')
                    return
            except Exception as e:
                print(e)
                print('Error receiving file {}'.format(filename))
                return
            with open(filename, 'w') as outfile:
                outfile.write(filedata)
            print('Transfer complete.')
        finally:
            # Always release the data sockets, even on the early error
            # returns (the original version leaked them on those paths).
            conn.close()
            ephem_sock.close()

    def is_valid_file(self, filename):
        """ Checks if the path is valid and if the file exists.
        @param filename: name of file including path
        """
        return os.path.exists(filename)
if __name__ == '__main__':
    # Only the server address comes from the command line; the command
    # port is fixed at 12000 to match the server.
    parser = argparse.ArgumentParser()
    parser.add_argument("ip")
    cli_args = parser.parse_args()
    client = FTPClient(cli_args.ip, 12000)
| 34.891509 | 80 | 0.584426 |
7956bc0d25c06cc3e0ba079d10e1db799a17c5bb | 990 | py | Python | table.py | TeamZombeavers/collabothon2021 | db9ddbb2a189c776e41243c484d8fdf639a6c2b0 | [
"Apache-2.0"
] | null | null | null | table.py | TeamZombeavers/collabothon2021 | db9ddbb2a189c776e41243c484d8fdf639a6c2b0 | [
"Apache-2.0"
] | null | null | null | table.py | TeamZombeavers/collabothon2021 | db9ddbb2a189c776e41243c484d8fdf639a6c2b0 | [
"Apache-2.0"
] | null | null | null | from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh.io import output_file, show
from google.cloud import bigquery
# NOTE(review): credential committed to source control -- this key should be
# rotated and loaded from the environment or a secret manager instead.
api_key = 'AIzaSyCCKJAMQRcfWjlXJMQhzsVA22FbAGqEDZM'
# BigQuery coordinates of the table previewed by load_preview_data().
GCP_PROJECT = 'collabothon21-team-a'
DATASET_NAME = 'testlodz'
TABLE_NAME = 'tree_patches'
# Preview query: a capped sample (100 rows) of four measurement columns.
QUERY = (
    f'SELECT NDVI, TEMPERATURA, DRZEWA_POW, DRZEWA_POW_PROC '
    f'FROM {GCP_PROJECT}.{DATASET_NAME}.{TABLE_NAME} LIMIT 100'
)
def load_preview_data():
    """Run the preview QUERY against BigQuery and wrap the result in a
    Bokeh DataTable widget (750x500)."""
    bq_client = bigquery.Client()
    frame = bq_client.query(QUERY).to_dataframe()
    # Each previewed column keeps its own name as the display title.
    table_columns = [
        TableColumn(field=name, title=name)
        for name in ("NDVI", "TEMPERATURA", "DRZEWA_POW", "DRZEWA_POW_PROC")
    ]
    return DataTable(
        source=ColumnDataSource(frame),
        columns=table_columns,
        width=750,
        height=500,
    )
| 31.935484 | 71 | 0.728283 |
7956bc2be2fa66a580b90e3e3249ec6e93b86018 | 202 | py | Python | silent.py | ilhomidin/remote-screenshot | f2cf3477979e3ed1eb53d4f40cd25784454fe9fc | [
"MIT"
] | null | null | null | silent.py | ilhomidin/remote-screenshot | f2cf3477979e3ed1eb53d4f40cd25784454fe9fc | [
"MIT"
] | null | null | null | silent.py | ilhomidin/remote-screenshot | f2cf3477979e3ed1eb53d4f40cd25784454fe9fc | [
"MIT"
] | null | null | null | """
Run remscreen without any output.
Write stdout to the `remscreen.log` file.
"""
import subprocess
# Launch the screenshot tool with its stdout redirected into the log file.
# BUGFIX: the command is passed as an argv list -- a single string without
# shell=True only works on Windows; on POSIX, Popen would look for an
# executable literally named "python remscreen.py". Closing the log file
# right away is fine: the child keeps its own inherited handle.
with open("remscreen.log", "w") as file:
    subprocess.Popen(["python", "remscreen.py"], stdout=file)
| 20.2 | 56 | 0.717822 |
7956bd29bf080526aa02c9987f07218234aeba46 | 5,678 | py | Python | ccoin.py | xczh/ccoin | a3f080f6113d74ec775a78dd44a9bbca3728b3f9 | [
"Apache-2.0"
] | 1 | 2016-03-02T07:41:31.000Z | 2016-03-02T07:41:31.000Z | ccoin.py | xczh/ccoin | a3f080f6113d74ec775a78dd44a9bbca3728b3f9 | [
"Apache-2.0"
] | 1 | 2015-07-07T10:14:57.000Z | 2015-07-07T12:28:44.000Z | ccoin.py | xczh/ccoin | a3f080f6113d74ec775a78dd44a9bbca3728b3f9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
"""
Purpose: ccoin Main
Author: xczh <christopher.winnie2012@gmail.com>
Copyright (c) 2015 xczh. All rights reserved.
"""
import conf
import logging
import sys
import os
import time
from login import Login
from modules import Requests
from modules.PushCode import PushCodeModule
from modules.Point import PointModule
from modules.WebHook import WebHookModule
class Ccoin(object):
    """Driver class for the coding.net coin-farming tool (Python 2 code).

    All state is kept at class level; ``main()`` is the entry point.
    """
    # Version
    version = '1.0.5'
    # CLI args
    args = None
    # Logger
    logger = None
    # Module Shared Info
    mInfo = {}
    # User
    login = False
    sid =None
    userinfo = None
    global_key = None
    @classmethod
    def initLogger(cls):
        """Configure the class-level logger.

        DEBUG mode logs to the console; otherwise logging goes to a daily
        file under conf.LOG_DIR, falling back to console INFO logging if
        the log file cannot be opened.
        """
        logger=logging.getLogger('Ccoin')
        logger.setLevel(logging.DEBUG)
        format = logging.Formatter(conf.LOG_FORMAT,conf.LOG_DATE_FORMAT)
        if conf.DEBUG:
            # Debug mode
            # Console Handler
            console = logging.StreamHandler()
            console.setLevel(logging.DEBUG)
            console.setFormatter(format)
            logger.addHandler(console)
        else:
            # Run (production) mode
            # File Handler
            try:
                file_handler = logging.FileHandler(filename=os.path.join(conf.LOG_DIR,'ccoin-%s.log' % time.strftime('%Y%m%d')),mode='a')
            except IOError,e:
                print 'IOError: %s (%s)' %(e.strerror,e.filename)
                print 'Warning: Log will not be write to file!'
                # Use streamHandler instead
                console = logging.StreamHandler()
                console.setLevel(logging.INFO)
                console.setFormatter(format)
                logger.addHandler(console)
            else:
                # Map the configured textual level onto the file handler;
                # anything other than INFO/ERROR falls back to WARNING.
                if conf.LOG_LEVEL == 'INFO':
                    file_handler.setLevel(logging.INFO)
                elif conf.LOG_LEVEL == 'ERROR':
                    file_handler.setLevel(logging.ERROR)
                else:
                    file_handler.setLevel(logging.WARNING)
                file_handler.setFormatter(format)
                logger.addHandler(file_handler)
        cls.logger = logger
    @classmethod
    def argsParser(cls):
        """Parse command-line options into ``cls.args``."""
        import argparse
        parser = argparse.ArgumentParser(description='an automatic acquisition of coding coins tool.')
        parser.add_argument('-u','--user', dest='user',action='store',type=str,default='',
                        help='Your coding.net Email or Personality Suffix')
        parser.add_argument('-p','--pwd', dest='pwd',action='store',type=str,default='',
                        help='Your coding.net Password')
        parser.add_argument('-P','--push-project', dest='push_project',action='store',type=str,default='',
                        help='push to which project')
        parser.add_argument('-B','--push-branch', dest='push_branch',action='store',type=str,default='',
                        help='push to which branch')
        parser.add_argument('-D','--push-path', dest='push_path',action='store',type=str,default='',
                        help='push to project\'s dir name')
        parser.add_argument('-v','--version', action='version', version='ccoin %s' % cls.version)
        cls.args = parser.parse_args()
    @classmethod
    def update(cls):
        """Check the remote repository for a newer release.

        Returns False when the version file cannot be fetched or parsed;
        True otherwise (a warning is logged if a newer version exists).
        """
        import json
        cls.logger.info('=== ccoin %s ===' %cls.version)
        # URL
        url = r'https://coding.net/u/xczh/p/coding_coins/git/raw/master/update.html'
        r = Requests.get(url)
        if r.status_code != 200:
            cls.logger.error('Update Fail. The HTTP Status is %d' % r.status_code)
            return False
        else:
            cls.logger.debug('HTTP response body: %s' % r.text)
            try:
                ret = json.loads(r.text)
            except ValueError:
                cls.logger.error('Update Fail. Remote repository return: %s' %r.text)
                return False
            else:
                cls.logger.info('Latest Version is: %s' % ret['version'])
                if ret['version'] > cls.version:
                    # Need Update
                    cls.logger.warn('Current version is old. It may cause fail. You can get newest version by this command:'
                        'git pull origin dev:dev')
        return True
    @classmethod
    def main(cls):
        """Entry point: set up logging/args, log in, run enabled modules."""
        # init logger
        cls.initLogger()
        # get cli args
        cls.argsParser()
        # check for update
        cls.update()
        # login
        u = Login(cls.args.user,cls.args.pwd)
        if u.login():
            msg = u.getResult()
            cls.login = True
            cls.global_key = msg['global_key']
            cls.sid = msg['sid']
            cls.userinfo = msg['userinfo']
        else:
            # login failed, exit.
            sys.exit(-1)
        # build module args
        mArgs = {
            'login':cls.login,
            'global_key':cls.global_key,
            'cookie':{'sid':cls.sid},
            'userinfo':cls.userinfo,
            'PUSH_PROJECT':cls.args.push_project,
            'PUSH_BRANCH':cls.args.push_branch,
            'PUSH_PATH':cls.args.push_path,
            'WEBHOOK_KEY':'',
            'WEBHOOK_URL':'',
        }
        # Any unset value falls back to the same-named setting in conf.
        for k,v in mArgs.iteritems():
            if not v and k in conf.__dict__:
                mArgs[k] = conf.__dict__[k]
        cls.logger.debug(str(mArgs))
        # module work: each enabled module class is looked up by name in
        # this module's globals and run in sequence.
        for name in conf.ENABLED_MODULE:
            m = globals()[name](mArgs,cls.mInfo)
            m.start()
        # end
        cls.logger.info('=== ccoin finished. Global Key: %s ===\n' %cls.global_key)
if __name__ == '__main__':
    # Entry point: delegate everything to the class-level driver.
    Ccoin.main()
| 34.621951 | 137 | 0.549313 |
7956bd70187eb2b75154b4aa24ceba3e786f46c1 | 730 | py | Python | fileuploads/migrations/0036_auto_20170106_1336.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | [
"MIT"
] | 23 | 2016-03-24T00:31:47.000Z | 2022-02-10T21:27:53.000Z | fileuploads/migrations/0036_auto_20170106_1336.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | [
"MIT"
] | 148 | 2016-04-03T00:22:55.000Z | 2020-08-01T20:08:03.000Z | fileuploads/migrations/0036_auto_20170106_1336.py | fr33ky/signalserver | ce360cd89732c9d9270d7af04e38e55f6570d6a7 | [
"MIT"
] | 11 | 2016-04-24T03:31:31.000Z | 2019-09-03T16:51:08.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2017-01-06 18:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10 dev, 2017-01-06): adds two many-to-many
    # fields on Video, linking uploads to signal Outputs and group Results.
    dependencies = [
        ('signals', '0012_output_user_name'),
        ('groups', '0005_auto_20161209_2103'),
        ('fileuploads', '0035_video_groups'),
    ]
    operations = [
        migrations.AddField(
            model_name='video',
            name='outputs',
            field=models.ManyToManyField(to='signals.Output'),
        ),
        migrations.AddField(
            model_name='video',
            name='results',
            field=models.ManyToManyField(to='groups.Result'),
        ),
    ]
7956bdd90403bc85e55ac1c95acc19eb7ede9263 | 59,560 | py | Python | Lib/test/test_generators.py | Golfist/cpython | c4750959acbfc3057f12aaec832483ba30898d1c | [
"PSF-2.0"
] | 27 | 2017-04-21T14:57:04.000Z | 2021-11-03T22:10:38.000Z | Lib/test/test_generators.py | Golfist/cpython | c4750959acbfc3057f12aaec832483ba30898d1c | [
"PSF-2.0"
] | null | null | null | Lib/test/test_generators.py | Golfist/cpython | c4750959acbfc3057f12aaec832483ba30898d1c | [
"PSF-2.0"
] | 9 | 2017-04-26T14:14:05.000Z | 2020-12-14T16:26:41.000Z | import copy
import gc
import pickle
import sys
import unittest
import warnings
import weakref
import inspect
from test import support
class FinalizationTest(unittest.TestCase):
    """Interaction of generator finalization with the garbage collector."""
    def test_frame_resurrect(self):
        # A generator frame can be resurrected by a generator's finalization.
        def gen():
            nonlocal frame
            try:
                yield
            finally:
                frame = sys._getframe()
        g = gen()
        wr = weakref.ref(g)
        next(g)
        del g
        support.gc_collect()
        self.assertIs(wr(), None)  # generator object itself was collected
        self.assertTrue(frame)     # ...but its frame was kept alive by `frame`
        del frame
        support.gc_collect()
    def test_refcycle(self):
        # A generator caught in a refcycle gets finalized anyway.
        old_garbage = gc.garbage[:]
        finalized = False
        def gen():
            nonlocal finalized
            try:
                g = yield
                yield 1
            finally:
                finalized = True
        g = gen()
        next(g)
        g.send(g)  # create a self-cycle: the suspended generator holds itself
        self.assertGreater(sys.getrefcount(g), 2)
        self.assertFalse(finalized)
        del g
        support.gc_collect()
        self.assertTrue(finalized)
        self.assertEqual(gc.garbage, old_garbage)  # nothing left uncollectable
    def test_lambda_generator(self):
        # Issue #23192: Test that a lambda returning a generator behaves
        # like the equivalent function
        f = lambda: (yield 1)
        def g(): return (yield 1)
        # test 'yield from'
        f2 = lambda: (yield from g())
        def g2(): return (yield from g())
        f3 = lambda: (yield from f())
        def g3(): return (yield from f())
        for gen_fun in (f, g, f2, g2, f3, g3):
            gen = gen_fun()
            self.assertEqual(next(gen), 1)
            with self.assertRaises(StopIteration) as cm:
                gen.send(2)
            self.assertEqual(cm.exception.value, 2)  # sent value becomes return value
class GeneratorTest(unittest.TestCase):
    """Generator-object attributes (__name__/__qualname__) and the
    deliberate lack of copy/pickle support."""
    def test_name(self):
        def func():
            yield 1
        # check generator names
        gen = func()
        self.assertEqual(gen.__name__, "func")
        self.assertEqual(gen.__qualname__,
                         "GeneratorTest.test_name.<locals>.func")
        # modify generator names
        gen.__name__ = "name"
        gen.__qualname__ = "qualname"
        self.assertEqual(gen.__name__, "name")
        self.assertEqual(gen.__qualname__, "qualname")
        # generator names must be a string and cannot be deleted
        self.assertRaises(TypeError, setattr, gen, '__name__', 123)
        self.assertRaises(TypeError, setattr, gen, '__qualname__', 123)
        self.assertRaises(TypeError, delattr, gen, '__name__')
        self.assertRaises(TypeError, delattr, gen, '__qualname__')
        # modify names of the function creating the generator
        func.__qualname__ = "func_qualname"
        func.__name__ = "func_name"
        gen = func()
        self.assertEqual(gen.__name__, "func_name")
        self.assertEqual(gen.__qualname__, "func_qualname")
        # unnamed generator
        gen = (x for x in range(10))
        self.assertEqual(gen.__name__,
                         "<genexpr>")
        self.assertEqual(gen.__qualname__,
                         "GeneratorTest.test_name.<locals>.<genexpr>")
    def test_copy(self):
        # Generators cannot be shallow-copied.
        def f():
            yield 1
        g = f()
        with self.assertRaises(TypeError):
            copy.copy(g)
    def test_pickle(self):
        # Generators cannot be pickled under any protocol.
        def f():
            yield 1
        g = f()
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises((TypeError, pickle.PicklingError)):
                pickle.dumps(g, proto)
class ExceptionTest(unittest.TestCase):
    # Tests for the issue #23353: check that the currently handled exception
    # is correctly saved/restored in PyEval_EvalFrameEx().
    def test_except_throw(self):
        # throw() into a generator must neither lose nor clobber the
        # exception currently being handled by the caller.
        def store_raise_exc_generator():
            try:
                self.assertEqual(sys.exc_info()[0], None)
                yield
            except Exception as exc:
                # exception raised by gen.throw(exc)
                self.assertEqual(sys.exc_info()[0], ValueError)
                self.assertIsNone(exc.__context__)
                yield
                # ensure that the exception is not lost
                self.assertEqual(sys.exc_info()[0], ValueError)
                yield
                # we should be able to raise back the ValueError
                raise
        make = store_raise_exc_generator()
        next(make)
        try:
            raise ValueError()
        except Exception as exc:
            try:
                make.throw(exc)
            except Exception:
                pass
            next(make)
        with self.assertRaises(ValueError) as cm:
            next(make)
        self.assertIsNone(cm.exception.__context__)
        self.assertEqual(sys.exc_info(), (None, None, None))
    def test_except_next(self):
        # Resuming a generator inside an except block exposes the handled
        # exception to the generator's frame.
        def gen():
            self.assertEqual(sys.exc_info()[0], ValueError)
            yield "done"
        g = gen()
        try:
            raise ValueError
        except Exception:
            self.assertEqual(next(g), "done")
        self.assertEqual(sys.exc_info(), (None, None, None))
    def test_except_gen_except(self):
        def gen():
            try:
                self.assertEqual(sys.exc_info()[0], None)
                yield
                # we are called from "except ValueError:", TypeError must
                # inherit ValueError in its context
                raise TypeError()
            except TypeError as exc:
                self.assertEqual(sys.exc_info()[0], TypeError)
                self.assertEqual(type(exc.__context__), ValueError)
            # here we are still called from the "except ValueError:"
            self.assertEqual(sys.exc_info()[0], ValueError)
            yield
            self.assertIsNone(sys.exc_info()[0])
            yield "done"
        g = gen()
        next(g)
        try:
            raise ValueError
        except Exception:
            next(g)
        self.assertEqual(next(g), "done")
        self.assertEqual(sys.exc_info(), (None, None, None))
    def test_except_throw_exception_context(self):
        def gen():
            try:
                try:
                    self.assertEqual(sys.exc_info()[0], None)
                    yield
                except ValueError:
                    # we are called from "except ValueError:"
                    self.assertEqual(sys.exc_info()[0], ValueError)
                    raise TypeError()
            except Exception as exc:
                self.assertEqual(sys.exc_info()[0], TypeError)
                self.assertEqual(type(exc.__context__), ValueError)
            # we are still called from "except ValueError:"
            self.assertEqual(sys.exc_info()[0], ValueError)
            yield
            self.assertIsNone(sys.exc_info()[0])
            yield "done"
        g = gen()
        next(g)
        try:
            raise ValueError
        except Exception as exc:
            g.throw(exc)
        self.assertEqual(next(g), "done")
        self.assertEqual(sys.exc_info(), (None, None, None))
    def test_stopiteration_warning(self):
        # See also PEP 479.
        def gen():
            raise StopIteration
            yield
        with self.assertRaises(StopIteration), \
             self.assertWarnsRegex(DeprecationWarning, "StopIteration"):
            next(gen())
        with self.assertRaisesRegex(DeprecationWarning,
                                    "generator .* raised StopIteration"), \
             warnings.catch_warnings():
            warnings.simplefilter('error')
            next(gen())
    def test_tutorial_stopiteration(self):
        # Raise StopIteration" stops the generator too:
        def f():
            yield 1
            raise StopIteration
            yield 2 # never reached
        g = f()
        self.assertEqual(next(g), 1)
        with self.assertWarnsRegex(DeprecationWarning, "StopIteration"):
            with self.assertRaises(StopIteration):
                next(g)
        with self.assertRaises(StopIteration):
            # This time StopIteration isn't raised from the generator's body,
            # hence no warning.
            next(g)
    def test_return_tuple(self):
        # A returned tuple is passed through as StopIteration.value intact
        # (not unpacked into StopIteration's args).
        def g():
            return (yield 1)
        gen = g()
        self.assertEqual(next(gen), 1)
        with self.assertRaises(StopIteration) as cm:
            gen.send((2,))
        self.assertEqual(cm.exception.value, (2,))
    def test_return_stopiteration(self):
        # Returning a StopIteration instance wraps it as the value of the
        # raised StopIteration rather than replacing it.
        def g():
            return (yield 1)
        gen = g()
        self.assertEqual(next(gen), 1)
        with self.assertRaises(StopIteration) as cm:
            gen.send(StopIteration(2))
        self.assertIsInstance(cm.exception.value, StopIteration)
        self.assertEqual(cm.exception.value.value, 2)
class YieldFromTests(unittest.TestCase):
    """gi_yieldfrom must expose the sub-iterator a generator is currently
    delegating to, and be None whenever it is not suspended in a
    "yield from"."""
    def test_generator_gi_yieldfrom(self):
        def a():
            self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_RUNNING)
            self.assertIsNone(gen_b.gi_yieldfrom)
            yield
            self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_RUNNING)
            self.assertIsNone(gen_b.gi_yieldfrom)
        def b():
            self.assertIsNone(gen_b.gi_yieldfrom)
            yield from a()
            self.assertIsNone(gen_b.gi_yieldfrom)
            yield
            self.assertIsNone(gen_b.gi_yieldfrom)
        gen_b = b()
        self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_CREATED)
        self.assertIsNone(gen_b.gi_yieldfrom)
        gen_b.send(None)
        # Suspended inside "yield from a()": the delegate is visible.
        self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_SUSPENDED)
        self.assertEqual(gen_b.gi_yieldfrom.gi_code.co_name, 'a')
        gen_b.send(None)
        self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_SUSPENDED)
        self.assertIsNone(gen_b.gi_yieldfrom)
        [] = gen_b # Exhaust generator
        self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_CLOSED)
        self.assertIsNone(gen_b.gi_yieldfrom)
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, "return" and StopIteration are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'gi_yieldfrom', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).')
Implement next(self).
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
assumes the list has already been produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the Hamming
sequence for hours with little or no increase in memory usage.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
    """Backtracking cross-product generator (straightforward version).

    *gs* is a list of no-argument callables, each returning an iterable.
    Acts exactly like ``len(gs)`` nested for-loops: for every combination
    of one value per iterable, yields a list holding the current values.
    The SAME list object is yielded every time (it is mutated in place),
    so callers who want to keep a result must copy it.
    """
    slots = [None] * len(gs)

    def descend(depth):
        # All slots filled: a complete combination is ready.
        if depth >= len(gs):
            yield slots
            return
        # Pump a fresh iterator for this slot; each value triggers a full
        # sweep of the deeper slots.
        for slots[depth] in gs[depth]():
            for combo in descend(depth + 1):
                yield combo

    for combo in descend(0):
        yield combo
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient.  By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.

def conjoin(gs):
    """Backtracking cross-product generator, hand-unrolled for speed.

    Same contract as simple_conjoin(): ``gs`` is a list of no-argument
    callables returning iterables; the single shared ``values`` list is
    yielded (mutated in place) once per combination.  Loop nests are
    peeled off three at a time via _gen3 to cut per-item generator
    recursion overhead.
    """
    n = len(gs)
    values = [None] * n

    # Do one loop nest at time recursively, until the # of loop nests
    # remaining is divisible by 3.
    def gen(i):
        if i >= n:
            # Every slot filled: emit the shared result list.
            yield values
        elif (n-i) % 3:
            # Peel off one level so the remainder is a multiple of 3.
            ip1 = i+1
            for values[i] in gs[i]():
                for x in gen(ip1):
                    yield x
        else:
            for x in _gen3(i):
                yield x

    # Do three loop nests at a time, recursing only if at least three more
    # remain.  Don't call directly: this is an internal optimization for
    # gen's use.
    def _gen3(i):
        assert i < n and (n-i) % 3 == 0
        ip1, ip2, ip3 = i+1, i+2, i+3
        g, g1, g2 = gs[i : ip3]

        if ip3 >= n:
            # These are the last three, so we can yield values directly.
            for values[i] in g():
                for values[ip1] in g1():
                    for values[ip2] in g2():
                        yield values
        else:
            # At least 6 loop nests remain; peel off 3 and recurse for the
            # rest.
            for values[i] in g():
                for values[ip1] in g1():
                    for values[ip2] in g2():
                        for x in _gen3(ip3):
                            yield x

    for x in gen(0):
        yield x
# And one more approach:  For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels).  In such cases Python is likely to run out of
# stack space due to recursion.  So here's a recursion-free version of
# conjoin too.
# NOTE WELL:  This allows large problems to be solved with only trivial
# demands on stack space.  Without explicitly resumable generators, this is
# much harder to achieve.  OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.

def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
    """Iterative conjoin: explicit iterator stack instead of recursion.

    Same contract as conjoin(), but uses O(1) Python stack depth, so very
    deep backtracking problems don't blow the recursion limit.
    """
    n = len(gs)
    values = [None] * n
    iters  = [None] * n
    _StopIteration = StopIteration  # make local because caught a *lot*
    i = 0
    while 1:
        # Descend: pump a fresh iterator for each remaining slot.
        try:
            while i < n:
                it = iters[i] = gs[i]().__next__
                values[i] = it()
                i += 1
        except _StopIteration:
            # Some slot's iterator was empty; fall through to backtrack.
            pass
        else:
            assert i == n
            yield values

        # Backtrack until an older iterator can be resumed.
        i -= 1
        while i >= 0:
            try:
                values[i] = iters[i]()
                # Success!  Start fresh at next level.
                i += 1
                break
            except _StopIteration:
                # Continue backtracking.
                i -= 1
        else:
            # Even the outermost iterator is exhausted: all done.
            assert i < 0
            break
# A conjoin-based N-Queens solver.
class Queens:
    """N-Queens solver built on conjoin(): one row-generator per row."""

    def __init__(self, n):
        self.n = n
        rangen = range(n)

        # Assign a unique int to each column and diagonal.
        # columns:  n of those, range(n).
        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
        # based.
        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
        # each, smallest i+j is 0, largest is 2n-2.

        # For each square, compute a bit vector of the columns and
        # diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
        self.rowgenerators = []
        for i in rangen:
            rowuses = [(1 << j) |                  # column ordinal
                       (1 << (n + i-j + n-1)) |    # NW-SE ordinal
                       (1 << (n + 2*n-1 + i+j))    # NE-SW ordinal
                       for j in rangen]

            # Note: rowgen closes over self, so the generators share the
            # self.used bitmask; yield/resume undoes the reservation on
            # backtrack.
            def rowgen(rowuses=rowuses):
                for j in rangen:
                    uses = rowuses[j]
                    if uses & self.used == 0:
                        self.used |= uses
                        yield j
                        self.used &= ~uses

            self.rowgenerators.append(rowgen)

    # Generate solutions.
    def solve(self):
        """Yield each solution as a list mapping row -> column.

        The same list object is yielded each time (conjoin semantics);
        copy it if you need to keep it.
        """
        self.used = 0
        for row2col in conjoin(self.rowgenerators):
            yield row2col

    def printsolution(self, row2col):
        """Pretty-print one solution (a row -> column list) as a board."""
        n = self.n
        assert n == len(row2col)
        sep = "+" + "-+" * n
        print(sep)
        for i in range(n):
            squares = [" " for j in range(n)]
            squares[row2col[i]] = "Q"
            print("|" + "|".join(squares) + "|")
        print(sep)
# A conjoin-based Knight's Tour solver.  This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.

class Knights:
    """Knight's Tour (closed-cycle) solver for an m x n board, via conjoin().

    Squares are numbered 0 .. m*n-1 (row-major); see coords2index().
    Pass hard=1 to use the stronger (but slower) move-ordering heuristic.
    """

    def __init__(self, m, n, hard=0):
        self.m, self.n = m, n

        # solve() will set up succs[i] to be a list of square #i's
        # successors.
        succs = self.succs = []

        # Remove i0 from each of its successor's successor lists, i.e.
        # successors can't go back to i0 again.  Return 0 if we can
        # detect this makes a solution impossible, else return 1.

        def remove_from_successors(i0, len=len):
            # If we remove all exits from a free square, we're dead:
            # even if we move to it next, we can't leave it again.
            # If we create a square with one exit, we must visit it next;
            # else somebody else will have to visit it, and since there's
            # only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
            # single exit, we can only move to one of them next, leaving
            # the other one a dead end.
            ne0 = ne1 = 0
            for i in succs[i0]:
                s = succs[i]
                s.remove(i0)
                e = len(s)
                if e == 0:
                    ne0 += 1
                elif e == 1:
                    ne1 += 1
            return ne0 == 0 and ne1 < 2

        # Put i0 back in each of its successor's successor lists.

        def add_to_successors(i0):
            for i in succs[i0]:
                succs[i].append(i0)

        # Generate the first move.
        def first():
            if m < 1 or n < 1:
                return

            # Since we're looking for a cycle, it doesn't matter where we
            # start.  Starting in a corner makes the 2nd move easy.
            corner = self.coords2index(0, 0)
            remove_from_successors(corner)
            self.lastij = corner
            yield corner
            add_to_successors(corner)

        # Generate the second moves.
        def second():
            corner = self.coords2index(0, 0)
            assert self.lastij == corner  # i.e., we started in the corner
            if m < 3 or n < 3:
                return
            assert len(succs[corner]) == 2
            assert self.coords2index(1, 2) in succs[corner]
            assert self.coords2index(2, 1) in succs[corner]
            # Only two choices.  Whichever we pick, the other must be the
            # square picked on move m*n, as it's the only way to get back
            # to (0, 0).  Save its index in self.final so that moves before
            # the last know it must be kept free.
            for i, j in (1, 2), (2, 1):
                this = self.coords2index(i, j)
                final = self.coords2index(3-i, 3-j)
                self.final = final

                remove_from_successors(this)
                succs[final].append(corner)
                self.lastij = this
                yield this
                # Undo the reservation when resumed for backtracking.
                succs[final].remove(corner)
                add_to_successors(this)

        # Generate moves 3 thru m*n-1.
        def advance(len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    # Forced move: discard everything else.
                    candidates = [(e, i)]
                    break
                candidates.append((e, i))
            else:
                # No forced move found; try fewest-exits first (Warnsdorff).
                candidates.sort()

            for e, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                    add_to_successors(i)

        # Generate moves 3 thru m*n-1.  Alternative version using a
        # stronger (but more expensive) heuristic to order successors.
        # Since the # of backtracking levels is m*n, a poor move early on
        # can take eons to undo.  Smallest square board for which this
        # matters a lot is 52x52.
        def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
            # If some successor has only one exit, must take it.
            # Else favor successors with fewer exits.
            # Break ties via max distance from board centerpoint (favor
            # corners and edges whenever possible).
            candidates = []
            for i in succs[self.lastij]:
                e = len(succs[i])
                assert e > 0, "else remove_from_successors() pruning flawed"
                if e == 1:
                    candidates = [(e, 0, i)]
                    break
                i1, j1 = self.index2coords(i)
                d = (i1 - vmid)**2 + (j1 - hmid)**2
                candidates.append((e, -d, i))
            else:
                candidates.sort()

            for e, d, i in candidates:
                if i != self.final:
                    if remove_from_successors(i):
                        self.lastij = i
                        yield i
                    add_to_successors(i)

        # Generate the last move.
        def last():
            assert self.final in succs[self.lastij]
            yield self.final

        if m*n < 4:
            self.squaregenerators = [first]
        else:
            self.squaregenerators = [first, second] + \
                [hard and advance_hard or advance] * (m*n - 3) + \
                [last]

    def coords2index(self, i, j):
        """Map board coordinates (row i, column j) to a square number."""
        assert 0 <= i < self.m
        assert 0 <= j < self.n
        return i * self.n + j

    def index2coords(self, index):
        """Inverse of coords2index(): square number -> (row, column)."""
        assert 0 <= index < self.m * self.n
        return divmod(index, self.n)

    def _init_board(self):
        """(Re)build succs: for each square, the knight-move neighbors."""
        succs = self.succs
        del succs[:]
        m, n = self.m, self.n
        c2i = self.coords2index
        offsets = [( 1,  2), ( 2,  1), ( 2, -1), ( 1, -2),
                   (-1, -2), (-2, -1), (-2,  1), (-1,  2)]
        rangen = range(n)
        for i in range(m):
            for j in rangen:
                s = [c2i(i+io, j+jo) for io, jo in offsets
                     if 0 <= i+io < m and
                        0 <= j+jo < n]
                succs.append(s)

    # Generate solutions.
    def solve(self):
        """Yield each closed tour as a list of square numbers in visit order."""
        self._init_board()
        for x in conjoin(self.squaregenerators):
            yield x

    def printsolution(self, x):
        """Pretty-print one tour: each square shows its 1-based move number."""
        m, n = self.m, self.n
        assert len(x) == m*n
        w = len(str(m*n))
        format = "%" + str(w) + "d"

        squares = [[None] * n for i in range(m)]
        k = 1
        for i in x:
            i1, j1 = self.index2coords(i)
            squares[i1][j1] = format % k
            k += 1

        sep = "+" + ("-" * w + "+") * n
        print(sep)
        for i in range(m):
            row = squares[i]
            print("|" + "|".join(row) + "|")
        print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> "RuntimeError: generator ignored GeneratorExit" in sys.stderr.getvalue()
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... def invoke(message):
... raise RuntimeError(message)
... invoke("test")
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... "Exception ignored in" in err
... "RuntimeError: test" in err
... "Traceback" in err
... "in invoke" in err
... finally:
... sys.stderr = old
True
True
True
True
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
# Doctest registry: the doctest module's __test__ protocol makes regrtest /
# run_doctest execute the doctests embedded in each of these strings,
# reporting them under the short name given as the key.
__test__ = {"tut": tutorial_tests,
            "pep": pep_tests,
            "email": email_tests,
            "fun": fun_tests,
            "syntax": syntax_tests,
            "conjoin": conjoin_tests,
            "weakref": weakref_tests,
            "coroutine": coroutine_tests,
            "refleaks": refleaks_tests,
            }
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Run this module's unittest suite, then its doctest suites.

    ``verbose`` is forwarded to ``support.run_doctest`` (None means use
    the -v flag from sys.argv, as both doctest and regrtest do).
    """
    from test import support
    from test import test_generators
    support.run_unittest(__name__)
    support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)  # verbose=1: run the doctest suites verbosely when invoked as a script
| 27.03586 | 88 | 0.524765 |
7956bde1be6db88cae951ab2ff45825dfdec9ee6 | 1,140 | py | Python | tests/source/github_org_test.py | KindDragon/all-repos | 88f50d7bf10247bb14dd82b6f8b18957b2a9941d | [
"MIT"
] | null | null | null | tests/source/github_org_test.py | KindDragon/all-repos | 88f50d7bf10247bb14dd82b6f8b18957b2a9941d | [
"MIT"
] | null | null | null | tests/source/github_org_test.py | KindDragon/all-repos | 88f50d7bf10247bb14dd82b6f8b18957b2a9941d | [
"MIT"
] | 1 | 2022-03-31T04:09:55.000Z | 2022-03-31T04:09:55.000Z | from __future__ import annotations
import json
import pytest
from all_repos.source.github_org import list_repos
from all_repos.source.github_org import Settings
from testing.mock_http import FakeResponse
from testing.mock_http import urlopen_side_effect
from tests.source.github_test import _resource_json
@pytest.fixture
def repos_response(mock_urlopen):
mock_urlopen.side_effect = urlopen_side_effect({
'https://api.github.com/orgs/sass/repos?per_page=100': FakeResponse(
json.dumps([_resource_json('libsass-python')]).encode(),
),
})
def test_list_repos(repos_response):
settings = Settings('key', 'sass')
ret = list_repos(settings)
expected = {'sass/libsass-python': 'git@github.com:sass/libsass-python'}
assert ret == expected
def test_settings_repr():
assert repr(Settings('key', 'sass')) == (
'Settings(\n'
' api_key=...,\n'
" org='sass',\n"
' collaborator=True,\n'
' forks=False,\n'
' private=False,\n'
' archived=False,\n'
" base_url='https://api.github.com',\n"
')'
)
| 27.142857 | 76 | 0.65614 |
7956bf0b196b8080912aa802b1092ed0c2e3163a | 29,549 | py | Python | research/object_detection/protos/optimizer_pb2.py | beraterenterzi/tf_car_licence | 3872a0539d5472b9241d8ff5170aecebb9eac5e6 | [
"MIT"
] | 3 | 2022-03-05T10:46:52.000Z | 2022-03-22T06:00:05.000Z | research/object_detection/protos/optimizer_pb2.py | beraterenterzi/tf_car_licence | 3872a0539d5472b9241d8ff5170aecebb9eac5e6 | [
"MIT"
] | null | null | null | research/object_detection/protos/optimizer_pb2.py | beraterenterzi/tf_car_licence | 3872a0539d5472b9241d8ff5170aecebb9eac5e6 | [
"MIT"
] | 1 | 2019-10-04T21:46:56.000Z | 2019-10-04T21:46:56.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/optimizer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/optimizer.proto',
package='object_detection.protos',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\'object_detection/protos/optimizer.proto\x12\x17object_detection.protos\"\xb5\x02\n\tOptimizer\x12G\n\x12rms_prop_optimizer\x18\x01 \x01(\x0b\x32).object_detection.protos.RMSPropOptimizerH\x00\x12H\n\x12momentum_optimizer\x18\x02 \x01(\x0b\x32*.object_detection.protos.MomentumOptimizerH\x00\x12@\n\x0e\x61\x64\x61m_optimizer\x18\x03 \x01(\x0b\x32&.object_detection.protos.AdamOptimizerH\x00\x12 \n\x12use_moving_average\x18\x04 \x01(\x08:\x04true\x12$\n\x14moving_average_decay\x18\x05 \x01(\x02:\x06\x30.9999B\x0b\n\toptimizer\"\x9f\x01\n\x10RMSPropOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12%\n\x18momentum_optimizer_value\x18\x02 \x01(\x02:\x03\x30.9\x12\x12\n\x05\x64\x65\x63\x61y\x18\x03 \x01(\x02:\x03\x30.9\x12\x12\n\x07\x65psilon\x18\x04 \x01(\x02:\x01\x31\"x\n\x11MomentumOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12%\n\x18momentum_optimizer_value\x18\x02 \x01(\x02:\x03\x30.9\"e\n\rAdamOptimizer\x12<\n\rlearning_rate\x18\x01 \x01(\x0b\x32%.object_detection.protos.LearningRate\x12\x16\n\x07\x65psilon\x18\x02 \x01(\x02:\x05\x31\x65-08\"\x80\x03\n\x0cLearningRate\x12O\n\x16\x63onstant_learning_rate\x18\x01 \x01(\x0b\x32-.object_detection.protos.ConstantLearningRateH\x00\x12`\n\x1f\x65xponential_decay_learning_rate\x18\x02 \x01(\x0b\x32\x35.object_detection.protos.ExponentialDecayLearningRateH\x00\x12T\n\x19manual_step_learning_rate\x18\x03 \x01(\x0b\x32/.object_detection.protos.ManualStepLearningRateH\x00\x12V\n\x1a\x63osine_decay_learning_rate\x18\x04 \x01(\x0b\x32\x30.object_detection.protos.CosineDecayLearningRateH\x00\x42\x0f\n\rlearning_rate\"4\n\x14\x43onstantLearningRate\x12\x1c\n\rlearning_rate\x18\x01 \x01(\x02:\x05\x30.002\"\xef\x01\n\x1c\x45xponentialDecayLearningRate\x12$\n\x15initial_learning_rate\x18\x01 \x01(\x02:\x05\x30.002\x12\x1c\n\x0b\x64\x65\x63\x61y_steps\x18\x02 
\x01(\r:\x07\x34\x30\x30\x30\x30\x30\x30\x12\x1a\n\x0c\x64\x65\x63\x61y_factor\x18\x03 \x01(\x02:\x04\x30.95\x12\x17\n\tstaircase\x18\x04 \x01(\x08:\x04true\x12\x1f\n\x14\x62urnin_learning_rate\x18\x05 \x01(\x02:\x01\x30\x12\x17\n\x0c\x62urnin_steps\x18\x06 \x01(\r:\x01\x30\x12\x1c\n\x11min_learning_rate\x18\x07 \x01(\x02:\x01\x30\"\xf1\x01\n\x16ManualStepLearningRate\x12$\n\x15initial_learning_rate\x18\x01 \x01(\x02:\x05\x30.002\x12V\n\x08schedule\x18\x02 \x03(\x0b\x32\x44.object_detection.protos.ManualStepLearningRate.LearningRateSchedule\x12\x15\n\x06warmup\x18\x03 \x01(\x08:\x05\x66\x61lse\x1a\x42\n\x14LearningRateSchedule\x12\x0c\n\x04step\x18\x01 \x01(\r\x12\x1c\n\rlearning_rate\x18\x02 \x01(\x02:\x05\x30.002\"\xbe\x01\n\x17\x43osineDecayLearningRate\x12!\n\x12learning_rate_base\x18\x01 \x01(\x02:\x05\x30.002\x12\x1c\n\x0btotal_steps\x18\x02 \x01(\r:\x07\x34\x30\x30\x30\x30\x30\x30\x12$\n\x14warmup_learning_rate\x18\x03 \x01(\x02:\x06\x30.0002\x12\x1b\n\x0cwarmup_steps\x18\x04 \x01(\r:\x05\x31\x30\x30\x30\x30\x12\x1f\n\x14hold_base_rate_steps\x18\x05 \x01(\r:\x01\x30')
)
_OPTIMIZER = _descriptor.Descriptor(
name='Optimizer',
full_name='object_detection.protos.Optimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rms_prop_optimizer', full_name='object_detection.protos.Optimizer.rms_prop_optimizer', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum_optimizer', full_name='object_detection.protos.Optimizer.momentum_optimizer', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='adam_optimizer', full_name='object_detection.protos.Optimizer.adam_optimizer', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_moving_average', full_name='object_detection.protos.Optimizer.use_moving_average', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='moving_average_decay', full_name='object_detection.protos.Optimizer.moving_average_decay', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optimizer', full_name='object_detection.protos.Optimizer.optimizer',
index=0, containing_type=None, fields=[]),
],
serialized_start=69,
serialized_end=378,
)
_RMSPROPOPTIMIZER = _descriptor.Descriptor(
name='RMSPropOptimizer',
full_name='object_detection.protos.RMSPropOptimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.RMSPropOptimizer.learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum_optimizer_value', full_name='object_detection.protos.RMSPropOptimizer.momentum_optimizer_value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='object_detection.protos.RMSPropOptimizer.decay', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='object_detection.protos.RMSPropOptimizer.epsilon', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=381,
serialized_end=540,
)
_MOMENTUMOPTIMIZER = _descriptor.Descriptor(
name='MomentumOptimizer',
full_name='object_detection.protos.MomentumOptimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.MomentumOptimizer.learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum_optimizer_value', full_name='object_detection.protos.MomentumOptimizer.momentum_optimizer_value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=542,
serialized_end=662,
)
_ADAMOPTIMIZER = _descriptor.Descriptor(
name='AdamOptimizer',
full_name='object_detection.protos.AdamOptimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.AdamOptimizer.learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='object_detection.protos.AdamOptimizer.epsilon', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-08),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=664,
serialized_end=765,
)
_LEARNINGRATE = _descriptor.Descriptor(
name='LearningRate',
full_name='object_detection.protos.LearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='constant_learning_rate', full_name='object_detection.protos.LearningRate.constant_learning_rate', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exponential_decay_learning_rate', full_name='object_detection.protos.LearningRate.exponential_decay_learning_rate', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='manual_step_learning_rate', full_name='object_detection.protos.LearningRate.manual_step_learning_rate', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cosine_decay_learning_rate', full_name='object_detection.protos.LearningRate.cosine_decay_learning_rate', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='learning_rate', full_name='object_detection.protos.LearningRate.learning_rate',
index=0, containing_type=None, fields=[]),
],
serialized_start=768,
serialized_end=1152,
)
_CONSTANTLEARNINGRATE = _descriptor.Descriptor(
name='ConstantLearningRate',
full_name='object_detection.protos.ConstantLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.ConstantLearningRate.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1154,
serialized_end=1206,
)
_EXPONENTIALDECAYLEARNINGRATE = _descriptor.Descriptor(
name='ExponentialDecayLearningRate',
full_name='object_detection.protos.ExponentialDecayLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initial_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.initial_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay_steps', full_name='object_detection.protos.ExponentialDecayLearningRate.decay_steps', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4000000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay_factor', full_name='object_detection.protos.ExponentialDecayLearningRate.decay_factor', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.95),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='staircase', full_name='object_detection.protos.ExponentialDecayLearningRate.staircase', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='burnin_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.burnin_learning_rate', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='burnin_steps', full_name='object_detection.protos.ExponentialDecayLearningRate.burnin_steps', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_learning_rate', full_name='object_detection.protos.ExponentialDecayLearningRate.min_learning_rate', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1209,
serialized_end=1448,
)
_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE = _descriptor.Descriptor(
name='LearningRateSchedule',
full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step', full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule.step', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='object_detection.protos.ManualStepLearningRate.LearningRateSchedule.learning_rate', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1626,
serialized_end=1692,
)
_MANUALSTEPLEARNINGRATE = _descriptor.Descriptor(
name='ManualStepLearningRate',
full_name='object_detection.protos.ManualStepLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initial_learning_rate', full_name='object_detection.protos.ManualStepLearningRate.initial_learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schedule', full_name='object_detection.protos.ManualStepLearningRate.schedule', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='warmup', full_name='object_detection.protos.ManualStepLearningRate.warmup', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1451,
serialized_end=1692,
)
_COSINEDECAYLEARNINGRATE = _descriptor.Descriptor(
name='CosineDecayLearningRate',
full_name='object_detection.protos.CosineDecayLearningRate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate_base', full_name='object_detection.protos.CosineDecayLearningRate.learning_rate_base', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_steps', full_name='object_detection.protos.CosineDecayLearningRate.total_steps', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4000000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='warmup_learning_rate', full_name='object_detection.protos.CosineDecayLearningRate.warmup_learning_rate', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.0002),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='warmup_steps', full_name='object_detection.protos.CosineDecayLearningRate.warmup_steps', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=10000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hold_base_rate_steps', full_name='object_detection.protos.CosineDecayLearningRate.hold_base_rate_steps', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1695,
serialized_end=1885,
)
_OPTIMIZER.fields_by_name['rms_prop_optimizer'].message_type = _RMSPROPOPTIMIZER
_OPTIMIZER.fields_by_name['momentum_optimizer'].message_type = _MOMENTUMOPTIMIZER
_OPTIMIZER.fields_by_name['adam_optimizer'].message_type = _ADAMOPTIMIZER
_OPTIMIZER.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZER.fields_by_name['rms_prop_optimizer'])
_OPTIMIZER.fields_by_name['rms_prop_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer']
_OPTIMIZER.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZER.fields_by_name['momentum_optimizer'])
_OPTIMIZER.fields_by_name['momentum_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer']
_OPTIMIZER.oneofs_by_name['optimizer'].fields.append(
_OPTIMIZER.fields_by_name['adam_optimizer'])
_OPTIMIZER.fields_by_name['adam_optimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['optimizer']
_RMSPROPOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE
_MOMENTUMOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE
_ADAMOPTIMIZER.fields_by_name['learning_rate'].message_type = _LEARNINGRATE
_LEARNINGRATE.fields_by_name['constant_learning_rate'].message_type = _CONSTANTLEARNINGRATE
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'].message_type = _EXPONENTIALDECAYLEARNINGRATE
_LEARNINGRATE.fields_by_name['manual_step_learning_rate'].message_type = _MANUALSTEPLEARNINGRATE
_LEARNINGRATE.fields_by_name['cosine_decay_learning_rate'].message_type = _COSINEDECAYLEARNINGRATE
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['constant_learning_rate'])
_LEARNINGRATE.fields_by_name['constant_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'])
_LEARNINGRATE.fields_by_name['exponential_decay_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['manual_step_learning_rate'])
_LEARNINGRATE.fields_by_name['manual_step_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_LEARNINGRATE.oneofs_by_name['learning_rate'].fields.append(
_LEARNINGRATE.fields_by_name['cosine_decay_learning_rate'])
_LEARNINGRATE.fields_by_name['cosine_decay_learning_rate'].containing_oneof = _LEARNINGRATE.oneofs_by_name['learning_rate']
_MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE.containing_type = _MANUALSTEPLEARNINGRATE
_MANUALSTEPLEARNINGRATE.fields_by_name['schedule'].message_type = _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE
DESCRIPTOR.message_types_by_name['Optimizer'] = _OPTIMIZER
DESCRIPTOR.message_types_by_name['RMSPropOptimizer'] = _RMSPROPOPTIMIZER
DESCRIPTOR.message_types_by_name['MomentumOptimizer'] = _MOMENTUMOPTIMIZER
DESCRIPTOR.message_types_by_name['AdamOptimizer'] = _ADAMOPTIMIZER
DESCRIPTOR.message_types_by_name['LearningRate'] = _LEARNINGRATE
DESCRIPTOR.message_types_by_name['ConstantLearningRate'] = _CONSTANTLEARNINGRATE
DESCRIPTOR.message_types_by_name['ExponentialDecayLearningRate'] = _EXPONENTIALDECAYLEARNINGRATE
DESCRIPTOR.message_types_by_name['ManualStepLearningRate'] = _MANUALSTEPLEARNINGRATE
DESCRIPTOR.message_types_by_name['CosineDecayLearningRate'] = _COSINEDECAYLEARNINGRATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Optimizer = _reflection.GeneratedProtocolMessageType('Optimizer', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.Optimizer)
))
_sym_db.RegisterMessage(Optimizer)
RMSPropOptimizer = _reflection.GeneratedProtocolMessageType('RMSPropOptimizer', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RMSPropOptimizer)
))
_sym_db.RegisterMessage(RMSPropOptimizer)
MomentumOptimizer = _reflection.GeneratedProtocolMessageType('MomentumOptimizer', (_message.Message,), dict(
DESCRIPTOR = _MOMENTUMOPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MomentumOptimizer)
))
_sym_db.RegisterMessage(MomentumOptimizer)
AdamOptimizer = _reflection.GeneratedProtocolMessageType('AdamOptimizer', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZER,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.AdamOptimizer)
))
_sym_db.RegisterMessage(AdamOptimizer)
LearningRate = _reflection.GeneratedProtocolMessageType('LearningRate', (_message.Message,), dict(
DESCRIPTOR = _LEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.LearningRate)
))
_sym_db.RegisterMessage(LearningRate)
ConstantLearningRate = _reflection.GeneratedProtocolMessageType('ConstantLearningRate', (_message.Message,), dict(
DESCRIPTOR = _CONSTANTLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConstantLearningRate)
))
_sym_db.RegisterMessage(ConstantLearningRate)
ExponentialDecayLearningRate = _reflection.GeneratedProtocolMessageType('ExponentialDecayLearningRate', (_message.Message,), dict(
DESCRIPTOR = _EXPONENTIALDECAYLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ExponentialDecayLearningRate)
))
_sym_db.RegisterMessage(ExponentialDecayLearningRate)
ManualStepLearningRate = _reflection.GeneratedProtocolMessageType('ManualStepLearningRate', (_message.Message,), dict(
LearningRateSchedule = _reflection.GeneratedProtocolMessageType('LearningRateSchedule', (_message.Message,), dict(
DESCRIPTOR = _MANUALSTEPLEARNINGRATE_LEARNINGRATESCHEDULE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ManualStepLearningRate.LearningRateSchedule)
))
,
DESCRIPTOR = _MANUALSTEPLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ManualStepLearningRate)
))
_sym_db.RegisterMessage(ManualStepLearningRate)
_sym_db.RegisterMessage(ManualStepLearningRate.LearningRateSchedule)
CosineDecayLearningRate = _reflection.GeneratedProtocolMessageType('CosineDecayLearningRate', (_message.Message,), dict(
DESCRIPTOR = _COSINEDECAYLEARNINGRATE,
__module__ = 'object_detection.protos.optimizer_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.CosineDecayLearningRate)
))
_sym_db.RegisterMessage(CosineDecayLearningRate)
# @@protoc_insertion_point(module_scope)
| 46.533858 | 3,028 | 0.776913 |
7956bf2ecb980f3c0b27da958de7b678ade7e74e | 1,029 | py | Python | pure_protobuf/io_.py | yoyonel/protobuf | ef03946ffa1a4f18791490972e4b2f99e1888cdf | [
"MIT"
] | 132 | 2015-07-25T14:40:39.000Z | 2022-03-30T19:37:01.000Z | pure_protobuf/io_.py | yoyonel/protobuf | ef03946ffa1a4f18791490972e4b2f99e1888cdf | [
"MIT"
] | 36 | 2016-09-02T20:03:16.000Z | 2022-03-09T16:52:11.000Z | pure_protobuf/io_.py | yoyonel/protobuf | ef03946ffa1a4f18791490972e4b2f99e1888cdf | [
"MIT"
] | 15 | 2015-02-27T03:00:04.000Z | 2022-02-07T10:55:09.000Z | """
`pure-protobuf` contributors © 2011-2019
"""
from abc import ABC, abstractmethod
from io import BytesIO
from typing import Any, BinaryIO, Union
# Type hinting doesn't recognize `BytesIO` as an instance of `BinaryIO`.
IO = Union[BinaryIO, BytesIO]
class Dumps(ABC):
@abstractmethod
def dump(self, value: Any, io: IO):
"""
Serializes a value into a file-like object.
"""
raise NotImplementedError()
def dumps(self, value: Any) -> bytes:
"""
Serializes a value into a byte string
"""
with BytesIO() as io:
self.dump(value, io)
return io.getvalue()
class Loads(ABC):
@abstractmethod
def load(self, io: IO) -> Any:
"""
Deserializes a value from a file-like object.
"""
raise NotImplementedError()
def loads(self, bytes_: bytes) -> Any:
"""
Deserializes a value from a byte string.
"""
with BytesIO(bytes_) as io:
return self.load(io)
| 23.386364 | 72 | 0.586006 |
7956bf424119502f8edba0d4261928617f89b343 | 6,248 | py | Python | python-api/terrain_flattening.py | EskedarT/gee_s1_ard | 2d463c2e94f4f1674d6284f03cb90201d1f3f717 | [
"MIT"
] | 1 | 2021-05-19T09:31:09.000Z | 2021-05-19T09:31:09.000Z | python-api/terrain_flattening.py | EskedarT/gee_s1_ard | 2d463c2e94f4f1674d6284f03cb90201d1f3f717 | [
"MIT"
] | null | null | null | python-api/terrain_flattening.py | EskedarT/gee_s1_ard | 2d463c2e94f4f1674d6284f03cb90201d1f3f717 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Version: v1.0
Date: 2021-03-12
Description: This code is adopted from
Vollrath, A., Mullissa, A., & Reiche, J. (2020).
Angular-Based Radiometric Slope Correction for Sentinel-1 on Google Earth Engine.
Remote Sensing, 12(11), [1867]. https://doi.org/10.3390/rs12111867
"""
import ee
import math
# ---------------------------------------------------------------------------//
# Terrain Flattening
# ---------------------------------------------------------------------------//
def slope_correction(collection, TERRAIN_FLATTENING_MODEL
,DEM, TERRAIN_FLATTENING_ADDITIONAL_LAYOVER_SHADOW_BUFFER):
"""
Parameters
----------
collection : ee image collection
DESCRIPTION.
TERRAIN_FLATTENING_MODEL : string
The radiometric terrain normalization model, either volume or direct
DEM : ee asset
The DEM to be used
TERRAIN_FLATTENING_ADDITIONAL_LAYOVER_SHADOW_BUFFER : integer
The additional buffer to account for the passive layover and shadow
Returns
-------
ee image collection
An image collection where radiometric terrain normalization is
implemented on each image
"""
ninetyRad = ee.Image.constant(90).multiply(math.pi/180)
def _volumetric_model_SCF(theta_iRad, alpha_rRad):
"""
Parameters
----------
theta_iRad : ee.Image
The scene incidence angle
alpha_rRad : ee.Image
Slope steepness in range
Returns
-------
ee.Image
Applies the volume model in the radiometric terrain normalization
"""
# Volume model
nominator = (ninetyRad.subtract(theta_iRad).add(alpha_rRad)).tan()
denominator = (ninetyRad.subtract(theta_iRad)).tan()
return nominator.divide(denominator)
def _direct_model_SCF(theta_iRad, alpha_rRad, alpha_azRad):
"""
Parameters
----------
theta_iRad : ee.Image
The scene incidence angle
alpha_rRad : ee.Image
Slope steepness in range
Returns
-------
ee.Image
Applies the direct model in the radiometric terrain normalization
"""
# Surface model
nominator = (ninetyRad.subtract(theta_iRad)).cos()
denominator = alpha_azRad.cos().multiply((ninetyRad.subtract(theta_iRad).add(alpha_rRad)).cos())
return nominator.divide(denominator)
def _erode(image, distance):
"""
Parameters
----------
image : ee.Image
Image to apply the erode function to
distance : integer
The distance to apply the buffer
Returns
-------
ee.Image
An image that is masked to conpensate for passive layover
and shadow depending on the given distance
"""
# buffer function (thanks Noel)
d = (image.Not().unmask(1).fastDistanceTransform(30).sqrt()
.multiply(ee.Image.pixelArea().sqrt()))
return image.updateMask(d.gt(distance))
def _masking(alpha_rRad, theta_iRad, buffer):
"""
Parameters
----------
alpha_rRad : ee.Image
Slope steepness in range
theta_iRad : ee.Image
The scene incidence angle
buffer : TYPE
DESCRIPTION.
Returns
-------
ee.Image
An image that is masked to conpensate for passive layover
and shadow depending on the given distance
"""
# calculate masks
# layover, where slope > radar viewing angle
layover = alpha_rRad.lt(theta_iRad).rename('layover')
# shadow
shadow = alpha_rRad.gt(ee.Image.constant(-1)
.multiply(ninetyRad.subtract(theta_iRad))).rename('shadow')
# combine layover and shadow
mask = layover.And(shadow)
# add buffer to final mask
if (buffer > 0):
mask = _erode(mask, buffer)
return mask.rename('no_data_mask')
def _correct(image):
"""
Parameters
----------
image : ee.Image
Image to apply the radiometric terrain normalization to
Returns
-------
ee.Image
Radiometrically terrain corrected image
"""
bandNames = image.bandNames()
# calculate the look direction
heading = ee.Terrain.aspect(image.select('angle')).reduceRegion(ee.Reducer.mean(), image.geometry(), 1000).get('aspect')
# the numbering follows the article chapters
# 2.1.1 Radar geometry
theta_iRad = image.select('angle').multiply(math.pi/180)
phi_iRad = ee.Image.constant(heading).multiply(math.pi/180)
# 2.1.2 Terrain geometry
alpha_sRad = ee.Terrain.slope(DEM).select('slope').multiply(math.pi/180)
phi_sRad = ee.Terrain.aspect(DEM).select('aspect').multiply(math.pi/180)
# 2.1.3 Model geometry
# reduce to 3 angle
phi_rRad = phi_iRad.subtract(phi_sRad)
# slope steepness in range (eq. 2)
alpha_rRad = (alpha_sRad.tan().multiply(phi_rRad.cos())).atan()
# slope steepness in azimuth (eq 3)
alpha_azRad = (alpha_sRad.tan().multiply(phi_rRad.sin())).atan()
# 2.2
# Gamma_nought
gamma0 = image.divide(theta_iRad.cos())
if (TERRAIN_FLATTENING_MODEL == 'VOLUME'):
# Volumetric Model
scf = _volumetric_model_SCF(theta_iRad, alpha_rRad)
if (TERRAIN_FLATTENING_MODEL == 'DIRECT'):
scf = _direct_model_SCF(theta_iRad, alpha_rRad, alpha_azRad)
# apply model for Gamm0
gamma0_flat = gamma0.divide(scf)
# get Layover/Shadow mask
mask = _masking(alpha_rRad, theta_iRad, TERRAIN_FLATTENING_ADDITIONAL_LAYOVER_SHADOW_BUFFER)
output = gamma0_flat.mask(mask).rename(bandNames).copyProperties(image)
output = ee.Image(output).addBands(image.select('angle'), None, True)
return output.set('system:time_start', image.get('system:time_start'))
return collection.map(_correct)
| 30.330097 | 128 | 0.589149 |
7956c0bb34cf16180ddb4260d356a7683b9c5658 | 26,292 | py | Python | lxml/html/clean.py | kmbn/zeit-now-filter-feeds | a138b5d5f4b5113ad0f64dea9db1f1fd0d2cf687 | [
"MIT"
] | null | null | null | lxml/html/clean.py | kmbn/zeit-now-filter-feeds | a138b5d5f4b5113ad0f64dea9db1f1fd0d2cf687 | [
"MIT"
] | null | null | null | lxml/html/clean.py | kmbn/zeit-now-filter-feeds | a138b5d5f4b5113ad0f64dea9db1f1fd0d2cf687 | [
"MIT"
] | null | null | null | """A cleanup tool for HTML.
Removes unwanted tags and content. See the `Cleaner` class for
details.
"""
import re
import copy
try:
from urlparse import urlsplit
except ImportError:
# Python 3
from urllib.parse import urlsplit
from lxml import etree
from lxml.html import defs
from lxml.html import fromstring, XHTML_NAMESPACE
from lxml.html import xhtml_to_html, _transform_result
try:
unichr
except NameError:
# Python 3
unichr = chr
try:
unicode
except NameError:
# Python 3
unicode = str
try:
bytes
except NameError:
# Python < 2.6
bytes = str
try:
basestring
except NameError:
basestring = (str, bytes)
__all__ = [
"clean_html",
"clean",
"Cleaner",
"autolink",
"autolink_html",
"word_break",
"word_break_html",
]
# Look at http://code.sixapart.com/trac/livejournal/browser/trunk/cgi-bin/cleanhtml.pl
# Particularly the CSS cleaning; most of the tag cleaning is integrated now
# I have multiple kinds of schemes searched; but should schemes be
# whitelisted instead?
# max height?
# remove images? Also in CSS? background attribute?
# Some way to whitelist object, iframe, etc (e.g., if you want to
# allow *just* embedded YouTube movies)
# Log what was deleted and why?
# style="behavior: ..." might be bad in IE?
# Should we have something for just <meta http-equiv>? That's the worst of the
# metas.
# UTF-7 detections? Example:
# <HEAD><META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=UTF-7"> </HEAD>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4-
# you don't always have to have the charset set, if the page has no charset
# and there's UTF7-like code in it.
# Look at these tests: http://htmlpurifier.org/live/smoketests/xssAttacks.php
# This is an IE-specific construct you can have in a stylesheet to
# run some Javascript:
_css_javascript_re = re.compile(r"expression\s*\(.*?\)", re.S | re.I)
# Do I have to worry about @\nimport?
_css_import_re = re.compile(r"@\s*import", re.I)
# All kinds of schemes besides just javascript: that can cause
# execution:
_is_image_dataurl = re.compile(r"^data:image/.+;base64", re.I).search
_is_possibly_malicious_scheme = re.compile(
r"(?:javascript|jscript|livescript|vbscript|data|about|mocha):", re.I
).search
def _is_javascript_scheme(s):
if _is_image_dataurl(s):
return None
return _is_possibly_malicious_scheme(s)
_substitute_whitespace = re.compile(r"[\s\x00-\x08\x0B\x0C\x0E-\x19]+").sub
# FIXME: should data: be blocked?
# FIXME: check against: http://msdn2.microsoft.com/en-us/library/ms537512.aspx
_conditional_comment_re = re.compile(r"\[if[\s\n\r]+.*?][\s\n\r]*>", re.I | re.S)
_find_styled_elements = etree.XPath("descendant-or-self::*[@style]")
_find_external_links = etree.XPath(
(
"descendant-or-self::a [normalize-space(@href) and substring(normalize-space(@href),1,1) != '#'] |"
"descendant-or-self::x:a[normalize-space(@href) and substring(normalize-space(@href),1,1) != '#']"
),
namespaces={"x": XHTML_NAMESPACE},
)
class Cleaner(object):
"""
Instances cleans the document of each of the possible offending
elements. The cleaning is controlled by attributes; you can
override attributes in a subclass, or set them in the constructor.
``scripts``:
Removes any ``<script>`` tags.
``javascript``:
Removes any Javascript, like an ``onclick`` attribute. Also removes stylesheets
as they could contain Javascript.
``comments``:
Removes any comments.
``style``:
Removes any style tags.
``inline_style``
Removes any style attributes. Defaults to the value of the ``style`` option.
``links``:
Removes any ``<link>`` tags
``meta``:
Removes any ``<meta>`` tags
``page_structure``:
Structural parts of a page: ``<head>``, ``<html>``, ``<title>``.
``processing_instructions``:
Removes any processing instructions.
``embedded``:
Removes any embedded objects (flash, iframes)
``frames``:
Removes any frame-related tags
``forms``:
Removes any form tags
``annoying_tags``:
Tags that aren't *wrong*, but are annoying. ``<blink>`` and ``<marquee>``
``remove_tags``:
A list of tags to remove. Only the tags will be removed,
their content will get pulled up into the parent tag.
``kill_tags``:
A list of tags to kill. Killing also removes the tag's content,
i.e. the whole subtree, not just the tag itself.
``allow_tags``:
A list of tags to include (default include all).
``remove_unknown_tags``:
Remove any tags that aren't standard parts of HTML.
``safe_attrs_only``:
If true, only include 'safe' attributes (specifically the list
from the feedparser HTML sanitisation web site).
``safe_attrs``:
A set of attribute names to override the default list of attributes
considered 'safe' (when safe_attrs_only=True).
``add_nofollow``:
If true, then any <a> tags will have ``rel="nofollow"`` added to them.
``host_whitelist``:
A list or set of hosts that you can use for embedded content
(for content like ``<object>``, ``<link rel="stylesheet">``, etc).
You can also implement/override the method
``allow_embedded_url(el, url)`` or ``allow_element(el)`` to
implement more complex rules for what can be embedded.
Anything that passes this test will be shown, regardless of
the value of (for instance) ``embedded``.
Note that this parameter might not work as intended if you do not
make the links absolute before doing the cleaning.
Note that you may also need to set ``whitelist_tags``.
``whitelist_tags``:
A set of tags that can be included with ``host_whitelist``.
The default is ``iframe`` and ``embed``; you may wish to
include other tags like ``script``, or you may want to
implement ``allow_embedded_url`` for more control. Set to None to
include all tags.
This modifies the document *in place*.
"""
scripts = True
javascript = True
comments = True
style = False
inline_style = None
links = True
meta = True
page_structure = True
processing_instructions = True
embedded = True
frames = True
forms = True
annoying_tags = True
remove_tags = None
allow_tags = None
kill_tags = None
remove_unknown_tags = True
safe_attrs_only = True
safe_attrs = defs.safe_attrs
add_nofollow = False
host_whitelist = ()
whitelist_tags = set(["iframe", "embed"])
def __init__(self, **kw):
for name, value in kw.items():
if not hasattr(self, name):
raise TypeError("Unknown parameter: %s=%r" % (name, value))
setattr(self, name, value)
if self.inline_style is None and "inline_style" not in kw:
self.inline_style = self.style
# Used to lookup the primary URL for a given tag that is up for
# removal:
_tag_link_attrs = dict(
script="src",
link="href",
# From: http://java.sun.com/j2se/1.4.2/docs/guide/misc/applet.html
# From what I can tell, both attributes can contain a link:
applet=["code", "object"],
iframe="src",
embed="src",
layer="src",
# FIXME: there doesn't really seem like a general way to figure out what
# links an <object> tag uses; links often go in <param> tags with values
# that we don't really know. You'd have to have knowledge about specific
# kinds of plugins (probably keyed off classid), and match against those.
##object=?,
# FIXME: not looking at the action currently, because it is more complex
# than than -- if you keep the form, you should keep the form controls.
##form='action',
a="href",
)
def __call__(self, doc):
"""
Cleans the document.
"""
if hasattr(doc, "getroot"):
# ElementTree instance, instead of an element
doc = doc.getroot()
# convert XHTML to HTML
xhtml_to_html(doc)
# Normalize a case that IE treats <image> like <img>, and that
# can confuse either this step or later steps.
for el in doc.iter("image"):
el.tag = "img"
if not self.comments:
# Of course, if we were going to kill comments anyway, we don't
# need to worry about this
self.kill_conditional_comments(doc)
kill_tags = set(self.kill_tags or ())
remove_tags = set(self.remove_tags or ())
allow_tags = set(self.allow_tags or ())
if self.scripts:
kill_tags.add("script")
if self.safe_attrs_only:
safe_attrs = set(self.safe_attrs)
for el in doc.iter(etree.Element):
attrib = el.attrib
for aname in attrib.keys():
if aname not in safe_attrs:
del attrib[aname]
if self.javascript:
if not (self.safe_attrs_only and self.safe_attrs == defs.safe_attrs):
# safe_attrs handles events attributes itself
for el in doc.iter(etree.Element):
attrib = el.attrib
for aname in attrib.keys():
if aname.startswith("on"):
del attrib[aname]
doc.rewrite_links(self._remove_javascript_link, resolve_base_href=False)
# If we're deleting style then we don't have to remove JS links
# from styles, otherwise...
if not self.inline_style:
for el in _find_styled_elements(doc):
old = el.get("style")
new = _css_javascript_re.sub("", old)
new = _css_import_re.sub("", new)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
del el.attrib["style"]
elif new != old:
el.set("style", new)
if not self.style:
for el in list(doc.iter("style")):
if el.get("type", "").lower().strip() == "text/javascript":
el.drop_tree()
continue
old = el.text or ""
new = _css_javascript_re.sub("", old)
# The imported CSS can do anything; we just can't allow:
new = _css_import_re.sub("", old)
if self._has_sneaky_javascript(new):
# Something tricky is going on...
el.text = "/* deleted */"
elif new != old:
el.text = new
if self.comments or self.processing_instructions:
# FIXME: why either? I feel like there's some obscure reason
# because you can put PIs in comments...? But I've already
# forgotten it
kill_tags.add(etree.Comment)
if self.processing_instructions:
kill_tags.add(etree.ProcessingInstruction)
if self.style:
kill_tags.add("style")
if self.inline_style:
etree.strip_attributes(doc, "style")
if self.links:
kill_tags.add("link")
elif self.style or self.javascript:
# We must get rid of included stylesheets if Javascript is not
# allowed, as you can put Javascript in them
for el in list(doc.iter("link")):
if "stylesheet" in el.get("rel", "").lower():
# Note this kills alternate stylesheets as well
if not self.allow_element(el):
el.drop_tree()
if self.meta:
kill_tags.add("meta")
if self.page_structure:
remove_tags.update(("head", "html", "title"))
if self.embedded:
# FIXME: is <layer> really embedded?
# We should get rid of any <param> tags not inside <applet>;
# These are not really valid anyway.
for el in list(doc.iter("param")):
found_parent = False
parent = el.getparent()
while parent is not None and parent.tag not in ("applet", "object"):
parent = parent.getparent()
if parent is None:
el.drop_tree()
kill_tags.update(("applet",))
# The alternate contents that are in an iframe are a good fallback:
remove_tags.update(("iframe", "embed", "layer", "object", "param"))
if self.frames:
# FIXME: ideally we should look at the frame links, but
# generally frames don't mix properly with an HTML
# fragment anyway.
kill_tags.update(defs.frame_tags)
if self.forms:
remove_tags.add("form")
kill_tags.update(("button", "input", "select", "textarea"))
if self.annoying_tags:
remove_tags.update(("blink", "marquee"))
_remove = []
_kill = []
for el in doc.iter():
if el.tag in kill_tags:
if self.allow_element(el):
continue
_kill.append(el)
elif el.tag in remove_tags:
if self.allow_element(el):
continue
_remove.append(el)
if _remove and _remove[0] == doc:
# We have to drop the parent-most tag, which we can't
# do. Instead we'll rewrite it:
el = _remove.pop(0)
el.tag = "div"
el.attrib.clear()
elif _kill and _kill[0] == doc:
# We have to drop the parent-most element, which we can't
# do. Instead we'll clear it:
el = _kill.pop(0)
if el.tag != "html":
el.tag = "div"
el.clear()
_kill.reverse() # start with innermost tags
for el in _kill:
el.drop_tree()
for el in _remove:
el.drop_tag()
if self.remove_unknown_tags:
if allow_tags:
raise ValueError(
"It does not make sense to pass in both allow_tags and remove_unknown_tags"
)
allow_tags = set(defs.tags)
if allow_tags:
bad = []
for el in doc.iter():
if el.tag not in allow_tags:
bad.append(el)
if bad:
if bad[0] is doc:
el = bad.pop(0)
el.tag = "div"
el.attrib.clear()
for el in bad:
el.drop_tag()
if self.add_nofollow:
for el in _find_external_links(doc):
if not self.allow_follow(el):
rel = el.get("rel")
if rel:
if "nofollow" in rel and " nofollow " in (" %s " % rel):
continue
rel = "%s nofollow" % rel
else:
rel = "nofollow"
el.set("rel", rel)
def allow_follow(self, anchor):
"""
Override to suppress rel="nofollow" on some anchors.
"""
return False
def allow_element(self, el):
if el.tag not in self._tag_link_attrs:
return False
attr = self._tag_link_attrs[el.tag]
if isinstance(attr, (list, tuple)):
for one_attr in attr:
url = el.get(one_attr)
if not url:
return False
if not self.allow_embedded_url(el, url):
return False
return True
else:
url = el.get(attr)
if not url:
return False
return self.allow_embedded_url(el, url)
def allow_embedded_url(self, el, url):
if self.whitelist_tags is not None and el.tag not in self.whitelist_tags:
return False
scheme, netloc, path, query, fragment = urlsplit(url)
netloc = netloc.lower().split(":", 1)[0]
if scheme not in ("http", "https"):
return False
if netloc in self.host_whitelist:
return True
return False
def kill_conditional_comments(self, doc):
"""
IE conditional comments basically embed HTML that the parser
doesn't normally see. We can't allow anything like that, so
we'll kill any comments that could be conditional.
"""
bad = []
self._kill_elements(
doc, lambda el: _conditional_comment_re.search(el.text), etree.Comment
)
def _kill_elements(self, doc, condition, iterate=None):
bad = []
for el in doc.iter(iterate):
if condition(el):
bad.append(el)
for el in bad:
el.drop_tree()
def _remove_javascript_link(self, link):
# links like "j a v a s c r i p t:" might be interpreted in IE
new = _substitute_whitespace("", link)
if _is_javascript_scheme(new):
# FIXME: should this be None to delete?
return ""
return link
_substitute_comments = re.compile(r"/\*.*?\*/", re.S).sub
def _has_sneaky_javascript(self, style):
"""
Depending on the browser, stuff like ``e x p r e s s i o n(...)``
can get interpreted, or ``expre/* stuff */ssion(...)``. This
checks for attempt to do stuff like this.
Typically the response will be to kill the entire style; if you
have just a bit of Javascript in the style another rule will catch
that and remove only the Javascript from the style; this catches
more sneaky attempts.
"""
style = self._substitute_comments("", style)
style = style.replace("\\", "")
style = _substitute_whitespace("", style)
style = style.lower()
if "javascript:" in style:
return True
if "expression(" in style:
return True
return False
def clean_html(self, html):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
self(doc)
return _transform_result(result_type, doc)
clean = Cleaner()
clean_html = clean.clean_html
############################################################
## Autolinking
############################################################
_link_regexes = [
re.compile(
r"(?P<body>https?://(?P<host>[a-z0-9._-]+)(?:/[/\-_.,a-z0-9%&?;=~]*)?(?:\([/\-_.,a-z0-9%&?;=~]*\))?)",
re.I,
),
# This is conservative, but autolinking can be a bit conservative:
re.compile(r"mailto:(?P<body>[a-z0-9._-]+@(?P<host>[a-z0-9_.-]+[a-z]))", re.I),
]
_avoid_elements = ["textarea", "pre", "code", "head", "select", "a"]
_avoid_hosts = [
re.compile(r"^localhost", re.I),
re.compile(r"\bexample\.(?:com|org|net)$", re.I),
re.compile(r"^127\.0\.0\.1$"),
]
_avoid_classes = ["nolink"]
def autolink(
el,
link_regexes=_link_regexes,
avoid_elements=_avoid_elements,
avoid_hosts=_avoid_hosts,
avoid_classes=_avoid_classes,
):
"""
Turn any URLs into links.
It will search for links identified by the given regular
expressions (by default mailto and http(s) links).
It won't link text in an element in avoid_elements, or an element
with a class in avoid_classes. It won't link to anything with a
host that matches one of the regular expressions in avoid_hosts
(default localhost and 127.0.0.1).
If you pass in an element, the element's tail will not be
substituted, only the contents of the element.
"""
if el.tag in avoid_elements:
return
class_name = el.get("class")
if class_name:
class_name = class_name.split()
for match_class in avoid_classes:
if match_class in class_name:
return
for child in list(el):
autolink(
child,
link_regexes=link_regexes,
avoid_elements=avoid_elements,
avoid_hosts=avoid_hosts,
avoid_classes=avoid_classes,
)
if child.tail:
text, tail_children = _link_text(
child.tail, link_regexes, avoid_hosts, factory=el.makeelement
)
if tail_children:
child.tail = text
index = el.index(child)
el[index + 1 : index + 1] = tail_children
if el.text:
text, pre_children = _link_text(
el.text, link_regexes, avoid_hosts, factory=el.makeelement
)
if pre_children:
el.text = text
el[:0] = pre_children
def _link_text(text, link_regexes, avoid_hosts, factory):
leading_text = ""
links = []
last_pos = 0
while 1:
best_match, best_pos = None, None
for regex in link_regexes:
regex_pos = last_pos
while 1:
match = regex.search(text, pos=regex_pos)
if match is None:
break
host = match.group("host")
for host_regex in avoid_hosts:
if host_regex.search(host):
regex_pos = match.end()
break
else:
break
if match is None:
continue
if best_pos is None or match.start() < best_pos:
best_match = match
best_pos = match.start()
if best_match is None:
# No more matches
if links:
assert not links[-1].tail
links[-1].tail = text
else:
assert not leading_text
leading_text = text
break
link = best_match.group(0)
end = best_match.end()
if link.endswith(".") or link.endswith(","):
# These punctuation marks shouldn't end a link
end -= 1
link = link[:-1]
prev_text = text[: best_match.start()]
if links:
assert not links[-1].tail
links[-1].tail = prev_text
else:
assert not leading_text
leading_text = prev_text
anchor = factory("a")
anchor.set("href", link)
body = best_match.group("body")
if not body:
body = link
if body.endswith(".") or body.endswith(","):
body = body[:-1]
anchor.text = body
links.append(anchor)
text = text[end:]
return leading_text, links
def autolink_html(html, *args, **kw):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
autolink(doc, *args, **kw)
return _transform_result(result_type, doc)
autolink_html.__doc__ = autolink.__doc__
############################################################
## Word wrapping
############################################################
_avoid_word_break_elements = ["pre", "textarea", "code"]
_avoid_word_break_classes = ["nobreak"]
def word_break(
el,
max_width=40,
avoid_elements=_avoid_word_break_elements,
avoid_classes=_avoid_word_break_classes,
break_character=unichr(0x200B),
):
"""
Breaks any long words found in the body of the text (not attributes).
Doesn't effect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
Breaks words by inserting ​, which is a unicode character
for Zero Width Space character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion
"""
# Character suggestion of ​ comes from:
# http://www.cs.tut.fi/~jkorpela/html/nobr.html
if el.tag in _avoid_word_break_elements:
return
class_name = el.get("class")
if class_name:
dont_break = False
class_name = class_name.split()
for avoid in avoid_classes:
if avoid in class_name:
dont_break = True
break
if dont_break:
return
if el.text:
el.text = _break_text(el.text, max_width, break_character)
for child in el:
word_break(
child,
max_width=max_width,
avoid_elements=avoid_elements,
avoid_classes=avoid_classes,
break_character=break_character,
)
if child.tail:
child.tail = _break_text(child.tail, max_width, break_character)
def word_break_html(html, *args, **kw):
result_type = type(html)
doc = fromstring(html)
word_break(doc, *args, **kw)
return _transform_result(result_type, doc)
def _break_text(text, max_width, break_character):
words = text.split()
for word in words:
if len(word) > max_width:
replacement = _insert_break(word, max_width, break_character)
text = text.replace(word, replacement)
return text
_break_prefer_re = re.compile(r"[^a-z]", re.I)
def _insert_break(word, width, break_character):
orig_word = word
result = ""
while len(word) > width:
start = word[:width]
breaks = list(_break_prefer_re.finditer(start))
if breaks:
last_break = breaks[-1]
# Only walk back up to 10 characters to find a nice break:
if last_break.end() > width - 10:
# FIXME: should the break character be at the end of the
# chunk, or the beginning of the next chunk?
start = word[: last_break.end()]
result += start + break_character
word = word[len(start) :]
result += word
return result
| 34.012937 | 133 | 0.56907 |
7956c0e0c17f091db89a1e7331a20b9b969050a5 | 422 | py | Python | usersettings/migrations/0013_auto_20200324_1134.py | christianwgd/photos | b0c3343325a556d25217e9678f6142d4dcb03f51 | [
"MIT"
] | null | null | null | usersettings/migrations/0013_auto_20200324_1134.py | christianwgd/photos | b0c3343325a556d25217e9678f6142d4dcb03f51 | [
"MIT"
] | 6 | 2021-03-19T20:39:25.000Z | 2022-02-10T16:18:00.000Z | usersettings/migrations/0013_auto_20200324_1134.py | christianwgd/photos | b0c3343325a556d25217e9678f6142d4dcb03f51 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-24 10:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usersettings', '0012_auto_20200310_1735'),
]
operations = [
migrations.AlterField(
model_name='theme',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='Name'),
),
]
| 22.210526 | 84 | 0.613744 |
7956c17dd42fe54ea5ef912f4a4659c833e2ff6b | 7,741 | py | Python | sunpy/net/_attrs.py | sashank27/sunpy | 72fbed58a47ba599c8729a955ddaf874f8b9b5b4 | [
"BSD-2-Clause"
] | 2 | 2020-07-02T13:01:42.000Z | 2020-08-27T20:05:31.000Z | sunpy/net/_attrs.py | sashank27/sunpy | 72fbed58a47ba599c8729a955ddaf874f8b9b5b4 | [
"BSD-2-Clause"
] | null | null | null | sunpy/net/_attrs.py | sashank27/sunpy | 72fbed58a47ba599c8729a955ddaf874f8b9b5b4 | [
"BSD-2-Clause"
] | null | null | null | """
Implementation of global attrs.
These are defined in here to keep the `sunpy.net.attrs` namespace clean, and to
prevent circular imports.
"""
import collections
import astropy.units as u
from sunpy.time import TimeRange, parse_time
from sunpy.time.time import _variables_for_parse_time_docstring
from sunpy.util.decorators import add_common_docstring
from .attr import Range, SimpleAttr
__all__ = ['Physobs', 'Resolution', 'Detector', 'Sample',
'Level', 'Instrument', 'Wavelength', 'Time']
@add_common_docstring(**_variables_for_parse_time_docstring())
class Time(Range):
"""
Specify the time range of the query.
Parameters
----------
start : {parse_time_types}
The start time in a format parseable by `~sunpy.time.parse_time` or
a `sunpy.time.TimeRange` object.
end : {parse_time_types}
The end time of the range.
near : {parse_time_types}
Return a singular record closest in time to this value as possible,
inside the start and end window. Note: not all providers support this
functionality.
"""
def __init__(self, start, end=None, near=None):
if end is None and not isinstance(start, TimeRange):
raise ValueError("Specify start and end or start has to be a TimeRange")
if isinstance(start, TimeRange):
self.start = start.start
self.end = start.end
else:
self.start = parse_time(start)
self.end = parse_time(end)
if self.start > self.end:
raise ValueError("End time must be after start time.")
self.near = None if near is None else parse_time(near)
super().__init__(self.start, self.end)
def __hash__(self):
if not (isinstance(self.start, collections.Hashable) and
isinstance(self.end, collections.Hashable)):
# The hash is the hash of the start and end time
return hash((self.start.jd1, self.start.jd2, self.start.scale,
self.end.jd1, self.end.jd2, self.end.scale))
else:
return super().__hash__()
def collides(self, other):
# Use exact type checking here, because otherwise it collides with all
# subclasses of itself which can have completely different search
# meanings.
return type(other) is type(self)
def __xor__(self, other):
if not isinstance(other, self.__class__):
raise TypeError
if self.near is not None or other.near is not None:
raise TypeError
return Range.__xor__(self, other)
def pad(self, timedelta):
return type(self)(self.start - timedelta, self.start + timedelta)
def __repr__(self):
return f'<sunpy.net.attrs.Time({self.start.iso}, {self.end.iso}{", " + self.near.iso if self.near else ""})>'
class Wavelength(Range):
    def __init__(self, wavemin, wavemax=None):
        """
        Specifies the wavelength or spectral energy range of the detector.

        Parameters
        ----------
        wavemin : `~astropy.units.Quantity`
            The lower bound of the range.
        wavemax : `~astropy.units.Quantity`, optional
            The upper bound of the range; defaults to the lower bound.

        Notes
        -----
        The VSO understands the 'wavelength' in one of three units:
        Angstroms, kHz or keV.  Any unit directly convertible to one of
        these is therefore valid input.
        """
        if wavemax is None:
            wavemax = wavemin
        bounds = [wavemin, wavemax]
        if not all(isinstance(bound, u.Quantity) for bound in bounds):
            raise TypeError("Wave inputs must be astropy Quantities")
        if not all(bound.isscalar for bound in bounds):
            raise ValueError("Both wavemin and wavemax must be scalar values")
        # The VSO only accepts Angstroms, kHz or keV; pick the first of
        # those units that the given quantity is convertible to.
        supported_units = [u.AA, u.kHz, u.keV]
        unit = next(
            (candidate for candidate in supported_units
             if wavemin.unit.is_equivalent(candidate)),
            None,
        )
        if unit is None:
            raise u.UnitsError(f"This unit is not convertable to any of {supported_units}")
        wavemin, wavemax = sorted([wavemin.to(unit), wavemax.to(unit)])
        self.unit = unit
        super().__init__(wavemin, wavemax)

    def collides(self, other):
        """Wavelength attrs collide with any Wavelength (or subclass) instance."""
        return isinstance(other, self.__class__)

    def __repr__(self):
        """Unambiguous representation mirroring the constructor call."""
        return f"<sunpy.net.attrs.Wavelength({self.min.value}, {self.max.value}, '{self.unit}')>"
class Instrument(SimpleAttr):
    """
    Specifies the Instrument name for the search.

    Parameters
    ----------
    value : `str`

    Notes
    -----
    More information about each instrument supported by the VSO may be
    found within the VSO Registry.  For a list of instruments see
    https://sdac.virtualsolar.org/cgi/show_details?keyword=INSTRUMENT.
    """
    def __init__(self, value):
        # Reject anything that is not a plain string up front.
        if isinstance(value, str):
            super().__init__(value)
        else:
            raise ValueError("Instrument names must be strings")
class Level(SimpleAttr):
    """
    Specifies the data processing level to search for. The data processing
    level is specified by the instrument PI. May not work with all archives.

    Parameters
    ----------
    value : `float` or `str`
        The value can be entered in one of three ways:

        #. May be entered as a string or any numeric type for equality matching
        #. May be a string of the format '(min) - (max)' for range matching
        #. May be a string of the form '(operator) (number)' where operator is\
           one of: lt gt le ge < > <= >=
    """
class Sample(SimpleAttr):
    """
    Time interval for data sampling.

    Parameters
    ----------
    value : `astropy.units.Quantity`
        A sampling rate convertible to seconds.
    """
    # quantity_input enforces the `value: u.s` annotation, i.e. the argument
    # must be convertible to seconds before the body runs.
    @u.quantity_input
    def __init__(self, value: u.s):
        super().__init__(value)
        # Store the plain numeric value in seconds, not the Quantity itself.
        self.value = value.to_value(u.s)
class Detector(SimpleAttr):
    """
    The detector from which the data comes from.

    Parameters
    ----------
    value : `str`
        Name of the detector to search for.
    """
class Resolution(SimpleAttr):
    """
    Resolution level of the data.

    Parameters
    ----------
    value : `float` or `str`
        The value can be entered in one of three ways:

        #. May be entered as a string or any numeric type for equality matching
        #. May be a string of the format '(min) - (max)' for range matching
        #. May be a string of the form '(operator) (number)' where operator is\
           one of: lt gt le ge < > <= >=

        This attribute is currently implemented for SDO/AIA and HMI only.
        The "resolution" is a function of the highest level of data available.
        If the CCD is 2048x2048, but is binned to 512x512 before downlink,
        the 512x512 product is designated as '1'. If a 2048x2048 and 512x512
        product are both available, the 512x512 product is designated '0.25'.

    References
    ----------
    Documentation in SSWIDL routine vso_search.pro.
    """
class Physobs(SimpleAttr):
    """
    Specifies the physical observable the VSO can search for.

    Parameters
    ----------
    value : `str`
        A keyword describing the observable in the data.

    Notes
    -----
    More information about the values of physobs used by the VSO
    registry can be found at
    https://sdac.virtualsolar.org/cgi/show_details?keyword=PHYSOBS.
    """
| 31.46748 | 117 | 0.629247 |
7956c2745cb5bf844a17956e09dbf41677336ed5 | 174 | py | Python | grl/algos/p2sro/p2sro_manager/__init__.py | indylab/xdo | 1ddd92aa56ba10fa468396de8f8824c83ba9d0ba | [
"MIT"
] | 12 | 2021-03-12T07:18:52.000Z | 2022-03-15T22:30:44.000Z | grl/algos/p2sro/p2sro_manager/__init__.py | indylab/xdo | 1ddd92aa56ba10fa468396de8f8824c83ba9d0ba | [
"MIT"
] | 1 | 2021-11-22T16:39:46.000Z | 2022-02-02T22:13:03.000Z | grl/algos/p2sro/p2sro_manager/__init__.py | indylab/xdo | 1ddd92aa56ba10fa468396de8f8824c83ba9d0ba | [
"MIT"
] | 4 | 2021-06-21T03:54:45.000Z | 2022-01-13T10:28:26.000Z | from .logger import P2SROManagerLogger, SimpleP2SROManagerLogger
from .p2sro_manager import P2SROManager
from .remote import P2SROManagerWithServer, RemoteP2SROManagerClient
| 43.5 | 68 | 0.890805 |
7956c2bfa3a57fbaba8f6002c297d5f3a64e9308 | 517 | py | Python | mplhep/cms.py | HDembinski/mplhep | 5ae7601bd8922074dfc1ee92fc81f590a9efa7d5 | [
"MIT"
] | null | null | null | mplhep/cms.py | HDembinski/mplhep | 5ae7601bd8922074dfc1ee92fc81f590a9efa7d5 | [
"MIT"
] | null | null | null | mplhep/cms.py | HDembinski/mplhep | 5ae7601bd8922074dfc1ee92fc81f590a9efa7d5 | [
"MIT"
] | null | null | null | # Log styles
from . import styles_cms as style
from . import label as label_base
from . label import lumitext
__all__ = [style, lumitext]
# Experiment wrappers:
def cmstext(text="", **kwargs):
return label_base._exptext("CMS", text=text, italic=(False, True), **kwargs)
def cmslabel(**kwargs):
return label_base._explabel(exp="CMS", italic=(False, True), **kwargs)
def text(*args, **kwargs):
return cmstext(*args, **kwargs)
def label(**kwargs):
return cmslabel(**kwargs)
| 22.478261 | 81 | 0.663443 |
7956c2f3bb51f27179325db906261b8228693070 | 6,891 | py | Python | ivy_tests/test_ivy/helpers.py | saurbhc/ivy | 20b327b4fab543b26ad5a18acf4deddd6e3c804b | [
"Apache-2.0"
] | 1 | 2022-02-15T02:07:07.000Z | 2022-02-15T02:07:07.000Z | ivy_tests/test_ivy/helpers.py | saurbhc/ivy | 20b327b4fab543b26ad5a18acf4deddd6e3c804b | [
"Apache-2.0"
] | 1 | 2022-03-08T13:29:20.000Z | 2022-03-08T13:29:20.000Z | ivy_tests/test_ivy/helpers.py | saurbhc/ivy | 20b327b4fab543b26ad5a18acf4deddd6e3c804b | [
"Apache-2.0"
] | null | null | null | """
Collection of helpers for ivy unit tests
"""
# global
import ivy
try:
import numpy as _np
except ImportError:
_np = None
try:
import jax.numpy as _jnp
except ImportError:
_jnp = None
try:
import tensorflow as _tf
_tf_version = float('.'.join(_tf.__version__.split('.')[0:2]))
if _tf_version >= 2.3:
# noinspection PyPep8Naming,PyUnresolvedReferences
from tensorflow.python.types.core import Tensor as tensor_type
else:
# noinspection PyPep8Naming
# noinspection PyProtectedMember,PyUnresolvedReferences
from tensorflow.python.framework.tensor_like import _TensorLike as tensor_type
physical_devices = _tf.config.list_physical_devices('GPU')
for device in physical_devices:
_tf.config.experimental.set_memory_growth(device, True)
except ImportError:
_tf = None
try:
import torch as _torch
except ImportError:
_torch = None
try:
import mxnet as _mx
import mxnet.ndarray as _mx_nd
except ImportError:
_mx = None
_mx_nd = None
def get_ivy_numpy():
    """Return ivy's numpy backend module, or None when it cannot be imported."""
    try:
        import ivy.functional.backends.numpy as backend
    except ImportError:
        return None
    return backend
def get_ivy_jax():
    """Return ivy's jax backend module, or None when it cannot be imported."""
    try:
        import ivy.functional.backends.jax as backend
    except ImportError:
        return None
    return backend
def get_ivy_tensorflow():
    """Return ivy's tensorflow backend module, or None when it cannot be imported."""
    try:
        import ivy.functional.backends.tensorflow as backend
    except ImportError:
        return None
    return backend
def get_ivy_torch():
    """Return ivy's torch backend module, or None when it cannot be imported."""
    try:
        import ivy.functional.backends.torch as backend
    except ImportError:
        return None
    return backend
def get_ivy_mxnet():
    """Return ivy's mxnet backend module, or None when it cannot be imported."""
    try:
        import ivy.functional.backends.mxnet as backend
    except ImportError:
        return None
    return backend
# Maps framework name -> zero-argument loader returning the ivy backend
# module (or None when that framework is unavailable).  'tensorflow_graph'
# reuses the same backend module as eager 'tensorflow'.
_ivy_fws_dict = {'numpy': lambda: get_ivy_numpy(),
                 'jax': lambda: get_ivy_jax(),
                 'tensorflow': lambda: get_ivy_tensorflow(),
                 'tensorflow_graph': lambda: get_ivy_tensorflow(),
                 'torch': lambda: get_ivy_torch(),
                 'mxnet': lambda: get_ivy_mxnet()}
# Container types that _convert_vars recurses into.
_iterable_types = [list, tuple, dict]
# Framework names to skip in frameworks()/calls(); mutated by exclude().
_excluded = []
def _convert_vars(vars_in, from_type, to_type_callable=None, keep_other=True, to_type=None):
    """Convert every element of `vars_in` that is an instance of `from_type`
    using `to_type_callable`, recursing into lists/tuples/dicts.

    Elements already of `to_type` are kept as-is; any other element is kept
    only when `keep_other` is true.  Returns a new list; `vars_in` is not
    mutated (though numpy inputs may be down-cast/copied before conversion).
    """
    new_vars = list()
    for var in vars_in:
        if type(var) in _iterable_types:
            # Recurse into containers.  NOTE(review): keep_other/to_type are
            # not forwarded here, so nested levels use the defaults -- confirm
            # this is intentional.
            return_val = _convert_vars(var, from_type, to_type_callable)
            new_vars.append(return_val)
        elif isinstance(var, from_type):
            if isinstance(var, _np.ndarray):
                # Frameworks under test use float32; down-cast float64 input.
                if var.dtype == _np.float64:
                    var = var.astype(_np.float32)
                # Negative strides are not supported by some frameworks, so
                # materialize a contiguous copy in that case.
                if bool(sum([stride < 0 for stride in var.strides])):
                    var = var.copy()
            if to_type_callable:
                new_vars.append(to_type_callable(var))
            else:
                raise Exception('Invalid. A conversion callable is required.')
        elif to_type is not None and isinstance(var, to_type):
            # Already in the target type: pass through unchanged.
            new_vars.append(var)
        elif keep_other:
            new_vars.append(var)
    return new_vars
def np_call(func, *args, **kwargs):
    """Invoke `func` with the numpy inputs unchanged and convert the result
    to numpy via ivy."""
    ret = func(*args, **kwargs)
    if isinstance(ret, (list, tuple)):
        return ivy.to_native(ret, nested=True)
    return ivy.to_numpy(ret)
def jnp_call(func, *args, **kwargs):
    """Invoke `func` with numpy args converted to jax arrays, returning
    numpy output(s)."""
    new_args = _convert_vars(args, _np.ndarray, _jnp.asarray)
    new_kw_vals = _convert_vars(kwargs.values(), _np.ndarray, _jnp.asarray)
    new_kwargs = dict(zip(kwargs.keys(), new_kw_vals))
    output = func(*new_args, **new_kwargs)
    if isinstance(output, tuple):
        return tuple(_convert_vars(output, (_jnp.ndarray, ivy.Array), ivy.to_numpy))
    else:
        # Wrap/unwrap the single output so the same conversion path is used.
        return _convert_vars([output], (_jnp.ndarray, ivy.Array), ivy.to_numpy)[0]
def tf_call(func, *args, **kwargs):
    """Invoke `func` with numpy args converted to tensorflow tensors,
    returning numpy output(s)."""
    new_args = _convert_vars(args, _np.ndarray, _tf.convert_to_tensor)
    new_kw_vals = _convert_vars(kwargs.values(), _np.ndarray, _tf.convert_to_tensor)
    new_kwargs = dict(zip(kwargs.keys(), new_kw_vals))
    output = func(*new_args, **new_kwargs)
    if isinstance(output, tuple):
        return tuple(_convert_vars(output, (tensor_type, ivy.Array), ivy.to_numpy))
    else:
        return _convert_vars([output], (tensor_type, ivy.Array), ivy.to_numpy)[0]
def tf_graph_call(func, *args, **kwargs):
    """Like tf_call, but runs `func` inside a @tf.function graph trace."""
    new_args = _convert_vars(args, _np.ndarray, _tf.convert_to_tensor)
    new_kw_vals = _convert_vars(kwargs.values(), _np.ndarray, _tf.convert_to_tensor)
    new_kwargs = dict(zip(kwargs.keys(), new_kw_vals))
    # Wrapping in tf.function exercises graph (non-eager) execution.
    @_tf.function
    def tf_func(*local_args, **local_kwargs):
        return func(*local_args, **local_kwargs)
    output = tf_func(*new_args, **new_kwargs)
    if isinstance(output, tuple):
        return tuple(_convert_vars(output, (tensor_type, ivy.Array), ivy.to_numpy))
    else:
        return _convert_vars([output], (tensor_type, ivy.Array), ivy.to_numpy)[0]
def torch_call(func, *args, **kwargs):
    """Invoke `func` with numpy args converted to torch tensors, returning
    numpy output(s)."""
    new_args = _convert_vars(args, _np.ndarray, _torch.from_numpy)
    new_kw_vals = _convert_vars(kwargs.values(), _np.ndarray, _torch.from_numpy)
    new_kwargs = dict(zip(kwargs.keys(), new_kw_vals))
    output = func(*new_args, **new_kwargs)
    if isinstance(output, tuple):
        return tuple(_convert_vars(output, (_torch.Tensor, ivy.Array), ivy.to_numpy))
    else:
        return _convert_vars([output], (_torch.Tensor, ivy.Array), ivy.to_numpy)[0]
def mx_call(func, *args, **kwargs):
    """Invoke `func` with numpy args converted to mxnet ndarrays, returning
    numpy output(s)."""
    new_args = _convert_vars(args, _np.ndarray, _mx_nd.array)
    new_kw_items = _convert_vars(kwargs.values(), _np.ndarray, _mx_nd.array)
    new_kwargs = dict(zip(kwargs.keys(), new_kw_items))
    output = func(*new_args, **new_kwargs)
    if isinstance(output, tuple):
        return tuple(_convert_vars(output, (_mx_nd.ndarray.NDArray, ivy.Array), ivy.to_numpy))
    else:
        return _convert_vars([output], (_mx_nd.ndarray.NDArray, ivy.Array), ivy.to_numpy)[0]
# Per-framework call wrappers, ordered to line up with _ivy_fws_dict's keys.
_calls = [np_call, jnp_call, tf_call, tf_graph_call, torch_call, mx_call]
def assert_compilable(fn):
    """Assert that `fn` can be compiled by ivy.

    Failures simply propagate to the caller.  The previous
    ``try/except Exception as e: raise e`` wrapper was a no-op that only
    re-raised the same exception with a mangled traceback, so it was
    removed.
    """
    ivy.compile(fn)
def var_fn(a, b=None, c=None):
    """Create an ivy variable wrapping an ivy array built from `a`.

    NOTE(review): `b` and `c` are forwarded positionally to ``ivy.array``;
    presumably they act as dtype/device arguments -- confirm against the
    ``ivy.array`` signature.
    """
    return ivy.variable(ivy.array(a, b, c))
def exclude(exclusion_list):
    """Append any not-yet-excluded names in `exclusion_list` to the
    module-level exclusion list."""
    global _excluded
    new_entries = set(exclusion_list) - set(_excluded)
    _excluded = _excluded + list(new_entries)
def frameworks():
    """Return the unique, available, non-excluded ivy backend modules."""
    return list(set([ivy_fw() for fw_str, ivy_fw in _ivy_fws_dict.items()
                     if ivy_fw() is not None and fw_str not in _excluded]))
def calls():
    """Return the call wrappers for the available, non-excluded frameworks."""
    return [call for (fw_str, ivy_fw), call in zip(_ivy_fws_dict.items(), _calls)
            if ivy_fw() is not None and fw_str not in _excluded]
def f_n_calls():
    """Return (framework module, call wrapper) pairs for the available,
    non-excluded frameworks."""
    return [(ivy_fw(), call) for (fw_str, ivy_fw), call in zip(_ivy_fws_dict.items(), _calls)
            if ivy_fw() is not None and fw_str not in _excluded]
| 31.75576 | 94 | 0.669424 |
7956c35f336edfe71b4c11d4b589e06760b7259d | 1,296 | py | Python | ebnmpy/estimators/point_exponential.py | kclamar/ebnmpy | fc3d7126757c4184c7cb442312f1db5b78d73a3b | [
"MIT"
] | null | null | null | ebnmpy/estimators/point_exponential.py | kclamar/ebnmpy | fc3d7126757c4184c7cb442312f1db5b78d73a3b | [
"MIT"
] | null | null | null | ebnmpy/estimators/point_exponential.py | kclamar/ebnmpy | fc3d7126757c4184c7cb442312f1db5b78d73a3b | [
"MIT"
] | null | null | null | from ..point_exponential import (
pe_initpar,
pe_nllik,
pe_partog,
pe_postcomp,
pe_postsamp,
pe_precomp,
pe_scalepar,
pe_summres,
)
from .parametric import ParametricEBNM
class PointExponentialEBNM(ParametricEBNM):
@property
def _class_name(self) -> str:
return "gammamix"
@property
def _mode_name(self) -> str:
return "shift"
def _initpar(self, g_init, mode, scale, pointmass, x, s):
return pe_initpar(g_init, mode, scale, pointmass, x, s)
def _scalepar(self, par, scale_factor):
return pe_scalepar(par, scale_factor)
def _precomp(self, x, s, par_init, fix_par):
return pe_precomp(x, s, par_init, fix_par)
def _nllik(self, par, x, s, par_init, fix_par, calc_grad, calc_hess, **kwargs):
return pe_nllik(par, x, s, par_init, fix_par, calc_grad, calc_hess)
def _postcomp(self, optpar, optval, x, s, par_init, fix_par, scale_factor, **kwargs):
return pe_postcomp(optpar, optval, x, s, par_init, fix_par, scale_factor)
def _summres(self, x, s, optpar, output):
return pe_summres(x, s, optpar, output)
def _partog(self, par):
return pe_partog(par)
def _postsamp(self, x, s, optpar, nsamp):
return pe_postsamp(x, s, optpar, nsamp)
| 28.173913 | 89 | 0.660494 |
7956c41005ba0b9bd32dcd896fe5c39a9c3e97e5 | 81 | py | Python | 30.strings/10.isdigit.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 30.strings/10.isdigit.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | 30.strings/10.isdigit.py | robinson-1985/python-zero-dnc | df510d67e453611fcd320df1397cdb9ca47fecb8 | [
"MIT"
] | null | null | null | # 9. isdigit() -> Retorna um booleano dizendo se todos os caracteres são dígitos. | 81 | 81 | 0.753086 |
7956c425310e134a8aa0b920d1c48eaebea29a79 | 5,757 | py | Python | cvxpy/reductions/solvers/qp_solvers/cplex_qpif.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | 556 | 2021-04-20T03:19:49.000Z | 2022-03-30T12:31:38.000Z | cvxpy/reductions/solvers/qp_solvers/cplex_qpif.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | 358 | 2021-04-20T08:17:49.000Z | 2022-03-31T21:16:28.000Z | cvxpy/reductions/solvers/qp_solvers/cplex_qpif.py | phschiele/cvxpy | a43aed7447b87f6d0fbc6f71ae5c7b84183f3369 | [
"ECL-2.0",
"Apache-2.0"
] | 131 | 2021-04-21T09:00:12.000Z | 2022-03-29T04:43:51.000Z | import numpy as np
import cvxpy.interface as intf
import cvxpy.settings as s
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.conic_solvers.cplex_conif import (
get_status, hide_solver_output, set_parameters,)
from cvxpy.reductions.solvers.qp_solvers.qp_solver import QpSolver
def constrain_cplex_infty(v) -> None:
    '''
    Clamp every entry of the vector ``v`` into the interval
    [-cplex.infinity, +cplex.infinity], in place.
    '''
    import cplex as cpx
    bound = cpx.infinity
    for idx in range(len(v)):
        if v[idx] >= bound:
            v[idx] = bound
        elif v[idx] <= -bound:
            v[idx] = -bound
class CPLEX(QpSolver):
    """QP interface for the CPLEX solver"""
    MIP_CAPABLE = True
    def name(self):
        # Canonical solver name used throughout cvxpy settings.
        return s.CPLEX
    def import_solver(self) -> None:
        # Import (and reference, to satisfy linters) to verify availability.
        import cplex
        cplex
    def invert(self, results, inverse_data):
        """Translate a raw CPLEX result dict into a cvxpy Solution."""
        model = results["model"]
        attr = {}
        if "cputime" in results:
            attr[s.SOLVE_TIME] = results["cputime"]
        # Barrier iteration counts are only meaningful for continuous problems.
        attr[s.NUM_ITERS] = \
            int(model.solution.progress.get_num_barrier_iterations()) \
            if not inverse_data[CPLEX.IS_MIP] \
            else 0
        status = get_status(model)
        if status in s.SOLUTION_PRESENT:
            # Get objective value
            opt_val = model.solution.get_objective_value() + \
                inverse_data[s.OFFSET]
            # Get solution
            x = np.array(model.solution.get_values())
            primal_vars = {
                CPLEX.VAR_ID:
                intf.DEFAULT_INTF.const_to_matrix(np.array(x))
            }
            # Only add duals if not a MIP.
            dual_vars = None
            if not inverse_data[CPLEX.IS_MIP]:
                # Negate to match cvxpy's dual sign convention.
                y = -np.array(model.solution.get_dual_values())
                dual_vars = {CPLEX.DUAL_VAR_ID: y}
            sol = Solution(status, opt_val, primal_vars, dual_vars, attr)
        else:
            sol = failure_solution(status, attr)
        return sol
    def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None):
        """Build the CPLEX model from the QP `data` dict and solve it.

        Returns a dict with the CPLEX model, solve time, and (on failure)
        an error status; `invert` converts it to a cvxpy Solution.
        """
        import cplex as cpx
        P = data[s.P].tocsr()  # Convert matrix to csr format
        q = data[s.Q]
        A = data[s.A].tocsr()  # Convert A matrix to csr format
        b = data[s.B]
        F = data[s.F].tocsr()  # Convert F matrix to csr format
        g = data[s.G]
        n_var = data['n_var']
        n_eq = data['n_eq']
        n_ineq = data['n_ineq']
        # Constrain values between bounds
        constrain_cplex_infty(b)
        constrain_cplex_infty(g)
        # Define CPLEX problem
        model = cpx.Cplex()
        # Minimize problem
        model.objective.set_sense(model.objective.sense.minimize)
        # Add variables and linear objective
        var_idx = list(model.variables.add(obj=q,
                                           lb=-cpx.infinity*np.ones(n_var),
                                           ub=cpx.infinity*np.ones(n_var)))
        # Constrain binary/integer variables if present
        for i in data[s.BOOL_IDX]:
            model.variables.set_types(var_idx[i],
                                      model.variables.type.binary)
        for i in data[s.INT_IDX]:
            model.variables.set_types(var_idx[i],
                                      model.variables.type.integer)
        # Add constraints
        lin_expr, rhs = [], []
        for i in range(n_eq):  # Add equalities
            start = A.indptr[i]
            end = A.indptr[i+1]
            lin_expr.append([A.indices[start:end].tolist(),
                             A.data[start:end].tolist()])
            rhs.append(b[i])
        if lin_expr:
            model.linear_constraints.add(lin_expr=lin_expr,
                                         senses=["E"] * len(lin_expr),
                                         rhs=rhs)
        lin_expr, rhs = [], []
        for i in range(n_ineq):  # Add inequalities
            start = F.indptr[i]
            end = F.indptr[i+1]
            lin_expr.append([F.indices[start:end].tolist(),
                             F.data[start:end].tolist()])
            rhs.append(g[i])
        if lin_expr:
            model.linear_constraints.add(lin_expr=lin_expr,
                                         senses=["L"] * len(lin_expr),
                                         rhs=rhs)
        # Set quadratic Cost
        if P.count_nonzero():  # Only if quadratic form is not null
            qmat = []
            for i in range(n_var):
                start = P.indptr[i]
                end = P.indptr[i+1]
                qmat.append([P.indices[start:end].tolist(),
                             P.data[start:end].tolist()])
            model.objective.set_quadratic(qmat)
        # Set verbosity
        if not verbose:
            hide_solver_output(model)
        # Set parameters
        reoptimize = solver_opts.pop('reoptimize', False)
        set_parameters(model, solver_opts)
        # Solve problem
        results_dict = {}
        try:
            start = model.get_time()
            model.solve()
            end = model.get_time()
            results_dict["cputime"] = end - start
            # An INFEASIBLE_OR_UNBOUNDED status can sometimes be resolved by
            # re-solving with presolve disabled, when the caller opted in.
            ambiguous_status = get_status(model) == s.INFEASIBLE_OR_UNBOUNDED
            if ambiguous_status and reoptimize:
                model.parameters.preprocessing.presolve.set(0)
                start_time = model.get_time()
                model.solve()
                results_dict["cputime"] += model.get_time() - start_time
        except Exception:  # Error in the solution
            results_dict["status"] = s.SOLVER_ERROR
        results_dict["model"] = model
        return results_dict
| 33.47093 | 100 | 0.538127 |
7956c462d9d94c8cd75a6c8e41f034905baaf3e8 | 506 | py | Python | aula3/teste5aula3.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
] | null | null | null | aula3/teste5aula3.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
] | null | null | null | aula3/teste5aula3.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
] | null | null | null | print('Insira o valor de 3 angulos de um triangulo: ')
a1 = float(input('Valor 1: '))
a2 = float(input('Valor 2: '))
a3 = float(input('Valor 3: '))
if a1 + a2 + a3 != 180:
print('Os valores não formam um triangulo. "A soma dos angulos internos de um triangulo sempre é igual a 180."')
elif a1 == 90 or a2 == 90 or a3 == 90:
print('Triângulo Retângulo.')
elif a1 > 90 or a2 > 90 or a3 > 90:
print('Triângulo Obtusângulo.')
elif a1 < 90 and a2 < 90 and a3 < 90:
print('Triângulo Acutângulo.') | 42.166667 | 116 | 0.642292 |
7956c594f3267a7ff5d977866703f7a90d668ede | 2,343 | py | Python | python/thumbContextMenu.py | Schizo/MediaBrowser | a80bd045380bb1c5697d9b0a6b9447a4b0e4dcc0 | [
"MIT"
] | 3 | 2016-01-19T10:36:09.000Z | 2021-01-29T01:14:45.000Z | python/thumbContextMenu.py | Schizo/MediaBrowser | a80bd045380bb1c5697d9b0a6b9447a4b0e4dcc0 | [
"MIT"
] | 2 | 2016-02-20T13:09:38.000Z | 2016-03-08T06:47:47.000Z | python/thumbContextMenu.py | Schizo/MediaBrowser | a80bd045380bb1c5697d9b0a6b9447a4b0e4dcc0 | [
"MIT"
] | 3 | 2016-02-19T16:52:57.000Z | 2017-05-16T03:06:43.000Z | from PyQt4 import QtGui, QtCore
import settings
import os
import subprocess
class ThumbContextMenu(QtGui.QMenu):
def __init__(self):
super(ThumbContextMenu, self).__init__()
# disabled, since it has been broken in the old ElementsBrowser for a few months and no one complained
# self.addAction("Open EXR Photoshop", self.openPhotoshopEXR)
# self.addAction("Open JPG Photoshop", self.openPhotoshopJPG)
self.addAction("Open EXR in RV", self.openRVEXR)
self.addAction("Open JPG in RV", self.openRVJPG)
# self.addAction("Open EXR in djv", self.openDJVEXR)
# self.addAction("Open JPG in djv", self.openDJVJPG)
self.addAction("Open Folder Location", self.openFolder)
self.addAction("Remove from DB", self.removeFromDB)
def setFileData(self, scrubFrame, fileName):
"""Sets the Path to a file directory"""
self.openPathEXR = constructPath.replace('####', scrubFrame.zfill(4))
self.fileName = fileName
self.openPathEXR = os.path.split(settings.sourcePath(settings.currentCategory, fileName))[0]
self.openPathJPG = os.path.split(settings.proxyPath(settings.currentCategory, fileName))[0]
self.locationPath = settings.locationPath(settings.currentCategory, fileName)
# def openPhotoshopEXR(self):
# print "opening Photoshop"
# print self.openPathEXR
#
# def openPhotoshopEXR(self):
# print "opening Photoshop"
# print self.openPathJPG
def openRVEXR(self):
# print "opening RV"
# print "rv: " + settings.appPath["rv"]
# print self.openPathEXR
subprocess.Popen([settings.appPath["rv"], self.openPathEXR])
def openRVJPG(self):
print "opening RV"
subprocess.Popen([settings.appPath["rv"], self.openPathJPG])
# def openDJVEXR(self):
# os.system(settings.appPath["djv"] + " " + self.openPathEXR)
# # subprocess.Popen([settings.appPath["djv"], self.openPathEXR])
#
# def openDJVJPG(self):
# subprocess.Popen([settings.appPath["djv"], self.openPathJPG + "/*"])
def openFolder(self):
subprocess.Popen([settings.appPath["explorer"], self.locationPath.replace("/","\\")])
def removeFromDB(self):
settings.removeItem(settings.currentCategory, self.fileName)
| 38.409836 | 110 | 0.665813 |
7956c5e7b8a20a01d4cd44709b059e153d0d7718 | 1,482 | py | Python | tests/test_clean.py | jpivarski/jupyter-book | bbc43bc427508bea4062aaf35471ee0750e4e2a1 | [
"BSD-3-Clause"
] | 9 | 2020-02-28T22:27:36.000Z | 2020-04-20T11:31:35.000Z | tests/test_clean.py | jpivarski/jupyter-book | bbc43bc427508bea4062aaf35471ee0750e4e2a1 | [
"BSD-3-Clause"
] | 96 | 2020-02-29T20:00:48.000Z | 2020-04-28T21:40:51.000Z | tests/test_clean.py | jpivarski/jupyter-book | bbc43bc427508bea4062aaf35471ee0750e4e2a1 | [
"BSD-3-Clause"
] | 7 | 2020-03-10T17:26:27.000Z | 2020-04-23T19:46:32.000Z | """Testing clean functionality of the CLI."""
from pathlib import Path
from subprocess import run, PIPE
import pytest
path_tests = Path(__file__).parent.resolve()
path_books = path_tests.joinpath("books")
path_root = path_tests.parent
def test_clean_book(tmpdir):
path = path_books.joinpath("clean_cache")
build_path = path.joinpath("_build")
run(f"jb build {path}".split())
# Ensure _build exists
assert build_path.exists()
# Ensure _build/.jupyter_cache exists
assert build_path.joinpath(".jupyter_cache").exists()
# Empty _build except .jupyter_cache
run(f"jb clean {path}".split())
# Ensure _build and .jupyter_cache exist
assert build_path.exists()
assert build_path.joinpath(".jupyter_cache").exists()
run(f"jb clean --all {path}".split())
# Ensure _build is removed
assert not path.joinpath("_build").exists()
# === Excepted errors ===
# Non-existent folder
with pytest.raises(ValueError):
out = run(f"jb clean doesnt/exist".split(), stderr=PIPE)
err = out.stderr.decode()
if "ValueError" in err:
raise ValueError(err)
assert "Path to book isn't a directory" in err
# Non-existent _build
with pytest.raises(ValueError):
out = run(f"jb clean {path}".split(), stderr=PIPE)
err = out.stderr.decode()
if "ValueError" in err:
raise ValueError(err)
assert "Your book does not have a _build directory." in err
| 29.058824 | 64 | 0.668016 |
7956c7cc66193c5bab60b863405870dea431bb57 | 17,558 | py | Python | lib/pybind11/tests/test_python_types.py | idscan/pydegensac | ccb015f1e9fe28bae507643d1d6b8f741a49564d | [
"MIT"
] | 273 | 2018-04-10T13:38:06.000Z | 2022-03-31T16:06:59.000Z | lib/pybind11/tests/test_python_types.py | idscan/pydegensac | ccb015f1e9fe28bae507643d1d6b8f741a49564d | [
"MIT"
] | 32 | 2018-06-14T07:06:16.000Z | 2022-03-17T18:39:47.000Z | lib/pybind11/tests/test_python_types.py | idscan/pydegensac | ccb015f1e9fe28bae507643d1d6b8f741a49564d | [
"MIT"
] | 68 | 2018-02-24T06:04:02.000Z | 2022-03-19T10:42:09.000Z | # Python < 3 needs this: coding=utf-8
import pytest
from pybind11_tests import ExamplePythonTypes, ConstructorStats, has_optional, has_exp_optional
def test_repr():
# In Python 3.3+, repr() accesses __qualname__
assert "pybind11_type" in repr(type(ExamplePythonTypes))
assert "ExamplePythonTypes" in repr(ExamplePythonTypes)
def test_static():
ExamplePythonTypes.value = 15
assert ExamplePythonTypes.value == 15
assert ExamplePythonTypes.value2 == 5
with pytest.raises(AttributeError) as excinfo:
ExamplePythonTypes.value2 = 15
assert str(excinfo.value) == "can't set attribute"
def test_instance(capture):
with pytest.raises(TypeError) as excinfo:
ExamplePythonTypes()
assert str(excinfo.value) == "pybind11_tests.ExamplePythonTypes: No constructor defined!"
instance = ExamplePythonTypes.new_instance()
with capture:
dict_result = instance.get_dict()
dict_result['key2'] = 'value2'
instance.print_dict(dict_result)
assert capture.unordered == """
key: key, value=value
key: key2, value=value2
"""
with capture:
dict_result = instance.get_dict_2()
dict_result['key2'] = 'value2'
instance.print_dict_2(dict_result)
assert capture.unordered == """
key: key, value=value
key: key2, value=value2
"""
with capture:
set_result = instance.get_set()
set_result.add('key4')
instance.print_set(set_result)
assert capture.unordered == """
key: key1
key: key2
key: key3
key: key4
"""
with capture:
set_result = instance.get_set2()
set_result.add('key3')
instance.print_set_2(set_result)
assert capture.unordered == """
key: key1
key: key2
key: key3
"""
with capture:
list_result = instance.get_list()
list_result.append('value2')
instance.print_list(list_result)
assert capture.unordered == """
Entry at position 0: value
list item 0: overwritten
list item 1: value2
"""
with capture:
list_result = instance.get_list_2()
list_result.append('value2')
instance.print_list_2(list_result)
assert capture.unordered == """
list item 0: value
list item 1: value2
"""
with capture:
list_result = instance.get_list_2()
list_result.append('value2')
instance.print_list_2(tuple(list_result))
assert capture.unordered == """
list item 0: value
list item 1: value2
"""
array_result = instance.get_array()
assert array_result == ['array entry 1', 'array entry 2']
with capture:
instance.print_array(array_result)
assert capture.unordered == """
array item 0: array entry 1
array item 1: array entry 2
"""
varray_result = instance.get_valarray()
assert varray_result == [1, 4, 9]
with capture:
instance.print_valarray(varray_result)
assert capture.unordered == """
valarray item 0: 1
valarray item 1: 4
valarray item 2: 9
"""
with pytest.raises(RuntimeError) as excinfo:
instance.throw_exception()
assert str(excinfo.value) == "This exception was intentionally thrown."
assert instance.pair_passthrough((True, "test")) == ("test", True)
assert instance.tuple_passthrough((True, "test", 5)) == (5, "test", True)
# Any sequence can be cast to a std::pair or std::tuple
assert instance.pair_passthrough([True, "test"]) == ("test", True)
assert instance.tuple_passthrough([True, "test", 5]) == (5, "test", True)
assert instance.get_bytes_from_string().decode() == "foo"
assert instance.get_bytes_from_str().decode() == "bar"
assert instance.get_str_from_string().encode().decode() == "baz"
assert instance.get_str_from_bytes().encode().decode() == "boo"
class A(object):
def __str__(self):
return "this is a str"
def __repr__(self):
return "this is a repr"
with capture:
instance.test_print(A())
assert capture == """
this is a str
this is a repr
"""
cstats = ConstructorStats.get(ExamplePythonTypes)
assert cstats.alive() == 1
del instance
assert cstats.alive() == 0
# PyPy does not seem to propagate the tp_docs field at the moment
def test_class_docs(doc):
    """The C++-supplied class docstring must survive the binding layer."""
    assert doc(ExamplePythonTypes) == "Example 2 documentation"
def test_method_docs(doc):
    """Auto-generated signatures and docstrings for each bound method."""
    assert doc(ExamplePythonTypes.get_dict) == """
        get_dict(self: m.ExamplePythonTypes) -> dict
        Return a Python dictionary
    """
    assert doc(ExamplePythonTypes.get_dict_2) == """
        get_dict_2(self: m.ExamplePythonTypes) -> Dict[str, str]
        Return a C++ dictionary
    """
    assert doc(ExamplePythonTypes.get_list) == """
        get_list(self: m.ExamplePythonTypes) -> list
        Return a Python list
    """
    assert doc(ExamplePythonTypes.get_list_2) == """
        get_list_2(self: m.ExamplePythonTypes) -> List[str]
        Return a C++ list
    """
    assert doc(ExamplePythonTypes.get_dict) == """
        get_dict(self: m.ExamplePythonTypes) -> dict
        Return a Python dictionary
    """
    assert doc(ExamplePythonTypes.get_set) == """
        get_set(self: m.ExamplePythonTypes) -> set
        Return a Python set
    """
    assert doc(ExamplePythonTypes.get_set2) == """
        get_set2(self: m.ExamplePythonTypes) -> Set[str]
        Return a C++ set
    """
    assert doc(ExamplePythonTypes.get_array) == """
        get_array(self: m.ExamplePythonTypes) -> List[str[2]]
        Return a C++ array
    """
    assert doc(ExamplePythonTypes.get_valarray) == """
        get_valarray(self: m.ExamplePythonTypes) -> List[int]
        Return a C++ valarray
    """
    assert doc(ExamplePythonTypes.print_dict) == """
        print_dict(self: m.ExamplePythonTypes, arg0: dict) -> None
        Print entries of a Python dictionary
    """
    assert doc(ExamplePythonTypes.print_dict_2) == """
        print_dict_2(self: m.ExamplePythonTypes, arg0: Dict[str, str]) -> None
        Print entries of a C++ dictionary
    """
    assert doc(ExamplePythonTypes.print_set) == """
        print_set(self: m.ExamplePythonTypes, arg0: set) -> None
        Print entries of a Python set
    """
    assert doc(ExamplePythonTypes.print_set_2) == """
        print_set_2(self: m.ExamplePythonTypes, arg0: Set[str]) -> None
        Print entries of a C++ set
    """
    assert doc(ExamplePythonTypes.print_list) == """
        print_list(self: m.ExamplePythonTypes, arg0: list) -> None
        Print entries of a Python list
    """
    assert doc(ExamplePythonTypes.print_list_2) == """
        print_list_2(self: m.ExamplePythonTypes, arg0: List[str]) -> None
        Print entries of a C++ list
    """
    assert doc(ExamplePythonTypes.print_array) == """
        print_array(self: m.ExamplePythonTypes, arg0: List[str[2]]) -> None
        Print entries of a C++ array
    """
    assert doc(ExamplePythonTypes.pair_passthrough) == """
        pair_passthrough(self: m.ExamplePythonTypes, arg0: Tuple[bool, str]) -> Tuple[str, bool]
        Return a pair in reversed order
    """
    assert doc(ExamplePythonTypes.tuple_passthrough) == """
        tuple_passthrough(self: m.ExamplePythonTypes, arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
        Return a triple in reversed order
    """  # noqa: E501 line too long
    assert doc(ExamplePythonTypes.throw_exception) == """
        throw_exception(self: m.ExamplePythonTypes) -> None
        Throw an exception
    """
    assert doc(ExamplePythonTypes.new_instance) == """
        new_instance() -> m.ExamplePythonTypes
        Return an instance
    """
def test_module():
    """__name__/__module__ metadata on the bound class and its methods."""
    import pybind11_tests
    assert pybind11_tests.__name__ == "pybind11_tests"
    assert ExamplePythonTypes.__name__ == "ExamplePythonTypes"
    assert ExamplePythonTypes.__module__ == "pybind11_tests"
    assert ExamplePythonTypes.get_set.__name__ == "get_set"
    assert ExamplePythonTypes.get_set.__module__ == "pybind11_tests"
def test_print(capture):
    """py::print must mirror Python's print(), including sep/end/flush
    handling and the file=stderr redirection."""
    from pybind11_tests import test_print_function
    with capture:
        test_print_function()
    assert capture == """
        Hello, World!
        1 2.0 three True -- multiple args
        *args-and-a-custom-separator
        no new line here -- next print
        flush
        py::print + str.format = this
    """
    assert capture.stderr == "this goes to stderr"
def test_str_api():
    """py::str.format must behave like Python str.format."""
    from pybind11_tests import test_str_format
    s1, s2 = test_str_format()
    assert s1 == "1 + 2 = 3"
    assert s1 == s2
def test_dict_api():
    """py::dict's keyword constructor must produce the expected mapping."""
    from pybind11_tests import test_dict_keyword_constructor
    assert test_dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
def test_accessors():
    """C++-side object/attr/item accessors against plain Python objects."""
    from pybind11_tests import test_accessor_api, test_tuple_accessor, test_accessor_assignment
    # Fixture objects probed from C++ via attr()/operator[] accessors.
    class SubTestObject:
        attr_obj = 1
        attr_char = 2
    class TestObject:
        basic_attr = 1
        begin_end = [1, 2, 3]
        d = {"operator[object]": 1, "operator[char *]": 2}
        sub = SubTestObject()
        def func(self, x, *args):
            return self.basic_attr + x + sum(args)
    d = test_accessor_api(TestObject())
    assert d["basic_attr"] == 1
    assert d["begin_end"] == [1, 2, 3]
    assert d["operator[object]"] == 1
    assert d["operator[char *]"] == 2
    assert d["attr(object)"] == 1
    assert d["attr(char *)"] == 2
    assert d["missing_attr_ptr"] == "raised"
    assert d["missing_attr_chain"] == "raised"
    assert d["is_none"] is False
    assert d["operator()"] == 2
    assert d["operator*"] == 7
    assert test_tuple_accessor(tuple()) == (0, 1, 2)
    d = test_accessor_assignment()
    assert d["get"] == 0
    assert d["deferred_get"] == 0
    assert d["set"] == 1
    assert d["deferred_set"] == 1
    assert d["var"] == 99
@pytest.mark.skipif(not has_optional, reason='no <optional>')
def test_optional():
    """std::optional parameters and returns should round-trip through None."""
    from pybind11_tests import double_or_zero, half_or_none, test_nullopt

    for value, doubled in ((None, 0), (42, 84)):
        assert double_or_zero(value) == doubled
    pytest.raises(TypeError, double_or_zero, 'foo')

    assert half_or_none(0) is None
    assert half_or_none(42) == 21
    pytest.raises(TypeError, half_or_none, 'foo')

    # A defaulted nullopt parameter accepts no argument, None, or its default.
    for call_args in ((), (None,), (42,)):
        assert test_nullopt(*call_args) == 42
    assert test_nullopt(43) == 43
@pytest.mark.skipif(not has_exp_optional, reason='no <experimental/optional>')
def test_exp_optional():
    """std::experimental::optional should behave the same as std::optional."""
    from pybind11_tests import double_or_zero_exp, half_or_none_exp, test_nullopt_exp

    for value, doubled in ((None, 0), (42, 84)):
        assert double_or_zero_exp(value) == doubled
    pytest.raises(TypeError, double_or_zero_exp, 'foo')

    assert half_or_none_exp(0) is None
    assert half_or_none_exp(42) == 21
    pytest.raises(TypeError, half_or_none_exp, 'foo')

    for call_args in ((), (None,), (42,)):
        assert test_nullopt_exp(*call_args) == 42
    assert test_nullopt_exp(43) == 43
def test_constructors():
    """C++ default and converting constructors are equivalent to type calls in Python"""
    from pybind11_tests import (test_default_constructors, test_converting_constructors,
                                test_cast_functions)

    # Default-constructed C++ objects must equal empty Python instances.
    default_types = [str, bool, int, float, tuple, list, dict, set]
    assert test_default_constructors() == {t.__name__: t() for t in default_types}

    # Converting constructors and cast functions must behave like type calls
    # on the corresponding raw value.
    conversion_samples = [
        (str, 42),
        (bool, "Not empty"),
        (int, "42"),
        (float, "+1e3"),
        (tuple, range(3)),
        (list, range(3)),
        (dict, [("two", 2), ("one", 1), ("three", 3)]),
        (set, [4, 4, 5, 6, 6, 6]),
        (memoryview, b'abc'),
    ]
    inputs = {t.__name__: raw for t, raw in conversion_samples}
    expected = {t.__name__: t(raw) for t, raw in conversion_samples}
    assert test_converting_constructors(inputs) == expected
    assert test_cast_functions(inputs) == expected
def test_move_out_container():
    """Properties use the `reference_internal` policy by default. If the underlying function
    returns an rvalue, the policy is automatically changed to `move` to avoid referencing
    a temporary. In case the return value is a container of user-defined types, the policy
    also needs to be applied to the elements, not just the container."""
    from pybind11_tests import MoveOutContainer

    container = MoveOutContainer()
    values = [element.value for element in container.move_list]
    assert values == [0, 1, 2]
def test_implicit_casting():
    """Tests implicit casting when assigning or appending to dicts and lists."""
    from pybind11_tests import get_implicit_casting

    result = get_implicit_casting()
    expected_dict = {
        'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
        'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
        'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44,
    }
    assert result['d'] == expected_dict
    assert result['l'] == [3, 6, 9, 12, 15]
def test_unicode_conversion():
    """Tests unicode conversion and error reporting."""
    import pybind11_tests
    from pybind11_tests import (good_utf8_string, bad_utf8_string,
                                good_utf16_string, bad_utf16_string,
                                good_utf32_string,  # bad_utf32_string,
                                good_wchar_string,  # bad_wchar_string,
                                u8_Z, u8_eacute, u16_ibang, u32_mathbfA, wchar_heart)

    # Well-formed strings in each encoding must decode to the expected text.
    assert good_utf8_string() == u"Say utf8‽ 🎂 𝐀"
    assert good_utf16_string() == u"b‽🎂𝐀z"
    assert good_utf32_string() == u"a𝐀🎂‽z"
    assert good_wchar_string() == u"a⸘𝐀z"

    # Malformed byte sequences must raise UnicodeDecodeError, not crash.
    with pytest.raises(UnicodeDecodeError):
        bad_utf8_string()
    with pytest.raises(UnicodeDecodeError):
        bad_utf16_string()

    # These are provided only if they actually fail (they don't when 32-bit and under Python 2.7)
    if hasattr(pybind11_tests, "bad_utf32_string"):
        with pytest.raises(UnicodeDecodeError):
            pybind11_tests.bad_utf32_string()
    if hasattr(pybind11_tests, "bad_wchar_string"):
        with pytest.raises(UnicodeDecodeError):
            pybind11_tests.bad_wchar_string()

    # Single-character returns for each char width.
    assert u8_Z() == 'Z'
    assert u8_eacute() == u'é'
    assert u16_ibang() == u'‽'
    assert u32_mathbfA() == u'𝐀'
    assert wchar_heart() == u'♥'
def test_single_char_arguments():
    """Tests failures for passing invalid inputs to char-accepting functions"""
    from pybind11_tests import ord_char, ord_char16, ord_char32, ord_wchar, wchar_size

    def toobig_message(r):
        # Mirrors the ValueError text raised by the C++ character casters.
        return "Character code point not in range({0:#x})".format(r)
    toolong_message = "Expected a character, but multi-character string found"

    # char: one byte -- code points above 0xFF cannot be represented.
    assert ord_char(u'a') == 0x61  # simple ASCII
    assert ord_char(u'é') == 0xE9  # requires 2 bytes in utf-8, but can be stuffed in a char
    with pytest.raises(ValueError) as excinfo:
        assert ord_char(u'Ā') == 0x100  # requires 2 bytes, doesn't fit in a char
    assert str(excinfo.value) == toobig_message(0x100)
    with pytest.raises(ValueError) as excinfo:
        assert ord_char(u'ab')
    assert str(excinfo.value) == toolong_message

    # char16_t: BMP only -- astral code points would need a surrogate pair.
    assert ord_char16(u'a') == 0x61
    assert ord_char16(u'é') == 0xE9
    assert ord_char16(u'Ā') == 0x100
    assert ord_char16(u'‽') == 0x203d
    assert ord_char16(u'♥') == 0x2665
    with pytest.raises(ValueError) as excinfo:
        assert ord_char16(u'🎂') == 0x1F382  # requires surrogate pair
    assert str(excinfo.value) == toobig_message(0x10000)
    with pytest.raises(ValueError) as excinfo:
        assert ord_char16(u'aa')
    assert str(excinfo.value) == toolong_message

    # char32_t: covers the full Unicode range.
    assert ord_char32(u'a') == 0x61
    assert ord_char32(u'é') == 0xE9
    assert ord_char32(u'Ā') == 0x100
    assert ord_char32(u'‽') == 0x203d
    assert ord_char32(u'♥') == 0x2665
    assert ord_char32(u'🎂') == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert ord_char32(u'aa')
    assert str(excinfo.value) == toolong_message

    # wchar_t: width is platform-dependent; 2-byte builds reject astral chars.
    assert ord_wchar(u'a') == 0x61
    assert ord_wchar(u'é') == 0xE9
    assert ord_wchar(u'Ā') == 0x100
    assert ord_wchar(u'‽') == 0x203d
    assert ord_wchar(u'♥') == 0x2665
    if wchar_size == 2:
        with pytest.raises(ValueError) as excinfo:
            assert ord_wchar(u'🎂') == 0x1F382  # requires surrogate pair
        assert str(excinfo.value) == toobig_message(0x10000)
    else:
        assert ord_wchar(u'🎂') == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert ord_wchar(u'aa')
    assert str(excinfo.value) == toolong_message
def test_builtins_cast_return_none():
    """Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None"""
    import pybind11_tests as m

    nullptr_returning_funcs = (
        m.return_none_string,
        m.return_none_char,
        m.return_none_bool,
        m.return_none_int,
        m.return_none_float,
    )
    for func in nullptr_returning_funcs:
        assert func() is None
def test_capsule_with_destructor(capture):
    """Capsules created on the C++ side should run their destructor callback
    once the last Python reference is dropped and collected."""
    import pybind11_tests as m
    with capture:
        a = m.return_capsule_with_destructor()
        del a
        pytest.gc_collect()  # force collection so destructor output is captured
    # Creation/destruction ordering is interpreter-dependent -> unordered compare.
    assert capture.unordered == """
        creating capsule
        destructing capsule
    """
    with capture:
        a = m.return_capsule_with_destructor_2()
        del a
        pytest.gc_collect()
    assert capture.unordered == """
        creating capsule
        destructing capsule: 1234
    """
| 32.757463 | 107 | 0.644493 |
7956c85a0fdd581dd6926dc6f37437839781c67b | 649 | py | Python | mediadecoder/soundrenderers/__init__.py | dschreij/Python-Media-decoder | f01b02d790f2abc52d9792e43076cf4cb7d3ce51 | [
"MIT"
] | 8 | 2016-05-30T07:30:29.000Z | 2017-07-14T23:36:06.000Z | mediadecoder/soundrenderers/__init__.py | open-cogsci/python-mediadecoder | f01b02d790f2abc52d9792e43076cf4cb7d3ce51 | [
"MIT"
] | 4 | 2016-08-04T12:52:48.000Z | 2018-07-16T20:21:45.000Z | mediadecoder/soundrenderers/__init__.py | open-cogsci/python-mediadecoder | f01b02d790f2abc52d9792e43076cf4cb7d3ce51 | [
"MIT"
] | 3 | 2016-05-30T14:56:14.000Z | 2016-06-23T10:36:20.000Z | import warnings
# Each sound renderer backend is optional: import what is available and warn
# (rather than fail) for backends whose dependencies are not installed.
try:
    from mediadecoder.soundrenderers.pyaudiorenderer import SoundrendererPyAudio
except Exception as e:
    warnings.warn("Could not import pyaudio sound renderer: {}".format(e))

try:
    from mediadecoder.soundrenderers.pygamerenderer import SoundrendererPygame
except Exception as e:
    warnings.warn("Could not import pygame sound renderer: {}".format(e))

try:
    from mediadecoder.soundrenderers.sounddevicerenderer import SoundrendererSounddevice
except Exception as e:
    warnings.warn("Could not import sounddevice sound renderer: {}".format(e))

# NOTE(review): a name listed here is undefined if its import above failed,
# so `from ... import *` would raise for missing backends -- confirm intended.
__all__ = ['SoundrendererPygame', 'SoundrendererPyAudio','SoundrendererSounddevice']
| 32.45 | 85 | 0.813559 |
7956c9e49ab2bcc2d773f0891188091c45e56fe8 | 416 | py | Python | baekjoon/18111/minecraft.py | ucyang/AlgoEx | 465c88f04b9449c06ee5c9a684ded5aba8ccf399 | [
"MIT"
] | null | null | null | baekjoon/18111/minecraft.py | ucyang/AlgoEx | 465c88f04b9449c06ee5c9a684ded5aba8ccf399 | [
"MIT"
] | null | null | null | baekjoon/18111/minecraft.py | ucyang/AlgoEx | 465c88f04b9449c06ee5c9a684ded5aba8ccf399 | [
"MIT"
] | null | null | null | import sys
# Baekjoon 18111 "Minecraft": choose a target height h that levels the whole
# terrain in minimum time (remove = 2s, place = 1s), preferring the highest h.
input = lambda: sys.stdin.readline().rstrip()  # fast reads (shadows builtin input)
N, _, B = map(int, input().split())  # N rows, M columns (unused name), B spare blocks
a = []
for _ in range(N):
    a += map(int, input().split())  # flatten the grid into one list of heights
min_t = -1  # best (minimal) time so far; -1 means "none yet"
for h in range(min(a), max(a) + 1):  # every feasible target height
    b, t = B, 0  # b: block inventory after leveling to h, t: total time
    for v in a:
        b += v - h  # removed blocks enter the inventory, placed ones leave it
        t += 2 * (v - h) if v > h else h - v
    # Inventory shrinks as h grows, so once it goes negative no higher h works.
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    # this placement (checks after the inner loop) is the consistent reading.
    if b < 0:
        break
    if min_t < 0 or t <= min_t:  # '<=' keeps the highest h among time ties
        min_t = t
        max_h = h
print(min_t, max_h)
7956cbf4da7132028944b6813e891f64f104d3f8 | 211 | py | Python | gen_stack.py | Globidev/push-swap | d3c6f1c7ab6b33d7281eff4895b3d0e4c291fe77 | [
"MIT"
] | null | null | null | gen_stack.py | Globidev/push-swap | d3c6f1c7ab6b33d7281eff4895b3d0e4c291fe77 | [
"MIT"
] | null | null | null | gen_stack.py | Globidev/push-swap | d3c6f1c7ab6b33d7281eff4895b3d0e4c291fe77 | [
"MIT"
] | null | null | null | from random import shuffle, seed
from sys import argv

# Usage: gen_stack.py SIZE [SEED]
# Prints the integers 0..SIZE-1 in random order, space-separated (push_swap input).
size = int(argv[1])

# Seed the RNG only when a numeric seed argument is supplied, so runs can be
# reproduced. The previous bare `except:` also swallowed SystemExit and
# KeyboardInterrupt; catch only the two expected failure modes.
try:
    seed(int(argv[2]))
except (IndexError, ValueError):
    pass

data_set = list(range(size))
shuffle(data_set)

print(' '.join(f'{i}' for i in data_set))
| 15.071429 | 41 | 0.672986 |
7956cc0765e00935db690e020b497edebbc3cd95 | 1,727 | py | Python | chp7/finetune_bert_spc.py | Sheldoer/plm-nlp-code | 04127d137c8bd40bc1412bee863640b9d909ddf9 | [
"Apache-2.0"
] | 330 | 2021-07-25T13:46:18.000Z | 2022-03-29T08:52:09.000Z | chp7/finetune_bert_spc.py | Sheldoer/plm-nlp-code | 04127d137c8bd40bc1412bee863640b9d909ddf9 | [
"Apache-2.0"
] | 11 | 2021-07-29T16:37:19.000Z | 2022-03-29T05:14:26.000Z | chp7/finetune_bert_spc.py | Sheldoer/plm-nlp-code | 04127d137c8bd40bc1412bee863640b9d909ddf9 | [
"Apache-2.0"
] | 107 | 2021-07-26T08:30:43.000Z | 2022-03-21T15:34:47.000Z | # Defined in Section 7.4.3.2
import numpy as np
from datasets import load_dataset, load_metric
from transformers import BertTokenizerFast, BertForSequenceClassification, TrainingArguments, Trainer

# Load the training data, tokenizer, pretrained model, and evaluation metric.
dataset = load_dataset('glue', 'rte')
tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
model = BertForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
metric = load_metric('glue', 'rte')

# Tokenize the sentence pairs of the training set.
def tokenize(examples):
    return tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, padding='max_length')

dataset = dataset.map(tokenize, batched=True)
encoded_dataset = dataset.map(lambda examples: {'labels': examples['label']}, batched=True)

# Format the dataset columns as torch.Tensor for PyTorch training.
columns = ['input_ids', 'token_type_ids', 'attention_mask', 'labels']
encoded_dataset.set_format(type='torch', columns=columns)

# Define how the evaluation metric is computed from model predictions.
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    return metric.compute(predictions=np.argmax(predictions, axis=1), references=labels)

# Define TrainingArguments (the AdamW optimizer is used by default).
args = TrainingArguments(
    "ft-rte",                        # output path for checkpoints and other files
    evaluation_strategy="epoch",     # evaluate at the end of every epoch
    learning_rate=2e-5,              # initial learning rate
    per_device_train_batch_size=16,  # training batch size per device
    per_device_eval_batch_size=16,   # evaluation batch size per device
    num_train_epochs=2,              # number of training epochs
)

# Define the Trainer with the model, training arguments, train/validation
# splits, the tokenizer, and the metric function.
trainer = Trainer(
    model,
    args,
    train_dataset=encoded_dataset["train"],
    eval_dataset=encoded_dataset["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)

# Start training! (takes roughly a few hours on a mainstream GPU)
trainer.train()
| 34.54 | 105 | 0.738854 |
7956cc4dc263468d53ee09ae951ba08640876dce | 1,142 | py | Python | RealSenseSDK/API_test.py | cutz-j/AR-project | 50d4f407a4f2c42e12bf2bcd54c436df6fa3c9fa | [
"MIT"
] | null | null | null | RealSenseSDK/API_test.py | cutz-j/AR-project | 50d4f407a4f2c42e12bf2bcd54c436df6fa3c9fa | [
"MIT"
] | null | null | null | RealSenseSDK/API_test.py | cutz-j/AR-project | 50d4f407a4f2c42e12bf2bcd54c436df6fa3c9fa | [
"MIT"
] | null | null | null | ### pyrealsense2 INSTRUCTION ###
# RealSense SDK smoke test: start a pipeline, grab one frameset, and display
# the color and depth streams as numpy arrays.
import pyrealsense2 as rs
import numpy as np
from PIL import Image  # NOTE(review): imported but unused in this script
import matplotlib.pyplot as plt

pipeline = rs.pipeline()
pipeline.start()

# Commented-out SDK tutorial: continuously render near-range depth coverage
# as ASCII art (kept for reference).
#try:
#    while True:
#        frames = pipeline.wait_for_frames()
#        depth = frames.get_depth_frame()
#
#        if not depth:
#            continue
#
#        coverage = [0] * 64
#        for y in range(480):
#            for x in range(640):
#                dist = depth.get_distance(x, y)
#                if 0 < dist and dist < 1:
#                    coverage[x//10] += 1
#
#            if y % 20 == 19:
#                line = ""
#                for c in coverage:
#                    line += " .:nhBXWW"[c//25]
#                coverage = [0]*64
#                print(line)
#
#finally:
#    pipeline.stop()

### numpy INSTRUCTION ###
# Grab a single frameset and expose both streams as numpy arrays.
frames = pipeline.wait_for_frames()
depth = frames.get_depth_frame()
img_data = frames.get_color_frame().as_frame().get_data()
depth_data = depth.as_frame().get_data()
np_image = np.asanyarray(img_data)
np_depth = np.asanyarray(depth_data)
# NOTE(review): the second imshow replaces the first on the same axes and the
# pipeline is never stopped -- confirm this is intentional for a scratch test.
plt.imshow(np_image)
plt.imshow(np_depth)
7956ccc2a95a3dab0ee70564cdd95e42454f0bfd | 4,886 | py | Python | savecode/threeyears/idownclient/scout/plugin/sonar/sonardomainwhois.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2019-05-19T11:54:26.000Z | 2019-05-19T12:03:49.000Z | savecode/threeyears/idownclient/scout/plugin/sonar/sonardomainwhois.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 1 | 2020-11-27T07:55:15.000Z | 2020-11-27T07:55:15.000Z | savecode/threeyears/idownclient/scout/plugin/sonar/sonardomainwhois.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2021-09-06T18:06:12.000Z | 2021-12-31T07:44:43.000Z | """
使用sonar查询domian的whois信息
create by judy 2019/07/16
"""
import datetime
import json
import traceback
import requests
from commonbaby.mslog import MsLogger, MsLogManager
from datacontract.iscoutdataset.iscouttask import IscoutTask
from idownclient.config_scouter import scouter_config
from ....clientdatafeedback.scoutdatafeedback import Whois, Email, Phone
logger: MsLogger = MsLogManager.get_logger("Sonarapidomainwhois")
class SonarDomainWhois(object):
"""domain whois search"""
@classmethod
def _make_email(cls, task: IscoutTask, level, email, reason):
"""
当获取到了email的时候,会做一个email返回
:param email:
:return:
"""
email_obj = Email(task, level, email)
email_obj.reason = reason
email_obj.source = 'Sonar system'
return email_obj
@classmethod
def _make_phone(cls, task: IscoutTask, level, phone, reason):
"""
当获取到了phone的时候会做一个phone返回
:param phone:
:return:
"""
phone_obj = Phone(task, level, phone)
phone_obj.reason = reason
phone_obj.source = 'Sonar system'
return phone_obj
@staticmethod
def get_whois_info(task: IscoutTask, level, domainname: str, reason):
if not isinstance(task, IscoutTask):
raise Exception("Invalid IscoutTask")
if not isinstance(domainname, str):
raise Exception("Invalid domain")
try:
url = f'{scouter_config.sonarapi}/dbs/domainwhois'
headers = {
'Accept': 'application/json'
}
querystring = {"domainName": domainname}
response = requests.request("GET", url, headers=headers, params=querystring, timeout=10)
res_text = response.text
res_dict = json.loads(res_text)
data = res_dict.get('data')
if len(data) == 0:
return
data_res: dict = data[0]
# registrantinfo
registrantinfo: dict = data_res.get('registrant')
if registrantinfo is None:
# raise Exception(" Sonar registrant not found in whois info.")
return
registrant = registrantinfo.get('name')
registrar = data_res.get('registrar')
reg_time = data_res.get('creationDate')
if reg_time is None or registrar is None:
# raise Exception("Registtime not found in whois info")
return
registtime = datetime.datetime.fromtimestamp(
int(reg_time)).strftime('%Y-%m-%d %H:%M:%S')
whois: Whois = Whois(task, level, registrar, registtime)
whois.registrant = registrant
whois.registrantorg = registrantinfo.get('organization')
# email 和 phone
registrantemail = registrantinfo.get('email')
if registrantemail is not None:
whois.registrantemail = registrantinfo.get('email')
emailobj = SonarDomainWhois._make_email(task, level, registrantemail, reason)
yield emailobj
registrantphone = registrantinfo.get('telephone')
if registrantphone is not None:
rphone = registrantphone.replace('.', '')
if not rphone.startswith('+'):
rphone = '+' + rphone
whois.registrantphone = rphone
phoneobj = SonarDomainWhois._make_phone(task, level, rphone, reason)
yield phoneobj
# 拼接地址
country = registrantinfo.get('country')
state = registrantinfo.get('state')
city = registrantinfo.get('city')
street = registrantinfo.get('street1')
addr = ''
if country is not None:
addr += f'{country}/'
if state is not None:
addr += f'{state}/'
if city is not None:
addr += f'{city}/'
if street is not None:
addr += f'{street}'
whois.registrantaddr = addr
dns = data_res.get('nameServers')
if dns is not None:
for d in dns.split('|'):
whois.set_dns_server(d.strip())
update_time = data_res.get('updatedDate')
if update_time is not None:
whois.infotime = datetime.datetime.fromtimestamp(
int(update_time)).strftime('%Y-%m-%d %H:%M:%S')
expire_time = data_res.get('expirationDate')
if expire_time is not None:
whois.expiretime = datetime.datetime.fromtimestamp(int(expire_time)).strftime('%Y-%m-%d %H:%M:%S')
yield whois
except:
logger.error(
f"Sonar api get domain whois error, please check sonar api connect, err:{traceback.format_exc()}")
| 36.462687 | 114 | 0.576341 |
7956cd1617f219397d8110116771b0295d8c82ad | 2,772 | py | Python | setup.py | selmaneislam/rdootl | 0ec936d998bdf1d2614d53c7fa57fbed28bd54aa | [
"MIT"
] | null | null | null | setup.py | selmaneislam/rdootl | 0ec936d998bdf1d2614d53c7fa57fbed28bd54aa | [
"MIT"
] | null | null | null | setup.py | selmaneislam/rdootl | 0ec936d998bdf1d2614d53c7fa57fbed28bd54aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
raise RuntimeError('setuptools is required')
import versioneer
DESCRIPTION = 'Functions for reproducible timeseries analysis of photovoltaic systems.'
LONG_DESCRIPTION = """
RdTools is an open-source library to support reproducible technical analysis of
PV time series data. The library aims to provide best practice analysis
routines along with the building blocks for users to tailor their own analyses.
Source code: https://github.com/NREL/rdtools
"""
DISTNAME = 'rdtools'
LICENSE = 'MIT'
AUTHOR = 'Rdtools Python Developers'
AUTHOR_EMAIL = 'RdTools@nrel.gov'
MAINTAINER_EMAIL = 'RdTools@nrel.gov'
URL = 'https://github.com/NREL/rdtools'
SETUP_REQUIRES = [
'pytest-runner',
]
TESTS_REQUIRE = [
'pytest >= 3.6.3',
]
INSTALL_REQUIRES = [
'matplotlib >= 2.2.2',
'numpy >= 1.12',
'pandas >= 0.23.0,!=1.0.0,!=1.0.1', # exclude 1.0.0 & 1.0.1 for GH142
'statsmodels >= 0.8.0',
'scipy >= 0.19.1',
'h5py >= 2.7.1',
'pvlib >= 0.7.0, <0.8.0',
]
EXTRAS_REQUIRE = {
'doc': [
'sphinx==1.8.5',
'nbsphinx==0.4.3',
'nbsphinx-link==1.3.0',
'pandas==0.23.0',
'pvlib==0.7.1',
'sphinx_rtd_theme==0.4.3',
'ipython',
],
'test': [
'pytest',
'coverage',
]
}
EXTRAS_REQUIRE['all'] = sorted(set(sum(EXTRAS_REQUIRE.values(), [])))
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
]
KEYWORDS = [
'photovoltaic',
'solar',
'analytics',
'analysis',
'performance',
'degradation',
'PV'
]
setuptools_kwargs = {
'zip_safe': False,
'scripts': [],
'include_package_data': True
}
# set up packages to be installed and extensions to be compiled
PACKAGES = ['rdtools']
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=PACKAGES,
keywords=KEYWORDS,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
classifiers=CLASSIFIERS,
**setuptools_kwargs)
| 23.692308 | 87 | 0.642136 |
7956cded2fa180c0ffd8e292515fa03940312a80 | 1,091 | py | Python | api/tacticalrmm/agents/urls.py | jeffreyvh/tacticalrmm | dcfb1732954c2c165e82e6b24686e27f9f909eb3 | [
"MIT"
] | null | null | null | api/tacticalrmm/agents/urls.py | jeffreyvh/tacticalrmm | dcfb1732954c2c165e82e6b24686e27f9f909eb3 | [
"MIT"
] | null | null | null | api/tacticalrmm/agents/urls.py | jeffreyvh/tacticalrmm | dcfb1732954c2c165e82e6b24686e27f9f909eb3 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Agent endpoints: listing/filtering, lifecycle (install/uninstall/update),
# remote actions (raw commands, power, processes, scripts) and recovery.
urlpatterns = [
    path("listagents/", views.list_agents),
    path("listagentsnodetail/", views.list_agents_no_detail),
    path("byclient/<client>/", views.by_client),
    path("bysite/<client>/<site>/", views.by_site),
    path("overdueaction/", views.overdue_action),
    path("sendrawcmd/", views.send_raw_cmd),
    path("<pk>/agentdetail/", views.agent_detail),
    path("<int:pk>/meshcentral/", views.meshcentral),
    path("poweraction/", views.power_action),
    path("uninstall/", views.uninstall),
    path("editagent/", views.edit_agent),
    path("<pk>/geteventlog/<logtype>/<days>/", views.get_event_log),
    path("getagentversions/", views.get_agent_versions),
    path("updateagents/", views.update_agents),
    path("<pk>/getprocs/", views.get_processes),
    path("<pk>/<pid>/killproc/", views.kill_proc),
    path("rebootlater/", views.reboot_later),
    path("installagent/", views.install_agent),
    path("<int:pk>/ping/", views.ping),
    path("recover/", views.recover),
    path("runscript/", views.run_script),
]
| 40.407407 | 68 | 0.679193 |
7956ce13f6c42fdead393fba1f1ebf5f7af8a49e | 7,674 | py | Python | heat/tests/openstack/designate/test_zone.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | [
"Apache-2.0"
] | 1 | 2020-06-18T01:05:29.000Z | 2020-06-18T01:05:29.000Z | heat/tests/openstack/designate/test_zone.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | [
"Apache-2.0"
] | null | null | null | heat/tests/openstack/designate/test_zone.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from heat.common import exception
from heat.engine.resources.openstack.designate import zone
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
sample_template = {
'heat_template_version': '2015-04-30',
'resources': {
'test_resource': {
'type': 'OS::Designate::Zone',
'properties': {
'name': 'test-zone.com',
'description': 'Test zone',
'ttl': 3600,
'email': 'abc@test-zone.com',
'type': 'PRIMARY',
'masters': []
}
}
}
}
class DesignateZoneTest(common.HeatTestCase):
def setUp(self):
super(DesignateZoneTest, self).setUp()
self.ctx = utils.dummy_context()
self.stack = stack.Stack(
self.ctx, 'test_stack',
template.Template(sample_template)
)
self.test_resource = self.stack['test_resource']
# Mock client plugin
self.test_client_plugin = mock.MagicMock()
self.test_resource.client_plugin = mock.MagicMock(
return_value=self.test_client_plugin)
# Mock client
self.test_client = mock.MagicMock()
self.test_resource.client = mock.MagicMock(
return_value=self.test_client)
def _get_mock_resource(self):
value = {}
value['id'] = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
value['serial'] = '1434596972'
return value
def test_resource_handle_create(self):
mock_zone_create = self.test_client.zones.create
mock_resource = self._get_mock_resource()
mock_zone_create.return_value = mock_resource
# validate the properties
self.assertEqual(
'test-zone.com',
self.test_resource.properties.get(zone.DesignateZone.NAME))
self.assertEqual(
'Test zone',
self.test_resource.properties.get(
zone.DesignateZone.DESCRIPTION))
self.assertEqual(
3600,
self.test_resource.properties.get(zone.DesignateZone.TTL))
self.assertEqual(
'abc@test-zone.com',
self.test_resource.properties.get(zone.DesignateZone.EMAIL))
self.assertEqual(
'PRIMARY',
self.test_resource.properties.get(zone.DesignateZone.TYPE))
self.assertEqual(
[],
self.test_resource.properties.get(zone.DesignateZone.MASTERS))
self.test_resource.data_set = mock.Mock()
self.test_resource.handle_create()
args = dict(
name='test-zone.com',
description='Test zone',
ttl=3600,
email='abc@test-zone.com',
type_='PRIMARY'
)
mock_zone_create.assert_called_once_with(**args)
# validate physical resource id
self.assertEqual(mock_resource['id'], self.test_resource.resource_id)
def _mock_check_status_active(self):
self.test_client.zones.get.side_effect = [
{'status': 'PENDING'},
{'status': 'ACTIVE'},
{'status': 'ERROR'}
]
def test_check_create_complete(self):
self._mock_check_status_active()
self.assertFalse(self.test_resource.check_create_complete())
self.assertTrue(self.test_resource.check_create_complete())
ex = self.assertRaises(exception.ResourceInError,
self.test_resource.check_create_complete)
self.assertIn('Error in zone',
ex.message)
def _test_resource_validate(self, type_, prp):
def _side_effect(key):
if key == prp:
return None
if key == zone.DesignateZone.TYPE:
return type_
else:
return sample_template['resources'][
'test_resource']['properties'][key]
self.test_resource.properties = mock.MagicMock()
self.test_resource.properties.get.side_effect = _side_effect
self.test_resource.properties.__getitem__.side_effect = _side_effect
ex = self.assertRaises(exception.StackValidationFailed,
self.test_resource.validate)
self.assertEqual('Property %s is required for zone type %s' %
(prp, type_),
ex.message)
def test_resource_validate_primary(self):
self._test_resource_validate(zone.DesignateZone.PRIMARY,
zone.DesignateZone.EMAIL)
def test_resource_validate_secondary(self):
self._test_resource_validate(zone.DesignateZone.SECONDARY,
zone.DesignateZone.MASTERS)
def test_resource_handle_update(self):
mock_zone_update = self.test_client.zones.update
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {zone.DesignateZone.EMAIL: 'xyz@test-zone.com',
zone.DesignateZone.DESCRIPTION: 'updated description',
zone.DesignateZone.TTL: 4200}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
args = dict(
description='updated description',
ttl=4200,
email='xyz@test-zone.com'
)
mock_zone_update.assert_called_once_with(
self.test_resource.resource_id,
args)
def test_check_update_complete(self):
self._mock_check_status_active()
self.assertFalse(self.test_resource.check_update_complete())
self.assertTrue(self.test_resource.check_update_complete())
ex = self.assertRaises(exception.ResourceInError,
self.test_resource.check_update_complete)
self.assertIn('Error in zone',
ex.message)
def test_check_delete_complete(self):
self._mock_check_status_active()
self.assertFalse(self.test_resource.check_delete_complete(
self._get_mock_resource()['id']
))
self.assertTrue(self.test_resource.check_delete_complete(
self._get_mock_resource()['id']
))
ex = self.assertRaises(exception.ResourceInError,
self.test_resource.check_delete_complete,
self._get_mock_resource()['id'])
self.assertIn('Error in zone',
ex.message)
def test_resolve_attributes(self):
mock_zone = self._get_mock_resource()
self.test_resource.resource_id = mock_zone['id']
self.test_client.zones.get.return_value = mock_zone
self.assertEqual(
mock_zone['serial'],
self.test_resource._resolve_attribute(zone.DesignateZone.SERIAL))
self.test_client.zones.get.assert_called_once_with(
self.test_resource.resource_id
)
| 35.859813 | 79 | 0.611936 |
7956ceb80ac510918935561c08bba696729a7b78 | 644 | py | Python | Python_3/Easy/Find_a_string.py | NagiLam/HackerRank | f83e00f2af72f978d248f7955e71f3885932a58f | [
"MIT"
] | null | null | null | Python_3/Easy/Find_a_string.py | NagiLam/HackerRank | f83e00f2af72f978d248f7955e71f3885932a58f | [
"MIT"
] | null | null | null | Python_3/Easy/Find_a_string.py | NagiLam/HackerRank | f83e00f2af72f978d248f7955e71f3885932a58f | [
"MIT"
] | null | null | null | """ Problem: Find a string || Task:
In this challenge, the user enters a string and a substring.
You have to print the number of times that the substring occurs in the given string.
String traversal will take place from left to right, not from right to left.
NOTE: String letters are case-sensitive.
Created on Tue Oct 16 12:43:49 2018
@author: nagiAI
"""
def count_substring(string, sub_string):
    """Count occurrences of sub_string in string, overlapping matches included."""
    return sum(
        1 for start in range(len(string))
        if string.startswith(sub_string, start)
    )
#Uncomment for testing
#print(count_substring("ABCDCDC", "CDC"))
| 28 | 85 | 0.689441 |
7956cfacd84a7e14983e5de7aae0877a55318bdf | 14,864 | py | Python | package/views.py | krekotenko/iclub-python | 3452d5d5ea5103ede5ec370ac075955bac2f2bdf | [
"MIT"
] | null | null | null | package/views.py | krekotenko/iclub-python | 3452d5d5ea5103ede5ec370ac075955bac2f2bdf | [
"MIT"
] | null | null | null | package/views.py | krekotenko/iclub-python | 3452d5d5ea5103ede5ec370ac075955bac2f2bdf | [
"MIT"
] | null | null | null | import importlib
import json
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Q, Count
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render, redirect
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_POST
from grid.models import Grid
from homepage.models import Dpotw, Gotw
from package.forms import PackageForm, PackageExampleForm, DocumentationForm
from package.models import Category, Package, PackageExample
from package.repos import get_all_repos
from .utils import quote_plus
def repo_data_for_js():
    """Serialize every registered repo handler to a JSON string for templates."""
    return json.dumps([handler.serialize() for handler in get_all_repos()])
def get_form_class(form_name):
    """Resolve a dotted path such as ``pkg.module.FormClass`` to the object itself."""
    module_path, _, attr_name = form_name.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)
@login_required
def add_package(request, template_name="package/package_form.html"):
    """Create a new Package: GET renders the form, a valid POST saves it and
    redirects to the package detail page."""
    # Gate on the per-user permission flag.
    if not request.user.profile.can_add_package:
        return HttpResponseForbidden("permission denied")

    new_package = Package()
    form = PackageForm(request.POST or None, instance=new_package)

    if form.is_valid():
        new_package = form.save()
        # Record authorship, then persist those fields with a second save.
        new_package.created_by = request.user
        new_package.last_modified_by = request.user
        new_package.save()
        #new_package.fetch_metadata()
        #new_package.fetch_commits()
        return HttpResponseRedirect(reverse("package", kwargs={"slug": new_package.slug}))

    return render(request, template_name, {
        "form": form,
        "repo_data": repo_data_for_js(),
        "action": "add",
        })
@login_required
def edit_package(request, slug, template_name="package/package_form.html"):
    """Edit an existing Package identified by *slug*.

    Requires the profile-level ``can_edit_package`` permission.  Records the
    requesting user as last modifier and redirects to the package detail page
    on success.
    """
    if not request.user.profile.can_edit_package:
        return HttpResponseForbidden("permission denied")
    package = get_object_or_404(Package, slug=slug)
    form = PackageForm(request.POST or None, instance=package)
    if form.is_valid():
        modified_package = form.save()
        modified_package.last_modified_by = request.user
        modified_package.save()
        messages.add_message(request, messages.INFO, 'Package updated successfully')
        return HttpResponseRedirect(reverse("package", kwargs={"slug": modified_package.slug}))
    return render(request, template_name, {
        "form": form,
        "package": package,
        "repo_data": repo_data_for_js(),
        "action": "edit",
    })
@login_required
def update_package(request, slug):
    """Re-fetch repo metadata and commit history for a package, then redirect.

    Any logged-in user may trigger a refresh; the view redirects back to the
    package detail page when done.
    """
    package = get_object_or_404(Package, slug=slug)
    package.fetch_metadata()
    package.fetch_commits()
    package.last_fetched = timezone.now()
    # Persist the refreshed timestamp: the original code assigned
    # last_fetched but never saved, so the update was silently discarded
    # (compare post_data, which does call save()).
    package.save()
    messages.add_message(request, messages.INFO, 'Package updated successfully')
    return HttpResponseRedirect(reverse("package", kwargs={"slug": package.slug}))
@login_required
def add_example(request, slug, template_name="package/add_example.html"):
    """Attach a usage example (title + URL) to a package.

    NOTE(review): the form is used only for validation; the saved object is
    built manually from raw ``request.POST`` values rather than via
    ``form.save()`` — presumably to inject ``package`` and ``created_by``;
    verify the form's fields match the keys read here.
    """
    package = get_object_or_404(Package, slug=slug)
    new_package_example = PackageExample()
    form = PackageExampleForm(request.POST or None, instance=new_package_example)
    if form.is_valid():
        package_example = PackageExample(package=package,
                                         title=request.POST["title"],
                                         url=request.POST["url"],
                                         created_by=request.user)
        package_example.save()
        return HttpResponseRedirect(reverse("package", kwargs={"slug": package_example.package.slug}))
    return render(request, template_name, {
        "form": form,
        "package": package
    })
@login_required
def edit_example(request, slug, id, template_name="package/edit_example.html"):
    """Edit an existing PackageExample and redirect to its package page.

    NOTE(review): unlike delete_example, there is no ownership/staff check
    here — any logged-in user can edit any example, and *slug* is not used
    to scope the lookup.  Confirm whether that is intended.
    """
    package_example = get_object_or_404(PackageExample, id=id)
    form = PackageExampleForm(request.POST or None, instance=package_example)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse("package", kwargs={"slug": package_example.package.slug}))
    return render(request, template_name, {
        "form": form,
        "package_example": package_example
    })
@login_required
def delete_example(request, slug, id, template_name="package/delete_example.html"):
    """Render the confirmation page for deleting a package example.

    Only the example's creator or a staff member may proceed; examples with
    no recorded creator are deletable by staff only.
    """
    package_example = get_object_or_404(PackageExample, id=id, package__slug__iexact=slug)
    # Guard created_by before dereferencing .id: the original code crashed
    # with AttributeError when a staff member viewed an example whose
    # created_by was NULL (the None check and the .id access were split
    # across two separate if statements).
    is_creator = (package_example.created_by is not None
                  and package_example.created_by.id == request.user.id)
    if not (is_creator or request.user.is_staff):
        raise PermissionDenied
    return render(request, template_name, {
        "package_example": package_example
    })
@login_required
@require_POST
def confirm_delete_example(request, slug, id):
    """Delete a package example after POST confirmation.

    Permitted for the example's creator or staff; examples with no recorded
    creator are deletable by staff only.
    """
    package_example = get_object_or_404(PackageExample, id=id, package__slug__iexact=slug)
    # The original dereferenced created_by.id with no None check, so a NULL
    # creator raised AttributeError for every user.  Treat a missing creator
    # as "not the requesting user" and fall through to the staff check.
    is_creator = (package_example.created_by is not None
                  and package_example.created_by.id == request.user.id)
    if not (is_creator or request.user.is_staff):
        raise PermissionDenied
    package_example.delete()
    messages.add_message(request, messages.INFO, 'Package example successfully deleted.')
    return HttpResponseRedirect(reverse("package", kwargs={"slug": slug}))
def package_autocomplete(request):
    """
    Provides Package matching based on matches of the beginning
    """
    # Returns one matching title per line as plain text; an empty ?q yields
    # an empty response body.
    titles = []
    q = request.GET.get("q", "")
    if q:
        titles = (x.title for x in Package.objects.filter(title__istartswith=q))
    response = HttpResponse("\n".join(titles))
    # Flag consumed by the django-logging middleware to keep debug output
    # out of this plain-text response — TODO confirm middleware still used.
    setattr(response, "djangologging.suppress_output", True)
    return response
def category(request, slug, template_name="package/category.html"):
    """List a category's packages, most-watched repos first (ties by title)."""
    category = get_object_or_404(Category, slug=slug)
    packages = category.package_set.select_related().annotate(usage_count=Count("usage")).order_by("-repo_watchers", "title")
    return render(request, template_name, {
        "category": category,
        "packages": packages,
    }
    )
def ajax_package_list(request, template_name="package/ajax_package_list.html"):
    """AJAX prefix search over package titles, optionally grid-aware.

    Matches ?q against the raw title and against the title with the
    configured search prefix joined by '-', ' ' and '_' (e.g. "django-q").
    When ?grid is given, packages already on that grid are pushed to the
    end of the (max 20) results and reported separately to the template.
    """
    q = request.GET.get("q", "")
    packages = []
    if q:
        _dash = "%s-%s" % (settings.PACKAGINATOR_SEARCH_PREFIX, q)
        _space = "%s %s" % (settings.PACKAGINATOR_SEARCH_PREFIX, q)
        _underscore = '%s_%s' % (settings.PACKAGINATOR_SEARCH_PREFIX, q)
        packages = Package.objects.filter(
            Q(title__istartswith=q) |
            Q(title__istartswith=_dash) |
            Q(title__istartswith=_space) |
            Q(title__istartswith=_underscore)
        )
    packages_already_added_list = []
    grid_slug = request.GET.get("grid", "")
    if packages and grid_slug:
        grids = Grid.objects.filter(slug=grid_slug)
        if grids:
            grid = grids[0]
            packages_already_added_list = [x['slug'] for x in grid.packages.all().values('slug')]
            new_packages = tuple(packages.exclude(slug__in=packages_already_added_list))[:20]
            number_of_packages = len(new_packages)
            if number_of_packages < 20:
                try:
                    # Slicing a queryset can raise AssertionError if it was
                    # already sliced/evaluated — treated as "no extras".
                    old_packages = packages.filter(slug__in=packages_already_added_list)[:20 - number_of_packages]
                except AssertionError:
                    old_packages = None
                if old_packages:
                    old_packages = tuple(old_packages)
                    packages = new_packages + old_packages
                # NOTE(review): when old_packages is empty, `packages` keeps
                # the unsliced queryset instead of new_packages — looks like
                # an oversight; confirm intended behavior before changing.
            else:
                packages = new_packages
    return render(request, template_name, {
        "packages": packages,
        'packages_already_added_list': packages_already_added_list,
    }
    )
@login_required
def usage(request, slug, action):
    """Toggle the current user's "I use this" flag on a package.

    *action* == 'add' adds usage (no-op if already present); any other value
    removes it when present.  On the not-using branch the sentinel action is
    'lower' (no-op) — presumably a front-end convention; TODO confirm.
    Returns JSON {success, change} for AJAX requests, otherwise redirects.
    """
    success = False
    package = get_object_or_404(Package, slug=slug)
    # Update the current user's usage of the given package as specified by the
    # request.
    if package.usage.filter(username=request.user.username):
        if action.lower() == 'add':
            # The user is already using the package
            success = True
            change = 0
        else:
            # If the action was not add and the user has already specified
            # they are a use the package then remove their usage.
            package.usage.remove(request.user)
            success = True
            change = -1
    else:
        if action.lower() == 'lower':
            # The user is not using the package
            success = True
            change = 0
        else:
            # If the action was not lower and the user is not already using
            # the package then add their usage.
            package.usage.add(request.user)
            success = True
            change = 1
    # Invalidate the cache of this users's used_packages_list.
    if change == 1 or change == -1:
        cache_key = "sitewide_used_packages_list_%s" % request.user.pk
        cache.delete(cache_key)
        package.grid_clear_detail_template_cache()
    # Return an ajax-appropriate response if necessary
    if request.is_ajax():
        response = {'success': success}
        if success:
            response['change'] = change
        return HttpResponse(json.dumps(response))
    # Intelligently determine the URL to redirect the user to based on the
    # available information.
    next = request.GET.get('next') or request.META.get("HTTP_REFERER") or reverse("package", kwargs={"slug": package.slug})
    return HttpResponseRedirect(next)
def python3_list(request, template_name="package/python3_list.html"):
    """List packages with at least one Python-3-supporting version.

    Also strips a stale ?sort parameter (left over from cached older pages)
    when it does not name a known Package field.
    """
    packages = Package.objects.filter(version__supports_python3=True).distinct()
    packages = packages.order_by("-pypi_downloads", "-repo_watchers", "title")
    # Whitelist of sortable field names; anything else in ?sort is dropped.
    values = "category, category_id, commit, commit_list, created, created_by, created_by_id, documentation_url, dpotw, grid, gridpackage, id, last_fetched, last_modified_by, last_modified_by_id, modified, packageexample, participants, pypi_downloads, pypi_url, repo_description, repo_forks, repo_url, repo_watchers, slug, title, usage, version".split(',')
    values = [x.strip() for x in values]
    if request.GET.get('sort') and request.GET.get('sort') not in values:
        # Some people have cached older versions of this view
        request.GET = request.GET.copy()
        del request.GET['sort']
    return render(
        request,
        template_name, {
            "packages": packages
        }
    )
def package_list(request, template_name="package/package_list.html"):
    """Home-style listing: top 9 packages per category plus the current
    Django-package-of-the-week (dpotw) and grid-of-the-week (gotw)."""
    categories = []
    for category in Category.objects.annotate(package_count=Count("package")):
        # Flatten each category into a plain dict for the template.
        element = {
            "title": category.title,
            "description": category.description,
            "count": category.package_count,
            "slug": category.slug,
            "title_plural": category.title_plural,
            "show_pypi": category.show_pypi,
            "packages": category.package_set.annotate(usage_count=Count("usage")).order_by("-pypi_downloads", "-repo_watchers", "title")[:9]
        }
        categories.append(element)
    return render(
        request,
        template_name, {
            "categories": categories,
            "dpotw": Dpotw.objects.get_current(),
            "gotw": Gotw.objects.get_current(),
        }
    )
def package_detail(request, slug, template_name="package/package.html"):
    """Package detail page with staleness warnings.

    Warnings combine: no recent development, an "ancient" last PyPI release,
    and no PyPI release at all (pypi_ancient is None — presumably the
    sentinel for "never released"; TODO confirm against the model).
    Packages in the 'projects' category are exempt from PyPI warnings.
    """
    package = get_object_or_404(Package, slug=slug)
    no_development = package.no_development
    try:
        if package.category == Category.objects.get(slug='projects'):
            # projects get a bye because they are a website
            pypi_ancient = False
            pypi_no_release = False
        else:
            pypi_ancient = package.pypi_ancient
            pypi_no_release = package.pypi_ancient is None
        warnings = no_development or pypi_ancient or pypi_no_release
    except Category.DoesNotExist:
        # No 'projects' category configured: fall back to dev-activity only.
        pypi_ancient = False
        pypi_no_release = False
        warnings = no_development
    if request.GET.get("message"):
        messages.add_message(request, messages.INFO, request.GET.get("message"))
    return render(request, template_name,
        dict(
            package=package,
            pypi_ancient=pypi_ancient,
            no_development=no_development,
            pypi_no_release=pypi_no_release,
            warnings=warnings,
            latest_version=package.last_released(),
            repo=package.repo
        )
    )
def int_or_0(value):
    """Coerce *value* to int, returning 0 when conversion is impossible.

    Catches TypeError as well as ValueError so that ``None`` (a missing
    ``request.POST.get(...)`` parameter — see the commented-out caller in
    post_data) falls back to 0 instead of raising.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0
@login_required
def post_data(request, slug):
    """Refresh a package's PyPI data, repo metadata and commits, then redirect.

    The commented block below is the remains of an earlier POST-driven update
    path; kept for reference by the original author.
    """
    # if request.method == "POST":
    # try:
    # # TODO Do this this with a form, really. Duh!
    # package.repo_watchers = int_or_0(request.POST.get("repo_watchers"))
    # package.repo_forks = int_or_0(request.POST.get("repo_forks"))
    # package.repo_description = request.POST.get("repo_description")
    # package.participants = request.POST.get('contributors')
    # package.fetch_commits() # also saves
    # except Exception as e:
    # print e
    package = get_object_or_404(Package, slug=slug)
    package.fetch_pypi_data()
    package.repo.fetch_metadata(package)
    package.repo.fetch_commits(package)
    package.last_fetched = timezone.now()
    package.save()
    return HttpResponseRedirect(reverse("package", kwargs={"slug": package.slug}))
@login_required
def edit_documentation(request, slug, template_name="package/documentation_form.html"):
    """Edit a package's documentation URL via DocumentationForm and redirect
    to the package on success."""
    package = get_object_or_404(Package, slug=slug)
    form = DocumentationForm(request.POST or None, instance=package)
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.INFO, 'Package documentation updated successfully')
        return redirect(package)
    return render(request, template_name,
        dict(
            package=package,
            form=form
        )
    )
@csrf_exempt
def github_webhook(request):
    """GitHub push-webhook receiver: re-fetches commits for the pushed repo.

    Handles GitHub's "ping" payload (contains "zen") and the legacy service
    test URL specially; all responses are 200 so GitHub does not retry.
    """
    if request.method == "POST":
        data = json.loads(request.POST['payload'])
        # Webhook Test
        if "zen" in data:
            return HttpResponse(data['hook_id'])
        repo_url = data['repository']['url']
        # service test
        if repo_url == "http://github.com/mojombo/grit":
            return HttpResponse("Service Test pass")
        package = get_object_or_404(Package, repo_url=repo_url)
        package.repo.fetch_commits(package)
        package.last_fetched = timezone.now()
        package.save()
    return HttpResponse()
| 34.974118 | 356 | 0.665164 |
7956cfb185ca62f58915bfc2da13443944572a73 | 5,106 | py | Python | src/hu_entity/legacy_entity_finder.py | hutomadotAI/entity_recogniser | 6390c65190b826fb98bc3505f41f3f0ce6837ef9 | [
"Apache-2.0"
] | 4 | 2019-06-01T12:28:28.000Z | 2020-09-29T21:01:17.000Z | src/hu_entity/legacy_entity_finder.py | hutomadotAI/entity_recogniser | 6390c65190b826fb98bc3505f41f3f0ce6837ef9 | [
"Apache-2.0"
] | null | null | null | src/hu_entity/legacy_entity_finder.py | hutomadotAI/entity_recogniser | 6390c65190b826fb98bc3505f41f3f0ce6837ef9 | [
"Apache-2.0"
] | 1 | 2020-08-19T19:28:55.000Z | 2020-08-19T19:28:55.000Z | import marisa_trie
import string
import re
import sre_constants
import logging
from collections import defaultdict
def _get_logger():
logger = logging.getLogger('hu_entity.entity_finder')
return logger
class LegacyEntityFinder:
    """Matches configured entities against words of a conversation string.

    Two entity kinds are supported: value ("list") entities, stored as one
    marisa trie of normalized values per entity name, and regex entities,
    stored as compiled patterns.  Value matching is case-insensitive and
    strips surrounding punctuation; for value entities only the longest
    matching phrase per entity is reported.
    """
    def __init__(self):
        self.logger = _get_logger()
        # entity name -> marisa_trie.Trie of lower-cased, punctuation-stripped values
        self.entity_tries = {}
        self.punctuation = string.punctuation
        # entity name -> compiled regex pattern
        self.regex_entities = {}
    def setup_entity_values(self, entities):
        """Build one trie per entity from a {name: [values]} mapping."""
        self.logger.info("Setting up value entities'%s'", entities)
        for entity_name, entity_values in entities.items():
            # This can be done more concisely, expanded for clarity
            updated_words = []
            for word in entity_values:
                lower = word.lower()
                temp_word = lower.strip(self.punctuation)
                updated_words.append(temp_word)
            self.entity_tries[entity_name] = marisa_trie.Trie(updated_words)
    def setup_regex_entities(self, regex_entities):
        """Compile a {name: pattern} mapping; return False if any pattern fails.

        Note: a single bad pattern aborts the loop, so later entries are not
        compiled.  Patterns compiled before the failure remain registered.
        """
        self.logger.info("Setting up regex entities '%s'", regex_entities)
        regex_good = True
        try:
            for entity_name, entity_regex in regex_entities.items():
                self.logger.debug("Compiling regex entity '%s'", entity_regex)
                compiled = re.compile(entity_regex)
                self.regex_entities[entity_name] = compiled
        except re.error:
            self.logger.warn("Caught re.error in setup_regex_entities")
            regex_good = False
        except sre_constants.error:
            self.logger.warn("Caught sre_constants.error in setup_regex_entities")
            regex_good = False
        except Exception:
            self.logger.warn("Caught Exception in setup_regex_entities")
            regex_good = False
        return regex_good
    def find_entity_values(self, conversation):
        """Return a defaultdict mapping matched text -> [entity names].

        Value entities are matched against every contiguous word phrase of
        the conversation; regex entities only against single whitespace-split
        words.  For each value entity only its longest candidate survives.
        """
        # Construct the list of values to match against
        words_to_find_list = self.split_message(conversation)
        words_to_find_regex = conversation.split()
        candidate_matches_list = defaultdict(list)
        candidate_matches_regex = defaultdict(list)
        entity_matches = defaultdict(list)
        words_matched = set()
        # Examine value type entities
        candidate_matches_list, words_matched = \
            self.match_value_entities(candidate_matches_list, words_matched, words_to_find_list)
        # Examine regex type entities
        candidate_matches_regex, words_matched =\
            self.match_regex_entities(candidate_matches_regex, words_matched, words_to_find_regex)
        # Ensure only the longest match is counted for list type entities
        for entity_name, candidate_words in candidate_matches_list.items():
            longest_word = candidate_words[0]
            for candidate_word in candidate_words:
                if len(candidate_word) > len(longest_word):
                    longest_word = candidate_word
            entity_matches[longest_word].append(entity_name)
        # Include regex type entities
        for entity_name, candidate_words in candidate_matches_regex.items():
            for candidate_word in candidate_words:
                entity_matches[candidate_word].append(entity_name)
        return entity_matches
    def match_regex_entities(self, candidate_matches_regex, words_matched, words_to_find_regex):
        """Full-match each punctuation-stripped word against every regex entity.

        Mutates and returns the passed-in accumulators; words already in
        words_matched (shared with the value pass) are skipped.
        """
        for word in words_to_find_regex:
            compare_word_original = word.strip(self.punctuation)
            if word not in words_matched:
                match_found = False
                for entity_name, compiled in self.regex_entities.items():
                    if compiled.fullmatch(compare_word_original):
                        candidate_matches_regex[entity_name].append(compare_word_original)
                        match_found = True
                if match_found:
                    words_matched.add(compare_word_original)
        return candidate_matches_regex, words_matched
    def match_value_entities(self, candidate_matches_list, words_matched, words_to_find_list):
        """Look up each phrase (lower-cased, punctuation-stripped) in every trie.

        Mutates and returns the passed-in accumulators.  Note the recorded
        candidate keeps original casing but not original punctuation.
        """
        for word in words_to_find_list:
            compare_word_original = word.strip(self.punctuation)
            compare_word = compare_word_original.lower()
            if word not in words_matched:
                match_found = False
                for entity_name, entity_trie in self.entity_tries.items():
                    if compare_word in entity_trie:
                        candidate_matches_list[entity_name].append(compare_word_original)
                        match_found = True
                if match_found:
                    words_matched.add(compare_word_original)
        return candidate_matches_list, words_matched
    def split_message(self, conversation):
        """Return every contiguous word phrase of *conversation*.

        E.g. "a b c" -> ["a", "a b", "a b c", "b", "b c", "c"] (O(n^2) phrases).
        """
        conversation_words = conversation.split()
        search_words = []
        # Iterate over all possible word permutations
        for start in range(0, len(conversation_words)):
            for end in range(start, len(conversation_words)):
                search_words.append(" ".join(conversation_words[start:end + 1]))
        return search_words
| 41.512195 | 98 | 0.658441 |
7956cfc414c9309df13b9fe9238026e3a7faf0de | 9,326 | py | Python | docs/conf.py | sam-mi/django-template-theming | a152cfdf59c77b344463309c15b5d490f6e94e7c | [
"MIT"
] | 5 | 2015-11-01T03:25:11.000Z | 2018-10-29T10:09:55.000Z | docs/conf.py | sam-mi/django-template-theming | a152cfdf59c77b344463309c15b5d490f6e94e7c | [
"MIT"
] | 1 | 2018-02-09T21:00:29.000Z | 2018-02-09T21:00:29.000Z | docs/conf.py | sam-mi/django-template-theming | a152cfdf59c77b344463309c15b5d490f6e94e7c | [
"MIT"
] | 1 | 2018-02-02T05:16:41.000Z | 2018-02-02T05:16:41.000Z | # -*- coding: utf-8 -*-
#
# Django Template Theming documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 31 09:17:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Template Theming'
copyright = u'2015, w.Tayyeb'
author = u'w.Tayyeb'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoTemplateThemingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoTemplateTheming.tex', u'Django Template Theming Documentation',
u'w.Tayyeb', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangotemplatetheming', u'Django Template Theming Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoTemplateTheming', u'Django Template Theming Documentation',
author, 'DjangoTemplateTheming', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.494774 | 85 | 0.720566 |
7956d13dd2cb89339c8d23cde1dedec8509d38e7 | 1,836 | py | Python | vendor-local/lib/python/django_browserid/admin.py | jgmize/nucleus | 1fd9d069103b7be00f5815ae1f3eac6ba0e3530d | [
"BSD-3-Clause"
] | null | null | null | vendor-local/lib/python/django_browserid/admin.py | jgmize/nucleus | 1fd9d069103b7be00f5815ae1f3eac6ba0e3530d | [
"BSD-3-Clause"
] | null | null | null | vendor-local/lib/python/django_browserid/admin.py | jgmize/nucleus | 1fd9d069103b7be00f5815ae1f3eac6ba0e3530d | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.admin.sites import AdminSite, site as admin_site
class BrowserIDAdminSite(AdminSite):
    """Support logging in to the admin interface via BrowserID."""
    # Template that renders the BrowserID button on the admin login page.
    login_template = 'browserid/admin_login.html'
    #: If True, include the normal username and password form as well as
    #: the BrowserID button.
    include_password_form = False
    def copy_registry(self, site=None):
        """
        Copy the ModelAdmins that have been registered on another site
        so that they are available on this site as well.
        Useful when used with :func:`django.contrib.admin.autocomplete`,
        allowing you to copy the ModelAdmin entries registered with the
        default site, such as the User ModelAdmin. For example, in
        ``urls.py``:
        .. code-block:: python
            from django.contrib import admin
            admin.autodiscover()
            from django_browserid.admin import site as admin_site
            admin_site.copy_registry()
            # To include: url(r'^admin/', include(admin_site.urls))
        :param site:
            Site to copy registry entries from. Defaults to
            :data:`django.contrib.admin.site`.
        """
        if not site:
            site = admin_site
        # Re-register each model with the same ModelAdmin *class* (a fresh
        # instance is created by register(), not shared with the source site).
        for model, modeladmin in site._registry.items():
            self.register(model, modeladmin.__class__)
    def login(self, request, extra_context=None):
        # Add extra context variables to login view.
        extra_context = extra_context or {}
        extra_context['include_password_form'] = self.include_password_form
        return super(BrowserIDAdminSite, self).login(request, extra_context)
#: Global object for the common case. You can import this in
#: ``admin.py`` and ``urls.py`` instead of
#: :data:`django.contrib.admin.site`.
site = BrowserIDAdminSite()
| 34.641509 | 76 | 0.666667 |
7956d19dd3933058b2319c21a0dba8d43a354180 | 3,259 | py | Python | slider-agent/src/main/python/jinja2/setup.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | 60 | 2015-01-05T10:51:11.000Z | 2018-12-15T03:48:09.000Z | slider-agent/src/main/python/jinja2/setup.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | 1 | 2021-11-04T13:31:30.000Z | 2021-11-04T13:31:30.000Z | ambari-common/src/main/python/jinja2/setup.py | isabella232/incubator-ambari | bf747346312170834c6beb89a60c8624b47aa288 | [
"Apache-2.0"
] | 87 | 2015-01-14T05:14:15.000Z | 2018-12-25T14:14:56.000Z | # -*- coding: utf-8 -*-
"""
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller but don't try to make the life
for the template designer too hard by giving him too few functionality.
For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/
"""
import sys
from setuptools import setup, Extension, Feature
debugsupport = Feature(
'optional C debug support',
standard=False,
ext_modules = [
Extension('jinja2._debugsupport', ['jinja2/_debugsupport.c']),
],
)
# tell distribute to use 2to3 with our own fixers.
extra = {}
if sys.version_info >= (3, 0):
extra.update(
use_2to3=True,
use_2to3_fixers=['custom_fixers']
)
# ignore the old '--with-speedups' flag
try:
speedups_pos = sys.argv.index('--with-speedups')
except ValueError:
pass
else:
sys.argv[speedups_pos] = '--with-debugsupport'
sys.stderr.write('*' * 74 + '\n')
sys.stderr.write('WARNING:\n')
sys.stderr.write(' the --with-speedups flag is deprecated, assuming '
'--with-debugsupport\n')
sys.stderr.write(' For the actual speedups install the MarkupSafe '
'package.\n')
sys.stderr.write('*' * 74 + '\n')
setup(
name='Jinja2',
version='2.5.5',
url='http://jinja.pocoo.org/',
license='BSD',
author='Armin Ronacher',
author_email='armin.ronacher@active-4.com',
description='A small but fast and easy to use stand-alone template '
'engine written in pure python.',
long_description=__doc__,
# jinja is egg safe. But we hate eggs
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
],
packages=['jinja2', 'jinja2.testsuite', 'jinja2.testsuite.res',
'jinja2._markupsafe'],
extras_require={'i18n': ['Babel>=0.8']},
test_suite='jinja2.testsuite.suite',
include_package_data=True,
entry_points="""
[babel.extractors]
jinja2 = jinja2.ext:babel_extract[i18n]
""",
features={'debugsupport': debugsupport},
**extra
)
| 29.36036 | 75 | 0.631175 |
7956d1acf23dab83293e67238c186140727ea1c5 | 1,838 | py | Python | config.env.py | jabbate19/conditional | 20013459438d80bca06a844da250e2543c84186e | [
"MIT"
] | 9 | 2016-08-21T19:27:24.000Z | 2019-09-12T06:56:49.000Z | config.env.py | jabbate19/conditional | 20013459438d80bca06a844da250e2543c84186e | [
"MIT"
] | 237 | 2016-08-21T18:08:58.000Z | 2022-03-28T17:01:36.000Z | config.env.py | jabbate19/conditional | 20013459438d80bca06a844da250e2543c84186e | [
"MIT"
] | 31 | 2016-08-22T23:46:48.000Z | 2022-03-26T22:37:39.000Z | import json
import secrets
import os
from os import environ as env

# Fetch the version number from the npm package file
with open(os.path.join(os.getcwd(), "package.json")) as package_file:
    VERSION = json.load(package_file)["version"]

# Flask config
# NOTE: env values are always strings; numeric defaults (PORT, DUES_PER_SEMESTER)
# are only ints when the variable is unset — consumers must handle both.
DEBUG = env.get("CONDITIONAL_DEBUG", "false").lower() == "true"
HOST_NAME = env.get("CONDITIONAL_HOST_NAME", "conditional.csh.rit.edu")
SERVER_NAME = env.get('CONDITIONAL_SERVER_NAME', 'conditional.csh.rit.edu')
APP_NAME = "conditional"
IP = env.get("CONDITIONAL_IP", "0.0.0.0")
PORT = env.get("CONDITIONAL_PORT", 6969)

# DB Info
SQLALCHEMY_DATABASE_URI = env.get("SQLALCHEMY_DATABASE_URI", "")
SQLALCHEMY_TRACK_MODIFICATIONS = False

# LDAP config
LDAP_RO = env.get("CONDITIONAL_LDAP_RO", "true").lower() == "true"
LDAP_BIND_DN = env.get("CONDITIONAL_LDAP_BIND_DN", "cn=conditional,ou=Apps,dc=csh,dc=rit,dc=edu")
LDAP_BIND_PW = env.get("CONDITIONAL_LDAP_BIND_PW", "")

# Sentry config
# Not required for local development, but if you set it, make sure the
# SENTRY_ENV is 'local-development'
SENTRY_DSN = env.get("CONDITIONAL_SENTRY_DSN", "")
SENTRY_CONFIG = {
    'dsn': env.get("CONDITIONAL_SENTRY_LEGACY_DSN", ""),
    'release': VERSION,
}
SENTRY_ENV = env.get("CONDITIONAL_SENTRY_ENV", "local-development")

# OIDC Config
OIDC_ISSUER = env.get("CONDITIONAL_OIDC_ISSUER", "https://sso.csh.rit.edu/auth/realms/csh")
OIDC_CLIENT_CONFIG = {
    'client_id': env.get("CONDITIONAL_OIDC_CLIENT_ID", "conditional"),
    'client_secret': env.get("CONDITIONAL_OIDC_CLIENT_SECRET", ""),
    'post_logout_redirect_uris': [env.get("CONDITIONAL_OIDC_CLIENT_LOGOUT", "http://0.0.0.0:6969/logout")]
}

# Openshift secret: session-signing key. token_hex() already returns a str,
# so the former ``''.join(...)`` wrapper was a no-op and has been dropped.
# The random fallback is per-process, so sessions do not survive restarts
# unless CONDITIONAL_SECRET_KEY is set explicitly.
SECRET_KEY = env.get("CONDITIONAL_SECRET_KEY", default=secrets.token_hex(16))

# General config
DUES_PER_SEMESTER = env.get("CONDITIONAL_DUES_PER_SEMESTER", 80)
| 36.76 | 106 | 0.745375 |
7956d2a3feea39e7d4a0a93d52cab9e568c42f2c | 2,392 | py | Python | ambari-server/src/main/resources/stacks/ADH/1.4/services/RANGER_KMS/package/scripts/kms_server.py | kuhella/ambari | 9396c17b0305665d31d7a4f4525be857958b5d4c | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/ADH/1.4/services/RANGER_KMS/package/scripts/kms_server.py | kuhella/ambari | 9396c17b0305665d31d7a4f4525be857958b5d4c | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/ADH/1.4/services/RANGER_KMS/package/scripts/kms_server.py | kuhella/ambari | 9396c17b0305665d31d7a4f4525be857958b5d4c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script import Script
from resource_management.core.resources.system import Execute
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.functions.format import format
from resource_management.core.logger import Logger
from resource_management.core import shell
from kms import kms, setup_kms_db, setup_java_patch, enable_kms_plugin
from kms_service import kms_service
import upgrade
class KmsServer(Script):
    """Ambari lifecycle script for the Ranger KMS server component.

    Each public method is invoked by the Ambari agent; ``env`` carries the
    execution environment and ``params`` (imported lazily inside each method)
    holds the cluster configuration rendered for this host.
    """

    def get_stack_to_component(self):
        # Maps the stack name to the component directory used for versioned installs.
        return {"ADH": "ranger-kms"}

    def install(self, env):
        """Install packages, set up the KMS database and apply the Java patch."""
        self.install_packages(env)
        import params
        env.set_params(params)
        setup_kms_db()
        self.configure(env)
        setup_java_patch()

    def stop(self, env, upgrade_type=None):
        """Stop the KMS daemon."""
        import params
        env.set_params(params)
        kms_service(action = 'stop')

    def start(self, env, upgrade_type=None):
        """Reconfigure, enable the Ranger plugin, then start the KMS daemon."""
        import params
        env.set_params(params)
        self.configure(env)
        enable_kms_plugin()
        kms_service(action = 'start')

    def status(self, env):
        """Raise ComponentIsNotRunning unless a proc_rangerkms process exists."""
        # Process check via ps/grep; `grep -v grep` excludes the grep itself.
        cmd = 'ps -ef | grep proc_rangerkms | grep -v grep'
        code, output = shell.call(cmd, timeout=20)
        if code != 0:
            Logger.debug('KMS process not running')
            raise ComponentIsNotRunning()
        pass

    def configure(self, env):
        """Write out KMS configuration files."""
        import params
        env.set_params(params)
        kms()

    def pre_upgrade_restart(self, env, upgrade_type=None):
        """Prepare the component before a rolling/express upgrade restart."""
        import params
        env.set_params(params)
        upgrade.prestart(env, "ranger-kms")
        setup_kms_db()
        kms()
        setup_java_patch()

if __name__ == "__main__":
    KmsServer().execute()
| 28.819277 | 72 | 0.748328 |
7956d3e4b56cfbd0c4d903480fa16b8b485a9522 | 5,697 | py | Python | pysnmp-with-texts/DNOS-KEYING-PRIVATE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/DNOS-KEYING-PRIVATE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/DNOS-KEYING-PRIVATE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module DNOS-KEYING-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DNOS-KEYING-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:51:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
dnOS, = mibBuilder.importSymbols("DELL-REF-MIB", "dnOS")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Gauge32, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Counter32, IpAddress, Counter64, Bits, TimeTicks, NotificationType, MibIdentifier, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Counter32", "IpAddress", "Counter64", "Bits", "TimeTicks", "NotificationType", "MibIdentifier", "Unsigned32")
DisplayString, RowPointer, TextualConvention, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowPointer", "TextualConvention", "RowStatus")
fastPathKeyingPrivate = ModuleIdentity((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24))
fastPathKeyingPrivate.setRevisions(('2011-01-26 00:00', '2007-05-23 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: fastPathKeyingPrivate.setRevisionsDescriptions(('Add new Postal address change.', 'Dell branding related changes.',))
if mibBuilder.loadTexts: fastPathKeyingPrivate.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathKeyingPrivate.setOrganization('Dell, Inc.')
if mibBuilder.loadTexts: fastPathKeyingPrivate.setContactInfo('')
if mibBuilder.loadTexts: fastPathKeyingPrivate.setDescription('The Broadcom Private MIB for DNOS Keying Utility')
agentFeatureKeyingGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1))
agentFeatureKeyingEnableKey = MibScalar((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentFeatureKeyingEnableKey.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingEnableKey.setDescription('Hexadecimal Key-string entered to enable an advance functionality.')
agentFeatureKeyingDisableKey = MibScalar((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentFeatureKeyingDisableKey.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingDisableKey.setDescription('Hexadecimal Key-string entered to disable an advance functionality.')
agentFeatureKeyingTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 3), )
if mibBuilder.loadTexts: agentFeatureKeyingTable.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingTable.setDescription('A table for license key and associated functionality. ')
agentFeatureKeyingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 3, 1), ).setIndexNames((0, "DNOS-KEYING-PRIVATE-MIB", "agentFeatureKeyingIndex"))
if mibBuilder.loadTexts: agentFeatureKeyingEntry.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingEntry.setDescription('Represents entry for key table')
agentFeatureKeyingIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 3, 1, 1), Unsigned32())
if mibBuilder.loadTexts: agentFeatureKeyingIndex.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingIndex.setDescription('A value corresponding to a keyable feature.When this table is walked, only values associated with keyable features are returned.This value must be equivalent to valid value of agentFeatureKeyingIndex.')
agentFeatureKeyingName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentFeatureKeyingName.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingName.setDescription('The abbreviated name of this component.This is also equivalent to agentFeatureKeyingName')
agentFeatureKeyingStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 24, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentFeatureKeyingStatus.setStatus('current')
if mibBuilder.loadTexts: agentFeatureKeyingStatus.setDescription('Returns a value of (1) if the feature is enabled for management, (2) if disabled.')
mibBuilder.exportSymbols("DNOS-KEYING-PRIVATE-MIB", agentFeatureKeyingDisableKey=agentFeatureKeyingDisableKey, fastPathKeyingPrivate=fastPathKeyingPrivate, agentFeatureKeyingEntry=agentFeatureKeyingEntry, PYSNMP_MODULE_ID=fastPathKeyingPrivate, agentFeatureKeyingTable=agentFeatureKeyingTable, agentFeatureKeyingIndex=agentFeatureKeyingIndex, agentFeatureKeyingName=agentFeatureKeyingName, agentFeatureKeyingEnableKey=agentFeatureKeyingEnableKey, agentFeatureKeyingStatus=agentFeatureKeyingStatus, agentFeatureKeyingGroup=agentFeatureKeyingGroup)
| 121.212766 | 546 | 0.791118 |
7956d43349a05d201e5acdf042b310b9a7f3e88f | 50 | py | Python | languages/python/cyclicimports/badcase2/bar.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-07-20T10:15:49.000Z | 2018-07-20T10:16:54.000Z | languages/python/cyclicimports/badcase2/bar.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-06-26T09:12:44.000Z | 2019-12-18T00:09:14.000Z | languages/python/cyclicimports/badcase2/bar.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | null | null | null | from foo import abc
# NOTE(review): this module is part of a deliberate circular-import
# demonstration (repo path: cyclicimports/badcase2) — `foo` imports `bar`
# back, so whether `abc` is bound here depends on import order.
print(abc)
xyz = 5
print(xyz)
| 10 | 19 | 0.72 |
7956d4944e42856b84a4cdc6ae178a42926b4303 | 11,627 | py | Python | gui.py | Hack-a-thingie/terminalvelociraptor | ec96034d59a422b81ed9188ef861b79f6d0b2132 | [
"MIT"
] | null | null | null | gui.py | Hack-a-thingie/terminalvelociraptor | ec96034d59a422b81ed9188ef861b79f6d0b2132 | [
"MIT"
] | null | null | null | gui.py | Hack-a-thingie/terminalvelociraptor | ec96034d59a422b81ed9188ef861b79f6d0b2132 | [
"MIT"
] | null | null | null | #------------gui.py------------------------------------------------------------#
# gui for academic terminal card game
#
# Purpose: This file has been created during the hack-a-thingie 2016 event and
# will be using curses to create the terminal ui for playing the game.
#
# Notes: importing game runs the game, fix this later.
#------------------------------------------------------------------------------#
#!/usr/local/bin/python2
# coding: latin-1
import curses
from game import *
from player import *
from deck import *
from cardpile import *
from staff import *
from actions import *
from reactions import *
#def print(str):
# disp_message(str)
# displays a message, front and center!
def disp_message(message):
    """Show `message` centered on the middle divider row of the module-global
    window `bg`, then block until the player presses space; on space, the
    divider line is redrawn to erase the message."""
    while True:
        # row 11 is the horizontal divider; 59 is the left panel width
        bg.addstr(11, int(59 / 2 - len(message) / 2), message)
        bgcomm = bg.getch()
        if bgcomm == ord(" "):
            # restore the divider glyphs that the message overwrote
            for i in range(1,59):
                bg.addch(11,i, curses.ACS_HLINE)
            bg.refresh()
            break
# This function places BP, BS and all AP
def point_placement(BP, BS, phys, bio, chem, math, screen):
    """Draw the point summary line "BP: x/y  P: ..  B: ..  C: ..  M: .." on
    row 0 of `screen` and refresh it.

    Args:
        BP, BS: budget points and budget-point cap, shown as "BP: BP/BS".
        phys, bio, chem, math: the four action-point pools.
        screen: a curses window to draw into.

    Fix vs. original: the whole body was guarded by
    ``if curses.has_colors() == True``, so on monochrome terminals *nothing*
    was drawn (including the colorless BP and M entries). Now the text is
    always drawn; color attributes are applied only when colors are available.
    """
    use_color = curses.has_colors()
    coloffset = 5  # gap between adjacent entries
    colindex = 5   # starting column
    # (text, color-pair) pairs in left-to-right order; None = no color attribute
    entries = [
        ("BP: " + str(BP) + "/" + str(BS), None),
        ("P: " + str(phys), 4),
        ("B: " + str(bio), 2),
        ("C: " + str(chem), 1),
        ("M: " + str(math), None),
    ]
    for text, pair in entries:
        if use_color and pair is not None:
            screen.addstr(0, colindex, text, curses.color_pair(pair))
        else:
            screen.addstr(0, colindex, text)
        colindex += len(text) + coloffset
    screen.refresh()
# Chooses action
def choose_action(act, act_list, hand, hand_h, hand_w, bg, index):
    """Run the PLAY/RETURN sub-menu for the card at position `index`.

    'w'/'s' toggle between RETURN (row 0) and PLAY (row 1) in the `act`
    window; space confirms. RETURN simply redraws the hand; PLAY calls the
    module-global `play_card` on `realplayer.hand.cards[index]` first.

    NOTE(review): relies on module globals `handlist`, `realplayer` and
    `play_card`; indentation below is reconstructed from a
    whitespace-stripped dump — verify against the original file.
    """
    actidx = 1  # 0 = RETURN highlighted, 1 = PLAY highlighted
    while True:
        playcomm = act.getch()
        if playcomm == ord("w"):
            # highlight RETURN, un-highlight PLAY
            act.addstr(0, 6, act_list[-1], curses.A_REVERSE | curses.A_BOLD)
            act.addstr(1, 6, act_list[2], curses.A_BOLD)
            act.move(0,6)
            actidx = 0
            act.refresh()
        if playcomm == ord("s"):
            # highlight PLAY, un-highlight RETURN
            act.addstr(0, 6, act_list[-1], curses.A_BOLD)
            act.addstr(1, 6, act_list[2], curses.A_REVERSE | curses.A_BOLD)
            act.move(1,6)
            actidx = 1
            act.refresh()
        if playcomm == ord(" "):
            # confirm: blank the RETURN row and restore the SELECT label
            act.addstr(0,6,"       ")
            act.addstr(1,6,act_list[1], curses.A_BOLD)
            if actidx == 0:
                # RETURN: clear the hand window and redraw the card list
                for i in range(hand_w - 1):
                    for j in range(hand_h - 1):
                        hand.addch(j, i, " ")
                for i in range(len(handlist)):
                    if i == 0:
                        hand.addstr(i, 1, handlist[i], curses.A_REVERSE)
                    else:
                        hand.addstr(i, 1, handlist[i])
                hand.move(0,1)
            elif actidx == 1:
                # PLAY: apply the selected card, then redraw the hand
                play_card(realplayer.hand.cards[index])
                #disp_message("yo")
                for i in range(hand_w - 1):
                    for j in range(hand_h - 1):
                        hand.addch(j, i, " ")
                for i in range(len(handlist)):
                    if i == 0:
                        hand.addstr(i, 1, handlist[i], curses.A_REVERSE)
                    else:
                        hand.addstr(i, 1, handlist[i])
                hand.move(0,1)
            act.refresh()
            break
# These are additional functions that must be implemented that I do not have
#def scroll_up();
#def scroll_down();
# creating dummy description:
description = "This card is awesome. it does a bunch of things and is super duper awesome and such."
# Setting up small-scale game data to work with
#staff = ["Bob", "Alice", "Quantum Crypt", '123456789012345678901234567890']
handlist = [realplayer.unit.cards[i].name
for i in range(len(realplayer.unit.cards))]
# Set up standard screen
bg = curses.initscr()
curses.start_color()
curses.use_default_colors()
for i in range(0, curses.COLORS):
curses.init_pair(i, i, -1)
# Inhibits typing to screen
curses.noecho()
# No need for enter to use commands
curses.cbreak()
# Setting up keypad usage
bg.keypad(1)
bg_x = 0
bg_y = 0
bg_h = 24
bg_w = 80
bg = curses.newwin(bg_h, bg_w, bg_y, bg_x)
#------------------------------------------------------------------------------#
# Background and Hand Selection
#------------------------------------------------------------------------------#
# Note: handlist may change form
# Defining corners
bg.addch(0, 0, curses.ACS_ULCORNER)
bg.addch(23, 0, curses.ACS_LLCORNER)
bg.addch(0, curses.COLS - 2, curses.ACS_URCORNER)
bg.addch(23, curses.COLS - 2, curses.ACS_LRCORNER)
# Defining borders
center_hline = 11
vline = 59
# We need to change the names of the staff to fit into our box:
for i in range(len(handlist)):
if len(handlist[i]) < 76 - vline:
for j in range(76 - vline - len(handlist[i])):
handlist[i] = handlist[i] + ' '
elif len(handlist[i]) > 76 - vline:
handlist[i] = handlist[i][0:76 - vline]
for i in range(1,curses.COLS-2):
bg.addch(0, i, curses.ACS_HLINE)
bg.addch(23, i, curses.ACS_HLINE)
# Drawing hlines
if i < vline:
bg.addch(center_hline, i, curses.ACS_HLINE)
bg.addch(21, i, curses.ACS_HLINE)
bg.addch(19, i, curses.ACS_HLINE)
bg.addch(2, i, curses.ACS_HLINE)
bg.addch(4, i, curses.ACS_HLINE)
if i > vline:
bg.addch(20, i, curses.ACS_HLINE)
# Drawing vlines
if i <= 22:
bg.addch(i, 0, curses.ACS_VLINE)
bg.addch(i, curses.COLS - 2, curses.ACS_VLINE)
bg.addch(i, vline, curses.ACS_VLINE)
# Adding corners Top and Bottom
if i == vline:
bg.addch(0, vline, curses.ACS_TTEE)
bg.addch(23, vline, curses.ACS_BTEE)
# Adding corners Left and Right
if i == center_hline or i == 21 or i == 19 or i == 2 or i == 4:
bg.addch(i, 0, curses.ACS_LTEE)
bg.addch(i, vline, curses.ACS_RTEE)
if i == 20:
bg.addch(i, vline, curses.ACS_LTEE)
bg.addch(i, curses.COLS - 2, curses.ACS_RTEE)
bg.refresh()
# Add in a window for hand cards
hand_x = vline + 1
hand_y = 1
hand_h = 23 - 5
hand_w = 18
hand = curses.newwin(hand_h, hand_w, hand_y, hand_x)
hand.move(0, 0)
index = 0
for i in range(len(handlist)):
if i == 0:
hand.addstr(i, 1, handlist[i], curses.A_REVERSE)
else:
hand.addstr(i, 1, handlist[i])
#------------------------------------------------------------------------------#
# Passive Windows
#------------------------------------------------------------------------------#
# First, the window with "ACTION" in it
act_x = vline + 1
act_y = 23 - 2
act_h = 2
act_w = 18
act = curses.newwin(act_h, act_w, act_y, act_x)
# creating an act_string list
act_list = ['ACTION ', 'SELECT ', ' PLAY ', 'DISCARD', 'RETURN']
act_str = act_list[1]
act.addstr(1, 6, act_str, curses.A_BOLD)
act.refresh()
# Now to implement the Impact Factor bars, 50 cols total
# These will be implemented as highlighted bars, no biggie
# Opponent first, because we are gentlemen
oppif_x = 1
oppif_y = 1
oppif_h = 1
oppif_w = vline - 1
oppif = curses.newwin(oppif_h, oppif_w, oppif_y, oppif_x)
oppif.addstr(0,1,"IF: [")
oppif.addch(0, vline - 3 , "]")
# Now to fill the IF bar with stuff (fake IF percent)
oppif_percent = computer.impact / 20
oppif_col = int(oppif_percent * 50)
for i in range(50):
if i < oppif_col:
oppif.addch(0,i+6," ", curses.A_STANDOUT)
oppif.refresh()
# Now for my IF
meif_x = 1
meif_y = 22
meif_h = 1
meif_w = vline - 1
meif = curses.newwin(meif_h, meif_w, meif_y, meif_x)
meif.addstr(0,1,"IF: [")
meif.addch(0, vline - 3 , "]")
# Now to fill the IF bar with stuff (fake IF percent)
meif_percent = realplayer.impact
meif_col = int(meif_percent * 50)
for i in range(50):
if i < meif_col:
meif.addch(0,i+6," ", curses.A_STANDOUT)
meif.refresh()
# Now to update the points
opppt_x = 1
opppt_y = 3
opppt_h = 1
opppt_w = vline - 1
opppt = curses.newwin(opppt_h, opppt_w, opppt_y, opppt_x)
# Setting up Budget, Physics, Bio, Chem, and math with colors
oppbp = computer.points.BP
oppbs = computer.bs
oppphys = computer.points.APP
oppbio = computer.points.APB
oppchem = computer.points.APB
oppmath = computer.points.APM
point_placement(oppbp, oppbs, oppphys, oppbio, oppchem, oppmath, opppt)
mept_x = 1
mept_y = 20
mept_h = 1
mept_w = vline - 1
mept = curses.newwin(mept_h, mept_w, mept_y, mept_x)
# Setting up Budget, Physics, Bio, Chem, and math with colors
mebp = realplayer.points.BP
mebs = realplayer.bs
mephys = realplayer.points.APP
mebio = realplayer.points.APB
mechem = realplayer.points.APC
memath = realplayer.points.APM
point_placement(mebp, mebs, mephys, mebio, mechem, memath, mept)
#------------------------------------------------------------------------------#
# Hand Cursor Movement
#------------------------------------------------------------------------------#
# Main hand-cursor loop: 'w'/'s' move the highlight, space opens the card
# description + action sub-menu, 'q' quits.
# NOTE(review): indentation reconstructed from a whitespace-stripped dump;
# the nesting of the word-wrap branch below should be verified.
index = 0
prev = 0
hand.move(0,1)
while True:
    command = hand.getch()
    if command == ord("w"):
        # move highlight up, clamped at the top of the list
        prev = index
        index = index - 1
        if index < 0:
            index = 0
        hand.addstr(index, 1, handlist[index], curses.A_REVERSE)
        hand.addstr(prev, 1, handlist[prev])
        hand.move(index,1)
        hand.refresh()
    if command == ord("s"):
        # move highlight down, clamped at the bottom of the list
        prev = index
        index = index + 1
        if index >= len(handlist):
            index = len(handlist) - 1
        hand.addstr(index, 1, handlist[index], curses.A_REVERSE)
        hand.addstr(prev, 1, handlist[prev])
        hand.move(index,1)
        hand.refresh()
    if command == ord(" "):
        if act_str == act_list[1]:
            # prints description
            act.addstr(0, 6, act_list[-1], curses.A_BOLD)
            act.addstr(1, 6, act_list[2], curses.A_BOLD | curses.A_REVERSE)
            act.refresh()
            hand.addstr(0, 1, handlist[index], curses.A_REVERSE)
            for i in range(77-vline):
                hand.addch(1,i, curses.ACS_HLINE)
            # naive word-wrap of the card description into the hand panel
            words = description.split()
            desc_idx = 0
            line = ""
            for word in words:
                if len(line) + len(word) + 1 < 76-vline:
                    line = line + word + " "
                    if word == words[-1]:
                        hand.addstr(2 + desc_idx, 1, line)
                else:
                    # pad the full line with spaces, flush it, start a new one
                    for i in range (76-vline-len(line)):
                        line = line + " "
                    hand.addstr(2 + desc_idx, 1, line)
                    line = word + " "
                    desc_idx = desc_idx + 1
                    if word == words[-1]:
                        hand.addstr(2 + desc_idx, 1, line)
            hand.refresh()
            act_str = act_list[1]
            act.move(1,6)
            # hand control to the PLAY/RETURN sub-menu
            choose_action(act, act_list, hand, hand_h, hand_w, bg, index)
            index = 0
            prev = 0
        else:
            act.addstr(1, 6, act_str, curses.A_BOLD)
            act.refresh()
            act_str = act_list[1]
    if command == ord("q"):
        break
bg.getch()
# Terminating curses: restore the terminal to its normal state
curses.nocbreak()
bg.keypad(0)
curses.echo()
curses.endwin()
| 28.358537 | 100 | 0.546229 |
7956d4f4495cc6b9511b7c06cc4407e6e37a7032 | 1,936 | py | Python | opentuner-master/examples/tsp/tsp.py | SapientsUOM/JATT | cf932938b1ca67fdda78bdd651e458c3193c21ad | [
"MIT"
] | 1 | 2018-08-10T07:26:07.000Z | 2018-08-10T07:26:07.000Z | opentuner-master/examples/tsp/tsp.py | SapientsUOM/JATT | cf932938b1ca67fdda78bdd651e458c3193c21ad | [
"MIT"
] | null | null | null | opentuner-master/examples/tsp/tsp.py | SapientsUOM/JATT | cf932938b1ca67fdda78bdd651e458c3193c21ad | [
"MIT"
] | 5 | 2017-01-18T00:41:28.000Z | 2021-07-29T02:25:12.000Z | #!/usr/bin/env python
#
# This is a simple testcase purely for testing the autotuner
#
# http://en.wikipedia.org/wiki/Rosenbrock_function
#
# Also supports some other test functions taken from:
# http://en.wikipedia.org/wiki/Test_functions_for_optimization
#
import adddeps #fix sys.path
import argparse
import logging
import opentuner
from opentuner.search.manipulator import (ConfigurationManipulator,
PermutationParameter)
from opentuner.search.objective import MinimizeTime
from opentuner.measurement import MeasurementInterface
from opentuner.measurement.inputmanager import FixedInputManager
from opentuner.tuningrunmain import TuningRunMain
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
parser.add_argument('data', help='distance matrix file')
class TSP(MeasurementInterface):
    """OpenTuner measurement interface for the travelling-salesman problem.

    The search space is a permutation of city indices; the objective is the
    total path length read from the distance-matrix file given on the
    command line.
    """

    def __init__(self, args):
        super(TSP, self).__init__(args)
        # Fix: the file handle was previously leaked (bare open().readlines());
        # a context manager guarantees it is closed.
        with open(args.data) as matrix_file:
            # each line holds one row of whitespace-separated integer distances
            self.distance = [[int(d) for d in line.split()]
                             for line in matrix_file]

    def run(self, desired_result, input, limit):
        """Evaluate one candidate permutation and report its path length."""
        cfg = desired_result.configuration.data
        p = cfg[0]  # cheating: should use manipulator function
        t = self.eval_path(p)
        return opentuner.resultsdb.models.Result(time=t)

    def eval_path(self, p):
        """Given permutation of cities as a list of indices,
        return total path length"""
        # sum distances over consecutive city pairs
        return sum(self.distance[a][b] for a, b in zip(p, p[1:]))

    def manipulator(self):
        """Expose a single permutation parameter over all cities."""
        manipulator = ConfigurationManipulator()
        manipulator.add_parameter(
            PermutationParameter(0, range(len(self.distance))))
        return manipulator

    def solution(self):
        """Path length of a known reference tour (for sanity checking)."""
        p = [1, 13, 2, 15, 9, 5, 7, 3, 12, 14, 10, 8, 6, 4, 11]
        return self.eval_path(p)

if __name__ == '__main__':
    args = parser.parse_args()
    TSP.main(args)
| 30.730159 | 85 | 0.682335 |
7956d566358486626deae2e04f66f2f75c4fa4c3 | 31,707 | py | Python | pde/trackers/trackers.py | noah-ziethen/py-pde | b88e86439290c31284a4ac665a8e9ff34d08b494 | [
"MIT"
] | null | null | null | pde/trackers/trackers.py | noah-ziethen/py-pde | b88e86439290c31284a4ac665a8e9ff34d08b494 | [
"MIT"
] | null | null | null | pde/trackers/trackers.py | noah-ziethen/py-pde | b88e86439290c31284a4ac665a8e9ff34d08b494 | [
"MIT"
] | null | null | null | """
Module defining classes for tracking results from simulations.
The trackers defined in this module are:
.. autosummary::
:nosignatures:
CallbackTracker
ProgressTracker
PrintTracker
PlotTracker
DataTracker
SteadyStateTracker
RuntimeTracker
ConsistencyTracker
MaterialConservationTracker
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import inspect
import sys
import os.path
import time
from datetime import timedelta
from pathlib import Path
from typing import (Callable, Optional, Union, IO, List, Any, # @UnusedImport
Dict, TYPE_CHECKING)
import numpy as np
from .base import TrackerBase, InfoDict, FinishedSimulation, Real
from .intervals import IntervalData, RealtimeIntervals
from ..fields.base import FieldBase
from ..fields import FieldCollection
from ..tools.parse_duration import parse_duration
from ..tools.misc import get_progress_bar_class
from ..tools.docstrings import fill_in_docstring
if TYPE_CHECKING:
import pandas # @UnusedImport
from ..visualization.movies import Movie # @UnusedImport
class CallbackTracker(TrackerBase):
    """ Tracker that calls a function periodically """

    @fill_in_docstring
    def __init__(self, func: Callable,
                 interval: IntervalData = 1):
        """
        Args:
            func: The function to call periodically. The function signature
                should be `(state)` or `(state, time)`, where `state` contains
                the current state as an instance of
                :class:`~pde.fields.FieldBase` and `time` is a
                float value indicating the current time. Note that only a view
                of the state is supplied, implying that a copy needs to be made
                if the data should be stored.
            interval:
                {ARG_TRACKER_INTERVAL}
        """
        super().__init__(interval=interval)
        self._callback = func
        # inspect the callback once so `handle` knows how to invoke it
        self._num_args = len(inspect.signature(func).parameters)
        if self._num_args not in (1, 2):
            raise ValueError('`func` must be a function accepting one or two '
                             f'arguments, not {self._num_args}')

    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        # pass the time along only when the callback declared a second argument
        call_args = (field,) if self._num_args == 1 else (field, t)
        self._callback(*call_args)
class ProgressTracker(TrackerBase):
    """ Tracker that shows the progress of the simulation """

    name = 'progress'

    @fill_in_docstring
    def __init__(self, interval: IntervalData = None,
                 ndigits: int = 5, leave: bool = True):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
                The default value `None` updates the progress bar approximately
                every (real) second.
            ndigits (int): The number of digits after the decimal point that are
                shown maximally.
            leave (bool): Whether to leave the progress bar after the simulation
                has finished (default: True)
        """
        if interval is None:
            # print every second by default
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
        self.ndigits = ndigits
        self.leave = leave

    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """ initialize the tracker with information about the simulation

        Args:
            field (:class:`~pde.fields.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation

        Returns:
            float: The first time the tracker needs to handle data
        """
        result = super().initialize(field, info)

        # get solver information; `t_start`/`t_end` set the bar's range
        controller_info = {} if info is None else info.get('controller', {})

        # initialize the progress bar (tqdm or a fallback, chosen at runtime)
        pb_cls = get_progress_bar_class()
        self.progress_bar = pb_cls(total=controller_info.get('t_end'),
                                   initial=controller_info.get('t_start', 0),
                                   leave=self.leave)
        self.progress_bar.set_description('Initializing')
        return result

    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        # show an update; clamp to the total so the bar never exceeds 100%
        if self.progress_bar.total:
            t_new = min(t, self.progress_bar.total)
        else:
            t_new = t
        self.progress_bar.n = round(t_new, self.ndigits)
        self.progress_bar.set_description('')

    def finalize(self, info: InfoDict = None) -> None:
        """ finalize the tracker, supplying additional information

        Args:
            info (dict):
                Extra information from the simulation
        """
        super().finalize(info)
        self.progress_bar.set_description('')

        # limit progress bar to 100%
        controller_info = {} if info is None else info.get('controller', {})
        t_final = controller_info.get('t_final', -np.inf)
        t_end = controller_info.get('t_end', -np.inf)
        if t_final >= t_end and self.progress_bar.total:
            self.progress_bar.n = self.progress_bar.total
            self.progress_bar.refresh()

        if (controller_info.get('successful', False) and self.leave and
                hasattr(self.progress_bar, 'sp')):
            # show progress bar in green if simulation was successful. We
            # need to overwrite the default behavior (and disable the
            # progress bar) since reaching steady state means the simulation
            # was successful even though it did not reach t_final
            # NOTE: `sp` is a tqdm-internal hook, hence the hasattr/TypeError
            # guards for tqdm variants that do not support `bar_style`
            try:
                self.progress_bar.sp(bar_style='success')
            except TypeError:
                self.progress_bar.close()
            else:
                self.disable = True
        else:
            self.progress_bar.close()

    def __del__(self):
        # close the bar on garbage collection unless it was already disabled
        if hasattr(self, 'progress_bar') and not self.progress_bar.disable:
            self.progress_bar.close()
class PrintTracker(TrackerBase):
    """ Tracker that prints data to a stream (default: stdout) """

    name = 'print'

    @fill_in_docstring
    def __init__(self, interval: IntervalData = 1,
                 stream: IO[str] = sys.stdout):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
            stream:
                The stream used for printing
        """
        super().__init__(interval=interval)
        self.stream = stream

    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        # write one line per call: time plus mean±std of the field data
        values = field.data
        line = f"t={t:g}, c={values.mean():.3g}±{values.std():.3g}\n"
        self.stream.write(line)
        self.stream.flush()  # make the output visible immediately
class PlotTracker(TrackerBase):
""" Tracker that plots the state, either on screen or to a file
This tracker can be used to create movies from simulations or to simply
update a single image file on the fly (i.e. to monitor simulations running
on a cluster). The default values of this tracker are chosen with regular
output to a file in mind.
"""
    @fill_in_docstring
    def __init__(self, interval: IntervalData = 1, *,
                 title: Union[str, Callable] = 'Time: {time:g}',
                 output_file: Optional[str] = None,
                 movie: Union[str, Path, 'Movie'] = None,
                 show: bool = None,
                 plot_args: Dict[str, Any] = None,
                 **kwargs):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
            title (str or callable):
                Title text of the figure. If this is a string, it is shown with
                a potential placeholder named `time` being replaced by the
                current simulation time. Conversely, if `title` is a function,
                it is called with the current state and the time as arguments.
                This function is expected to return a string.
            output_file (str, optional):
                Specifies a single image file, which is updated periodically, so
                that the progress can be monitored (e.g. on a compute cluster)
            movie (str or :class:`~pde.visualization.movies.Movie`):
                Create a movie. If a filename is given, all frames are written
                to this file in the format deduced from the extension after the
                simulation ran. If a :class:`~pde.visualization.movies.Movie` is
                supplied, frames are appended to the instance.
            show (bool, optional):
                Determines whether the plot is shown while the simulation is
                running. If `False`, the files are created in the background.
                This option can slow down a simulation severely. For the default
                value of `None`, the images are only shown if neither
                `output_file` nor `movie` is set.
            plot_args (dict):
                Extra arguments supplied to the plot call. For example, this can
                be used to specify axes ranges when a single panel is shown. For
                instance, the value `{'ax_style': {'ylim': (0, 1)}}` enforces
                the y-axis to lie between 0 and 1.

        Note:
            If an instance of :class:`~pde.visualization.movies.Movie` is given
            as the `movie` argument, it can happen that the movie is not written
            to the file when the simulation ends. This is because, the movie
            could still be extended by appending frames. To write the movie to
            a file call its :meth:`~pde.visualization.movies.Movie.save` method.
            Beside adding frames before and after the simulation, an explicit
            movie object can also be used to adjust the output, e.g., by setting
            the `dpi` argument or the `frame_rate`.
        """
        from ..visualization.movies import Movie  # @Reimport

        # handle deprecated parameters: `movie_file` is folded into `movie`,
        # `output_folder` is dropped entirely; both emit DeprecationWarning
        if 'movie_file' in kwargs:
            # Deprecated this method on 2020-06-04
            import warnings
            warnings.warn("Argument `movie_file` is deprecated. Use `movie` "
                          "instead.", DeprecationWarning)
            if movie is None:
                movie = kwargs.pop('movie_file')
        if 'output_folder' in kwargs:
            # Deprecated this method on 2020-06-04
            import warnings  # @Reimport
            warnings.warn("Argument `output_folder` is deprecated. Use an "
                          "instance of pde.visualization.movies.Movie with "
                          "`image_folder` and supply it to the `movie` "
                          "argument instead.", DeprecationWarning)
            del kwargs['output_folder']
        # anything left in kwargs at this point is an unknown argument
        if kwargs:
            raise ValueError(f"Unused kwargs: {kwargs}")

        # initialize the tracker
        super().__init__(interval=interval)
        self.title = title
        self.output_file = output_file
        self.plot_args = {} if plot_args is None else plot_args.copy()

        # make sure the plot is only create and not shown since the context
        # handles showing the plot itself
        self.plot_args['action'] = 'create'

        # initialize the movie class; `_save_movie` records whether *this*
        # tracker owns the movie object and must save it on finalize
        if movie is None:
            self.movie: Optional[Movie] = None
            self._save_movie = False
        elif isinstance(movie, Movie):
            self.movie = movie
            self._save_movie = False
        elif isinstance(movie, (str, Path)):
            self.movie = Movie(filename=str(movie))
            self._save_movie = True
        else:
            raise TypeError('Unknown type of argument `movie`: '
                            f'{movie.__class__.__name__}')

        # determine whether to show the images interactively: default to
        # showing only when no file/movie output was requested
        if show is None:
            self.show = not (self._save_movie or self.output_file)
        else:
            self.show = show
    def initialize(self, state: FieldBase, info: InfoDict = None) -> float:
        """Initialize the tracker with information about the simulation.

        Chooses the plot-refresh strategy used later by :meth:`handle`: if
        the plotting context supports reusing figures, the state's plot
        protocol (its `update_method` / `mpl_class` attributes) selects
        between updating the plot data in-place, redrawing the axes, or
        redrawing the whole figure; otherwise every frame is replotted.

        Args:
            state (:class:`~pde.fields.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation

        Returns:
            float: The first time the tracker needs to handle data

        Raises:
            RuntimeError: If the state does not follow the plot protocol of
                `pde.tools.plotting` or reports an unknown `mpl_class`.
        """
        # initialize the plotting context (imported lazily so plotting
        # machinery is only loaded when this tracker is actually used)
        from ..tools.plotting import get_plotting_context
        self._context = get_plotting_context(title='Initializing...',
                                             show=self.show)
        # create the initial plot; the returned reference is kept so the
        # plot can later be updated in-place if supported
        with self._context:
            self._plot_reference = state.plot(**self.plot_args)
        if self._context.supports_update:
            # the context supports reusing figures
            if hasattr(state.plot, 'update_method'):
                # the plotting method supports updating the plot
                if state.plot.update_method is None:  # type: ignore
                    # no dedicated update method => clear and redraw the
                    # matplotlib object named by `mpl_class`
                    if state.plot.mpl_class == 'axes':  # type: ignore
                        self._update_method = 'update_ax'
                    elif state.plot.mpl_class == 'figure':  # type: ignore
                        self._update_method = 'update_fig'
                    else:
                        mpl_class = state.plot.mpl_class  # type: ignore
                        raise RuntimeError('Unknown mpl_class on plot method: '
                                           f'{mpl_class}')
                else:
                    self._update_method = 'update_data'
            else:
                raise RuntimeError('PlotTracker does not work since the state '
                                   f'of type {state.__class__.__name__} does '
                                   'not use the plot protocol of '
                                   '`pde.tools.plotting`.')
        else:
            # the context cannot reuse figures => replot from scratch
            self._update_method = 'replot'
        self._logger.info(f'Update method: "{self._update_method}"')
        return super().initialize(state, info=info)
    def handle(self, state: FieldBase, t: float) -> None:
        """Visualize the current state at time `t`.

        Updates the window title, refreshes the plot using the strategy
        chosen in :meth:`initialize`, and then writes the frame to
        `self.output_file` and/or appends it to `self.movie` if configured.

        Args:
            state (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        # a callable title receives (state, time); a string title may
        # contain a `{time}` placeholder
        if callable(self.title):
            self._context.title = str(self.title(state, t))
        else:
            self._context.title = self.title.format(time=t)
        # update the plot in the correct plotting context
        with self._context:
            if self._update_method == 'update_data':
                # the state supports updating the plot data in-place
                update_func = getattr(state,
                                      state.plot.update_method)  # type: ignore
                update_func(self._plot_reference)
            elif self._update_method == 'update_fig':
                # clear the whole figure and redraw it
                fig = self._context.fig
                fig.clf()  # type: ignore
                state.plot(fig=fig, **self.plot_args)
            elif self._update_method == 'update_ax':
                # clear the figure and redraw into a fresh single axes
                fig = self._context.fig
                fig.clf()  # type: ignore
                ax = fig.add_subplot(1, 1, 1)  # type: ignore
                state.plot(ax=ax, **self.plot_args)
            elif self._update_method == 'replot':
                state.plot(**self.plot_args)
            else:
                raise RuntimeError('Unknown update method '
                                   f'`{self._update_method}`')
        # persist the current frame if requested
        if self.output_file and self._context.fig is not None:
            self._context.fig.savefig(self.output_file)
        if self.movie:
            self.movie.add_figure(self._context.fig)
    def finalize(self, info: InfoDict = None) -> None:
        """Finalize the tracker, supplying additional information.

        Writes and closes the movie if this tracker created it from a
        filename (a user-supplied Movie instance is left open so it can be
        extended), and closes the plotting context unless the plot is shown
        interactively.

        Args:
            info (dict):
                Extra information from the simulation
        """
        super().finalize(info)
        if self._save_movie:
            # write out movie file
            self.movie.save()  # type: ignore
            # end recording the movie (e.g. delete temporary files)
            self.movie._end()  # type: ignore
        if not self.show:
            self._context.close()
class PlotInteractiveTracker(PlotTracker):
    """Tracker showing the simulation progress graphically on screen.

    This is a thin convenience wrapper around :class:`PlotTracker` that only
    changes the defaults: output is shown on screen (`show=True`) and the
    checking `interval` is a wall-time value better suited for interactive
    use. Thanks to the class attribute `name`, this tracker can be enabled
    by simply listing 'plot' as a tracker.
    """
    name = 'plot'
    @fill_in_docstring
    def __init__(self, interval: IntervalData = '0:02', *,
                 show: bool = True,
                 **kwargs):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
            show (bool, optional):
                Determines whether the plot is shown while the simulation is
                running. If `False`, the files are created in the background,
                which can slow down a simulation severely.
            kwargs:
                All further keyword arguments, e.g. `title`, `output_file`,
                `movie`, and `plot_args`, are forwarded unchanged to
                :class:`PlotTracker`.
        """
        super().__init__(interval=interval, show=show, **kwargs)
class DataTracker(CallbackTracker):
    """ Tracker that stores custom data obtained by calling a function

    Attributes:
        times (list):
            The time points at which the data is stored
        data (list):
            The actually stored data, which is a list of the objects returned by
            the callback function.
    """
    @fill_in_docstring
    def __init__(self, func: Callable,
                 interval: IntervalData = 1,
                 filename: str = None):
        """
        Args:
            func:
                The function to call periodically. The function signature
                should be `(state)` or `(state, time)`, where `state` contains
                the current state as an instance of
                :class:`~pde.fields.FieldBase` and `time` is a
                float value indicating the current time. Note that only a view
                of the state is supplied, implying that a copy needs to be made
                if the data should be stored.
                Typical return values of the function are either a single
                number, a numpy array, a list of number, or a dictionary to
                return multiple numbers with assigned labels.
            interval:
                {ARG_TRACKER_INTERVAL}
            filename (str):
                A path to a file to which the data is written at the end of the
                tracking. The data format will be determined by the extension
                of the filename. '.pickle' indicates a python pickle file
                storing a tuple `(self.times, self.data)`, whereas any other
                data format requires :mod:`pandas`.
        """
        super().__init__(func=func, interval=interval)
        self.filename = filename
        # parallel lists: data[i] was recorded at times[i]
        self.times: List[float] = []
        self.data: List[Any] = []
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        self.times.append(t)
        # `_num_args` (set by the base class) encodes whether the callback
        # accepts only the state or the state and the time
        if self._num_args == 1:
            self.data.append(self._callback(field))
        else:
            self.data.append(self._callback(field, t))
    def finalize(self, info: InfoDict = None) -> None:
        """ finalize the tracker, supplying additional information

        Args:
            info (dict):
                Extra information from the simulation
        """
        super().finalize(info)
        if self.filename:
            self.to_file(self.filename)
    @property
    def dataframe(self) -> "pandas.DataFrame":
        """ :class:`pandas.DataFrame`: the data in a dataframe

        If `func` returns a dictionary, the keys are used as column names.
        Otherwise, the returned data is enumerated starting with '0'. In any
        case the time point at which the data was recorded is stored in the
        column 'time'.
        """
        import pandas as pd
        df = pd.DataFrame(self.data)
        # insert the times and use them as an index
        df.insert(0, 'time', self.times)
        return df
    def to_file(self, filename: str, **kwargs):
        r""" store data in a file

        The extension of the filename determines what format is being used. For
        instance, '.pickle' indicates a python pickle file storing a tuple
        `(self.times, self.data)`, whereas any other data format requires
        :mod:`pandas`. Supported formats include 'csv', 'json'.

        Args:
            filename (str):
                Path where the data is stored
            \**kwargs:
                Additional parameters may be supported for some formats
        """
        extension = os.path.splitext(filename)[1].lower()
        if extension == '.pickle':
            # default
            import pickle
            with open(filename, "wb") as fp:
                pickle.dump((self.times, self.data), fp, **kwargs)
        elif extension == '.csv':
            self.dataframe.to_csv(filename, **kwargs)
        elif extension == '.json':
            self.dataframe.to_json(filename, **kwargs)
        elif extension in {'.xls', '.xlsx'}:
            self.dataframe.to_excel(filename, **kwargs)
        else:
            raise ValueError(f'Unsupported file extension `{extension}`')
class SteadyStateTracker(TrackerBase):
    """ Tracker that interrupts the simulation once steady state is reached

    Steady state is obtained when the state does not change anymore. This is the
    case when the derivative is close to zero.
    """
    name = 'steady_state'
    @fill_in_docstring
    def __init__(self, interval: IntervalData = None,
                 atol: float = 1e-8,
                 rtol: float = 1e-5):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
                The default value `None` checks for the steady state
                approximately every (real) second.
            atol (float): Absolute tolerance that must be reached to abort the
                simulation
            rtol (float): Relative tolerance that must be reached to abort the
                simulation
        """
        if interval is None:
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
        self.atol = atol
        self.rtol = rtol
        self._last_data = None  # snapshot of the state at the previous check
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Raises :class:`FinishedSimulation` once the state no longer changes
        between two consecutive checks within the given tolerances.

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        if self._last_data is not None:
            # scale with dt to make test independent of dt
            # NOTE(review): assumes `self.interval` exposes a `dt` attribute;
            # confirm this holds for all interval classes used here
            atol = self.atol * self.interval.dt
            rtol = self.rtol * self.interval.dt
            if np.allclose(self._last_data, field.data,
                           rtol=rtol, atol=atol, equal_nan=True):
                raise FinishedSimulation('Reached stationary state')
        self._last_data = field.data.copy()  # store data from last timestep
class RuntimeTracker(TrackerBase):
    """ Tracker that interrupts the simulation once a duration has passed """
    @fill_in_docstring
    def __init__(self, max_runtime: Union[Real, str],
                 interval: IntervalData = 1):
        """
        Args:
            max_runtime (float or str):
                The maximal runtime of the simulation. If the runtime is
                exceeded, the simulation is interrupted. Values can be either
                given as a number (interpreted as seconds) or as a string, which
                is then parsed using the function
                :func:`~pde.tools.parse_duration.parse_duration`.
            interval:
                {ARG_TRACKER_INTERVAL}
        """
        super().__init__(interval=interval)
        try:
            self.max_runtime = float(max_runtime)
        except ValueError:
            # not a plain number => parse duration strings (e.g. "1:30")
            td = parse_duration(str(max_runtime))
            self.max_runtime = td.total_seconds()
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """
        Args:
            field (:class:`~pde.fields.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation

        Returns:
            float: The first time the tracker needs to handle data
        """
        # the deadline is measured in wall-clock time, starting now
        self.max_time = time.time() + self.max_runtime
        return super().initialize(field, info)
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Raises :class:`FinishedSimulation` once the wall-clock deadline has
        passed.

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time
        """
        if time.time() > self.max_time:
            dt = timedelta(seconds=self.max_runtime)
            raise FinishedSimulation(f'Reached maximal runtime of {str(dt)}')
class ConsistencyTracker(TrackerBase):
    """Tracker that interrupts the simulation when the state is not finite.

    The tracker checks all entries of the state and aborts the simulation
    via :class:`StopIteration` as soon as any value becomes NaN or infinite.
    """
    name = 'consistency'
    @fill_in_docstring
    def __init__(self, interval: IntervalData = None):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
                The default value `None` checks for consistency approximately
                every (real) second.
        """
        if interval is None:
            interval = RealtimeIntervals(duration=1)
        super().__init__(interval=interval)
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time

        Raises:
            StopIteration: If any entry of the field is NaN or infinite
        """
        if not np.all(np.isfinite(field.data)):
            raise StopIteration('Field was not finite')
        # NOTE: an earlier version stored `self._last = field.data.copy()`
        # here, but that attribute was never read anywhere, so the
        # potentially expensive per-check copy has been removed.
class MaterialConservationTracker(TrackerBase):
    """Ensure that the amount of material is conserved.

    The integrated magnitude of the field (or of each field in a
    collection) is recorded at initialization and every subsequent check
    compares against that reference within the given tolerances.
    """
    name = 'material_conservation'
    @fill_in_docstring
    def __init__(self, interval: IntervalData = 1,
                 atol: float = 1e-4,
                 rtol: float = 1e-4):
        """
        Args:
            interval:
                {ARG_TRACKER_INTERVAL}
            atol (float):
                Absolute tolerance for amount deviations
            rtol (float):
                Relative tolerance for amount deviations
        """
        super().__init__(interval=interval)
        self.atol = atol
        self.rtol = rtol
    def initialize(self, field: FieldBase, info: InfoDict = None) -> float:
        """
        Args:
            field (:class:`~pde.fields.base.FieldBase`):
                An example of the data that will be analyzed by the tracker
            info (dict):
                Extra information from the simulation

        Returns:
            float: The first time the tracker needs to handle data
        """
        # record the reference amount(s) to compare against later
        if isinstance(field, FieldCollection):
            self._reference = np.array([f.magnitude for f in field])
        else:
            self._reference = field.magnitude  # type: ignore
        return super().initialize(field, info)
    def handle(self, field: FieldBase, t: float) -> None:
        """ handle data supplied to this tracker

        Args:
            field (:class:`~pde.fields.FieldBase`):
                The current state of the simulation
            t (float):
                The associated time

        Raises:
            StopIteration: If the material deviates from the reference
        """
        if isinstance(field, FieldCollection):
            mags = np.array([f.magnitude for f in field])
        else:
            mags = field.magnitude  # type: ignore
        c = np.isclose(mags, self._reference, rtol=self.rtol, atol=self.atol)
        if not np.all(c):
            if isinstance(field, FieldCollection):
                msg = f'Material of field {np.flatnonzero(~c)} is not conserved'
            else:
                # plain string: the former f-string had no placeholders
                msg = 'Material is not conserved'
            raise StopIteration(msg)
# Explicit public interface of this module (controls `from ... import *`)
__all__ = ['CallbackTracker', 'ProgressTracker', 'PrintTracker', 'PlotTracker',
           'DataTracker', 'SteadyStateTracker', 'RuntimeTracker',
           'ConsistencyTracker', 'MaterialConservationTracker']
| 37.478723 | 80 | 0.556344 |
7956d5778eddb65e6ad464cec097c3a26c931868 | 149 | py | Python | initialize_weights.py | catskillsresearch/openasr20 | b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e | [
"Apache-2.0"
] | null | null | null | initialize_weights.py | catskillsresearch/openasr20 | b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e | [
"Apache-2.0"
] | null | null | null | initialize_weights.py | catskillsresearch/openasr20 | b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e | [
"Apache-2.0"
] | 1 | 2021-07-28T02:13:21.000Z | 2021-07-28T02:13:21.000Z | import torch.nn as nn
def initialize_weights(m):
    """Apply Xavier (Glorot) uniform initialization to a module's weight.

    Intended to be used with :meth:`torch.nn.Module.apply`, e.g.
    ``model.apply(initialize_weights)``. Objects without a ``weight``
    attribute and 1-D weights (biases, normalization scale vectors) are
    left untouched, since Xavier initialization is only defined for
    tensors with at least two dimensions.

    Args:
        m: a :class:`torch.nn.Module` (or any object) visited by ``apply``.
    """
    if hasattr(m, 'weight') and m.weight.dim() > 1:
        # Pass the parameter itself: `nn.init` functions already operate
        # under no_grad, so the deprecated `.data` indirection is unneeded.
        nn.init.xavier_uniform_(m.weight)
| 24.833333 | 51 | 0.677852 |
7956d5b28bd0c1527877118117deec5195856527 | 2,233 | py | Python | TropicalGeometry/TropicalSemiring.py | SymmetricChaos/FiniteFields | 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | [
"MIT"
] | 1 | 2021-08-22T15:03:59.000Z | 2021-08-22T15:03:59.000Z | TropicalGeometry/TropicalSemiring.py | SymmetricChaos/NumberTheory | 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | [
"MIT"
] | null | null | null | TropicalGeometry/TropicalSemiring.py | SymmetricChaos/NumberTheory | 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | [
"MIT"
class Tropical:
    """An element of the (min, +) tropical semiring.

    In this semiring "addition" is the minimum of two values while
    "multiplication" is ordinary addition; "exponentiation" is therefore
    ordinary multiplication and "division" is ordinary subtraction. All
    operators accept either another :class:`Tropical` or a plain number.
    """

    def __init__(self, val):
        self.val = val

    @staticmethod
    def _value(other):
        """Return the numeric value of `other` (Tropical or plain number)."""
        return other.val if isinstance(other, Tropical) else other

    # Relations
    def __lt__(self, other):
        return self.val < Tropical._value(other)

    def __gt__(self, other):
        return self.val > Tropical._value(other)

    def __le__(self, other):
        return self.val <= Tropical._value(other)

    def __ge__(self, other):
        return self.val >= Tropical._value(other)

    def __eq__(self, other):
        return self.val == Tropical._value(other)

    def __hash__(self):
        # consistent with __eq__ (value comparison, also against plain
        # numbers); restores the hashability lost by defining __eq__
        return hash(self.val)

    # Semiring operations
    def __add__(self, b):
        # tropical addition: minimum
        return Tropical(min(self.val, Tropical._value(b)))

    __radd__ = __add__  # min is commutative

    def __mul__(self, b):
        # tropical multiplication: ordinary addition
        return Tropical(self.val + Tropical._value(b))

    __rmul__ = __mul__  # addition is commutative

    def __pow__(self, b):
        # tropical exponentiation: ordinary multiplication
        return Tropical(self.val * Tropical._value(b))

    # Other
    def __abs__(self):
        return Tropical(abs(self.val))

    def __str__(self):
        return str(self.val)

    def __repr__(self):
        return str(self.val)

    def __float__(self):
        return float(self.val)

    def sym(self):
        """Return the tropical multiplicative inverse (the negated value)."""
        return Tropical(-self.val)

    def __truediv__(self, b):
        # tropical division: ordinary subtraction; also accepts plain
        # numbers (the previous implementation crashed on non-Tropical b)
        return Tropical(self.val - Tropical._value(b))

    __floordiv__ = __truediv__  # identical by definition in this semiring
7956d67415f9e5d250febb11a07b298fc0d236e8 | 2,690 | py | Python | pyagentx3/agent.py | Temien/PyAgentX3 | 8f29ee160825fb049b5cacdbce4ef33418fb7dd3 | [
"BSD-2-Clause"
] | null | null | null | pyagentx3/agent.py | Temien/PyAgentX3 | 8f29ee160825fb049b5cacdbce4ef33418fb7dd3 | [
"BSD-2-Clause"
] | 3 | 2021-07-20T13:58:29.000Z | 2022-03-07T14:47:21.000Z | pyagentx3/agent.py | Temien/PyAgentX3 | 8f29ee160825fb049b5cacdbce4ef33418fb7dd3 | [
"BSD-2-Clause"
] | 3 | 2021-07-04T00:03:43.000Z | 2022-03-23T07:44:03.000Z | # -*- coding: utf-8 -*-
# --------------------------------------------
import logging
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record.

    Attached to the package logger so that applications which do not
    configure logging themselves do not see spurious warnings.
    """

    def emit(self, record):
        # intentionally a no-op: records are dropped
        pass
logger = logging.getLogger('pyagentx3.agent')
logger.addHandler(NullHandler())
# --------------------------------------------
import time
from queue import Queue
import inspect
import pyagentx3
from pyagentx3.updater import Updater
from pyagentx3.network import Network
class AgentError(Exception):
    """Raised when registering an invalid updater, set handler, or OID."""
class Agent():
    """Coordinates updater threads and the AgentX network thread.

    Updaters are registered per OID subtree via :meth:`register`; write
    handlers via :meth:`register_set`. :meth:`start` spawns all threads and
    then blocks until :meth:`stop` is called from elsewhere.
    """
    def __init__(self, agent_id='MyAgent'):
        self.agent_id = agent_id
        self._updater_list = []   # registered updater specs (dicts)
        self._sethandlers = {}    # oid -> SetHandler instance
        self._threads = []        # all threads spawned by start()
    def register(self, oid, class_, freq=10, data_store=None):
        """Register an Updater subclass to serve the given OID subtree.

        Args:
            oid: dotted numeric OID string (leading/trailing dots allowed).
            class_: an :class:`Updater` subclass (instantiated in start()).
            freq: update frequency in seconds.
            data_store: optional object passed to the updater instance.

        Raises:
            AgentError: if `class_` is not an Updater or `oid` is invalid.
        """
        if Updater not in inspect.getmro(class_):
            raise AgentError('Class given isn\'t an updater')
        # cleanup and test oid
        try:
            oid = oid.strip(' .')
            _ = [int(i) for i in oid.split('.')]
        except ValueError:
            raise AgentError('OID isn\'t valid')
        self._updater_list.append({
            'oid': oid,
            'class': class_,
            'data_store': data_store,
            'freq': freq})
    def register_set(self, oid, class_, data_store=None):
        """Register a SetHandler class for SNMP SET requests on an OID.

        Raises:
            AgentError: if `class_` is not a SetHandler or `oid` is invalid.
        """
        if pyagentx3.SetHandler not in class_.__bases__:
            raise AgentError('Class given isn\'t a SetHandler')
        # cleanup and test oid
        try:
            oid = oid.strip(' .')
            _ = [int(i) for i in oid.split('.')]
        except ValueError:
            raise AgentError('OID isn\'t valid')
        self._sethandlers[oid] = class_(data_store=data_store)
    def setup(self):
        """Hook for subclasses to register updaters/handlers before start."""
        # Override this
        pass
    def start(self):
        """Spawn all updater threads and the network thread, then block.

        This method does not return normally; it sleeps in a loop until
        the process is interrupted or stop() is invoked from another thread.
        """
        queue = Queue(maxsize=20)
        self.setup()
        # Start Updaters
        for u in self._updater_list:
            logger.debug('Starting updater [%s]', u['oid'])
            thread = u['class'](data_store=u['data_store'])
            thread.agent_setup(queue, u['oid'], u['freq'])
            thread.start()
            self._threads.append(thread)
        # Start Network
        oid_list = [u['oid'] for u in self._updater_list]
        thread = Network(queue, oid_list, self._sethandlers, self.agent_id)
        thread.start()
        self._threads.append(thread)
        # Do nothing ... just wait for someone to stop you
        while True:
            #logger.debug('Agent Sleeping ...')
            time.sleep(1)
    def stop(self):
        """Signal all spawned threads to stop and wait for them to exit."""
        logger.debug('Stop threads')
        for thread in self._threads:
            thread.stop.set()
        logger.debug('Wait for updater')
        for thread in self._threads:
            thread.join()
| 28.315789 | 75 | 0.560223 |
7956d7000b3730376dc18d5559f5916a9235fdcb | 3,228 | py | Python | issuer_controller/test/issue_credential_resource_test.py | WadeBarnes/mines-digital-trust | f07df4a347d49a523b37a066ff8d6b753be110ef | [
"Apache-2.0"
] | null | null | null | issuer_controller/test/issue_credential_resource_test.py | WadeBarnes/mines-digital-trust | f07df4a347d49a523b37a066ff8d6b753be110ef | [
"Apache-2.0"
] | null | null | null | issuer_controller/test/issue_credential_resource_test.py | WadeBarnes/mines-digital-trust | f07df4a347d49a523b37a066ff8d6b753be110ef | [
"Apache-2.0"
] | null | null | null | import pytest,threading,json, random
from time import sleep
from unittest.mock import MagicMock, patch, PropertyMock
from app import issuer, credential
# Fixture payload: two credential requests (a registration credential and a
# mines-act permit credential) in the shape the issuer endpoint expects.
test_send_credential = [
    {
        "schema": "my-registration.empr",
        "version": "1.0.0",
        "attributes": {
            "corp_num": "ABC12345",
            "registration_date": "2018-01-01",
            "entity_name": "Ima Permit",
            "entity_name_effective": "2018-01-01",
            "entity_status": "ACT",
            "entity_status_effective": "2019-01-01",
            "entity_type": "ABC",
            "registered_jurisdiction": "BC",
            "addressee": "A Person",
            "address_line_1": "123 Some Street",
            "city": "Victoria",
            "country": "Canada",
            "postal_code": "V1V1V1",
            "province": "BC",
            "effective_date": "2019-01-01",
            "expiry_date": ""
        }
    },
    {
        "schema": "bcgov-mines-act-permit.empr",
        "version": "1.0.0",
        "attributes": {
            "permit_id": "MYPERMIT12345",
            "entity_name": "Ima Permit",
            "corp_num": "ABC12345",
            "permit_issued_date": "2018-01-01",
            "permit_type": "ABC",
            "permit_status": "OK",
            "effective_date": "2019-01-01"
        }
    }
]
def test_liveness_method(app):
    """The liveness helper itself should report the service as alive."""
    assert issuer.issuer_liveness_check()
def test_liveness_route(test_client):
    """GET /liveness should respond 200 OK."""
    # plain string literal: the former f-string had no placeholders
    get_resp = test_client.get('/liveness')
    assert get_resp.status_code == 200
#TODO inconsistent passing, fails on first run, succeeds on second
# def test_health_method(app):
# val = issuer.tob_connection_synced()
# assert val
# def test_health_route(test_client):
# get_resp = test_client.get(f'/health')
# assert get_resp.status_code == 200
##-------------Issue-Credential--------------
class MockSendCredentialThread(threading.Thread):
    """Stand-in for issuer.SendCredentialThread used by the tests.

    All constructor arguments are accepted and ignored. `run` simulates
    network latency by sleeping for a random duration of up to one second
    before recording a canned successful credential response.
    """

    def __init__(self, *args):
        super().__init__()

    def run(self):
        sleep(random.randint(1, 1000) / 1000)
        self.cred_response = {"success": True, "result": "MOCK_RESPONSE"}
def test_issue_credential_spawns_thread(app):
    """handle_send_credential should spawn one worker per credential request.

    The real SendCredentialThread is patched with the mock so no agent is
    contacted; the endpoint must still return one success entry per input.
    """
    with patch('app.issuer.SendCredentialThread',new=MockSendCredentialThread) as mock:
        res = issuer.handle_send_credential(test_send_credential)
        assert res.status_code == 200
        # the response body is a JSON list with one entry per credential
        responses = json.loads(res.response[0])
        assert 'MOCK' in responses[0]["result"]
        assert all(r['success'] == True for r in responses)
        assert len(responses) == 2
def test_SendCredentialThread_posts_to_agent(app):
    """SendCredentialThread must POST the credential offer to the agent URL.

    `requests.post` is patched so no HTTP call leaves the test; the
    assertion verifies the URL, serialized payload, and headers used.
    """
    cred_def = "CRED_DEF_my-registration.empr_1.0.0"
    cred_offer = {"test":"tests","test2":"test2"}
    agent_url = app.ENV.get("AGENT_ADMIN_URL") + "/issue-credential/send"
    headers = {"Content-Type": "application/json"}
    with patch('app.credential.requests.post') as mock:
        thread = credential.SendCredentialThread(
            cred_def,
            cred_offer,
            agent_url,
            headers,
        )
        thread.start()
        thread.join()
        mock.assert_called_with(agent_url, json.dumps(cred_offer), headers=headers)
| 31.339806 | 87 | 0.599442 |
7956d70872889926ac0d0887bdfc404c12bc2f0c | 4,657 | py | Python | rf_protocol_validator.py | DMTF/Redfish-Protocol-Validator | 657aae079d5e490c4196ef50d64d5fa9d86cd584 | [
"FSFAP"
] | 2 | 2020-10-01T15:30:13.000Z | 2022-03-02T18:38:51.000Z | rf_protocol_validator.py | DMTF/Redfish-Protocol-Validator | 657aae079d5e490c4196ef50d64d5fa9d86cd584 | [
"FSFAP"
] | 34 | 2020-09-29T14:54:57.000Z | 2022-03-22T12:43:57.000Z | rf_protocol_validator.py | DMTF/Redfish-Protocol-Validator | 657aae079d5e490c4196ef50d64d5fa9d86cd584 | [
"FSFAP"
] | 3 | 2020-07-24T15:17:57.000Z | 2021-03-31T02:37:33.000Z | # Copyright Notice:
# Copyright 2020 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link:
# https://github.com/DMTF/Redfish-Protocol-Validator/blob/master/LICENSE.md
import argparse
import logging
import sys
from datetime import datetime
from pathlib import Path
import requests
from urllib3.exceptions import InsecureRequestWarning
from assertions import protocol_details
from assertions import report
from assertions import resources
from assertions import security_details
from assertions import service_details
from assertions import service_requests
from assertions import service_responses
from assertions import sessions
from assertions import utils
from assertions.constants import Result
from assertions.system_under_test import SystemUnderTest
tool_version = '1.0.9'
def perform_tests(sut: SystemUnderTest):
    """Perform the protocol validation tests on the resources."""
    # run each assertion suite in the order mandated by the test plan
    suites = (
        protocol_details.test_protocol_details,
        service_requests.test_service_requests,
        service_responses.test_service_responses,
        service_details.test_service_details,
        security_details.test_security_details,
    )
    for suite in suites:
        suite(sut)
def main():
    """Command-line entry point.

    Parses arguments, logs into the service under test, gathers the HTTP
    exchanges the assertions need, runs all validation suites, writes the
    requested reports, and exits with status 1 if any assertion failed.
    """
    parser = argparse.ArgumentParser(
        description='Validate the protocol conformance of a Redfish service')
    parser.add_argument('--version', action='version',
                        version='Redfish-Protocol-Validator %s' % tool_version)
    parser.add_argument('--user', '-u', type=str, required=True,
                        help='the username for authentication')
    parser.add_argument('--password', '-p', type=str, required=True,
                        help='the password for authentication')
    parser.add_argument('--rhost', '-r', type=str, required=True,
                        help='address of the Redfish service (with scheme)')
    parser.add_argument('--log-level', type=str, default='WARNING',
                        help='the logging level (default: WARNING)')
    parser.add_argument('--report-dir', type=str, default='reports',
                        help='the directory for generated report files '
                             '(default: "reports")')
    parser.add_argument('--report-type', choices=['html', 'tsv', 'both'],
                        help='the type of report to generate: html, tsv, or '
                             'both (default: both)', default='both')
    parser.add_argument('--avoid-http-redirect', action='store_true',
                        help='avoid attempts to generate HTTP redirects for '
                             'services that do not support HTTP')
    # --no-cert-check and --ca-bundle are mutually exclusive ways to
    # configure TLS certificate verification
    cert_g = parser.add_mutually_exclusive_group()
    cert_g.add_argument('--no-cert-check', action='store_true',
                        help='disable verification of host SSL certificates')
    cert_g.add_argument('--ca-bundle', type=str,
                        help='the file or directory containing trusted CAs')
    args = parser.parse_args()
    # set logging level
    log_level = getattr(logging, args.log_level.upper())
    logging.basicConfig(level=log_level)
    # set up cert verify option
    verify = args.ca_bundle if args.ca_bundle else not args.no_cert_check
    if args.no_cert_check:
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    # create report directory if needed
    report_dir = Path(args.report_dir)
    if not report_dir.is_dir():
        report_dir.mkdir(parents=True)
    sut = SystemUnderTest(args.rhost, args.user, args.password, verify=verify)
    sut.set_avoid_http_redirect(args.avoid_http_redirect)
    sut.login()
    # collect the request/response data the assertion suites will examine
    resources.read_target_resources(sut, func=resources.get_default_resources)
    no_auth_session = sessions.no_auth_session(sut)
    resources.read_uris_no_auth(sut, no_auth_session)
    resources.data_modification_requests(sut)
    resources.data_modification_requests_no_auth(sut, no_auth_session)
    resources.unsupported_requests(sut)
    resources.basic_auth_requests(sut)
    resources.http_requests(sut)
    resources.bad_auth_requests(sut)
    sessions.bad_login(sut)
    perform_tests(sut)
    sut.logout()
    utils.print_summary(sut)
    current_time = datetime.now()
    print('Report output:')
    report.json_results(sut, report_dir, current_time, tool_version)
    if args.report_type in ('tsv', 'both'):
        print(report.tsv_report(sut, report_dir, current_time))
    if args.report_type in ('html', 'both'):
        print(report.html_report(sut, report_dir, current_time, tool_version))
    # exit with status 1 if any assertions failed, 0 otherwise
    sys.exit(int(sut.summary_count(Result.FAIL) > 0))
main()
| 41.954955 | 79 | 0.703242 |
7956d78f3f2642f351093cb28dec7d329440efaf | 1,007 | py | Python | Python/Test_Typing_Speed/test_typing_speed.py | iamakkkhil/Rotten-Scripts | 116ae502271d699db88add5fd1cf733d01134b7d | [
"MIT"
] | 1,127 | 2020-02-16T04:14:00.000Z | 2022-03-31T21:37:24.000Z | Python/Test_Typing_Speed/test_typing_speed.py | iamakkkhil/Rotten-Scripts | 116ae502271d699db88add5fd1cf733d01134b7d | [
"MIT"
] | 1,123 | 2020-06-20T04:00:11.000Z | 2022-03-31T13:23:45.000Z | Python/Test_Typing_Speed/test_typing_speed.py | iamakkkhil/Rotten-Scripts | 116ae502271d699db88add5fd1cf733d01134b7d | [
"MIT"
] | 669 | 2020-05-30T16:14:43.000Z | 2022-03-31T14:36:11.000Z | # Python Script to test your Typing Speed
"""Measure the user's typing speed (words per minute) on a fixed paragraph."""
from time import time

print()
print("NO NEW LINE IS THERE, WRITE CONTINUOUSLY(just SPACES)")
s = (
    "this is a simple paragraph that is meant to be nice and"
    " easy to type which is why there will be no commas no periods "
    "or any capital letters so i guess this means that it cannot really "
    "be considered a paragraph but just a series of sentences"
)
words = len(s.split())
print()
print(s)
print("\nAfter you are done press enter to know your time and speed")
input("\nPress any key to Start:")
try:
    print("\nTimer Started\n")
    start = time()
    typed = input()
    end = time()
    if typed == s:
        elapsed = round(end - start, 2)
        print("\nVoila you typed that correctly")
        print("Your time was %s seconds" % elapsed)
        # Use the exact elapsed time for the speed: the previous code
        # truncated to whole seconds (`int(total)`) and then floor-divided
        # by float minutes, which skewed the wpm figure.
        minutes = (end - start) / 60
        print("Speed was %s wpm" % round(words / minutes))
    else:
        print("\nWrongly entered")
        print("Try again")
except KeyboardInterrupt:
    # allow aborting with Ctrl-C without a traceback
    print("")
| 25.175 | 73 | 0.633565 |
7956d950d68aae525daba920d5aaca06631d00e6 | 11,255 | py | Python | laika/downloader.py | mengyou658/laika | cdf08816f9e1d7e1f9e12e565d6f4fca6742ef8e | [
"MIT"
] | null | null | null | laika/downloader.py | mengyou658/laika | cdf08816f9e1d7e1f9e12e565d6f4fca6742ef8e | [
"MIT"
] | null | null | null | laika/downloader.py | mengyou658/laika | cdf08816f9e1d7e1f9e12e565d6f4fca6742ef8e | [
"MIT"
] | null | null | null | import certifi
import ftplib
import gzip
import os
import urllib.request
import pycurl
from datetime import datetime
from urllib.parse import urlparse
from io import BytesIO
from .constants import SECS_IN_DAY, SECS_IN_WEEK
from .gps_time import GPSTime
from .unlzw import unlzw
dir_path = os.path.dirname(os.path.realpath(__file__))
def retryable(f):
    """Decorator letting `f` accept either one URL base or a list of them.

    If the first argument is a single string it is passed straight through.
    If it is a list, `f` is attempted with each URL base in order and the
    first successful result is returned; an `IOError` from one attempt is
    printed and the next URL base is tried. If every attempt fails, a
    single `IOError` is raised.
    """
    # local import keeps this block self-contained; the module's top-level
    # import section is not touched
    from functools import wraps

    @wraps(f)  # preserve f's name/docstring on the wrapper
    def wrapped(url_bases, *args, **kwargs):
        if isinstance(url_bases, str):
            # only one url passed, don't do the retry thing
            return f(url_bases, *args, **kwargs)
        # not a string, must be a list of url_bases
        for url_base in url_bases:
            try:
                return f(url_base, *args, **kwargs)
            except IOError as e:
                print(e)
        # none of them succeeded
        raise IOError("Multiple URL failures attempting to pull file(s)")
    return wrapped
def ftp_connect(url):
    """Open an anonymous FTP connection and change into the URL's directory.

    Args:
        url: an `ftp://` URL whose path names the working directory.

    Returns:
        a logged-in :class:`ftplib.FTP` instance positioned at the path.

    Raises:
        IOError: if connecting/logging in fails, or the directory cannot
            be entered (e.g. it does not exist or is not readable).
    """
    parsed = urlparse(url)
    assert parsed.scheme == 'ftp'
    try:
        domain = parsed.netloc
        ftp = ftplib.FTP(domain)
        ftp.login()  # anonymous login
    except (OSError, ftplib.error_perm):
        raise IOError("Could not connect/auth to: " + domain)
    try:
        ftp.cwd(parsed.path)
    except ftplib.error_perm:
        raise IOError("Permission failure with folder: " + url)
    return ftp
@retryable
def list_dir(url):
    """List the file names in the FTP directory `url`.

    Thanks to :func:`retryable`, `url` may also be a list of URLs that
    are tried in order until one succeeds.

    Raises:
        IOError: if the directory listing is not permitted.
    """
    try:
        ftp = ftp_connect(url)
        return ftp.nlst()
    except ftplib.error_perm:
        raise IOError("Permission failure listing folder: " + url)
def decompress(filepath_zipped, filepath, compression=''):
    """Decompress `filepath_zipped` into `filepath` if needed.

    Args:
        filepath_zipped: path of the (possibly) compressed input file.
        filepath: path the decompressed data should be written to.
        compression: '' (no compression; the input path is returned as-is),
            '.gz' (gzip) or '.Z' (Unix compress, handled by `unlzw`).

    Returns:
        the path of the usable, uncompressed file.

    Raises:
        NotImplementedError: for an unsupported `compression` value.
    """
    if compression == '':
        return filepath_zipped
    elif compression == '.gz':
        # `with` guarantees the handle is closed even if reading fails
        with gzip.open(filepath_zipped, 'rb') as f:
            uncompressed_data = f.read()
    elif compression == '.Z':
        with open(filepath_zipped, 'rb') as f:
            compressed_data = f.read()
        uncompressed_data = unlzw(compressed_data)
    else:
        # single formatted message (the old code passed two args, which
        # rendered the error as a tuple)
        raise NotImplementedError('unknown compression: ' + str(compression))
    with open(filepath, 'wb') as f:
        f.write(uncompressed_data)
    return filepath
def ftp_download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
    """
    Like download file, but more of them. Keeps a persistent FTP connection open
    to be more efficient.

    Returns the list of decompressed file paths (cached paths are reused
    unless ``overwrite`` is set).

    Fix: the output file handle was previously created as
    ``open(path, 'wb').write`` and never closed, leaking one descriptor per
    downloaded file; it is now managed with a ``with`` block.
    """
    folder_path_abs = os.path.join(cacheDir, folder_path)
    ftp = ftp_connect(url_base + folder_path)
    filepaths = []
    for filename in filenames:
        filename_zipped = filename + compression
        filepath = os.path.join(folder_path_abs, filename)
        filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
        print("pulling from", url_base, "to", filepath)
        if not os.path.isfile(filepath) or overwrite:
            os.makedirs(folder_path_abs, exist_ok=True)
            try:
                with open(filepath_zipped, 'wb') as out:
                    ftp.retrbinary('RETR ' + filename_zipped, out.write)
            except ftplib.error_perm:
                raise IOError("Could not download file from: " + url_base + folder_path + filename_zipped)
            filepaths.append(decompress(filepath_zipped, filepath, compression=compression))
        else:
            filepaths.append(filepath)
    return filepaths
def https_download_file(url):
    """Fetch ``url`` over HTTPS with pycurl and return the body as bytes.

    Credentials come from the package-local .netrc file (NETRC set to 2,
    presumably CURL_NETRC_REQUIRED -- confirm against libcurl docs) and a
    cookie jar is kept for login-redirect flows (e.g. CDDIS).
    Raises IOError on any non-200 response.
    """
    buf = BytesIO()
    crl = pycurl.Curl()
    crl.setopt(crl.CAINFO, certifi.where())
    crl.setopt(crl.URL, url)
    crl.setopt(crl.FOLLOWLOCATION, True)
    crl.setopt(crl.NETRC_FILE, dir_path + '/.netrc')
    crl.setopt(crl.NETRC, 2)
    # NOTE(review): lowers the OpenSSL security level; presumably required by
    # an older server cipher suite -- confirm before removing.
    crl.setopt(crl.SSL_CIPHER_LIST, 'DEFAULT@SECLEVEL=1')
    crl.setopt(crl.COOKIEJAR, '/tmp/cddis_cookies')
    crl.setopt(crl.WRITEDATA, buf)
    crl.perform()
    status = crl.getinfo(pycurl.RESPONSE_CODE)
    crl.close()
    if status != 200:
        raise IOError('HTTPS error ' + str(status))
    return buf.getvalue()
def ftp_download_file(url):
    """Fetch ``url`` with urllib and return the raw (possibly compressed) bytes."""
    with urllib.request.urlopen(url) as response:
        return response.read()
@retryable
def download_files(url_base, folder_path, cacheDir, filenames, compression='', overwrite=False):
    """Retryable (multi-URL-base) wrapper around :func:`ftp_download_files`."""
    return ftp_download_files(url_base, folder_path, cacheDir, filenames,
                              compression=compression, overwrite=overwrite)
@retryable
def download_file(url_base, folder_path, cacheDir, filename, compression='', overwrite=False):
    """Download one (possibly compressed) file into the cache.

    Returns the path of the decompressed file. If it is already cached and
    ``overwrite`` is False, nothing is downloaded. Raises IOError on download
    failure (converted so @retryable can move on to the next URL base).

    Fix: the scheme check used to read ``elif 'ftp':`` — a constant truthy
    string — so the NotImplementedError branch was unreachable and any
    non-https URL was sent to the FTP downloader. It now actually tests the
    URL. The error message also said "Did find" instead of "Did not find".
    """
    folder_path_abs = os.path.join(cacheDir, folder_path)
    filename_zipped = filename + compression
    filepath = os.path.join(folder_path_abs, filename)
    filepath_zipped = os.path.join(folder_path_abs, filename_zipped)
    url = url_base + folder_path + filename_zipped
    if not os.path.isfile(filepath) or overwrite:
        if not os.path.exists(folder_path_abs):
            os.makedirs(folder_path_abs)
        try:
            print('Downloading ' + url)
            if 'https' in url:
                data_zipped = https_download_file(url)
            elif 'ftp' in url:
                data_zipped = ftp_download_file(url)
            else:
                raise NotImplementedError('Did not find ftp or https preamble')
        except IOError:
            raise IOError("Could not download file from: " + url)
        with open(filepath_zipped, 'wb') as wf:
            wf.write(data_zipped)
        filepath = decompress(filepath_zipped, filepath, compression=compression)
    return filepath
def download_nav(time, cache_dir, constellation='GPS'):
    """Download a broadcast-ephemeris (nav) file from CDDIS for ``time``.

    Uses the daily archive when ``time`` is more than a day old, otherwise
    the hourly archive (GPS only; there is no hourly GLONASS fetch, in which
    case None is returned). Returns the local file path, or None when the
    download fails (IOError is swallowed, matching the original behavior).

    Fix: an unsupported constellation used to crash with NameError on an
    unbound ``filename``; it now raises a descriptive NotImplementedError.
    """
    t = time.as_datetime()
    try:
        if GPSTime.from_datetime(datetime.utcnow()) - time > SECS_IN_DAY:
            url_base = 'https://cddis.nasa.gov/archive/gnss/data/daily/'
            cache_subdir = cache_dir + 'daily_nav/'
            if constellation == 'GPS':
                filename = t.strftime("brdc%j0.%yn")
                folder_path = t.strftime('%Y/%j/%yn/')
            elif constellation == 'GLONASS':
                filename = t.strftime("brdc%j0.%yg")
                folder_path = t.strftime('%Y/%j/%yg/')
            else:
                raise NotImplementedError('unsupported constellation: ' + str(constellation))
            return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z')
        else:
            url_base = 'https://cddis.nasa.gov/archive/gnss/data/hourly/'
            cache_subdir = cache_dir + 'hourly_nav/'
            if constellation == 'GPS':
                # Hourly files change as the day progresses, so always refetch.
                filename = t.strftime("hour%j0.%yn")
                folder_path = t.strftime('%Y/%j/')
                return download_file(url_base, folder_path, cache_subdir, filename, compression='.Z', overwrite=True)
            # No hourly product is fetched for other constellations.
    except IOError:
        pass
def download_orbits(time, cache_dir):
    """Download precise-orbit (sp3) products around ``time`` from CDDIS/IGN.

    For each of the previous, current and next day, candidate products are
    tried from best to worst quality — final (igs, only available ~3 weeks
    after the fact), rapid (igr), then the ultra-rapid (igu) files for hours
    18/12/06/00 — stopping at the first that downloads.

    Returns the list of successfully downloaded file paths. Behavior is
    unchanged; the six duplicated try/except blocks are folded into a loop
    over candidate filenames.
    """
    cache_subdir = cache_dir + 'cddis_products/'
    url_bases = (
        'https://cddis.nasa.gov/archive/gnss/products/',
        'ftp://igs.ign.fr/pub/igs/products/',
    )
    downloaded_files = []
    for t in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
        folder_path = "%i/" % (t.week)
        candidates = []
        # Final products only exist once the day is ~3 weeks in the past.
        if GPSTime.from_datetime(datetime.utcnow()) - t > 3*SECS_IN_WEEK:
            candidates.append("igs%i%i.sp3" % (t.week, t.day))
        candidates.append("igr%i%i.sp3" % (t.week, t.day))
        for hour in ('18', '12', '06', '00'):
            candidates.append("igu%i%i_%s.sp3" % (t.week, t.day, hour))
        for filename in candidates:
            try:
                downloaded_files.append(download_file(url_bases, folder_path, cache_subdir, filename, compression='.Z'))
                break  # best available product found for this day
            except IOError:
                pass
    return downloaded_files
def download_orbits_russia(time, cache_dir):
    """Download GLONASS sp3 orbit products from the IAC FTP server.

    For each of the previous, current and next day, the final (only once the
    day is >2 weeks old), rapid, then ultra products are tried in order of
    quality, stopping at the first success.

    Fix: after a successful *rapid* download the code fell through and also
    downloaded the *ultra* product, duplicating that day's entry — the
    *final* branch already had the corresponding ``continue``. Each day now
    stops at the first successful product.
    """
    cache_subdir = cache_dir + 'russian_products/'
    url_base = 'ftp://ftp.glonass-iac.ru/MCC/PRODUCTS/'
    downloaded_files = []
    for t in [time - SECS_IN_DAY, time, time + SECS_IN_DAY]:
        dt = t.as_datetime()
        folders = []
        if GPSTime.from_datetime(datetime.utcnow()) - t > 2*SECS_IN_WEEK:
            folders.append(dt.strftime('%y%j/final/'))
        folders.append(dt.strftime('%y%j/rapid/'))
        folders.append(dt.strftime('%y%j/ultra/'))
        filename = "Sta%i%i.sp3" % (t.week, t.day)
        for folder_path in folders:
            try:
                downloaded_files.append(download_file(url_base, folder_path, cache_subdir, filename))
                break  # best available product found for this day
            except IOError:
                pass
    return downloaded_files
def download_ionex(time, cache_dir):
    """Download an IONEX ionosphere map for ``time``.

    Tries the CODE final (codg), 1-day predicted (c1pg) and 2-day predicted
    (c2pg) products in order; returns the first downloaded path, or re-raises
    the last IOError if none succeed.
    """
    cache_subdir = cache_dir + 'ionex/'
    t = time.as_datetime()
    url_bases = (
        'https://cddis.nasa.gov/archive/gnss/products/ionex/',
        'ftp://igs.ign.fr/pub/igs/products/ionosphere/',
    )
    folder_path = t.strftime('%Y/%j/')
    candidates = (t.strftime("codg%j0.%yi"), t.strftime("c1pg%j0.%yi"), t.strftime("c2pg%j0.%yi"))
    last_err = None
    for filename in candidates:
        try:
            return download_file(url_bases, folder_path, cache_subdir, filename, compression='.Z')
        except IOError as e:
            last_err = e
    raise last_err
def download_dcb(time, cache_dir):
    """Download a CAS differential-code-bias (DCB) file near ``time``.

    Server coverage is spotty, so up to 14 days are tried, walking backwards
    from ``time``. Returns the first downloaded path, or re-raises the last
    IOError if every day fails.
    """
    cache_subdir = cache_dir + 'dcb/'
    url_bases = (
        'https://cddis.nasa.gov/archive/gnss/products/bias/',
        'ftp://igs.ign.fr/pub/igs/products/mgex/dcb/',
    )
    last_err = None
    for lookback in range(14):
        day = time - lookback*SECS_IN_DAY
        try:
            t = day.as_datetime()
            folder_path = t.strftime('%Y/')
            filename = t.strftime("CAS0MGXRAP_%Y%j0000_01D_01D_DCB.BSX")
            return download_file(url_bases, folder_path, cache_subdir, filename, compression='.gz')
        except IOError as e:
            last_err = e
    raise last_err
def download_cors_coords(cache_dir):
    """Download every CORS station coordinate file (*coord.txt) into the cache."""
    cache_subdir = cache_dir + 'cors_coord/'
    url_bases = (
        'ftp://geodesy.noaa.gov/cors/coord/coord_14/',
        'ftp://alt.ngs.noaa.gov/cors/coord/coord_14/'
    )
    coord_files = [name for name in list_dir(url_bases) if name.endswith('coord.txt')]
    return download_files(url_bases, '', cache_subdir, coord_files)
def download_cors_station(time, station_name, cache_dir):
    """Download a CORS RINEX observation file for ``station_name`` at ``time``.

    Returns the local path, or None (with a message) if the file is not
    available on either NOAA server.
    """
    cache_subdir = cache_dir + 'cors_obs/'
    t = time.as_datetime()
    folder_path = t.strftime('%Y/%j/') + station_name + '/'
    filename = station_name + t.strftime("%j0.%yo")
    url_bases = (
        'ftp://geodesy.noaa.gov/cors/rinex/',
        'ftp://alt.ngs.noaa.gov/cors/rinex/'
    )
    try:
        return download_file(url_bases, folder_path, cache_subdir, filename, compression='.gz')
    except IOError:
        print("File not downloaded, check availability on server.")
        return None
| 32.909357 | 112 | 0.683163 |
7956da327d34e493ebf213da8790d4eb288c9007 | 842 | py | Python | wishlist/views.py | randomowo/randomowo.ru | f00ddd1e6bfcd9cb30d2f164a4f2c0188e42f8f1 | [
"MIT"
] | null | null | null | wishlist/views.py | randomowo/randomowo.ru | f00ddd1e6bfcd9cb30d2f164a4f2c0188e42f8f1 | [
"MIT"
] | null | null | null | wishlist/views.py | randomowo/randomowo.ru | f00ddd1e6bfcd9cb30d2f164a4f2c0188e42f8f1 | [
"MIT"
] | null | null | null | """
"""
from django.shortcuts import render
from wishlist.forms import WishForm
from wishlist.models import Wish
def wish_list(request):
    """Render the wish-list page; on a valid POST, store the submitted wish.

    The page always shows a fresh (empty) form plus all wishes, newest first;
    ``added`` tells the template whether this request created a new entry.
    """
    added = False
    if request.method == "POST":
        form = WishForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            Wish.objects.create(title=data["title"],
                                username=data["username"],
                                film_url=data["film_url"])
            added = True
    context = {
        "form": WishForm(),
        "wishes": Wish.objects.order_by("-pub_date"),
        "added": added,
    }
    return render(request, "user/cinema/wlist.html", context)
7956dac4e367a7f2064c23b999f7944fe83d4826 | 9,796 | py | Python | e2e-tests/test_client.py | addyess/lightkube | 3d2f4ab41bf9daa168e923f3b820d9379d6d56b6 | [
"MIT"
] | null | null | null | e2e-tests/test_client.py | addyess/lightkube | 3d2f4ab41bf9daa168e923f3b820d9379d6d56b6 | [
"MIT"
] | null | null | null | e2e-tests/test_client.py | addyess/lightkube | 3d2f4ab41bf9daa168e923f3b820d9379d6d56b6 | [
"MIT"
] | null | null | null | import time
from datetime import datetime
import pytest
from lightkube import Client, ApiError, AsyncClient
from lightkube.types import PatchType
from lightkube.resources.core_v1 import Pod, Node, ConfigMap, Service, Namespace
from lightkube.resources.apps_v1 import Deployment
from lightkube.models.meta_v1 import ObjectMeta
from lightkube.models.core_v1 import PodSpec, Container, ServiceSpec, ServicePort
uid_count = 0
@pytest.fixture
def obj_name():
    # Unique, sortable resource name per test invocation: a UTC timestamp
    # plus a module-level counter to disambiguate same-second calls.
    global uid_count
    uid_count += 1
    return f'test-{datetime.utcnow().strftime("%Y%m%d%H%M%S")}-{uid_count}'
def names(obj_list):
    """Return the ``metadata.name`` of every Kubernetes object in ``obj_list``."""
    extracted = []
    for item in obj_list:
        extracted.append(item.metadata.name)
    return extracted
def create_pod(name, command) -> Pod:
    """Build a single-container busybox Pod running ``command`` under /bin/sh.

    The pod is labeled ``app-name=<name>`` so tests can watch it, and uses a
    1-second grace period so deletion is fast.
    """
    shell_args = [
        "/bin/sh",
        "-c",
        command
    ]
    container = Container(name='main', image='busybox', args=shell_args)
    return Pod(
        metadata=ObjectMeta(name=name, labels={'app-name': name}),
        spec=PodSpec(containers=[container], terminationGracePeriodSeconds=1)
    )
def wait_pod(client, pod):
    """Block until ``pod`` leaves the Pending phase, using a label watch."""
    stream = client.watch(Pod, labels={'app-name': pod.metadata.name},
                          resource_version=pod.metadata.resourceVersion)
    for _, updated in stream:
        if updated.status.phase != 'Pending':
            return
def test_pod_apis(obj_name):
    """End-to-end Pod lifecycle: list, create, watch until running, read logs, delete."""
    client = Client()

    # list kube-system namespace
    pods = [pod.metadata.name for pod in client.list(Pod, namespace='kube-system')]
    assert len(pods) > 0
    assert any(name.startswith('metrics-server') for name in pods)

    # create a pod
    pod = client.create(create_pod(obj_name, "while true;do echo 'this is a test';sleep 5; done"))
    try:
        assert pod.metadata.name == obj_name
        assert pod.metadata.namespace == client.namespace
        assert pod.status.phase

        wait_pod(client, pod)

        # read pod logs
        for l in client.log(obj_name, follow=True):
            assert l == 'this is a test\n'
            break
    finally:
        # delete the pod
        client.delete(Pod, obj_name)
def test_pod_not_exist():
    """Getting a missing Pod raises ApiError with a populated 404 status."""
    client = Client()
    with pytest.raises(ApiError) as exc_info:
        client.get(Pod, name='this-pod-is-not-found')
    status = exc_info.value.status
    assert status.code == 404
    assert status.details.name == 'this-pod-is-not-found'
    assert status.reason == 'NotFound'
    assert status.status == 'Failure'
def test_pod_already_exist(obj_name):
    """Creating the same Pod twice raises ApiError with a 409 AlreadyExists status."""
    client = Client()
    client.create(create_pod(obj_name, "sleep 5"))
    try:
        with pytest.raises(ApiError) as exc_info:
            client.create(create_pod(obj_name, "sleep 5"))
        status = exc_info.value.status
        assert status.code == 409
        assert status.reason == 'AlreadyExists'
        assert status.status == 'Failure'
    finally:
        # delete the pod
        client.delete(Pod, obj_name)
def test_global_methods():
    """list/get work on cluster-scoped (non-namespaced) resources like Node."""
    client = Client()
    nodes = [node.metadata.name for node in client.list(Node)]
    assert len(nodes) > 0
    node = client.get(Node, name=nodes[0])
    assert node.metadata.name == nodes[0]
    # the os label mirrors the reported nodeInfo operating system
    assert node.metadata.labels['kubernetes.io/os'] == node.status.nodeInfo.operatingSystem
def test_namespaced_methods(obj_name):
    """Full CRUD cycle on a namespaced resource (ConfigMap): create, replace,
    patch, get, list, delete."""
    client = Client()
    config = ConfigMap(
        metadata=ObjectMeta(name=obj_name, namespace='default'),
        data={'key1': 'value1', 'key2': 'value2'}
    )

    # create
    config = client.create(config)
    try:
        assert config.metadata.name == obj_name
        assert config.data['key1'] == 'value1'
        assert config.data['key2'] == 'value2'

        # replace
        config.data['key1'] = 'new value'
        config = client.replace(config)
        assert config.data['key1'] == 'new value'
        assert config.data['key2'] == 'value2'

        # patch with PatchType.STRATEGIC
        patch = {'metadata': {'labels': {'app': 'xyz'}}}
        config = client.patch(ConfigMap, name=obj_name, obj=patch)
        assert config.metadata.labels['app'] == 'xyz'

        # get
        config2 = client.get(ConfigMap, name=obj_name)
        assert config.metadata.creationTimestamp == config2.metadata.creationTimestamp

        # list
        configs = [config.metadata.name for config in client.list(ConfigMap)]
        assert obj_name in configs
    finally:
        client.delete(ConfigMap, name=obj_name)
def test_patching(obj_name):
    """Exercise the three patch strategies on a Service:
    STRATEGIC (merge keyed on port), MERGE (list replacement), and JSON patch."""
    client = Client()
    service = Service(
        metadata=ObjectMeta(name=obj_name),
        spec=ServiceSpec(
            ports=[ServicePort(name='a', port=80, targetPort=8080)],
            selector={'app': 'not-existing'}
        )
    )

    # create
    client.create(service)
    try:
        # patch with PatchType.STRATEGIC
        patch = {'spec': {'ports': [{'name': 'b', 'port':81, 'targetPort': 8081}]}}
        service = client.patch(Service, name=obj_name, obj=patch)
        assert len(service.spec.ports) == 2
        assert {port.name for port in service.spec.ports} == {'a', 'b'}

        # strategic - patch merge key: port
        # we also try to send a Resource type for patching
        patch = Service(spec=ServiceSpec(ports=[ServicePort(name='b', port=81, targetPort=8082)]))
        service = client.patch(Service, name=obj_name, obj=patch)
        assert len(service.spec.ports) == 2
        for port in service.spec.ports:
            if port.port == 81:
                assert port.targetPort == 8082

        # patch with PatchType.MERGE
        # merge will replace the full list
        patch = {'spec': {'ports': [{'name': 'b', 'port': 81, 'targetPort': 8081}]}}
        service = client.patch(Service, name=obj_name, obj=patch, patch_type=PatchType.MERGE)
        assert len(service.spec.ports) == 1
        assert service.spec.ports[0].port == 81

        # patch with PatchType.JSON
        patch = [
            {'op': 'add', 'path': '/spec/ports/-', 'value': {'name': 'a', 'port': 80, 'targetPort': 8080}}
        ]
        service = client.patch(Service, name=obj_name, obj=patch, patch_type=PatchType.JSON)
        assert len(service.spec.ports) == 2
        assert service.spec.ports[1].port == 80
    finally:
        client.delete(Service, name=obj_name)
def test_deletecollection(obj_name):
    """deletecollection removes every ConfigMap in a dedicated namespace."""
    client = Client()
    config = ConfigMap(
        metadata=ObjectMeta(name=obj_name, namespace=obj_name),
        data={'key1': 'value1', 'key2': 'value2'}
    )
    client.create(Namespace(metadata=ObjectMeta(name=obj_name)))
    try:
        # create
        client.create(config)
        config.metadata.name = f"{obj_name}-2"
        client.create(config)

        # k3s automatically create/recreate one extra configmap.
        maps = names(client.list(ConfigMap, namespace=obj_name))
        assert obj_name in maps
        assert f"{obj_name}-2" in maps

        client.deletecollection(ConfigMap, namespace=obj_name)
        maps = names(client.list(ConfigMap, namespace=obj_name))
        assert obj_name not in maps
        assert f"{obj_name}-2" not in maps
    finally:
        client.delete(Namespace, name=obj_name)
def test_list_all_ns(obj_name):
    """Listing with namespace='*' returns objects from every namespace."""
    client = Client()
    ns1 = obj_name
    ns2 = f"{obj_name}-2"
    config = ConfigMap(
        metadata=ObjectMeta(name=obj_name),
        data={'key1': 'value1', 'key2': 'value2'}
    )
    client.create(Namespace(metadata=ObjectMeta(name=ns1)))
    client.create(Namespace(metadata=ObjectMeta(name=ns2)))
    try:
        client.create(config, namespace=ns1)
        client.create(config, namespace=ns2)

        maps = [f"{cm.metadata.namespace}/{cm.metadata.name}" for cm in client.list(ConfigMap, namespace='*')]
        assert f"{ns1}/{obj_name}" in maps
        assert f"{ns2}/{obj_name}" in maps
    finally:
        client.delete(Namespace, name=ns1)
        client.delete(Namespace, name=ns2)
@pytest.mark.parametrize("resource", [Node])
def test_wait_global(resource):
    """client.wait resolves the Ready condition on cluster-scoped resources."""
    client = Client()
    for obj in client.list(resource):
        client.wait(resource, obj.metadata.name, for_conditions=["Ready"])
@pytest.mark.asyncio
@pytest.mark.parametrize("resource", [Node])
async def test_wait_global_async(resource):
    """Async variant of test_wait_global using AsyncClient."""
    client = AsyncClient()
    async for obj in client.list(resource):
        await client.wait(resource, obj.metadata.name, for_conditions=["Ready"])
    await client.close()
# (resource, condition to wait for, minimal spec) triples shared by the
# namespaced sync and async wait tests below.
WAIT_NAMESPACED_PARAMS = [
    (Pod, "Ready", {"containers": [{"name": "nginx", "image": "nginx:1.21.4"}]}),
    (
        Deployment,
        "Available",
        {
            "selector": {"matchLabels": {"foo": "bar"}},
            "template": {
                "metadata": {"labels": {"foo": "bar"}},
                "spec": {"containers": [{"name": "nginx", "image": "nginx:1.21.4"}]},
            },
        },
    ),
]
@pytest.mark.parametrize("resource,for_condition,spec", WAIT_NAMESPACED_PARAMS)
def test_wait_namespaced(resource, for_condition, spec):
    """client.wait resolves the given condition on a freshly created namespaced resource."""
    client = Client()
    requested = resource.from_dict(
        {"metadata": {"generateName": "e2e-test-"}, "spec": spec}
    )
    created = client.create(requested)
    client.wait(
        resource,
        created.metadata.name,
        for_conditions=[for_condition],
    )
    client.delete(resource, created.metadata.name)
@pytest.mark.asyncio
@pytest.mark.parametrize("resource,for_condition,spec", WAIT_NAMESPACED_PARAMS)
async def test_wait_namespaced_async(resource, for_condition, spec):
    """Async variant of test_wait_namespaced using AsyncClient."""
    client = AsyncClient()
    requested = resource.from_dict(
        {"metadata": {"generateName": "e2e-test-"}, "spec": spec}
    )
    created = await client.create(requested)
    await client.wait(
        resource,
        created.metadata.name,
        for_conditions=[for_condition],
    )
    await client.delete(resource, created.metadata.name)
    await client.close()
7956db1bb6ae173e72a87e94616c60b8a02bbab8 | 326 | py | Python | models/__init__.py | IgorIvkin/Children | a43bbfae3f9390b12df83099437ff6bde7bfcc5d | [
"Apache-2.0"
] | null | null | null | models/__init__.py | IgorIvkin/Children | a43bbfae3f9390b12df83099437ff6bde7bfcc5d | [
"Apache-2.0"
] | null | null | null | models/__init__.py | IgorIvkin/Children | a43bbfae3f9390b12df83099437ff6bde7bfcc5d | [
"Apache-2.0"
] | null | null | null | import os
import importlib
# Auto-import every sibling module in this package (models) so their model
# classes are registered; the migration framework relies on this side effect.
for _module_file in os.listdir(os.path.dirname(__file__)):
    if _module_file == '__init__.py' or _module_file[-3:] != '.py':
        continue
    importlib.import_module(__package__ + '.' + _module_file[:-3])
| 32.6 | 60 | 0.696319 |
7956db43348c0cc0f3d372e92a2e343f5aa62013 | 5,860 | py | Python | tensorflow/contrib/gan/python/eval/python/summaries_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 5 | 2019-05-23T02:59:21.000Z | 2020-02-05T08:20:23.000Z | tensorflow/contrib/gan/python/eval/python/summaries_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/gan/python/eval/python/summaries_test.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 2 | 2019-07-04T00:47:02.000Z | 2019-07-08T08:47:05.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.eval.python import summaries_impl as summaries
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
def generator_model(inputs):
  """Toy generator: scales inputs by one trainable scalar ('dummy_g')."""
  scale = variable_scope.get_variable('dummy_g', initializer=2.0)
  return scale * inputs
def discriminator_model(inputs, _):
  """Toy discriminator: scales inputs by one trainable scalar ('dummy_d');
  the second (conditioning) argument is ignored."""
  scale = variable_scope.get_variable('dummy_d', initializer=2.0)
  return scale * inputs
def get_gan_model():
  """Build a GANModel namedtuple of dummy tensors/variables for the tests."""
  # TODO(joelshor): Find a better way of creating a variable scope.
  with variable_scope.variable_scope('generator') as gen_scope:
    pass
  with variable_scope.variable_scope('discriminator') as dis_scope:
    pass
  return namedtuples.GANModel(
      generator_inputs=array_ops.zeros([4, 32, 32, 3]),
      generated_data=array_ops.zeros([4, 32, 32, 3]),
      generator_variables=[variables.Variable(0), variables.Variable(1)],
      generator_scope=gen_scope,
      generator_fn=generator_model,
      real_data=array_ops.ones([4, 32, 32, 3]),
      discriminator_real_outputs=array_ops.ones([1, 2, 3]),
      discriminator_gen_outputs=array_ops.ones([1, 2, 3]),
      discriminator_variables=[variables.Variable(0)],
      discriminator_scope=dis_scope,
      discriminator_fn=discriminator_model)
def get_cyclegan_model():
  """Build a CycleGANModel from two dummy GANModels (x2y and y2x)."""
  with variable_scope.variable_scope('x2y'):
    model_x2y = get_gan_model()
  with variable_scope.variable_scope('y2x'):
    model_y2x = get_gan_model()
  return namedtuples.CycleGANModel(
      model_x2y=model_x2y,
      model_y2x=model_y2x,
      reconstructed_x=array_ops.zeros([3, 30, 35, 6]),
      reconstructed_y=array_ops.zeros([3, 30, 35, 6]))
class SummariesTest(test.TestCase):
  """Smoke tests for TFGAN summary helpers.

  Each case checks that the add_*_summaries call registers the expected
  number of ops in the SUMMARIES collection and that the merged summary
  evaluates without error, for both GAN and CycleGAN models.
  """

  def _test_add_gan_model_image_summaries_impl(self, get_model_fn,
                                               expected_num_summary_ops,
                                               model_summaries):
    summaries.add_gan_model_image_summaries(get_model_fn(), grid_size=2,
                                            model_summaries=model_summaries)
    self.assertEquals(expected_num_summary_ops,
                      len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
    with self.test_session(use_gpu=True):
      variables.global_variables_initializer().run()
      summary.merge_all().eval()

  def test_add_gan_model_image_summaries(self):
    self._test_add_gan_model_image_summaries_impl(get_gan_model, 5, True)

  def test_add_gan_model_image_summaries_no_model(self):
    self._test_add_gan_model_image_summaries_impl(get_gan_model, 2, False)

  def test_add_gan_model_image_summaries_for_cyclegan(self):
    self._test_add_gan_model_image_summaries_impl(get_cyclegan_model, 10,
                                                  True)

  def _test_add_gan_model_summaries_impl(self, get_model_fn,
                                         expected_num_summary_ops):
    summaries.add_gan_model_summaries(get_model_fn())
    self.assertEquals(expected_num_summary_ops,
                      len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
    with self.test_session(use_gpu=True):
      variables.global_variables_initializer().run()
      summary.merge_all().eval()

  def test_add_gan_model_summaries(self):
    self._test_add_gan_model_summaries_impl(get_gan_model, 3)

  def test_add_gan_model_summaries_for_cyclegan(self):
    self._test_add_gan_model_summaries_impl(get_cyclegan_model, 6)

  def _test_add_regularization_loss_summaries_impl(self, get_model_fn,
                                                   expected_num_summary_ops):
    summaries.add_regularization_loss_summaries(get_model_fn())
    self.assertEquals(expected_num_summary_ops,
                      len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
    with self.test_session(use_gpu=True):
      summary.merge_all().eval()

  def test_add_regularization_loss_summaries(self):
    self._test_add_regularization_loss_summaries_impl(get_gan_model, 2)

  def test_add_regularization_loss_summaries_for_cyclegan(self):
    self._test_add_regularization_loss_summaries_impl(get_cyclegan_model, 4)

  # TODO(joelshor): Add correctness test.
  def _test_add_image_comparison_summaries_impl(self, get_model_fn,
                                                expected_num_summary_ops):
    summaries.add_image_comparison_summaries(get_model_fn(), display_diffs=True)
    self.assertEquals(expected_num_summary_ops,
                      len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
    with self.test_session(use_gpu=True):
      summary.merge_all().eval()

  def test_add_image_comparison_summaries(self):
    self._test_add_image_comparison_summaries_impl(get_gan_model, 1)

  def test_add_image_comparison_summaries_for_cyclegan(self):
    self._test_add_image_comparison_summaries_impl(get_cyclegan_model, 2)
# Run all tests when executed directly.
if __name__ == '__main__':
  test.main()
| 40.136986 | 81 | 0.725939 |
7956dcb800aea83db15e0f4d67a36465f62281c0 | 221 | py | Python | books_app/books_app/books/admin.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | 1 | 2021-07-20T12:16:34.000Z | 2021-07-20T12:16:34.000Z | books_app/books_app/books/admin.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | null | null | null | books_app/books_app/books/admin.py | BoyanPeychinov/python_web_basics | 2f892ac119f7fe3a5c03fc5e7b35670dc609a70f | [
"MIT"
] | null | null | null | from django.contrib import admin
from books_app.books.models import Book, Author
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Admin page for Book; default ModelAdmin behavior, registration only."""
    pass
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    """Admin page for Author; default ModelAdmin behavior, registration only."""
    pass
7956dd0280162ab12213e2c5c63643464e02ab75 | 445 | py | Python | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/tests/__init__.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | null | null | null | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/tests/__init__.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | 10 | 2020-06-05T19:42:26.000Z | 2022-03-11T23:38:35.000Z | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/tests/__init__.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from pgadmin.utils.route import BaseTestGenerator
class TriggersTestGenerator(BaseTestGenerator):
    """Scenario generator for trigger tests; currently yields no scenarios."""
    def runTest(self):
        # No generated test cases yet.
        return []
| 26.176471 | 74 | 0.494382 |
7956dd2d67cc23b3e4168199ecf51890e6f12e18 | 47,465 | py | Python | model/models.py | irenetrampoline/clustering-interval-censored | f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4 | [
"MIT"
] | 1 | 2022-02-03T08:47:45.000Z | 2022-02-03T08:47:45.000Z | model/models.py | irenetrampoline/clustering-interval-censored | f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4 | [
"MIT"
] | null | null | null | model/models.py | irenetrampoline/clustering-interval-censored | f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4 | [
"MIT"
] | null | null | null | import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# torch.manual_seed(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
from pyro.distributions import MultivariateNormal, Normal, Independent
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import adjusted_rand_score
import scipy
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh
import sys
sys.path.append('/home/REDACTED/chf-github/model/')
from utils import check_has_missing, quad_function, convert_XY_pack_pad
sys.path.append('../evaluation/')
from eval_utils import get_cluster_swap_metric, get_cluster_pear_metric
sys.path.append('../plot/')
from plot_utils import plot_latent_labels, plot_delta_comp
import matplotlib.pylab as pylab
# Enlarge matplotlib fonts globally for every plot produced by this module.
params = {'legend.fontsize': 'x-large',
          # 'figure.figsize': (10,6),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
class Model(nn.Module):
    """Base class for the alignment models in this file.

    Provides deterministic seeding, mask helpers, simple L1/L2 regularizers
    and a shared training loop (:meth:`fit`). Subclasses must implement
    ``forward`` and return ``((nelbo, nll, kl), loss)``.
    """
    def __init__(self):
        # Seed all RNGs before any parameters are allocated so repeated
        # runs initialize identically.
        torch.manual_seed(0)
        np.random.seed(0)
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        super(Model, self).__init__()

    def forward(self,**kwargs):
        # Abstract: subclasses implement the ELBO computation.
        raise ValueError('Should be overriden')

    def get_masks(self, M):
        """Derive masks from the per-feature observation mask ``M``.

        Returns (m_t, m_g_t, lens): a per-(batch, time) indicator that more
        than one observation remains from this step onward (reverse
        cumulative count over time), a per-sequence indicator that more than
        one valid step exists, and the number of valid steps per sequence.
        Assumes M is (batch, time, features) with 1 = observed — TODO confirm.
        """
        m_t = ((torch.flip(torch.cumsum(torch.flip(M.sum(-1), (1,)), 1), (1,))>1.)*1)
        m_g_t = (m_t.sum(-1)>1)*1.
        lens = m_t.sum(-1)
        return m_t, m_g_t, lens

    def masked_gaussian_nll_3d(self, x, mu, std):
        """Element-wise Gaussian negative log-likelihood of ``x`` under N(mu, std).

        NOTE(review): despite the name, no mask is applied here — the raw
        per-element NLL is returned and any masking happens in the caller.
        """
        nll = 0.5*np.log(2*np.pi) + torch.log(std)+((mu-x)**2)/(2*std**2)
        masked_nll = nll
        return masked_nll

    def apply_reg(self, p, reg_type='l2'):
        """Return the L1 or L2 penalty of parameter tensor ``p``."""
        if reg_type == 'l1':
            return torch.sum(torch.abs(p))
        elif reg_type=='l2':
            return torch.sum(p.pow(2))
        else:
            raise ValueError('bad reg')

    def fit(self, train_loader, valid_loader, epochs, lr, eval_freq=1, print_freq=1000, anneal = False, fname = None, verbose=False, plot_debug=False, epoch_debug=False):
        """Train the model with Adam, tracking the best validation NELBO.

        train_loader / valid_loader yield (Y, S, X, M, T) tuples. When
        ``anneal`` is truthy, the KL weight ramps linearly and reaches 1
        halfway through training. ``fname`` is the checkpoint path (best
        validation NELBO is saved and restored at the end). ``plot_debug``
        emits per-epoch latent/delta plots stitched into GIFs;
        ``epoch_debug`` records clustering/likelihood metrics and pickles
        them. Returns (best_nelbo, best_nll, best_kl, best_ep).
        """
        if verbose:
            eval_freq = 50
        opt = torch.optim.Adam(self.parameters(), lr=lr, eps=1e-3)
        best_nelbo, best_nll, best_kl, best_ep = 100000, 100000, 100000, -1
        # Per-epoch bookkeeping used by the debug/plot branches below.
        training_loss = list()
        training_epochs = list()
        testing_loss = list()
        testing_epochs = list()
        test_ari_vals = list()
        test_mse_vals = list()
        test_pear_vals = list()
        test_swaps_vals = list()
        train_nelbo = list()
        train_nll = list()
        train_kl = list()
        test_nelbo = list()
        test_nll = list()
        test_kl = list()
        train_likelihood = list()
        test_likelihood = list()
        train_affinity_num_clusters = list()
        test_affinity_num_clusters = list()
        if fname is not None:
            logging.basicConfig(
                filename=fname[:-4]+'_loss.log', filemode='w',
                format='%(asctime)s - %(levelname)s \t %(message)s',
                level=logging.INFO)
        if anneal:
            anneal = 0.01
            # print ('With annealing starting at ',anneal)
        else:
            # print ('No annealing')
            anneal = 1.
        # TODO: consider caching the convert_XY_pad content because it's the bulk of the computation?
        """
        if check_has_missing(X) or check_has_missing(Y):
            has_missing = True
        else:
            has_missing = False
        XY = concat(X,Y)
        newXY, all_seq_lengths = convert_XY_pack_pad(XY)
        """
        # Pre-compute pack/pad data once for the whole training set (only
        # needed when there are missing observations).
        Y, S, X, M, T = [i for i in train_loader][0]
        has_missing = False
        newXY = None
        all_seq_lengths = None
        if check_has_missing(X) or check_has_missing(Y):
            has_missing = True
            XY = torch.cat([X,Y], axis=2)
            newXY, all_seq_lengths = convert_XY_pack_pad(XY, how=self.how_missing)
        else:
            has_missing = False
        # now validation
        val_Y, val_S, val_X, val_M, val_T = [i for i in valid_loader][0]
        val_has_missing = False
        val_newXY = None
        val_all_seq_lengths = None
        if check_has_missing(val_X) or check_has_missing(val_Y):
            val_has_missing = True
            val_XY = torch.cat([val_X,val_Y], axis=2)
            val_newXY, val_all_seq_lengths = convert_XY_pack_pad(val_XY, how=self.how_missing)
        else:
            val_has_missing = False
        for epoch in range(1, epochs+1):
            # Linear KL anneal: reaches 1 halfway through training.
            anneal = min(1, epoch/(epochs*0.5))
            self.train()
            batch_loss = 0
            test_batch_loss = 0
            idx = 0
            test_idx = 0
            for data_tuples in train_loader:
                opt.zero_grad()
                # if epoch == 3:
                (nelbo, nll, kl), loss = self.forward(*data_tuples, anneal = anneal,
                                                      has_missing=has_missing,XY=newXY,
                                                      all_seq_lengths=all_seq_lengths)
                nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
                if epoch_debug:
                    train_nelbo.append(nelbo)
                    train_nll.append(nll)
                    train_kl.append(kl)
                # from torch.autograd import grad
                # grad(loss, model.debug['y_out'], only_inputs=True)
                # grad(loss, model.debug['rnn'], only_inputs=True)
                # grad(loss, model.debug['enc_h_mu'], only_inputs=True)
                loss.backward()
                opt.step()
                idx +=1
                batch_loss += loss.item()
            cur_mse = batch_loss/float(idx)
            training_loss.append(cur_mse)
            if epoch%eval_freq==0:
                self.eval()
                (nelbo, nll, kl), eval_loss = self.forward(*valid_loader.dataset.tensors, anneal = 1.,
                                                           has_missing=val_has_missing,XY=val_newXY,
                                                           all_seq_lengths=val_all_seq_lengths)
                nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
                # Checkpoint on best validation NELBO.
                if nelbo<best_nelbo:
                    best_nelbo = nelbo; best_nll = nll; best_kl = kl; best_ep = epoch
                    if fname is not None:
                        torch.save(self.state_dict(), fname)
                if epoch_debug:
                    test_nelbo.append(nelbo)
                    test_nll.append(nll)
                    test_kl.append(kl)
                    # if kl < 0.:
                    #     print('%.3f' % kl,)
                    train_Y, train_S, train_X, train_M, train_T = train_loader.dataset.tensors
                    test_Y, test_S, test_X, test_M, test_T = valid_loader.dataset.tensors
                    """
                    step 1: get z using mu not sampling
                    step 2: K-means cluster these z and save centers
                    step 3: return theta_k = g1(z_k) for K clusters
                    """
                    train_z, _ = self.get_mu(train_X,train_Y)
                    train_z = train_z.detach().numpy()
                    # likelihood = self.imp_sampling(train_X, train_Y)
                    # train_likelihood.append(likelihood)
                    # for different cluster algs, plot labels and true subtypes
                    K = 2
                    km = KMeans(n_clusters=K)
                    km.fit(train_z)
                    self.subtypes_km = km
                    test_z, kl = self.get_mu(test_X,test_Y)
                    test_theta = self.infer_functional_params(test_z)
                    best_delta = self.get_best_delta(test_X, test_Y, test_M, test_theta, kl)
                    test_z = test_z.detach().numpy()
                    test_clusters = self.subtypes_km.predict(test_z)
                    true_clusters = [int(i) for i in np.squeeze(test_S)]
                    test_M = torch.ones_like(test_X)
                    test_mse = self.get_mse(test_X, test_Y, test_M, test_theta, best_delta)
                    test_ari = adjusted_rand_score(test_clusters, true_clusters)
                    test_swaps = get_cluster_swap_metric(test_clusters, test_T[:,0,0].detach().numpy(), best_delta.detach().numpy())
                    test_pear = get_cluster_pear_metric(test_clusters, test_T[:,0,0].detach().numpy(), best_delta.detach().numpy())
                    test_ari_vals.append(test_ari)
                    test_mse_vals.append(test_mse)
                    test_swaps_vals.append(test_swaps)
                    test_pear_vals.append(test_pear)
                    test_batch_loss += eval_loss.item()
                    test_idx += 1
                    testing_loss.append(test_batch_loss/float(test_idx))
                    likelihood = self.imp_sampling(train_X, train_Y, imp_samples=50)
                    train_likelihood.append(likelihood)
                    likelihood = self.imp_sampling(test_X, test_Y, imp_samples=50)
                    test_likelihood.append(likelihood)
                if plot_debug:
                    train_Y, train_S, train_X, train_M, train_T = train_loader.dataset.tensors
                    test_Y, test_S, test_X, test_M, test_T = valid_loader.dataset.tensors
                    train_z, _ = self.get_mu(train_X,train_Y)
                    train_z = train_z.detach().numpy()
                    # for different cluster algs, plot labels and true subtypes
                    K = 2
                    km = KMeans(n_clusters=K)
                    km.fit(train_z)
                    self.subtypes_km = km
                    test_z, kl = self.get_mu(test_X,test_Y)
                    test_theta = self.infer_functional_params(test_z)
                    best_delta = self.get_best_delta(test_X, test_Y, test_M, test_theta, kl)
                    test_z = test_z.detach().numpy()
                    test_clusters = self.subtypes_km.predict(test_z)
                    true_clusters = [int(i) for i in np.squeeze(test_S)]
                    test_M = torch.ones_like(test_X)
                    test_mse = self.get_mse(test_X, test_Y, test_M, test_theta, best_delta)
                    test_ari = adjusted_rand_score(test_clusters, true_clusters)
                    # NOTE(review): test_pear is only defined when epoch_debug
                    # is also set; plot_debug alone raises NameError below.
                    plot_latent_labels(test_z, test_S, 'plots/pngs/lr_%.3f_%03d_latent.png' % (lr, epoch), title='Epoch %d, ARI: %.3f' % (epoch, test_ari))
                    plot_delta_comp(test_T[:,0,0].detach().numpy(), best_delta.detach().numpy(), 'plots/pngs/lr_%.3f_%03d_delta.png' % (lr, epoch), title='Epoch %d, Pear: %.3f' % (epoch, test_pear))
                self.train()
        # print ('Best NELBO:%.3f, NLL:%.3f, KL:%.3f@ epoch %d'%(best_nelbo, best_nll, best_kl, best_ep))
        self.best_nelbo = best_nelbo
        self.best_nll = best_nll
        self.best_kl = best_kl
        self.best_ep = best_ep
        if fname is not None and epochs > eval_freq:
            print('loaded state_dict. nelbo: %.4f (ep %d)' % (best_nelbo, best_ep))
            self.load_state_dict(torch.load(fname))
        self.eval()
        self.training_loss = training_loss
        self.testing_loss = testing_loss
        if plot_debug:
            # Stitch the per-epoch PNGs into GIFs, then clean up the frames.
            import os
            import imageio
            png_dir = 'plots/pngs/'
            kargs = {'duration': 0.3}
            images = []
            for file_name in sorted(os.listdir(png_dir)):
                if file_name.endswith('_latent.png'):
                    file_path = os.path.join(png_dir, file_name)
                    images.append(imageio.imread(file_path))
            imageio.mimsave('plots/data%d_latent_%.3f.gif' % (self.data_num, lr), images, **kargs)
            images = []
            for file_name in sorted(os.listdir(png_dir)):
                if file_name.endswith('_delta.png'):
                    file_path = os.path.join(png_dir, file_name)
                    images.append(imageio.imread(file_path))
            imageio.mimsave('plots/data%d_delta_%.3f.gif' % (self.data_num, lr), images, **kargs)
            # delete everything when you're done
            for file_name in os.listdir(png_dir):
                root = os.getcwd()
                complete_fname = os.path.join(root, png_dir+file_name)
                if not os.path.isdir(complete_fname):
                    os.unlink(complete_fname)
        if epoch_debug:
            # Persist all per-epoch metrics for offline analysis.
            import pickle
            f = open('data%d_results_lr%.3f.pk' % (self.data_num, lr), 'wb')
            results = {'epochs': epochs,
                       'eval_freq': eval_freq,
                       'ari': test_ari_vals,
                       'mse': test_mse_vals,
                       'swaps': test_swaps_vals,
                       'pear': test_pear_vals,
                       'train_likelihood': train_likelihood,
                       'test_likelihood': test_likelihood,
                       'train_loss': training_loss,
                       'test_loss': testing_loss,
                       'best_nelbo': best_nelbo,
                       'best_nll': best_nll,
                       'best_kl': best_kl,
                       'best_ep': best_ep,
                       'train_nelbo': train_nelbo,
                       'train_nll': train_nll,
                       'train_kl': train_kl,
                       'test_nelbo': test_nelbo,
                       'test_nll': test_nll,
                       'test_kl': test_kl,
                       # 'train_affinity_num_clusters': train_affinity_num_clusters,
                       # 'test_affinity_num_clusters': test_affinity_num_clusters,
                       'train_M_sum': train_M.sum(),
                       'test_M_sum': test_M.sum()
                       }
            pickle.dump(results, f)
            f.close()
        return best_nelbo, best_nll, best_kl, best_ep
class TwoLayer(nn.Module):
    """Small two-layer MLP: Linear -> ReLU -> Linear."""

    def __init__(self, dim_input, dim_inner, dim_output):
        super(TwoLayer, self).__init__()
        # Attribute names fc1/fc2 are part of the state_dict keys; keep them.
        self.fc1 = nn.Linear(dim_input, dim_inner)
        self.fc2 = nn.Linear(dim_inner, dim_output)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
class Sublign(Model):
    """Subtype-and-align model: an RNN encoder maps each patient's
    (time, biomarkers) sequence to a latent z; decoders map z to per-biomarker
    curve parameters, and a per-patient time shift delta aligns observations."""

    def __init__(self, dim_stochastic, dim_hidden, dim_rnn, C=0.0, dim_biomarkers=3,
                 reg_type = 'l2', sigmoid=True, learn_time=True, auto_delta=True, max_delta=10.,
                 plot_debug=False, epoch_debug=False, beta=0.001, device='cpu',
                 how_missing='linear'):
        """
        note no lr here. lr is in fit.
        """
        super(Sublign, self).__init__()
        self.dim_stochastic = dim_stochastic
        self.dim_hidden = dim_hidden
        self.dim_rnn = dim_rnn
        self.n_biomarkers = dim_biomarkers
        self.C = C                    # weight of the parameter regularizer
        self.reg_type = reg_type      # 'l1' or 'l2'
        self.sigmoid = sigmoid        # sigmoid curves if True, else quadratic
        self.dz_features = self.dim_stochastic
        # RNN consumes [t, y_1..y_B] per visit, hence n_biomarkers + 1 inputs.
        rnn_input_size = self.n_biomarkers + 1
        self.subtypes_km = None       # KMeans over latents, fit lazily
        self.rnn = nn.RNN(rnn_input_size, self.dim_rnn, 1, batch_first = True)
        self.enc_h_mu = nn.Linear(self.dim_rnn, self.dim_stochastic)
        self.enc_h_sig = nn.Linear(self.dim_rnn, self.dim_stochastic)
        self.how_missing = how_missing
        # initialize functions theta = g1(z)
        if self.sigmoid:
            self.dec_z_beta0 = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
            self.dec_z_beta1 = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
        else:
            self.dec_z_a = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
            self.dec_z_b = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
            self.dec_z_c = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
        # experiments for delta
        if auto_delta:
            # per-patient delta range derived from observed times (see get_delta_options)
            self.max_delta = 10.
            self.auto_delta = True
            self.learn_time = True
        elif learn_time:
            self.max_delta = max_delta
            self.auto_delta = False
            self.learn_time = True
        else:
            self.max_delta = 0.
            self.auto_delta = False
            self.learn_time = False
        # learn_time=False wins even when auto_delta=True was passed.
        if not learn_time:
            self.learn_time = False
            self.max_delta = 0.
            self.auto_delta = False
        self.N_delta_bins = 50        # granularity of the delta search grid
        if device == 'cpu':
            self.device = torch.device('cpu')
        else:
            self.device = torch.device('cuda')
        self.debug = {}               # scratch dict for introspection
        self.beta = beta              # KL weight in the NELBO
        self.data_num = 1
def get_delta_options(self, Xvals):
# output delta_options is tensor size N_patients, N_delta_bins
N_patients = Xvals.shape[0]
if self.auto_delta:
max_time_patient = Xvals.max(axis=1).values
max_time_all = max_time_patient.max()
max_delta_patient = max_time_all - max_time_patient
delta_options = torch.zeros(N_patients,self.N_delta_bins).to(self.device)
for i in range(N_patients):
delta_options[i] = torch.linspace(0,max_delta_patient[i,0],self.N_delta_bins)
return delta_options
else:
delta_options = torch.linspace(0, self.max_delta, self.N_delta_bins)
return delta_options[None,:].repeat(N_patients, 1).to(self.device)
    def calc_loss_per_delta(self, X, Y, M, theta, delta_options, kl):
        """
        input:
        - X (N_patients, N_visits, 1)
        - Y (N_patients, N_visits, N_biomarkers)
        - theta (N_patients, N_biomarkers each component)
        - delta_options (N_patients, N_delta_bins)
        output:
        - loss_per_patient (N_patients, N_delta_bins)
        step 1: convert everything to size N_patients, N_visits, N_biomarkers, N_delta_bins
        step 2: calculate loss yhat = f(x+delta; theta)
        """
        N_patients, N_visits, N_biomarkers = Y.shape
        # Broadcast everything to 4-D so all candidate deltas are scored at once.
        X_repeat = X[:,:,:,None].repeat(1,1,N_biomarkers,self.N_delta_bins)
        Y_repeat = Y[:,:,:,None].repeat(1,1,1,self.N_delta_bins)
        delta_opt_repeat = delta_options[:,None,None,:].repeat(1,N_visits,N_biomarkers,1)
        if self.sigmoid:
            # Sigmoid curve: yhat = sigmoid(beta0 + beta1 * (x + delta))
            beta0 = theta[0][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
            beta1 = theta[1][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
            sig_input = X_repeat + delta_opt_repeat
            mm = torch.nn.Sigmoid()
            mm_input = (beta0 + beta1 * sig_input).to(self.device)
            yhat = mm(mm_input)
        else:
            # Quadratic curve: yhat = quad_function(a, b, c, x + delta)
            a = theta[0][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
            b = theta[1][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
            c = theta[2][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
            quad_input = X_repeat + delta_opt_repeat
            yhat = quad_function(a,b,c,quad_input)
        kl_repeat = kl[:,None].repeat(1,self.N_delta_bins)
        loss = ((yhat - Y_repeat)**2)
        # Zero out entries that were missing in the original data.
        M_repeat = M[:,:,:,None].repeat(1,1,1,self.N_delta_bins)
        loss = loss.masked_fill(M_repeat == 0., 0.)
        # Sum squared error over visits and biomarkers -> (N_patients, N_delta_bins).
        loss_sum = loss.sum(axis=1).sum(axis=1)
        # Uniform prior over delta bins (constant across bins, so it does not
        # change the argmin over delta).
        delta_term = torch.log(torch.ones_like(loss_sum) / self.N_delta_bins).to(self.device)
        kl_repeat = kl_repeat.to(self.device)
        return loss_sum + self.beta*kl_repeat + delta_term
def get_best_delta(self, X,Y,M,theta, kl):
"""
output: best_delta is size N_patients
step 1: if subnolign, return 0.
step 2: get all the delta options
step 3: calculate loss for each option
step 4: find best delta option
note that z could be either from sample or get_mu so not included here
"""
# TODO: interpolate X and Y if they're missing
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
N = X.shape[0]
if not self.learn_time:
return torch.zeros(N)
delta_options = self.get_delta_options(X)
loss_per_delta = self.calc_loss_per_delta(X,Y,M,theta, delta_options, kl)
min_delta = loss_per_delta.min(axis=1).indices
best_delta = torch.zeros(N).to(self.device)
for i in range(N):
best_delta[i] = delta_options[i][min_delta[i]]
return best_delta
def predict_Y(self, X,Y,theta,delta):
"""
input:
- X (N_patients, N_visits, 1)
- Y (N_patients, N_visits, N_biomarkers)
- theta (N_patients, N_biomarkers each component)
- delta (N_patients)
output:
- yhat (N_patients, N_visits, N_biomarkers)
step 1: convert everything to size N_patients, N_visits, N_biomarkers
step 2: calculate loss yhat = f(x+delta; theta)
"""
N_patients, N_visits, N_biomarkers = Y.shape
X_repeat = X.repeat(1,1,N_biomarkers)
delta_rep = delta[:,None,None].repeat(1,N_visits,N_biomarkers)
if self.sigmoid:
beta0 = theta[0][:,None,:].repeat(1,N_visits,1)
beta1 = theta[1][:,None,:].repeat(1,N_visits,1)
sig_input = X_repeat + delta_rep
mm = torch.nn.Sigmoid()
mm_input = (beta0 + beta1 * sig_input).to(self.device)
yhat = mm(mm_input)
else:
a = theta[0][:,None,:].repeat(1,N_visits,1)
b = theta[1][:,None,:].repeat(1,N_visits,1)
c = theta[2][:,None,:].repeat(1,N_visits,1)
quad_input = X_repeat + delta_rep
yhat = quad_function(a,b,c,quad_input)
return yhat
    def get_loss(self, Y, S, X, M, anneal=1.,
                 XY=None,all_seq_lengths=None, has_missing=False):
        """Per-patient negative ELBO; returns (nelbo, nll, kl), each length N.

        `anneal` scales the KL term; `S` (subtype labels) is unused here.
        """
        if type(X) == np.ndarray:
            X = torch.tensor(X).to(self.device)
            Y = torch.tensor(Y).to(self.device)
            M = torch.tensor(M).to(self.device)
        z, kl = self.sample(X,Y,XY=XY,
                            all_seq_lengths=all_seq_lengths, has_missing=has_missing)
        theta = self.infer_functional_params(z)
        # Delta is chosen by a hard grid search; no gradients flow through it.
        with torch.no_grad():
            best_delta = self.get_best_delta(X,Y,M,theta, kl)
        yhat = self.predict_Y(X,Y,theta,best_delta)
        self.debug['y_out'] = yhat
        squared = (Y - yhat)**2
        # mask out originally missing values
        squared = squared.masked_fill(M == 0., 0)
        nll = squared.sum(-1).sum(-1)
        # delta_term is computed but deliberately left out of the NELBO below
        # (see the commented line); it is constant per patient anyway.
        delta_term = torch.log(torch.ones_like(nll) / self.N_delta_bins)
        # nelbo = nll + self.beta*anneal*kl + delta_term
        nelbo = nll + self.beta*anneal*kl
        return nelbo, nll, kl
def forward(self, Y, S, X, M, T, anneal = 1.,
XY=None,all_seq_lengths=None, has_missing=False):
if type(M) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
if XY is None and (check_has_missing(X) or check_has_missing(Y)):
has_missing = True
XY = torch.cat([X,Y], axis=2)
newXY, all_seq_lengths = convert_XY_pack_pad(XY, how=self.how_missing)
else:
has_missing = False
(nelbo, nll, kl) = self.get_loss(Y, S, X, M, anneal = anneal, XY=XY,all_seq_lengths=all_seq_lengths, has_missing=has_missing)
reg_loss = nelbo
for name,param in self.named_parameters():
reg_loss += self.C*self.apply_reg(param, reg_type=self.reg_type)
normalizer = torch.sum(M)
norm_nelbo = (torch.sum(nelbo) / normalizer)
norm_nll = (torch.sum(nll)/normalizer)
norm_kl = torch.mean(kl)
norm_reg = torch.sum(reg_loss) / normalizer
return (norm_nelbo, norm_nll, norm_kl), norm_reg
    def sample(self, X,Y,mu_std=False,XY=None,all_seq_lengths=None, has_missing=False):
        """
        Returns z and KL sampled from observed X,Y
        """
        cacheXY = XY
        if type(X) == np.ndarray:
            X = torch.tensor(X).to(self.device)
            Y = torch.tensor(Y).to(self.device)
        XY = torch.cat([X,Y], axis=2)
        # import pdb; pdb.set_trace()
        if has_missing:
            # Caller already padded the batch; reuse its tensor and lengths.
            # batch_in, sequences = convert_XY_pack_pad(XY,how=self.how_missing)
            pack = torch.nn.utils.rnn.pack_padded_sequence(cacheXY, all_seq_lengths, batch_first=True, enforce_sorted=False)
            _, hidden = self.rnn(pack)
        elif check_has_missing(XY):
            # NOTE(review): this branch packs `cacheXY` (the caller-supplied XY,
            # possibly None) rather than the freshly converted `batch_in`, and
            # relies on `all_seq_lengths` being set — inconsistent with get_mu;
            # confirm which tensor/lengths were intended here.
            batch_in, sequences = convert_XY_pack_pad(XY,how=self.how_missing)
            pack = torch.nn.utils.rnn.pack_padded_sequence(cacheXY, all_seq_lengths, batch_first=True, enforce_sorted=False)
            _, hidden = self.rnn(pack)
        else:
            _, hidden = self.rnn(XY)
        self.debug['rnn'] = hidden
        hid = torch.squeeze(hidden)
        hid = hid.to(self.device)
        # idx contains list of indices representing the current datapoints in X
        # mu_param is a pytorch tensor (randomly initialized) of size N x dimensionality of latent space
        # gamma = 1 (learning w/ inf. network) or 0. (learning w/ svi)
        # NOTE(review): `mu_param`, `idx` and `gamma` are not defined anywhere
        # in this method or visible scope — as written this raises NameError
        # unless they are injected elsewhere; confirm or remove the SVI path.
        mu_table = mu_param[idx]
        mu_enc = self.enc_h_mu(hid)
        mu = gamma*mu_enc+(1-gamma)*mu_table
        sig = torch.exp(self.enc_h_sig(hid))
        # q(z|x,y) is a diagonal Gaussian; reparameterized sample, with KL
        # estimated as the log-density difference against a standard normal.
        q_dist = Independent(Normal(mu, sig), 1)
        z = torch.squeeze(q_dist.rsample((1,)))
        p_dist = Independent(Normal(torch.zeros_like(mu), torch.ones_like(sig)), 1)
        kl = q_dist.log_prob(z)-p_dist.log_prob(z)
        self.debug['hid'] = hid
        self.debug['kl'] = kl
        self.debug['mu'] = mu
        self.debug['sig'] = sig
        if mu_std:
            return z, kl, mu
        else:
            return z, kl
def get_mu(self, X,Y):
N = X.shape[0]
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
XY = torch.cat([X,Y], axis=2)
if check_has_missing(XY):
batch_in, sequences = convert_XY_pack_pad(XY)
pack = torch.nn.utils.rnn.pack_padded_sequence(batch_in, sequences, batch_first=True, enforce_sorted=False)
_, hidden = self.rnn(pack)
else:
_, hidden = self.rnn(XY)
hid = torch.squeeze(hidden)
mu = self.enc_h_mu(hid)
return mu, torch.zeros(N)
def infer_functional_params(self, z):
if self.sigmoid:
return [self.dec_z_beta0(z), self.dec_z_beta1(z)]
else:
return [self.dec_z_a(z), self.dec_z_b(z), self.dec_z_c(z)]
def get_subtypes(self, X, Y, K=2):
"""
step 1: get z using mu not sampling
step 2: K-means cluster these z and save centers
step 3: return theta_k = g1(z_k) for K clusters
"""
z, _ = self.get_mu(X,Y)
if z.get_device() > -1:
z = z.cpu().detach().numpy()
else:
z = z.detach().numpy()
# for different cluster algs, plot labels and true subtypes
km = KMeans(n_clusters=K)
if np.isnan(z).any():
print('z has nan in it')
import pdb; pdb.set_trace()
km.fit(z)
self.subtypes_km = km
z_mus = km.cluster_centers_
N_dims = Y.shape[2]
if self.sigmoid:
cent_lst = np.zeros((K,N_dims,2))
else:
cent_lst = np.zeros((K,N_dims,3))
for k_ix in range(K):
z_mu = z_mus[k_ix]
z_mu = torch.tensor(z_mu[None,:]).to(self.device)
theta = self.infer_functional_params(z_mu)
if theta[0].get_device() > -1:
theta = [t.cpu().detach().numpy() for t in theta]
else:
theta = [t.detach().numpy() for t in theta]
for param_i, param_component in enumerate(theta):
for dim_i, dim_val in enumerate(param_component[0]):
cent_lst[k_ix,dim_i,param_i] = dim_val
return cent_lst
def get_param_subtypes(self, X, Y, K=2):
"""
step 1: get z using mu not sampling
step 2: K-means cluster these z and save centers
step 3: return theta_k = g1(z_k) for K clusters
"""
params = self.get_params(X,Y)
pdb
z = z.detach().numpy()
# for different cluster algs, plot labels and true subtypes
km = KMeans(n_clusters=K)
km.fit(z)
self.subtypes_km = km
z_mus = km.cluster_centers_
cent_lst = list()
for k_ix in range(K):
z_mu = z_mus[k_ix]
z_mu = torch.tensor(z_mu[None,:]).to(self.device)
theta = self.infer_functional_params(z_mu)
theta = [t.detach().numpy() for t in theta]
cent_lst.append(theta)
return cent_lst
def get_params(self, X, Y):
"""
different from get_subtypes because now there is one theta per person
NOT num subtypes
"""
z, _ = self.get_mu(X,Y)
# z = z.detach().numpy()
if self.sigmoid:
return [self.dec_z_beta0(z), self.dec_z_beta1(z)]
else:
return [self.dec_z_a(z), self.dec_z_b(z), self.dec_z_c(z)]
def get_labels(self, data_dict):
X = torch.tensor(data_dict['obs_t_collect']).to(self.device)
Y = torch.tensor(data_dict['Y_collect']).to(self.device)
z, _ = self.get_mu(X,Y)
if z.get_device() > -1:
z = z.cpu().detach().numpy()
else:
z = z.detach().numpy()
labels = self.subtypes_km.predict(z)
return labels
def get_deltas(self, data_dict):
X = torch.tensor(data_dict['obs_t_collect']).to(self.device)
Y = torch.tensor(data_dict['Y_collect']).to(self.device)
M = torch.tensor(data_dict['mask_collect']).to(self.device)
z, kl = self.get_mu(X,Y)
theta = self.infer_functional_params(z)
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
best_delta = self.get_best_delta(X,Y,M,theta, kl)
return best_delta
def get_mse(self,X,Y,M,theta,best_delta):
yhat = self.predict_Y(X,Y,theta,best_delta)
squared = (Y - yhat)**2
nll = squared.sum(-1).sum(-1)
normsum = torch.sum(M)
return torch.sum(nll) / normsum
    def score(self, train_data_dict, test_data_dict, K=2):
        """
        step 1: get delta
        step 2: get subtype assignments
        step 3: get performance metrics
        Returns a dict with mse/ari/swaps/pear plus the fitted subtype curves,
        or None (after printing) if test_data_dict is missing a required key.
        """
        for col in ['Y_collect', 'obs_t_collect', 's_collect', 't_collect']:
            if col not in test_data_dict:
                print('ERROR: %s not in test_data_dict' % col)
                return
        # Fit subtype clusters on the training latents (side effect: subtypes_km).
        cent_lst = self.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=K)
        test_X = torch.tensor(test_data_dict['obs_t_collect']).to(self.device)
        test_Y = torch.tensor(test_data_dict['Y_collect']).to(self.device)
        test_M = torch.tensor(test_data_dict['mask_collect']).to(self.device)
        test_z, kl = self.get_mu(test_X,test_Y)
        test_theta = self.infer_functional_params(test_z)
        best_delta = self.get_best_delta(test_X,test_Y, test_M, test_theta, kl)
        test_z = test_z.detach().numpy()
        test_clusters = self.subtypes_km.predict(test_z)
        true_clusters = [int(i) for i in np.squeeze(test_data_dict['s_collect'])]
        # NOTE(review): the real mask is replaced by all-ones before the MSE
        # (and ones_like(test_X) has 1 channel vs Y's N_biomarkers). This
        # mirrors the training-loop evaluation code, but confirm it is intended.
        test_M = torch.ones_like(test_X)
        test_mse = self.get_mse(test_X, test_Y, test_M, test_theta, best_delta)
        test_ari = adjusted_rand_score(test_clusters, true_clusters)
        test_swaps = get_cluster_swap_metric(test_clusters, test_data_dict['t_collect'][:,0,0], best_delta.detach().numpy())
        test_pear = get_cluster_pear_metric(test_clusters, test_data_dict['t_collect'][:,0,0], best_delta.detach().numpy())
        results = {
            'mse': test_mse,
            'ari': test_ari,
            'swaps': test_swaps,
            'pear': test_pear,
            'cent_lst': cent_lst
        }
        return results
    def imp_sampling(self, X, Y, imp_samples=10, delta_gran = 20):
        """Importance-sampling estimate of the (negative) data log-likelihood.

        NOTE(review): the `delta_gran` argument is immediately overwritten with
        self.N_delta_bins, and inside the delta loop `best_delta` (not `dval`)
        is passed to predict_Y, so every delta iteration scores the identical
        shift — confirm whether `dval` was meant to be used (see the commented
        `best_delta = dval`).
        """
        delta_gran = self.N_delta_bins
        if type(X) == np.ndarray:
            X = torch.tensor(X).to(self.device)
            Y = torch.tensor(Y).to(self.device)
        ll_estimates = torch.zeros((imp_samples,delta_gran,X.shape[0])).to(X.device)
        ll_priors = torch.zeros((imp_samples,delta_gran,X.shape[0])).to(X.device)
        ll_posteriors = torch.zeros((imp_samples,delta_gran,X.shape[0])).to(X.device)
        # TODO: fix this
        N_latent_dim = self.dz_features
        mu_prior, std_prior = torch.zeros(N_latent_dim), torch.ones(N_latent_dim)
        M = torch.ones_like(Y)
        for sample in range(imp_samples):
            z, kl, qz_mu = self.sample(X,Y,mu_std=True)
            qz_sig = torch.ones(N_latent_dim)
            theta = self.infer_functional_params(z)
            ll_estimate_list, ll_posterior_list, ll_prior_list = [],[],[]
            for dval in np.linspace(0,5,delta_gran):
                best_delta = self.get_best_delta(X,Y,M,theta, kl)
                dval = best_delta*0.+dval
                #print (best_delta.shape, dval)
                #best_delta = dval
                yhat = self.predict_Y(X,Y,theta,best_delta)
                nll = (yhat - Y) ** 2
                ll_estimate_list.append(-1*nll.sum(-1).sum(-1))
                ll_prior_list.append((-1*self.masked_gaussian_nll_3d(z, mu_prior, std_prior)).sum(-1))
                ll_posterior_list.append((-1*self.masked_gaussian_nll_3d(z, qz_mu, qz_sig)).sum(-1))
            ll_priors[sample] = torch.stack(ll_prior_list)
            ll_estimates[sample] = torch.stack(ll_estimate_list)
            ll_posteriors[sample] = torch.stack(ll_posterior_list)
        # log-mean-exp over (samples x delta bins) of w = p(x|z)p(z)/q(z|x).
        nll_estimate = -1*(torch.logsumexp(ll_estimates.view(imp_samples*delta_gran,-1) + ll_priors.view(imp_samples*delta_gran,-1) - ll_posteriors.view(imp_samples*delta_gran,-1), dim=0) - np.log(imp_samples*delta_gran))
        log_p = torch.mean(nll_estimate)
        return log_p
def get_subtypes_datadict(self, data_dict, K=2):
"""
assumes you've already fit a model
"""
X = torch.tensor(data_dict['obs_t_collect']).to(self.device)
Y = torch.tensor(data_dict['Y_collect']).to(self.device)
M = torch.tensor(data_dict['mask_collect']).to(self.device)
z, _ = self.get_mu(X,Y)
if z.get_device() > -1:
z = z.cpu().detach().numpy().copy()
else:
z = z.detach().numpy().copy()
if self.subtypes_km is None:
# for different cluster algs, plot labels and true subtypes
km = KMeans(n_clusters=K)
km.fit(z)
self.subtypes_km = km
labels = self.subtypes_km.predict(z)
return labels
def get_hyperparameters(data_format_num):
    """Return (C, dim_stochastic, dim_hidden, dim_rnn, reg_type, lr) tuned per
    synthetic dataset format number.

    NOTE(review): the trailing `elif data_format_num > 2` is chained only to
    the `== 11` check, so it OVERRIDES the settings assigned above for formats
    3 and 5 — confirm whether that fall-through is intended.
    """
    if data_format_num < 3:
        C, ds, dh, drnn, reg_type, lr = 0., 10, 20, 50, 'l1', 0.01
    if data_format_num == 5 or data_format_num == 3:
        C, ds, dh, drnn, reg_type, lr = 0.01, 20, 20, 100, 'l2', 0.01
    # if data_format_num == 4:
    #     C, ds, dh, drnn, reg_type, lr = 0.0, 30, 10, 50, 'l1', 0.001
    if data_format_num == 1:
        C, ds, dh, drnn, reg_type, lr = 0.0, 20, 30, 150, 'l1', 0.001
        # C, ds, dh, drnn, reg_type, lr = 0.0, 20, 20, 100, 'l1', 0.001
    if data_format_num == 11:
        C, ds, dh, drnn, reg_type, lr = 0.0, 20, 30, 150, 'l1', 0.001
    elif data_format_num > 2:
        C, ds, dh, drnn, reg_type, lr = 0., 20, 50, 100, 'l1', 0.01
    return C, ds, dh, drnn, reg_type, lr
def main():
    """CLI entry point: train SubLign on synthetic / CHF / PPMI data and
    report alignment + clustering metrics."""
    import argparse
    import os
    import sys
    sys.path.append('../data')
    sys.path.append('../plot')
    from load import sigmoid, quadratic, chf, parkinsons, load_data_format
    from data_utils import parse_data, change_missing
    from plot_utils import plot_subtypes, plot_latent
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', action='store', type=int, default=800, help="Number of epochs")
    parser.add_argument('--trials', action='store', type=int, default=1, help="Number of trials")
    parser.add_argument('--model_name', action='store', type=str, default='SubLign', help="Model name for Latex table making")
    # datasets
    parser.add_argument('--data_num', action='store', type=int, help="Data Format Number")
    parser.add_argument('--chf', action='store_true', help="Use CHF dataset")
    parser.add_argument('--ppmi', action='store_true', help="Use PPMI dataset")
    # delta setup
    # parser.add_argument('--auto_delta', action='store_true', help="Learn delta dynamically for each patient")
    parser.add_argument('--max_delta', action='store', type=float, help="Maximum possible delta")
    parser.add_argument('--no_time', action='store_true', help="Learn time at all")
    # debugging
    parser.add_argument('--verbose', action='store_true', help="Plot everything")
    parser.add_argument('--missing', action='store', type=float, default=0., help="What percent of data to make missing")
    parser.add_argument('--plot_debug', action='store_true', help="Make animated gif about alignment / clusterings over epochs")
    parser.add_argument('--epoch_debug', action='store_true', help="Save pickle about epoch differences over training")
    parser.add_argument('--likelihood', action='store_true', help="Print likelihood")
    parser.add_argument('--lr', action='store', type=float, help="Learning rate override")
    parser.add_argument('--eval_freq', action='store', type=int, help="Make this larger than epochs for faster results", default=25)
    # other experiments
    args = parser.parse_args()
    trial_results = np.zeros((args.trials, 4))
    data_format_num = args.data_num
    # No explicit max_delta means per-patient automatic delta ranges.
    if args.max_delta is None:
        auto_delta = True
    else:
        auto_delta = False
    for trial_num in range(args.trials):
        # datasets
        if data_format_num is not None:
            max_visits = 4
            num_output_dims = 3 if data_format_num < 3 else 1
            use_sigmoid = data_format_num < 3
            if data_format_num > 10:
                use_sigmoid = True
                num_output_dims = 3
            C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
            if args.lr != None:
                print('Learning rate: %.3f' % args.lr)
                lr = args.lr
            data = load_data_format(data_format_num, trial_num, cache=True)
            shuffle = False
        elif args.chf:
            print('HERE2')
            data = chf()
            max_visits = 38
            shuffle = True
        elif args.ppmi:
            data = parkinsons()
            max_visits = 17
            shuffle = True
            # data = data[data['subtype'] == 1]
        train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=max_visits, test_per=0.2, valid_per=0.2, shuffle=shuffle)
        # train_data_loader, train_data_dict, test_data_loader, test_data_dict, p_ids, full_p_ids = parse_data(data.values, max_visits=max_visits, test_per=0.2, shuffle=shuffle)
        # pickle.dump((train_data_loader, train_data_dict, test_data_loader, test_data_dict, p_ids, full_p_ids), open('../synthetic_runs/data.pk', 'wb'))
        # import pickle
        # train_data_loader, train_data_dict, test_data_loader, test_data_dict, p_ids, full_p_ids = pickle.load(open('../synthetic_runs/data.pk', 'rb'))
        if args.missing > 0.:
            train_data_loader, train_data_dict = change_missing(train_data_dict, args.missing)
        # Full-dataset loader/dict, used by the CHF/PPMI branches below.
        data_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits)
        """
        best parmas found through hypertuning (cross_validation/hpsearch.py)
        # sigmoid: C (0.01), dim_h (20), ds (10 mid), dim_rnn (50 mid), reg_type (l1), lr (0.1)
        # quad: C (0.1), dim_h (50), ds (10), dim_rnn (100), reg_type (l1), lr (0.1)
        ppmi: (0.0, 10, 10, 50, 'l1', 0.1)
        """
        # dim_stochastic, dim_hidden, dim_rnn, C, dim_biomarkers=3, reg_type = 'l2',
        if data_format_num is not None:
            model = Sublign(d_s, d_h, d_rnn, C, num_output_dims, sigmoid=use_sigmoid, reg_type=reg_type, auto_delta=auto_delta, max_delta=args.max_delta, learn_time=(not args.no_time))
            model.fit(train_data_loader, test_data_loader, args.epochs, lr, verbose=args.verbose, fname='runs/data%d_trial%d.pt' % (data_format_num, trial_num), eval_freq=args.eval_freq,epoch_debug=args.epoch_debug, plot_debug=args.plot_debug)
        elif args.chf:
            args.verbose = False
            model = Sublign(10, 20, 50, 0.1, data.shape[1] - 4, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=args.max_delta, learn_time=(not args.no_time))
            model.fit(data_loader, data_loader, args.epochs, 0.01, verbose=args.verbose)
            subtypes = model.get_subtypes(collect_dict['obs_t_collect'], collect_dict['Y_collect'], K=3)
            # NOTE(review): get_labels/get_deltas take a single data_dict in this
            # file, but are called with unpacked arrays here — confirm signatures.
            labels = model.get_labels(collect_dict['obs_t_collect'], collect_dict['Y_collect'])
            deltas = model.get_deltas(collect_dict['obs_t_collect'], collect_dict['Y_collect'], collect_dict['mask_collect'])
            zs = model.get_mu(collect_dict['obs_t_collect'], collect_dict['Y_collect'])
            import pickle
            pickle.dump((labels, deltas, subtypes, unique_pid, collect_dict, zs), open('../clinical_runs/chf_sublign_hera3.pk', 'wb'))
            return
        elif args.ppmi:
            args.verbose = False
            # (0.0, 10, 10, 50, 'l1', 0.1)
            # C (0.1), dim_h (50), ds (10), dim_rnn (100), reg_type (l1), lr (0.1)
            model = Sublign(10, 10, 20, 0., data.shape[1] - 4, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=args.max_delta, learn_time=(not args.no_time))
            # model.fit(train_data_loader, test_data_loader, args.epochs, 0.1, verbose=args.verbose)
            # subtypes = model.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=2)
            # labels = model.get_labels(train_data_dict)
            # deltas = model.get_deltas(train_data_dict)
            model.fit(data_loader, data_loader, args.epochs, 0.1, verbose=args.verbose)
            subtypes = model.get_subtypes(collect_dict['obs_t_collect'], collect_dict['Y_collect'], K=3)
            labels = model.get_labels(collect_dict)
            deltas = model.get_deltas(collect_dict)
            # gt_labels = [int(i) for i in test_data_dict['s_collect'].squeeze()]
            # print('ARI: %.3f' % adjusted_rand_score(gt_labels, labels))
            import pickle
            pickle.dump((labels, deltas, subtypes, unique_pid, collect_dict), open('../clinical_runs/ppmi_sublign_PDonly.pk', 'wb'))
            return
        # Synthetic-data evaluation path.
        subtypes = model.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=2)
        train_results = model.score(train_data_dict, train_data_dict)
        test_results = model.score(train_data_dict, test_data_dict)
        Y = test_data_dict['Y_collect']
        X = test_data_dict['obs_t_collect']
        M = test_data_dict['mask_collect']
        S = None
        T = None
        if args.likelihood:
            log_p = model.imp_sampling(X,Y,imp_samples=50)
            print('Test Liklihood: %.3f' % log_p)
        (nelbo, nll, kl), _ = model.forward(Y, S, X, M, T, anneal=1.)
        # def forward(self, Y, S, X, M, T, anneal = 1.):
        nelbo, nll, kl = nelbo.mean().detach().numpy(), nll.mean().detach().numpy(), kl.mean().detach().numpy()
        if args.verbose:
            # NOTE(review): `args.sigmoid` is never added to the parser above —
            # this line would raise AttributeError; confirm intended attribute.
            plot_subtypes(subtypes, args.sigmoid, train_data_dict)
            plot_latent(model, test_data_dict)
        trial_results[trial_num] = [test_results['mse'],test_results['ari'], test_results['swaps'], test_results['pear']]
    if args.trials == 1:
        print('Train: %.3f, %.3f, %.3f, %.3f' % (train_results['mse'], train_results['ari'], train_results['swaps'], train_results['pear']))
        print('Test : %.3f, %.3f, %.3f, %.3f' % (test_results['mse'], test_results['ari'], test_results['swaps'], test_results['pear']))
        print('NELBO: %.3f, NLL: %.3f, KL: %.3f' % (nelbo, nll, kl))
    else:
        line_str = list()
        for i,j in zip(trial_results.mean(axis=0), trial_results.std(axis=0)):
            line_str.append('%.3f $\\pm$ %.3f' % (i,j))
        print(' & '.join([args.model_name] + line_str) + '\\\\')
        trials_fname = 'runs/%s.txt' % args.model_name
        # NOTE(review): the file handle below is opened but (with the writes
        # commented out) never written to or closed — confirm intent.
        if not os.path.exists(trials_fname):
            f = open(trials_fname, 'w')
        else:
            f = open(trials_fname, 'a')
        # f.write(' & '.join([args.model_name] + line_str) + '\\\\' + '\n')
        # f.close()
# Script entry point. (Removed dataset-extraction metadata residue that had
# been fused onto the `main()` line.)
if __name__=='__main__':
    main()
7956dd6db9fe0ad639f4534081cadbd9d4c556a0 | 3,383 | py | Python | gdsfactory/mask/merge_test_metadata.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/mask/merge_test_metadata.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/mask/merge_test_metadata.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | """Merge mask metadata with test labels to return test_metadata """
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional
from omegaconf import DictConfig, OmegaConf
from gdsfactory.config import logger
from gdsfactory.types import PathType
def parse_csv_data(
csv_labels_path: Path, ignore_prefix: str = "METR_"
) -> List[List[str]]:
"""Returns CSV labels as a list of strings."""
with open(csv_labels_path) as f:
# Get all lines
lines = [line.replace("\n", "") for line in f.readlines()]
# Ignore labels for metrology structures
lines = [line for line in lines if not line.startswith(ignore_prefix)]
# Split lines in fields
lines = [line.split(",") for line in lines]
lines = [[s.strip() for s in splitted if s.strip()] for splitted in lines]
# Remove empty lines
lines = [line for line in lines if line]
return lines
def get_cell_from_label(label: str) -> str:
    """Extract the cell name from a label (the text inside the parentheses).

    Loopback cells are stored as ``loopback_<name>``; the prefix token is
    stripped before returning. Raises ValueError when the label has no ``(``.
    """
    _head, sep, tail = label.partition("(")
    if not sep:
        raise ValueError(f"{label!r} needs (cell name) between parenthesis")
    cell_name = tail.partition(")")[0]
    if cell_name.startswith("loopback"):
        cell_name = "_".join(cell_name.split("_")[1:])
    return cell_name
def merge_test_metadata(
    labels_path: PathType,
    mask_metadata: Dict[str, Any],
    labels_prefix: str = "opt",
    get_cell_from_string=get_cell_from_label,
    filepath: Optional[PathType] = None,
) -> DictConfig:
    """Merge GDS labels (CSV) with YAML mask metadata into test metadata.

    For each labeled cell found in the CSV, the cell's mask metadata is copied
    and annotated with the label position and text. Cells with a label but no
    metadata are logged and warned about.

    Args:
        labels_path: for test labels in CSV.
        mask_metadata: dict with test metadata.
        labels_prefix: only select labels with a text prefix.
        get_cell_from_string: returns label string.
        filepath: Optional path to write test metadata.
    """
    labels_path = Path(labels_path)
    if not labels_path.exists():
        raise FileNotFoundError(f"missing CSV labels {labels_path!r}")
    cells_metadata = mask_metadata.get("cells", {})
    test_metadata = DictConfig({})
    for label, x, y in parse_csv_data(labels_path):
        cell = get_cell_from_string(label)
        if cell not in cells_metadata:
            logger.error(f"missing cell metadata for {cell!r}")
            warnings.warn(f"missing cell metadata for {cell!r}")
            continue
        test_metadata[cell] = cells_metadata[cell]
        test_metadata[cell].label = dict(x=float(x), y=float(y), text=label)
    if filepath:
        Path(filepath).write_text(OmegaConf.to_yaml(test_metadata))
    return test_metadata
if __name__ == "__main__":
    # from gdsfactory import CONFIG
    # labels_path = (
    #     CONFIG["samples_path"] / "mask_pack" / "build" / "mask" / "sample_mask.csv"
    # )
    # mask_metadata_path = labels_path.with_suffix(".yml")
    # mask_metadata = OmegaConf.load(mask_metadata_path)
    # d = merge_test_metadata(labels_path=labels_path, mask_metadata=mask_metadata)
    # print(d)
    # Smoke-test the label parser on a representative label.
    print(get_cell_from_label("opt_te1550_demo"))
| 31.036697 | 85 | 0.652675 |
7956dd97b5ecebd536d8d1f9afb936ed5ebec034 | 6,391 | py | Python | homeassistant/components/sensor/min_max.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 2 | 2020-08-29T07:24:56.000Z | 2020-10-27T21:47:35.000Z | homeassistant/components/sensor/min_max.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 6 | 2021-02-08T20:25:50.000Z | 2022-03-11T23:27:53.000Z | homeassistant/components/sensor/min_max.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 3 | 2018-09-14T07:34:09.000Z | 2018-09-29T12:57:10.000Z | """
Support for displaying the minimal and the maximal value.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.min_max/
"""
import asyncio
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, STATE_UNKNOWN, CONF_TYPE, ATTR_UNIT_OF_MEASUREMENT)
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
_LOGGER = logging.getLogger(__name__)
ATTR_MIN_VALUE = 'min_value'
ATTR_MAX_VALUE = 'max_value'
ATTR_COUNT_SENSORS = 'count_sensors'
ATTR_MEAN = 'mean'
ATTR_LAST = 'last'
ATTR_TO_PROPERTY = [
ATTR_COUNT_SENSORS,
ATTR_MAX_VALUE,
ATTR_MEAN,
ATTR_MIN_VALUE,
ATTR_LAST,
]
CONF_ENTITY_IDS = 'entity_ids'
CONF_ROUND_DIGITS = 'round_digits'
ICON = 'mdi:calculator'
SENSOR_TYPES = {
ATTR_MIN_VALUE: 'min',
ATTR_MAX_VALUE: 'max',
ATTR_MEAN: 'mean',
ATTR_LAST: 'last',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_TYPE, default=SENSOR_TYPES[ATTR_MAX_VALUE]):
vol.All(cv.string, vol.In(SENSOR_TYPES.values())),
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENTITY_IDS): cv.entity_ids,
vol.Optional(CONF_ROUND_DIGITS, default=2): vol.Coerce(int),
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Set up the min/max/mean sensor.

    Reads the validated platform configuration and registers a single
    MinMaxSensor that tracks the configured entity ids.
    """
    entity_ids = config.get(CONF_ENTITY_IDS)
    name = config.get(CONF_NAME)
    sensor_type = config.get(CONF_TYPE)
    round_digits = config.get(CONF_ROUND_DIGITS)

    # Second argument True requests an immediate first update.
    async_add_entities(
        [MinMaxSensor(hass, entity_ids, name, sensor_type, round_digits)],
        True)
    return True
def calc_min(sensor_values):
    """Calculate min value, honoring unknown states.

    Args:
        sensor_values: iterable of numeric states; entries equal to
            STATE_UNKNOWN are ignored.

    Returns:
        The smallest known value, or STATE_UNKNOWN if every entry is
        unknown (or the iterable is empty).
    """
    known = [sval for sval in sensor_values if sval != STATE_UNKNOWN]
    return min(known) if known else STATE_UNKNOWN
def calc_max(sensor_values):
    """Calculate max value, honoring unknown states.

    Args:
        sensor_values: iterable of numeric states; entries equal to
            STATE_UNKNOWN are ignored.

    Returns:
        The largest known value, or STATE_UNKNOWN if every entry is
        unknown (or the iterable is empty).
    """
    known = [sval for sval in sensor_values if sval != STATE_UNKNOWN]
    return max(known) if known else STATE_UNKNOWN
def calc_mean(sensor_values, round_digits):
    """Calculate mean value, honoring unknown states.

    Args:
        sensor_values: iterable of numeric states; entries equal to
            STATE_UNKNOWN are ignored.
        round_digits: number of decimal digits the mean is rounded to.

    Returns:
        The rounded mean of the known values, or STATE_UNKNOWN if every
        entry is unknown (or the iterable is empty).
    """
    known = [sval for sval in sensor_values if sval != STATE_UNKNOWN]
    if not known:
        return STATE_UNKNOWN
    return round(sum(known) / len(known), round_digits)
class MinMaxSensor(Entity):
    """Representation of a min/max sensor.

    Tracks a set of source entities and exposes their min / max / mean /
    last numeric state, depending on the configured sensor type.
    """

    def __init__(self, hass, entity_ids, name, sensor_type, round_digits):
        """Initialize the min/max sensor.

        Args:
            hass: Home Assistant core object.
            entity_ids: entity ids of the source sensors to track.
            name: optional friendly name; derived from the type if omitted.
            sensor_type: one of SENSOR_TYPES values ('min'/'max'/'mean'/'last').
            round_digits: number of digits the mean is rounded to.
        """
        self._hass = hass
        self._entity_ids = entity_ids
        self._sensor_type = sensor_type
        self._round_digits = round_digits

        if name:
            self._name = name
        else:
            self._name = '{} sensor'.format(
                next(v for k, v in SENSOR_TYPES.items()
                     if self._sensor_type == v)).capitalize()
        self._unit_of_measurement = None
        self._unit_of_measurement_mismatch = False
        self.min_value = self.max_value = self.mean = self.last = STATE_UNKNOWN
        self.count_sensors = len(self._entity_ids)
        # Last known numeric state per tracked entity id.
        self.states = {}

        @callback
        def async_min_max_sensor_state_listener(entity, old_state, new_state):
            """Handle the sensor state changes."""
            # Fix: compare with equality instead of the original substring
            # test ``new_state.state in STATE_UNKNOWN``, which wrongly
            # treated any state whose text is contained in 'unknown'
            # (e.g. 'no' or 'n') as unknown.
            if new_state.state is None or new_state.state == STATE_UNKNOWN:
                self.states[entity] = STATE_UNKNOWN
                hass.async_add_job(self.async_update_ha_state, True)
                return

            # Lock onto the first unit seen; later mismatches flag an error.
            if self._unit_of_measurement is None:
                self._unit_of_measurement = new_state.attributes.get(
                    ATTR_UNIT_OF_MEASUREMENT)

            if self._unit_of_measurement != new_state.attributes.get(
                    ATTR_UNIT_OF_MEASUREMENT):
                _LOGGER.warning(
                    "Units of measurement do not match for entity %s",
                    self.entity_id)
                self._unit_of_measurement_mismatch = True

            try:
                self.states[entity] = float(new_state.state)
                self.last = float(new_state.state)
            except ValueError:
                _LOGGER.warning("Unable to store state. "
                                "Only numerical states are supported")

            hass.async_add_job(self.async_update_ha_state, True)

        async_track_state_change(
            hass, entity_ids, async_min_max_sensor_state_listener)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        if self._unit_of_measurement_mismatch:
            return STATE_UNKNOWN
        # Resolve e.g. 'max' back to the attribute name 'max_value'.
        return getattr(self, next(
            k for k, v in SENSOR_TYPES.items() if self._sensor_type == v))

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        if self._unit_of_measurement_mismatch:
            return "ERR"
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed; updates are pushed by the state listener."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        state_attr = {
            attr: getattr(self, attr) for attr
            in ATTR_TO_PROPERTY if getattr(self, attr) is not None
        }
        return state_attr

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    @asyncio.coroutine
    def async_update(self):
        """Get the latest data and updates the states."""
        sensor_values = [self.states[k] for k in self._entity_ids
                         if k in self.states]
        self.min_value = calc_min(sensor_values)
        self.max_value = calc_max(sensor_values)
        self.mean = calc_mean(sensor_values, self._round_digits)
| 31.17561 | 79 | 0.644031 |
7956dd9954a869adae25776f34d9cfad6f7f2ede | 1,912 | py | Python | mp/data/pytorch/domain_prediction_dataset_wrapper.py | MECLabTUDA/OOD-Gen | f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e | [
"MIT"
] | null | null | null | mp/data/pytorch/domain_prediction_dataset_wrapper.py | MECLabTUDA/OOD-Gen | f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e | [
"MIT"
] | null | null | null | mp/data/pytorch/domain_prediction_dataset_wrapper.py | MECLabTUDA/OOD-Gen | f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e | [
"MIT"
] | null | null | null | from mp.data.pytorch.pytorch_dataset import PytorchDataset
from mp.data.datasets.dataset import Instance
import copy
import torch
class DomainPredictionDatasetWrapper(PytorchDataset):
    r"""Wraps a PytorchDataset, reusing its inputs but replacing every
    target with a fixed domain index for domain-prediction training."""

    def __init__(self, pytorch_ds, target_idx):
        """
        Args:
            pytorch_ds (PytorchSegmentationDataset): the dataset to wrap.
            target_idx (int): the domain index used as the new target for
                every instance of this dataset.
        """
        # Minimal stand-in object exposing just the attributes the parent
        # constructor reads (instances, hold_out_ixs), so the wrapped
        # dataset's instances are reused without re-loading anything.
        class Dummy:
            def __init__(self):
                self.instances = pytorch_ds.instances
                self.hold_out_ixs = []

        self.original_ds = pytorch_ds
        # Ugly
        # noinspection PyTypeChecker
        super().__init__(dataset=Dummy(), size=pytorch_ds.size)

        # Copy the predictor, but prevent it from reshaping the prediction
        self.predictor = copy.copy(pytorch_ds.predictor)
        self.predictor.reshape_pred = False

        # Create new target as one hot encoded
        # self.target = torch.zeros((1, target_cnt), dtype=self.instances[0].y.tensor.dtype)
        # self.target[:, target_idx] = 1
        # Scalar class-index target; dtype matches the original labels.
        self.target = torch.tensor([target_idx], dtype=self.instances[0].y.tensor.dtype)

        # Modify instances: keep x/name/class_ix/group_id, swap in the
        # shared domain target for y.
        self.instances = [Instance(inst.x, self.target, inst.name, inst.class_ix, inst.group_id)
                          for inst in self.instances]

    def get_subject_dataloader(self, subject_ix):
        r"""Get a list of input/target pairs equivalent to those if the dataset
        was only of subject with index subject_ix. For evaluation purposes.
        """
        # Generate the original subject dataloader and replace the target
        subject_dataloader = self.original_ds.get_subject_dataloader(subject_ix)
        return [(x, self.target) for x, _ in subject_dataloader]
7956dfced705294acdce6e72df5792f2b820a965 | 259 | py | Python | ver1_0/openassembly/api/models.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | 1 | 2015-11-05T08:22:19.000Z | 2015-11-05T08:22:19.000Z | ver1_0/openassembly/api/models.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | null | null | null | ver1_0/openassembly/api/models.py | fragro/Open-Assembly | e9679ff5e7ae9881fa5781d763288ed2f40b014d | [
"BSD-3-Clause"
] | 1 | 2018-02-03T18:25:41.000Z | 2018-02-03T18:25:41.000Z | from django.db import models
# Create your models here.
from piston.handler import BaseHandler
from myapp.models import Blogpost
class BlogpostHandler(BaseHandler):
allowed_methods = ('GET',)
model = Blogpost
def read(self, request, post_slug): | 23.545455 | 38 | 0.756757 |
7956dfd4e0075131f805cc94204a3ea4fccbca27 | 46,710 | py | Python | bayes_opt/visualization/vis_presentation.py | AndRossi/OpenKE_BayesianOpt | 31db25eb8406c6cf803e2187402290e466c0e824 | [
"MIT"
] | 2 | 2020-08-01T03:00:24.000Z | 2020-08-18T02:08:21.000Z | bayes_opt/visualization/vis_presentation.py | AndRossi/OpenKE_BayesianOpt | 31db25eb8406c6cf803e2187402290e466c0e824 | [
"MIT"
] | null | null | null | bayes_opt/visualization/vis_presentation.py | AndRossi/OpenKE_BayesianOpt | 31db25eb8406c6cf803e2187402290e466c0e824 | [
"MIT"
] | 1 | 2020-08-18T02:08:23.000Z | 2020-08-18T02:08:23.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 23:22:32 2016
@author: Vu
"""
from __future__ import division
import sys
sys.path.insert(0,'../../')
sys.path.insert(0,'..')
import numpy as np
#import mayavi.mlab as mlab
#from scipy.stats import norm
#import matplotlib as plt
from mpl_toolkits.mplot3d import Axes3D
from prada_bayes_opt import PradaBayOptFn
#from prada_bayes_opt import PradaBayOptBatch
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.metrics.pairwise import euclidean_distances
from prada_bayes_opt.acquisition_maximization import acq_max
from scipy.stats import norm as norm_dist
import random
from prada_bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows
import os
from pylab import *
# Hand-built RGB anchor points for a custom colormap (currently unused:
# the stock 'Blues' colormap below is what is actually applied).
cdict = {'red': ((0.0, 0.0, 0.0),
                 (0.5, 1.0, 0.7),
                 (1.0, 1.0, 1.0)),
         'green': ((0.0, 0.0, 0.0),
                   (0.5, 1.0, 0.0),
                   (1.0, 1.0, 1.0)),
         'blue': ((0.0, 0.0, 0.0),
                  (0.5, 1.0, 0.0),
                  (1.0, 0.5, 1.0))}

#my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
#my_cmap = plt.get_cmap('cubehelix')
my_cmap = plt.get_cmap('Blues')

# Global counter used to number the figure files saved by the plot helpers.
counter = 0

#class Visualization(object):

    #def __init__(self,bo):
        #self.plot_gp=0
        #self.posterior=0
        #self.myBo=bo
def plot_bo(bo):
    """Render a Bayesian optimization run with the renderer matching its
    dimensionality (1D or 2D); other dimensionalities are ignored."""
    renderers = {1: plot_bo_1d, 2: plot_bo_2d}
    renderer = renderers.get(bo.dim)
    if renderer is not None:
        renderer(bo)
def plot_histogram(bo, samples):
    """Plot a histogram of *samples* using the 1D or 2D variant matching
    the problem dimensionality; other dimensionalities are ignored."""
    renderers = {1: plot_histogram_1d, 2: plot_histogram_2d}
    renderer = renderers.get(bo.dim)
    if renderer is not None:
        renderer(bo, samples)
def plot_mixturemodel(g, bo, samples):
    """Plot the fitted mixture model *g* over *samples* using the 1D or
    2D variant matching the problem dimensionality; other
    dimensionalities are ignored."""
    renderers = {1: plot_mixturemodel_1d, 2: plot_mixturemodel_2d}
    renderer = renderers.get(bo.dim)
    if renderer is not None:
        renderer(g, bo, samples)
def plot_mixturemodel_1d(g,bo,samples):
    """Plot the 1D IGMM density fitted to *samples*, in original coordinates.

    Args:
        g: fitted mixture model exposing ``score_samples`` (log density).
        bo: Bayesian optimization object providing bounds and scaling.
        samples: sample locations in the scaled search space.
    """
    # Map scaled samples back to the original search-space coordinates.
    samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
    x_plot = np.linspace(np.min(samples), np.max(samples), len(samples))
    x_plot = np.reshape(x_plot,(len(samples),-1))
    # score_samples returns log-density; exponentiated before plotting.
    y_plot = g.score_samples(x_plot)[0]
    x_plot_ori = np.linspace(np.min(samples_original), np.max(samples_original), len(samples_original))
    x_plot_ori=np.reshape(x_plot_ori,(len(samples_original),-1))
    fig=plt.figure(figsize=(8, 3))
    plt.plot(x_plot_ori, np.exp(y_plot), color='red')
    plt.xlim(bo.bounds[0,0],bo.bounds[0,1])
    plt.xlabel("X",fontdict={'size':16})
    plt.ylabel("f(X)",fontdict={'size':16})
    plt.title("IGMM Approximation",fontsize=16)
def plot_mixturemodel_2d(dpgmm,bo,samples):
    """Contour-plot the 2D IGMM density and its component means.

    Args:
        dpgmm: fitted mixture model exposing ``score_samples`` and
            ``truncated_means_``.
        bo: Bayesian optimization object providing bounds and scaling.
        samples: sample locations in the scaled search space (not drawn;
            kept for interface symmetry with the 1D variant).
    """
    # Map scaled coordinates back to the original search space.
    samples_original=samples*bo.max_min_gap+bo.bounds[:,0]
    dpgmm_means_original=dpgmm.truncated_means_*bo.max_min_gap+bo.bounds[:,0]
    #fig=plt.figure(figsize=(12, 5))
    fig=plt.figure()
    myGmm=fig.add_subplot(1,1,1)
    # Evaluate the density on a 100x100 grid in the scaled space.
    x1 = np.linspace(bo.scalebounds[0,0],bo.scalebounds[0,1], 100)
    x2 = np.linspace(bo.scalebounds[1,0],bo.scalebounds[1,1], 100)
    x1g,x2g=np.meshgrid(x1,x2)
    x_plot=np.c_[x1g.flatten(), x2g.flatten()]
    # score_samples returns log-density; exponentiated before contouring.
    y_plot2 = dpgmm.score_samples(x_plot)[0]
    y_plot2=np.exp(y_plot2)
    #y_label=dpgmm.predict(x_plot)[0]
    # Matching grid in original coordinates for axis labeling.
    x1_ori = np.linspace(bo.bounds[0,0],bo.bounds[0,1], 100)
    x2_ori = np.linspace(bo.bounds[1,0],bo.bounds[1,1], 100)
    x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
    CS_acq=myGmm.contourf(x1g_ori,x2g_ori,y_plot2.reshape(x1g.shape),cmap=plt.cm.bone,origin='lower')
    CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
    myGmm.scatter(dpgmm_means_original[:,0],dpgmm_means_original[:,1], marker='*',label=u'Estimated Peaks by IGMM', s=100,color='green')
    myGmm.set_title('IGMM Approximation',fontsize=16)
    myGmm.set_xlim(bo.bounds[0,0],bo.bounds[0,1])
    myGmm.set_ylim(bo.bounds[1,0],bo.bounds[1,1])
    myGmm.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_acq_bo_1d(bo):
    """Plot the GP posterior of a 1D BO run plus a stacked panel of
    acquisition functions (UCB, EI, Thompson sampling, ES, PES) and save
    the figure, numbered by the module-level ``counter``."""
    global counter
    counter=counter+1

    func=bo.f
    #x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
    # Evaluate on a dense grid in scaled space; map back for display.
    x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
    x_original=x*bo.max_min_gap+bo.bounds[:,0]
    y_original = func(x_original)
    #y = func(x)
    #y_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)

    fig=plt.figure(figsize=(12, 8))
    #fig.title('Bayesian Optimization with Different Acquisition Functions', fontdict={'size':20})
    # Top panel (posterior) is 3x the height of each acquisition panel.
    gs = gridspec.GridSpec(6, 1, height_ratios=[3, 1,1,1,1,1])
    axis = plt.subplot(gs[0])
    acq_UCB = plt.subplot(gs[1])
    acq_EI = plt.subplot(gs[2])
    acq_TS = plt.subplot(gs[3])
    #acq_TS2 = plt.subplot(gs[5])
    acq_ES = plt.subplot(gs[4])
    acq_PES = plt.subplot(gs[5])
    #acq_MRS = plt.subplot(gs[6])
    #acq_Consensus = plt.subplot(gs[7])

    mu, sigma = bo.posterior(x)
    #mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
    # Undo the normalization of Y to plot in original units.
    mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
    # NOTE(review): adding mean(Y)**2 when rescaling sigma looks suspect
    # for a std-dev rescaling — confirm the intended transform.
    sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)**2

    axis.plot(x_original, y_original, linewidth=3, label='Real Function')
    axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
    axis.plot(x_original, mu_original, '--', color='k', label='GP mean')

    #samples*bo.max_min_gap+bo.bounds[:,0]

    # Build the closed polygon (x forward, x reversed) for the 95% CI band.
    temp_xaxis=np.concatenate([x_original, x_original[::-1]])
    #temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]

    temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
    temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
    temp_yaxis_original2=temp_yaxis*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
    axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.6, fc='c', ec='None', label='95% CI')

    axis.set_xlim((np.min(x_original), np.max(x_original)))
    #axis.set_ylim((None, None))
    axis.set_ylabel('f(x)', fontdict={'size':16})
    axis.set_xlabel('x', fontdict={'size':16})
    axis.set_title('Bayesian Optimization with Different Acquisition Functions', fontdict={'size':20})

    # UCB
    acq_func={}
    acq_func['name']='ucb'
    acq_func['kappa']=2
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_UCB.plot(x_original, utility, label='Utility Function', color='purple')
    acq_UCB.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    # check batch BO
    try:
        nSelectedPoints=np.int(bo.NumPoints[-1])
    except:
        nSelectedPoints=1
    max_point=np.max(utility)

    #acq_UCB.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_UCB.set_xlim((np.min(x_original), np.max(x_original)))
    acq_UCB.set_ylabel('UCB', fontdict={'size':16})
    acq_UCB.set_xlabel('x', fontdict={'size':16})

    # EI
    acq_func={}
    acq_func['name']='ei'
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_EI.plot(x_original, utility, label='Utility Function', color='purple')
    acq_EI.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    max_point=np.max(utility)

    #acq_EI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_EI.set_xlim((np.min(x_original), np.max(x_original)))
    acq_EI.set_ylabel('EI', fontdict={'size':16})
    acq_EI.set_xlabel('x', fontdict={'size':16})

    # TS
    acq_func={}
    acq_func['name']='thompson'
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_TS.plot(x_original, utility, label='Utility Function', color='purple')
    acq_TS.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    max_point=np.max(utility)

    #acq_POI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_TS.set_xlim((np.min(x_original), np.max(x_original)))
    acq_TS.set_ylabel('TS', fontdict={'size':16})
    acq_TS.set_xlabel('x', fontdict={'size':16})

    #axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    #acq_EI.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)

    # Disabled MRS panel, kept for reference as a no-op string literal.
    """
    # MRS
    acq_func={}
    acq_func['name']='mrs'
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_MRS.plot(x_original, utility, label='Utility Function', color='purple')
    acq_MRS.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    max_point=np.max(utility)

    #acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_MRS.set_xlim((np.min(x_original), np.max(x_original)))
    acq_MRS.set_ylabel('MRS', fontdict={'size':16})
    acq_MRS.set_xlabel('x', fontdict={'size':16})
    """

    # PES
    acq_func={}
    acq_func['name']='pes'
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_PES.plot(x_original, utility, label='Utility Function', color='purple')
    acq_PES.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    max_point=np.max(utility)

    #acq_PES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_PES.set_xlim((np.min(x_original), np.max(x_original)))
    acq_PES.set_ylabel('PES', fontdict={'size':16})
    acq_PES.set_xlabel('x', fontdict={'size':16})

    # Disabled consensus panel, kept for reference as a no-op string literal.
    # TS1
    """
    acq_func={}
    acq_func['name']='consensus'
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_Consensus.plot(x_original, utility, label='Utility Function', color='purple')

    temp=np.asarray(myacq.object.xt_suggestions)
    xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
    acq_Consensus.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
             label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)

    max_point=np.max(utility)
    acq_Consensus.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
    #acq_TS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_Consensus.set_xlim((np.min(x_original), np.max(x_original)))
    #acq_TS.set_ylim((np.min(utility)*0.9, np.max(utility)*1.1))
    acq_Consensus.set_ylabel('Consensus', fontdict={'size':16})
    acq_Consensus.set_xlabel('x', fontdict={'size':16})
    """

    # ES
    acq_func={}
    acq_func['name']='es'
    acq_func['dim']=1
    acq_func['scalebounds']=bo.scalebounds

    myacq=AcquisitionFunction(acq_func)
    utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq_ES.plot(x_original, utility, label='Utility Function', color='purple')
    acq_ES.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    max_point=np.max(utility)

    #acq_ES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             #label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq_ES.set_xlim((np.min(x_original), np.max(x_original)))
    acq_ES.set_ylabel('ES', fontdict={'size':16})
    acq_ES.set_xlabel('x', fontdict={'size':16})

    # Save the figure into the current working directory.
    strFileName="{:d}_GP_acquisition_functions.pdf".format(counter)
    fig.savefig(strFileName, bbox_inches='tight')

    #axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    #acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_1d(bo):
    """Plot a 1D BO run: true function, observations, GP posterior with
    95% CI (top panel) and the current acquisition function with its
    maximizer and previous selections (bottom panel)."""
    func=bo.f
    #x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
    # Dense grid in scaled space; mapped back for display.
    x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
    x_original=x*bo.max_min_gap+bo.bounds[:,0]
    y_original = func(x_original)
    #y = func(x)
    #y_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)

    fig=plt.figure(figsize=(8, 5))
    fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})

    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    axis = plt.subplot(gs[0])
    acq = plt.subplot(gs[1])

    mu, sigma = bo.posterior(x)
    #mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
    # Undo the normalization of Y to plot in original units.
    mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
    # NOTE(review): adding mean(Y)**2 when rescaling sigma looks suspect
    # for a std-dev rescaling — confirm the intended transform.
    sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)**2

    axis.plot(x_original, y_original, linewidth=3, label='Real Function')
    axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
    axis.plot(x_original, mu_original, '--', color='k', label='GP mean')

    #samples*bo.max_min_gap+bo.bounds[:,0]

    # Closed polygon (x forward, x reversed) for the 95% CI band.
    temp_xaxis=np.concatenate([x_original, x_original[::-1]])
    #temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]

    temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
    temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
    temp_yaxis_original2=temp_yaxis*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
    axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.6, fc='c', ec='None', label='95% CI')

    axis.set_xlim((np.min(x_original), np.max(x_original)))
    #axis.set_ylim((None, None))
    axis.set_ylabel('f(x)', fontdict={'size':16})
    axis.set_xlabel('x', fontdict={'size':16})

    utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
    acq.plot(x_original, utility, label='Utility Function', color='purple')
    acq.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
             label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)

    # check batch BO
    try:
        nSelectedPoints=np.int(bo.NumPoints[-1])
    except:
        nSelectedPoints=1
    max_point=np.max(utility)

    acq.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
             label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)

    acq.set_xlim((np.min(x_original), np.max(x_original)))
    #acq.set_ylim((0, np.max(utility) + 0.5))
    #acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
    acq.set_ylabel('Acq', fontdict={'size':16})
    acq.set_xlabel('x', fontdict={'size':16})

    axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_1d_variance(bo):
    """Compare two exploration criteria for a 1D BO run on twin axes:
    the GP predictive std sigma(x) and the distance-to-data d(x), then
    save the figure (numbered by the module-level ``counter``) to a
    hard-coded Windows folder."""
    global counter
    counter=counter+1

    func=bo.f
    #x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
    x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
    x_original=x*bo.max_min_gap+bo.bounds[:,0]
    y_original = func(x_original)
    #y = func(x)
    #y_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)

    #fig=plt.figure(figsize=(8, 5))
    fig, ax1 = plt.subplots(figsize=(8.5, 4))

    mu, sigma = bo.posterior(x)
    mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
    sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))

    # Here the acquisition function is the GP predictive std.
    utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))

    def distance_function(x,X):
        # Distance from each candidate to its nearest observed point.
        Euc_dist=euclidean_distances(x,X)
        dist=Euc_dist.min(axis=1)
        return dist

    utility_distance=distance_function(x.reshape((-1, 1)),bo.X)
    idxMaxVar=np.argmax(utility)
    #idxMaxVar=[idx for idx,val in enumerate(utility) if val>=0.995]
    ax1.plot(x_original, utility, label='GP $\sigma(x)$', color='purple')
    ax1.scatter(x_original[idxMaxVar], utility[idxMaxVar], marker='s',label='x=argmax $\sigma(x)$', color='blue',linewidth=2)
    #ax1.scatter(x_original[idxMaxVar], utility[idxMaxVar], label='$||x-[x]||$', color='blue',linewidth=2)

    ax1.plot(bo.X_original.flatten(), [0]*len(bo.X_original.flatten()), 'D', markersize=10, label=u'Observations', color='r')

    idxMaxDE=np.argmax(utility_distance)
    # Second y-axis for the distance criterion.
    ax2 = ax1.twinx()
    ax2.plot(x_original, utility_distance, label='$d(x)=||x-[x]||^2$', color='black')
    ax2.plot(x_original[idxMaxDE], utility_distance[idxMaxDE], 'o',label='x=argmax d(x)', color='black',markersize=10)

    ax2.set_ylim((0, 0.45))

    ax1.set_xlim((np.min(x_original)-0.01, 0.01+np.max(x_original)))
    ax1.set_ylim((-0.02, np.max(utility) + 0.05))
    #acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
    # NOTE: ur'' literals below are Python 2 syntax.
    ax1.set_ylabel(ur'$\sigma(x)$', fontdict={'size':18})
    ax2.set_ylabel('d(x)', fontdict={'size':18})
    ax1.set_xlabel('x', fontdict={'size':18})

    #axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    #ax1.legend(loc=2, bbox_to_anchor=(1.1, 1), borderaxespad=0.,fontsize=14)
    #ax2.legend(loc=2, bbox_to_anchor=(1.1, 0.3), borderaxespad=0.,fontsize=14)

    plt.title('Exploration by GP variance vs distance',fontsize=22)
    ax1.legend(loc=3, bbox_to_anchor=(0.05,-0.32,1, -0.32), borderaxespad=0.,fontsize=14,ncol=4)
    ax2.legend(loc=3, bbox_to_anchor=(0.05,-0.46,1, -0.46), borderaxespad=0.,fontsize=14,ncol=2)

    #plt.legend(fontsize=14)

    # NOTE(review): machine-specific absolute output path; figure saving
    # fails on any machine without this drive.
    strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\demo_geometric"
    strFileName="{:d}_var_DE.eps".format(counter)
    strPath=os.path.join(strFolder,strFileName)
    fig.savefig(strPath, bbox_inches='tight')
def plot_acq_bo_2d(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 80)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 80)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 80)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 80)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
#y_original = func(x_original)
#y = func(x)
#y_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
fig=plt.figure(figsize=(14, 20))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
axis_mean2d = fig.add_subplot(4, 2, 1)
axis_variance2d = fig.add_subplot(4, 2, 2)
acq_UCB = fig.add_subplot(4, 2, 3)
acq_EI =fig.add_subplot(4, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_ES = fig.add_subplot(4, 2, 5)
acq_PES = fig.add_subplot(4, 2, 6)
acq_MRS = fig.add_subplot(4, 2, 7)
acq_Consensus = fig.add_subplot(4, 2, 8)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)
#sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))+np.mean(bo.Y_original)**2
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp, np.max(bo.Y))
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp, np.max(bo.Y))
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
# MRS
acq_func={}
acq_func['name']='mrs'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp, np.max(bo.Y))
CS_acq=acq_MRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_MRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_MRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_MRS.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_MRS.set_title('MRS',fontsize=16)
acq_MRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_MRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_MRS, shrink=0.9)
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp, np.max(bo.Y))
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_PES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp, np.max(bo.Y))
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_ES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
xt_suggestions=[]
xt_suggestions.append(xt_UCB)
xt_suggestions.append(xt_EI)
xt_suggestions.append(xt_ES)
xt_suggestions.append(xt_PES)
# Consensus
acq_func={}
acq_func['name']='consensus'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xt_suggestions']=xt_suggestions
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp, np.max(bo.Y))
CS_acq=acq_Consensus.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_Consensus.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xt_suggestions)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Consensus.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='s',color='y',s=100,label='xt_suggestions')
acq_Consensus.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Consensus.set_title('Consensus',fontsize=16)
acq_Consensus.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_Consensus.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_Consensus, shrink=0.9)
strFileName="{:d}_GP2d_acquisition_functions.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_2d(bo):
    """Plot the acquisition function of a 2-D Bayesian-optimization run.

    Draws a filled contour of the acquisition function over the original-scale
    search box, marks all observations, the most recent selection and the
    acquisition argmax.

    :param bo: fitted BO object; assumed to expose ``scalebounds``, ``bounds``,
        ``acq_func``, ``gp``, ``Y`` and ``X_original`` -- TODO confirm against
        the BO class definition (not visible in this file).
    """
    # Evaluation grid in the scaled space (GP input) ...
    x1 = np.linspace(bo.scalebounds[0, 0], bo.scalebounds[0, 1], 100)
    x2 = np.linspace(bo.scalebounds[1, 0], bo.scalebounds[1, 1], 100)
    x1g, x2g = np.meshgrid(x1, x2)
    X = np.c_[x1g.flatten(), x2g.flatten()]

    # ... and the matching grid in the original space (for display).
    x1_ori = np.linspace(bo.bounds[0, 0], bo.bounds[0, 1], 100)
    x2_ori = np.linspace(bo.bounds[1, 0], bo.bounds[1, 1], 100)
    x1g_ori, x2g_ori = np.meshgrid(x1_ori, x2_ori)
    X_ori = np.c_[x1g_ori.flatten(), x2g_ori.flatten()]

    fig = plt.figure()
    acq2d = fig.add_subplot(1, 1, 1)

    # Acquisition values over the scaled grid.
    utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))

    CS_acq = acq2d.contourf(x1g_ori, x2g_ori, utility.reshape(x1g.shape),
                            cmap=my_cmap, origin='lower')
    # FIX: the deprecated ``hold='on'`` keyword was removed -- it no longer
    # exists in matplotlib and made this call raise TypeError.
    plt.contour(CS_acq, levels=CS_acq.levels[::2], colors='r', origin='lower')

    idxBest = np.argmax(utility)
    acq2d.scatter(X_ori[idxBest, 0], X_ori[idxBest, 1],
                  marker='s', color='r', s=30, label='Peak')
    acq2d.scatter(bo.X_original[:, 0], bo.X_original[:, 1],
                  color='g', label='Data')
    acq2d.scatter(bo.X_original[-1, 0], bo.X_original[-1, 1],
                  marker='*', color='green', s=100, label='Selected')

    acq2d.set_title('Acquisition Function', fontsize=16)
    acq2d.set_xlim(bo.bounds[0, 0], bo.bounds[0, 1])
    acq2d.set_ylim(bo.bounds[1, 0], bo.bounds[1, 1])
    acq2d.legend(loc='center left', ncol=3, bbox_to_anchor=(0, -0.2))
    fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_bo_2d_unbounded(bo, myfunction):
    """Plot one step of unbounded (expanding search box) 2-D BO and save it.

    Left panel: true function over the hard limits with the previous box
    ``X_{t-1}`` and the invasion set ``I_t``.  Right panel: acquisition
    function with the current (possibly expanded) box ``X_t``.  The figure is
    written to ``<t>_unbounded.eps`` under a hard-coded folder.

    :param bo: BO object; assumed to expose ``scalebounds``, ``bounds``,
        ``bounds_bk``, ``b_limit_lower``/``b_limit_upper``, ``X_invasion``,
        ``max_min_gap``/``max_min_gap_bk``, ``acq_func``, ``gp``, ``Y`` and
        ``X_original`` -- TODO confirm.
    :param myfunction: benchmark object with a vectorized ``func`` attribute.
    """
    global counter
    counter = counter + 1

    strFolder = "P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\plot_Nov_2016"

    # Scaled grid for the GP / acquisition evaluation.
    x1 = np.linspace(bo.scalebounds[0, 0], bo.scalebounds[0, 1], 100)
    x2 = np.linspace(bo.scalebounds[1, 0], bo.scalebounds[1, 1], 100)
    x1g, x2g = np.meshgrid(x1, x2)
    X = np.c_[x1g.flatten(), x2g.flatten()]

    # Original-scale grid used for display.
    x1_ori = np.linspace(bo.bounds[0, 0], bo.bounds[0, 1], 100)
    x2_ori = np.linspace(bo.bounds[1, 0], bo.bounds[1, 1], 100)
    x1g_ori, x2g_ori = np.meshgrid(x1_ori, x2_ori)

    fig = plt.figure(figsize=(10, 3.5))

    # ---- Left panel: true function over the hard limits + invasion set ----
    acq_expansion = fig.add_subplot(1, 2, 1)

    x1 = np.linspace(bo.b_limit_lower[0], bo.b_limit_upper[0], 100)
    x2 = np.linspace(bo.b_limit_lower[1], bo.b_limit_upper[1], 100)
    x1g_ori_limit, x2g_ori_limit = np.meshgrid(x1, x2)
    X_plot = np.c_[x1g_ori_limit.flatten(), x2g_ori_limit.flatten()]

    Y = myfunction.func(X_plot)
    Y = -np.log(np.abs(Y))  # log-compress the range for readable contours

    CS_expansion = acq_expansion.contourf(
        x1g_ori_limit, x2g_ori_limit, Y.reshape(x1g_ori.shape),
        cmap=my_cmap, origin='lower')

    if len(bo.X_invasion) != 0:
        myinvasion_set = acq_expansion.scatter(
            bo.X_invasion[:, 0], bo.X_invasion[:, 1],
            color='m', s=1, label='Invasion Set')
    else:
        myinvasion_set = []

    # Previous search box X_{t-1}.
    myrectangle = patches.Rectangle(
        bo.bounds_bk[:, 0], bo.max_min_gap_bk[0], bo.max_min_gap_bk[1],
        alpha=0.3, fill=False, facecolor="#00ffff", linewidth=3)
    acq_expansion.add_patch(myrectangle)
    acq_expansion.set_xlim(bo.b_limit_lower[0] - 0.2, bo.b_limit_upper[0] + 0.2)
    acq_expansion.set_ylim(bo.b_limit_lower[1] - 0.2, bo.b_limit_upper[1] + 0.2)

    # FIX: the ``ur'...'`` string prefix is Python-2-only (SyntaxError on
    # Python 3); plain raw strings carry the identical content.
    if len(bo.X_invasion) != 0:
        acq_expansion.legend([myrectangle, myinvasion_set],
                             [r'$X_{t-1}$', r'$I_t$'],
                             loc=4, ncol=1, prop={'size': 16}, scatterpoints=5)
        strTitle_Inv = "[t={:d}] Invasion Set".format(counter)
    else:
        acq_expansion.legend([myrectangle, myinvasion_set],
                             [r'$X_{t-1}$', r'Empty $I_t$'],
                             loc=4, ncol=1, prop={'size': 16}, scatterpoints=5)
        strTitle_Inv = "[t={:d}] Empty Invasion Set".format(counter)
    acq_expansion.set_title(strTitle_Inv, fontsize=16)

    # ---- Right panel: acquisition function with the current box ----------
    acq2d = fig.add_subplot(1, 2, 2)
    utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
    CS_acq = acq2d.contourf(x1g_ori, x2g_ori, utility.reshape(x1g.shape),
                            cmap=my_cmap, origin='lower')
    # FIX: dropped the removed ``hold='on'`` keyword here as well.
    plt.contour(CS_acq, levels=CS_acq.levels[::2], colors='r', origin='lower')

    myrectangle = patches.Rectangle(
        bo.bounds[:, 0], bo.max_min_gap[0], bo.max_min_gap[1],
        alpha=0.3, fill=False, facecolor="#00ffff", linewidth=3)
    acq2d.add_patch(myrectangle)
    myobs = acq2d.scatter(bo.X_original[:, 0], bo.X_original[:, 1],
                          color='g', s=6, label='Data')

    acq2d.set_xlim(bo.b_limit_lower[0] - 0.2, bo.b_limit_upper[0] + 0.2)
    acq2d.set_ylim(bo.b_limit_lower[1] - 0.2, bo.b_limit_upper[1] + 0.2)
    acq2d.legend([myrectangle, myobs], [r'$X_{t}$', 'Data'],
                 loc=4, ncol=1, prop={'size': 16}, scatterpoints=3)
    strTitle_Acq = "[t={:d}] Acquisition Func".format(counter)
    acq2d.set_title(strTitle_Acq, fontsize=16)

    fig.colorbar(CS_expansion, ax=acq_expansion, shrink=0.9)
    fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)

    strFileName = "{:d}_unbounded.eps".format(counter)
    strPath = os.path.join(strFolder, strFileName)
    fig.savefig(strPath, bbox_inches='tight')
def plot_bo_2d_withGPmeans(bo):
    """Plot the GP posterior mean (left) and acquisition function (right).

    :param bo: fitted BO object; assumed to expose ``posterior``,
        ``acq_func``, ``gp``, ``Y``, ``X_original``, ``bounds`` and
        ``scalebounds`` -- TODO confirm.
    """
    # Scaled grid for the GP, original-scale grid for display.
    x1 = np.linspace(bo.scalebounds[0, 0], bo.scalebounds[0, 1], 100)
    x2 = np.linspace(bo.scalebounds[1, 0], bo.scalebounds[1, 1], 100)
    x1g, x2g = np.meshgrid(x1, x2)
    X = np.c_[x1g.flatten(), x2g.flatten()]

    x1_ori = np.linspace(bo.bounds[0, 0], bo.bounds[0, 1], 100)
    x2_ori = np.linspace(bo.bounds[1, 0], bo.bounds[1, 1], 100)
    x1g_ori, x2g_ori = np.meshgrid(x1_ori, x2_ori)
    X_ori = np.c_[x1g_ori.flatten(), x2g_ori.flatten()]

    fig = plt.figure(figsize=(12, 5))
    axis2d = fig.add_subplot(1, 2, 1)
    acq2d = fig.add_subplot(1, 2, 2)

    mu, sigma = bo.posterior(X)

    # Left: GP posterior mean with the observations.
    CS = axis2d.contourf(x1g_ori, x2g_ori, mu.reshape(x1g.shape),
                         cmap=plt.cm.bone, origin='lower')
    # FIX: removed the ``hold='on'`` keyword -- it has been removed from
    # matplotlib and made these calls raise TypeError.
    plt.contour(CS, levels=CS.levels[::2], colors='r', origin='lower')
    axis2d.scatter(bo.X_original[:, 0], bo.X_original[:, 1],
                   label=u'Observations', color='g')
    axis2d.set_title('Gaussian Process Mean', fontsize=16)
    axis2d.set_xlim(bo.bounds[0, 0], bo.bounds[0, 1])
    axis2d.set_ylim(bo.bounds[1, 0], bo.bounds[1, 1])
    fig.colorbar(CS, ax=axis2d, shrink=0.9)

    # Right: acquisition function; red = last selection, blue = argmax.
    utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))
    CS_acq = acq2d.contourf(x1g_ori, x2g_ori, utility.reshape(x1g.shape),
                            cmap=my_cmap, origin='lower')
    plt.contour(CS_acq, levels=CS_acq.levels[::2], colors='r', origin='lower')
    idxBest = np.argmax(utility)

    acq2d.scatter(bo.X_original[:, 0], bo.X_original[:, 1], color='g')
    acq2d.scatter(bo.X_original[-1, 0], bo.X_original[-1, 1], color='r', s=60)
    acq2d.scatter(X_ori[idxBest, 0], X_ori[idxBest, 1], color='b', s=60)

    acq2d.set_title('Acquisition Function', fontsize=16)
    acq2d.set_xlim(bo.bounds[0, 0] - 0.2, bo.bounds[0, 1] + 0.2)
    acq2d.set_ylim(bo.bounds[1, 0] - 0.2, bo.bounds[1, 1] + 0.2)
    fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_bo_2d_withGPmeans_Sigma(bo):
    """Plot the GP posterior mean (left) and posterior variance (right).

    The acquisition argmax is also marked on the variance panel.

    :param bo: fitted BO object; assumed to expose ``posterior``,
        ``acq_func``, ``gp``, ``Y``, ``X_original``, ``bounds`` and
        ``scalebounds`` -- TODO confirm.
    """
    x1 = np.linspace(bo.scalebounds[0, 0], bo.scalebounds[0, 1], 100)
    x2 = np.linspace(bo.scalebounds[1, 0], bo.scalebounds[1, 1], 100)
    x1g, x2g = np.meshgrid(x1, x2)
    X = np.c_[x1g.flatten(), x2g.flatten()]

    x1_ori = np.linspace(bo.bounds[0, 0], bo.bounds[0, 1], 100)
    x2_ori = np.linspace(bo.bounds[1, 0], bo.bounds[1, 1], 100)
    x1g_ori, x2g_ori = np.meshgrid(x1_ori, x2_ori)
    X_ori = np.c_[x1g_ori.flatten(), x2g_ori.flatten()]

    fig = plt.figure(figsize=(12, 3))
    axis2d = fig.add_subplot(1, 2, 1)
    acq2d = fig.add_subplot(1, 2, 2)

    mu, sigma = bo.posterior(X)
    # Acquisition values are only used to mark the argmax on the right panel.
    utility = bo.acq_func.acq_kind(X, bo.gp, np.max(bo.Y))

    # Left: GP posterior mean.
    CS = axis2d.contourf(x1g_ori, x2g_ori, mu.reshape(x1g.shape),
                         cmap=plt.cm.bone, origin='lower')
    # FIX: removed the ``hold='on'`` keyword (removed from matplotlib).
    plt.contour(CS, levels=CS.levels[::2], colors='r', origin='lower')
    axis2d.scatter(bo.X_original[:, 0], bo.X_original[:, 1],
                   label=u'Observations', color='g')
    axis2d.set_title('Gaussian Process Mean', fontsize=16)
    axis2d.set_xlim(bo.bounds[0, 0], bo.bounds[0, 1])
    axis2d.set_ylim(bo.bounds[1, 0], bo.bounds[1, 1])
    fig.colorbar(CS, ax=axis2d, shrink=0.9)

    # Right: GP posterior variance.
    CS_acq = acq2d.contourf(x1g_ori, x2g_ori, sigma.reshape(x1g.shape),
                            cmap=my_cmap, origin='lower')
    plt.contour(CS_acq, levels=CS_acq.levels[::2], colors='r', origin='lower')
    idxBest = np.argmax(utility)
    acq2d.scatter(bo.X_original[:, 0], bo.X_original[:, 1], color='g')
    acq2d.scatter(bo.X_original[-1, 0], bo.X_original[-1, 1], color='r', s=60)
    acq2d.scatter(X_ori[idxBest, 0], X_ori[idxBest, 1], color='b', s=60)
    acq2d.set_title('Gaussian Process Variance', fontsize=16)
    fig.colorbar(CS_acq, ax=acq2d, shrink=0.9)
def plot_original_function(myfunction):
    """Plot a benchmark function and save the figure as EPS.

    1-D inputs produce a line plot; 2-D inputs a surface plus a contour with
    the argmax marked.

    :param myfunction: benchmark object with ``func``, ``input_dim``,
        ``bounds`` (dict of per-parameter ranges, or array) and ``name``.
    """
    origin = 'lower'
    func = myfunction.func
    strTitle = "{:s}".format(myfunction.name)
    fig = None

    if myfunction.input_dim == 1:
        x = np.linspace(myfunction.bounds['x'][0], myfunction.bounds['x'][1], 1000)
        y = func(x)
        fig = plt.figure(figsize=(8, 5))
        plt.plot(x, y)
        plt.title(strTitle)

    if myfunction.input_dim == 2:
        # Bounds may come as a dict of per-parameter ranges or as an array.
        if isinstance(myfunction.bounds, dict):
            bounds = np.asarray([myfunction.bounds[key] for key in myfunction.bounds.keys()])
        else:
            bounds = np.asarray(myfunction.bounds)

        x1 = np.linspace(bounds[0][0], bounds[0][1], 50)
        x2 = np.linspace(bounds[1][0], bounds[1][1], 50)
        x1g, x2g = np.meshgrid(x1, x2)
        X_plot = np.c_[x1g.flatten(), x2g.flatten()]
        Y = func(X_plot)

        fig = plt.figure(figsize=(14, 4))
        ax3d = fig.add_subplot(1, 2, 1, projection='3d')
        ax2d = fig.add_subplot(1, 2, 2)

        # FIX: the argmax was computed twice; compute once and reuse.
        idxBest = np.argmax(Y)

        ax3d.plot_surface(x1g, x2g, Y.reshape(x1g.shape), cmap=my_cmap, alpha=0.7)
        ax3d.scatter(X_plot[idxBest, 0], X_plot[idxBest, 1], Y[idxBest],
                     marker='*', color='r', s=200, label='Peak')
        ax3d.set_title(strTitle)

        CS = ax2d.contourf(x1g, x2g, Y.reshape(x1g.shape), cmap=my_cmap, origin=origin)
        ax2d.scatter(X_plot[idxBest, 0], X_plot[idxBest, 1],
                     marker='*', color='r', s=300, label='Peak')
        plt.colorbar(CS, ax=ax2d, shrink=0.9)
        ax2d.set_title(strTitle)

    # NOTE(review): in the flattened source it is ambiguous whether saving was
    # meant only for the 2-D case; saving whenever a figure was produced, and
    # guarding avoids the previous NameError for unsupported input_dim values.
    if fig is not None:
        strFolder = "P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\plot_2017"
        strFileName = "{:s}.eps".format(myfunction.name)
        strPath = os.path.join(strFolder, strFileName)
        fig.savefig(strPath, bbox_inches='tight')
| 40.336788 | 141 | 0.634939 |
7956e03f6717fe92b16db1846063a9cf33884351 | 1,909 | py | Python | api/api_date.py | MrLai/Django-Data-quality-system | 0e9113b5b851d7ed411cbd1231c5c13bb0428ee3 | [
"MIT"
] | 148 | 2020-01-06T10:39:16.000Z | 2022-03-17T09:32:31.000Z | api/api_date.py | MrLai/Django-Data-quality-system | 0e9113b5b851d7ed411cbd1231c5c13bb0428ee3 | [
"MIT"
] | 15 | 2020-06-12T05:17:06.000Z | 2022-02-10T16:50:44.000Z | api/api_date.py | MrLai/Django-Data-quality-system | 0e9113b5b851d7ed411cbd1231c5c13bb0428ee3 | [
"MIT"
] | 57 | 2020-01-10T06:18:20.000Z | 2022-03-22T03:27:02.000Z | from django.http.response import HttpResponse, JsonResponse
from django.views.decorators.http import require_http_methods
import datetime
import math
import sys
sys.path.insert(0, '..')
from mysite import db_config
from utils import functions as f
@require_http_methods(['GET'])
def year_list(request):
    """Return the years for which check results already exist."""
    years = f.query_data_year()
    if not years:
        return HttpResponse({'获取年份错误'}, status=500)
    return JsonResponse({'data': years})
@require_http_methods(['GET'])
def quarter_list(request):
    """Return the quarters with check results for the given year.

    Falls back to the current year when the ``year`` parameter is missing.
    """
    year = request.GET.get('year') or datetime.datetime.now().year
    quarters = f.query_data_quarter(year)
    if not quarters:
        return HttpResponse({'获取季度错误'}, status=500)
    return JsonResponse({'data': quarters})
@require_http_methods(['GET'])
def month_list(request):
    """Return the months with check results for a given year/quarter.

    Missing parameters fall back to the current date.
    """
    year = request.GET.get('year')
    quarter = request.GET.get('quarter')
    if not (year and quarter):
        year = year or datetime.datetime.now().year
        quarter = quarter or math.ceil(datetime.datetime.now().month / 3.)
    months = f.query_data_month(year, quarter)
    if not months:
        return HttpResponse({'获取月份错误'}, status=500)
    return JsonResponse({'data': months})
@require_http_methods(['GET'])
def day_list(request):
    """Return the days with check results for a year/quarter/month.

    Missing parameters fall back to the current date.
    """
    year = request.GET.get('year')
    quarter = request.GET.get('quarter')
    month = request.GET.get('month')
    if not (year and quarter and month):
        year = year or datetime.datetime.now().year
        quarter = quarter or math.ceil(datetime.datetime.now().month / 3.)
        month = month or datetime.datetime.now().month
    days = f.query_data_day(year, quarter, month)
    if not days:
        return HttpResponse({'获取天错误'}, status=500)
    return JsonResponse({'data': days})
7956e13e453f9ff770e44fc2bb4ac6fbdd685ea2 | 485 | py | Python | solution-bank/pattern/solution_13.py | anishLearnsToCode/python-training-1 | ef5d6b64f888e167faecd1410563173dcc27f319 | [
"MIT"
] | 3 | 2021-01-05T18:00:14.000Z | 2021-11-28T15:43:04.000Z | solution-bank/pattern/solution_13.py | anishLearnsToCode/python-training-1 | ef5d6b64f888e167faecd1410563173dcc27f319 | [
"MIT"
] | null | null | null | solution-bank/pattern/solution_13.py | anishLearnsToCode/python-training-1 | ef5d6b64f888e167faecd1410563173dcc27f319 | [
"MIT"
] | null | null | null | rows = int(input())
# upper triangle
for i in range(rows):
# spaces
print(end=' ' * (rows - 1 - i))
# first star
print(end='*')
# spaces
print(end=' ' * (2 * i - 1))
# second star
print('' if i is 0 else '*')
# lower triangle
for i in range(rows - 1):
# spaces
print(end=' ' * (i + 1))
# first star
print(end='*')
# spaces
print(end=' ' * (2 * rows - 5 - 2 * i))
# second star
print('' if i is rows - 2 else '*')
| 16.166667 | 43 | 0.478351 |
7956e5c77ac5c2a1421005ac1c5b3b4ca36a2e9b | 8,262 | py | Python | other/DaSiamRPN/code/run_SiamRPN.py | shenjl/asimo | 6aad2c89bb5eb3ca59c85521934fe854d1a0e0e6 | [
"MIT"
] | null | null | null | other/DaSiamRPN/code/run_SiamRPN.py | shenjl/asimo | 6aad2c89bb5eb3ca59c85521934fe854d1a0e0e6 | [
"MIT"
] | null | null | null | other/DaSiamRPN/code/run_SiamRPN.py | shenjl/asimo | 6aad2c89bb5eb3ca59c85521934fe854d1a0e0e6 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# DaSiamRPN
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
from utils import get_subwindow_tracking
def generate_anchor(total_stride, scales, ratios, score_size):
    """Build the SiamRPN anchor grid, centered on the image center.

    One prototype box is created per (ratio, scale) pair; each prototype is
    then replicated over every cell of the score map (anchor-major order).

    :param total_stride: total network stride; also fixes the base anchor area
    :param scales: list of anchor scales
    :param ratios: list of height/width aspect ratios
    :param score_size: side length of the score map
    :return: float32 array of shape (len(ratios)*len(scales)*score_size**2, 4)
        with rows [cx, cy, w, h]
    """
    score_size = int(score_size)
    base_area = total_stride * total_stride

    # Prototype anchors at the origin, one per (ratio, scale) combination.
    prototypes = []
    for ratio in ratios:
        w_base = int(np.sqrt(base_area / ratio))
        h_base = int(w_base * ratio)
        for scale in scales:
            prototypes.append((0, 0, w_base * scale, h_base * scale))
    anchor = np.array(prototypes, dtype=np.float32)
    num_anchors = anchor.shape[0]

    # Replicate each prototype once per score-map cell (anchor-major).
    cells = score_size * score_size
    anchor = np.repeat(anchor, cells, axis=0)

    # Grid-cell centers, offset so the grid is centered on the image center.
    origin = -(score_size / 2) * total_stride
    coords = origin + total_stride * np.arange(score_size)
    xx, yy = np.meshgrid(coords, coords)
    anchor[:, 0] = np.tile(xx.ravel(), num_anchors).astype(np.float32)
    anchor[:, 1] = np.tile(yy.ravel(), num_anchors).astype(np.float32)
    return anchor
class TrackerConfig(object):
    """Hyper-parameters of the DaSiamRPN tracker.

    The defaults are the published DaSiamRPN settings (EAO 0.3827) and must
    stay consistent with how the network was trained.
    """

    # Window used to penalize large displacements: 'cosine' or 'uniform'.
    windowing = 'cosine'
    # Network input geometry.
    exemplar_size = 127   # template patch size (z)
    instance_size = 271   # search-region size (x)
    total_stride = 8
    score_size = (instance_size - exemplar_size) / total_stride + 1
    context_amount = 0.5  # context amount for the exemplar
    # Five aspect ratios x one scale = 5 anchors per score-map cell.
    ratios = [0.33, 0.5, 1, 2, 3]
    scales = [8, ]
    anchor_num = len(ratios) * len(scales)
    anchor = []
    penalty_k = 0.055
    window_influence = 0.42
    lr = 0.295
    # adaptive change search region #
    adaptive = True

    def update(self, cfg):
        """Override attributes from ``cfg`` and keep ``score_size`` consistent."""
        for key, value in cfg.items():
            setattr(self, key, value)
        self.score_size = (self.instance_size - self.exemplar_size) / self.total_stride + 1
def tracker_eval(net, x_crop, target_pos, target_sz, window, scale_z, p):
    """Run the detection branch on one search crop and predict the new state.

    :param net: SiamRPN network, already primed with the template features
    :param x_crop: search-region crop tensor, batch dimension first
    :param target_pos: previous target center (in the scaled crop space)
    :param target_sz: previous target size, pre-multiplied by ``scale_z``
    :param window: flattened displacement-penalty window, one value per anchor
    :param scale_z: factor mapping original-image coords to crop coords
    :param p: TrackerConfig carrying the anchors and penalty hyper-parameters
    :return: (new target position, new target size, best anchor score)
    """
    # Detection branch: per-anchor box regression deltas and class scores.
    delta, score = net(x_crop)

    # Reshape delta from N x 4k x H x W to 4 x (k*H*W); softmax the scores and
    # keep only the foreground half.
    delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
    score = F.softmax(score.permute(1, 2, 3, 0).contiguous().view(2, -1), dim=0).data[1, :].cpu().numpy()

    # Decode the regression deltas against the anchors into [cx, cy, w, h].
    delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
    delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
    delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
    delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]

    def change(r):
        # Symmetric ratio: penalizes growth and shrinkage equally.
        return np.maximum(r, 1./r)

    def sz(w, h):
        # Equivalent square side length with context padding.
        pad = (w + h) * 0.5
        sz2 = (w + pad) * (h + pad)
        return np.sqrt(sz2)

    def sz_wh(wh):
        # Same as ``sz`` but for a (w, h) pair.
        pad = (wh[0] + wh[1]) * 0.5
        sz2 = (wh[0] + pad) * (wh[1] + pad)
        return np.sqrt(sz2)

    # size penalty
    s_c = change(sz(delta[2, :], delta[3, :]) / (sz_wh(target_sz)))  # scale penalty
    r_c = change((target_sz[0] / target_sz[1]) / (delta[2, :] / delta[3, :]))  # ratio penalty

    penalty = np.exp(-(r_c * s_c - 1.) * p.penalty_k)
    pscore = penalty * score

    # Blend in the window to penalize large displacements, then pick the
    # highest-scoring anchor.
    pscore = pscore * (1 - p.window_influence) + window * p.window_influence
    best_pscore_id = np.argmax(pscore)

    # Map the winning box back to original-image scale (divide by scale_z).
    target = delta[:, best_pscore_id] / scale_z
    target_sz = target_sz / scale_z
    lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr

    # New center from the predicted offset; size via exponential smoothing.
    res_x = target[0] + target_pos[0]
    res_y = target[1] + target_pos[1]
    res_w = target_sz[0] * (1 - lr) + target[2] * lr
    res_h = target_sz[1] * (1 - lr) + target[3] * lr

    target_pos = np.array([res_x, res_y])
    target_sz = np.array([res_w, res_h])
    return target_pos, target_sz, score[best_pscore_id]
def SiamRPN_init(im, target_pos, target_sz, net):
    """Initialize the SiamRPN tracker on the first frame.

    :param im: first frame as an H x W x C array (channel order/count not
        established here -- presumably H x W x 3; confirm against the caller)
    :param target_pos: target center coordinates
    :param target_sz: target size (width, height)
    :param net: SiamRPN network; its template branch is primed here
    :return: tracker state dict consumed by :func:`SiamRPN_track`
    """
    state = dict()
    p = TrackerConfig()
    p.update(net.cfg)
    state['im_h'] = im.shape[0]  # frame height
    state['im_w'] = im.shape[1]  # frame width

    if p.adaptive:
        # Enlarge the search region when the target covers < 0.4% of the frame.
        if ((target_sz[0] * target_sz[1]) / float(state['im_h'] * state['im_w'])) < 0.004:
            p.instance_size = 287  # small object big search region
        else:
            p.instance_size = 271
        # Score-map size follows from the chosen search-region size.
        p.score_size = (p.instance_size - p.exemplar_size) / p.total_stride + 1

    # Anchor grid in [cx, cy, w, h], centered on the image center.
    p.anchor = generate_anchor(p.total_stride, p.scales, p.ratios, int(p.score_size))

    # Per-channel mean, used to pad crops that fall outside the frame.
    avg_chans = np.mean(im, axis=(0, 1))

    # Context-padded width/height and the equivalent square crop side length.
    wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    s_z = round(np.sqrt(wc_z * hc_z))

    # initialize the exemplar: pad and crop the template patch around the target
    z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans)

    z = Variable(z_crop.unsqueeze(0))  # shape (1, C, exemplar_size, exemplar_size)
    net.temple(z.cuda())  # compute and cache the template features

    # Displacement-penalty window over the score map.
    # NOTE(review): p.score_size may be a float here under Python 3 true
    # division -- np.hanning/np.ones tolerated that historically; confirm.
    if p.windowing == 'cosine':
        window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))
    window = np.tile(window.flatten(), p.anchor_num)

    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    return state
def SiamRPN_track(state, im):
    """Track the target into one new frame.

    :param state: tracker state from :func:`SiamRPN_init` or a previous call
    :param im: current frame as an H x W x C array
    :return: the same state dict, updated in place with new ``target_pos``,
        ``target_sz`` and ``score``
    """
    p = state['p']
    net = state['net']
    avg_chans = state['avg_chans']
    window = state['window']
    target_pos = state['target_pos']
    target_sz = state['target_sz']

    # Context-padded target size, the matching template crop size s_z, and the
    # search-region size s_x derived from it.
    wc_z = target_sz[1] + p.context_amount * sum(target_sz)
    hc_z = target_sz[0] + p.context_amount * sum(target_sz)
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = p.exemplar_size / s_z
    d_search = (p.instance_size - p.exemplar_size) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad

    # extract scaled crops for search region x at previous target position
    x_crop = Variable(get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans).unsqueeze(0))

    # Predict the new position, size and confidence from the crop.
    target_pos, target_sz, score = tracker_eval(net, x_crop.cuda(), target_pos, target_sz * scale_z, window, scale_z, p)

    # Clamp the center to the frame and enforce a minimum box size of 10 px.
    target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
    target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
    target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
    target_sz[1] = max(10, min(state['im_h'], target_sz[1]))

    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    state['score'] = score
    return state
7956e686ff51cf303fa48f86ce5eb1be2f759f7c | 983 | py | Python | cases.py | reidac/covid19-curve-your-county | ab3ec4e6f3249844cda35fbceff3676976a5c914 | [
"BSD-3-Clause"
] | null | null | null | cases.py | reidac/covid19-curve-your-county | ab3ec4e6f3249844cda35fbceff3676976a5c914 | [
"BSD-3-Clause"
] | 1 | 2020-04-09T21:08:32.000Z | 2020-04-09T21:11:09.000Z | cases.py | reidac/covid19-curve-your-county | ab3ec4e6f3249844cda35fbceff3676976a5c914 | [
"BSD-3-Clause"
] | 1 | 2020-04-09T20:15:45.000Z | 2020-04-09T20:15:45.000Z | import matplotlib.pyplot as plt
import numpy as np
import os
import get_dc_data
# Cumulative figure.
casedata = get_dc_data.retrieve(download=False)
f2 = plt.figure(figsize=(6,4))
plt.suptitle("COVID-19 Data Summary, District of Columbia ",
fontweight="bold")
plt.title("github.com/reidac/covid19-curve-dc", style="oblique")
plt.xlabel("Days since March 8, 2020")
plt.ylabel("Cases")
plt.bar(casedata.x,casedata.positive,color='y',width=1.0)
plt.bar(casedata.x,casedata.recovered,
bottom=casedata.positive-casedata.recovered,color='g',width=1.0)
plt.bar(casedata.x,casedata.deaths,color='r',width=1.0)
plt.legend(labels=['Positives','Recovered positives','Deaths'])
if "FIG_PATH" in os.environ:
fig_path = os.environ['FIG_PATH']
else:
fig_path = "."
plt.savefig("{0}/us_dc_cases.png".format(fig_path),dpi=300,bbox_inches="tight")
print("Bar graph of cumulative Covid-19 cases reported by DC, broken out into positives, recoveries, and deaths.")
| 29.787879 | 114 | 0.734486 |
7956e76b7be92923dda4e84bc1c8d750a357e00f | 25,350 | py | Python | meshio/_vtk.py | yuan-feng/meshio | a58b9080e5b288320df2bee1bf4d03097184f3d2 | [
"MIT"
] | null | null | null | meshio/_vtk.py | yuan-feng/meshio | a58b9080e5b288320df2bee1bf4d03097184f3d2 | [
"MIT"
] | null | null | null | meshio/_vtk.py | yuan-feng/meshio | a58b9080e5b288320df2bee1bf4d03097184f3d2 | [
"MIT"
] | null | null | null | """
I/O for VTK <https://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf>.
"""
import logging
from functools import reduce
import numpy
from .__about__ import __version__
from ._common import raw_from_cell_data
from ._exceptions import ReadError, WriteError
from ._files import open_file
from ._mesh import Mesh
# https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html
vtk_to_meshio_type = {
0: "empty",
1: "vertex",
# 2: 'poly_vertex',
3: "line",
# 4: 'poly_line',
5: "triangle",
# 6: 'triangle_strip',
7: "polygon",
# 8: 'pixel',
9: "quad",
10: "tetra",
# 11: 'voxel',
12: "hexahedron",
13: "wedge",
14: "pyramid",
15: "penta_prism",
16: "hexa_prism",
21: "line3",
22: "triangle6",
23: "quad8",
24: "tetra10",
25: "hexahedron20",
26: "wedge15",
27: "pyramid13",
28: "quad9",
29: "hexahedron27",
30: "quad6",
31: "wedge12",
32: "wedge18",
33: "hexahedron24",
34: "triangle7",
35: "line4",
#
# 60: VTK_HIGHER_ORDER_EDGE,
# 61: VTK_HIGHER_ORDER_TRIANGLE,
# 62: VTK_HIGHER_ORDER_QUAD,
# 63: VTK_HIGHER_ORDER_POLYGON,
# 64: VTK_HIGHER_ORDER_TETRAHEDRON,
# 65: VTK_HIGHER_ORDER_WEDGE,
# 66: VTK_HIGHER_ORDER_PYRAMID,
# 67: VTK_HIGHER_ORDER_HEXAHEDRON,
}
meshio_to_vtk_type = {v: k for k, v in vtk_to_meshio_type.items()}
vtk_type_to_numnodes = {
0: 0, # empty
1: 1, # vertex
3: 2, # line
5: 3, # triangle
9: 4, # quad
10: 4, # tetra
12: 8, # hexahedron
13: 6, # wedge
14: 5, # pyramid
15: 10, # penta_prism
16: 12, # hexa_prism
21: 3, # line3
22: 6, # triangle6
23: 8, # quad8
24: 10, # tetra10
25: 20, # hexahedron20
26: 15, # wedge15
27: 13, # pyramid13
28: 9, # quad9
29: 27, # hexahedron27
30: 6, # quad6
31: 12, # wedge12
32: 18, # wedge18
33: 24, # hexahedron24
34: 7, # triangle7
35: 4, # line4
}
# These are all VTK data types. One sometimes finds 'vtktypeint64', but
# this is ill-formed.
vtk_to_numpy_dtype_name = {
"bit": "bool",
"unsigned_char": "uint8",
"char": "int8",
"unsigned_short": "uint16",
"short": "int16",
"unsigned_int": "uint32",
"int": "int32",
"unsigned_long": "int64",
"long": "int64",
"float": "float32",
"double": "float64",
}
numpy_to_vtk_dtype = {v: k for k, v in vtk_to_numpy_dtype_name.items()}
# supported vtk dataset types
vtk_dataset_types = [
"UNSTRUCTURED_GRID",
"STRUCTURED_POINTS",
"STRUCTURED_GRID",
"RECTILINEAR_GRID",
]
# additional infos per dataset type
vtk_dataset_infos = {
"UNSTRUCTURED_GRID": [],
"STRUCTURED_POINTS": [
"DIMENSIONS",
"ORIGIN",
"SPACING",
"ASPECT_RATIO", # alternative for SPACING in version 1.0 and 2.0
],
"STRUCTURED_GRID": ["DIMENSIONS"],
"RECTILINEAR_GRID": [
"DIMENSIONS",
"X_COORDINATES",
"Y_COORDINATES",
"Z_COORDINATES",
],
}
# all main sections in vtk
vtk_sections = [
"METADATA",
"DATASET",
"POINTS",
"CELLS",
"CELL_TYPES",
"POINT_DATA",
"CELL_DATA",
"LOOKUP_TABLE",
]
class Info:
    """Info Container for the VTK reader."""

    def __init__(self):
        # Parsed node coordinates once the POINTS section has been read.
        self.points = None
        self.field_data = {}
        # Raw per-cell data, keyed by array name; split per cell type later.
        self.cell_data_raw = {}
        self.point_data = {}
        # DATASET metadata: "type" plus DIMENSIONS/ORIGIN/SPACING/... entries.
        self.dataset = {}
        # Raw CELLS connectivity vector and CELL_TYPES vector.
        self.c = None
        self.ct = None
        # Name of the top-level section currently being read.
        self.active = None
        self.is_ascii = False
        # Whitespace-split tokens of the current section header line.
        self.split = []
        self.num_items = 0
        # One of the problems in reading VTK files are POINT_DATA and CELL_DATA fields.
        # They can contain a number of SCALARS+LOOKUP_TABLE tables, without giving an
        # indication of how many there are. Hence, SCALARS must be treated like a
        # first-class section.  To associate it with POINT/CELL_DATA, we store the
        # `active` section in this variable.
        self.section = None
def read(filename):
    """Read a legacy-format VTK file from *filename* and return the parsed mesh."""
    with open_file(filename, "rb") as handle:
        return read_buffer(handle)
def read_buffer(f):
    """Parse an already-opened, binary-mode legacy VTK stream into a Mesh."""
    # initialize output data
    info = Info()
    # skip header and title
    f.readline()
    f.readline()
    data_type = f.readline().decode("utf-8").strip().upper()
    if data_type not in ["ASCII", "BINARY"]:
        raise ReadError("Unknown VTK data type '{}'.".format(data_type))
    info.is_ascii = data_type == "ASCII"
    while True:
        line = f.readline().decode("utf-8")
        if not line:
            # EOF
            break
        line = line.strip()
        if len(line) == 0:
            continue
        info.split = line.split()
        info.section = info.split[0].upper()
        # Top-level keywords go through _read_section; anything else (SCALARS,
        # VECTORS, coordinate arrays, ...) is a subsection of the active section.
        if info.section in vtk_sections:
            _read_section(f, info)
        else:
            _read_subsection(f, info)
    # For structured dataset types, points/cells are synthesized here.
    _check_mesh(info)
    cells, cell_data = translate_cells(info.c, info.ct, info.cell_data_raw)
    return Mesh(
        info.points,
        cells,
        point_data=info.point_data,
        cell_data=cell_data,
        field_data=info.field_data,
    )
def _read_section(f, info):
    """Dispatch on a top-level VTK keyword (info.section) and read its payload.

    Mutates *info* in place: records the active section and stores parsed
    points / connectivity / cell types.  POINT_DATA and CELL_DATA only set the
    item count; their payloads arrive in subsequent subsections.
    """
    if info.section == "METADATA":
        _skip_meta(f)
    elif info.section == "DATASET":
        info.active = "DATASET"
        info.dataset["type"] = info.split[1].upper()
        if info.dataset["type"] not in vtk_dataset_types:
            raise ReadError(
                "Only VTK '{}' supported (not {}).".format(
                    "', '".join(vtk_dataset_types), info.dataset["type"]
                )
            )
    elif info.section == "POINTS":
        info.active = "POINTS"
        info.num_points = int(info.split[1])
        data_type = info.split[2].lower()
        info.points = _read_points(f, data_type, info.is_ascii, info.num_points)
    elif info.section == "CELLS":
        info.active = "CELLS"
        # Header is "CELLS <num_cells> <total_ints>"; the flat connectivity
        # vector contains <total_ints> integers (counts + indices).
        info.num_items = int(info.split[2])
        info.c = _read_cells(f, info.is_ascii, info.num_items)
    elif info.section == "CELL_TYPES":
        info.active = "CELL_TYPES"
        info.num_items = int(info.split[1])
        info.ct = _read_cell_types(f, info.is_ascii, info.num_items)
    elif info.section == "POINT_DATA":
        info.active = "POINT_DATA"
        info.num_items = int(info.split[1])
    elif info.section == "CELL_DATA":
        info.active = "CELL_DATA"
        info.num_items = int(info.split[1])
    elif info.section == "LOOKUP_TABLE":
        info.num_items = int(info.split[2])
        # Lookup tables are RGBA quadruples; they are read and discarded.
        data = numpy.fromfile(f, count=info.num_items * 4, sep=" ", dtype=float)
        rgba = data.reshape((info.num_items, 4))  # noqa F841
def _read_subsection(f, info):
    """Read a subsection (SCALARS/VECTORS/TENSORS/FIELD or a dataset-info
    line) and store it in the dict belonging to the active section.
    """
    # Pick the destination dict from the active top-level section.
    if info.active == "POINT_DATA":
        d = info.point_data
    elif info.active == "CELL_DATA":
        d = info.cell_data_raw
    elif info.active == "DATASET":
        d = info.dataset
    else:
        d = info.field_data
    if info.section in vtk_dataset_infos[info.dataset["type"]]:
        # e.g. "X_COORDINATES"[1:] == "_COORDINATES" for rectilinear grids.
        if info.section[1:] == "_COORDINATES":
            info.num_points = int(info.split[1])
            data_type = info.split[2].lower()
            d[info.section] = _read_coords(f, data_type, info.is_ascii, info.num_points)
        else:
            if info.section == "DIMENSIONS":
                d[info.section] = list(map(int, info.split[1:]))
            else:
                d[info.section] = list(map(float, info.split[1:]))
            # DIMENSIONS/ORIGIN/SPACING/ASPECT_RATIO are always x/y/z triples.
            if len(d[info.section]) != 3:
                raise ReadError(
                    "Wrong number of info in section '{}'. Need 3, got {}.".format(
                        info.section, len(d[info.section])
                    )
                )
    elif info.section == "SCALARS":
        d.update(_read_scalar_field(f, info.num_items, info.split, info.is_ascii))
    elif info.section == "VECTORS":
        d.update(_read_field(f, info.num_items, info.split, [3], info.is_ascii))
    elif info.section == "TENSORS":
        d.update(_read_field(f, info.num_items, info.split, [3, 3], info.is_ascii))
    elif info.section == "FIELD":
        d.update(_read_fields(f, int(info.split[2]), info.is_ascii))
    else:
        raise ReadError("Unknown section '{}'.".format(info.section))
def _check_mesh(info):
    """Post-parse step: validate required sections and, for structured dataset
    types, synthesize the points and/or cells the file stores only implicitly.
    """
    if info.dataset["type"] == "UNSTRUCTURED_GRID":
        if info.c is None:
            raise ReadError("Required section CELLS not found.")
        if info.ct is None:
            raise ReadError("Required section CELL_TYPES not found.")
    elif info.dataset["type"] == "STRUCTURED_POINTS":
        dim = info.dataset["DIMENSIONS"]
        ori = info.dataset["ORIGIN"]
        spa = (
            info.dataset["SPACING"]
            if "SPACING" in info.dataset
            else info.dataset["ASPECT_RATIO"]
        )
        # Evenly spaced axes built from origin + spacing per dimension.
        axis = [
            numpy.linspace(ori[i], ori[i] + (dim[i] - 1.0) * spa[i], dim[i])
            for i in range(3)
        ]
        info.points = _generate_points(axis)
        info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"])
    elif info.dataset["type"] == "RECTILINEAR_GRID":
        axis = [
            info.dataset["X_COORDINATES"],
            info.dataset["Y_COORDINATES"],
            info.dataset["Z_COORDINATES"],
        ]
        info.points = _generate_points(axis)
        info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"])
    elif info.dataset["type"] == "STRUCTURED_GRID":
        # Points were read explicitly; only the connectivity is implicit.
        info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"])
def _generate_cells(dim):
    """Create implicit connectivity (flat, node-count-prefixed VTK layout) and
    cell-type ids for a structured grid with point dimensions *dim*.

    Degenerate axes (size <= 1) are dropped, so the result is lines, quads, or
    hexahedra depending on the effective spatial dimension.
    """
    ele_dim = [d - 1 for d in dim if d > 1]
    ele_no = numpy.prod(ele_dim, dtype=int)
    spatial_dim = len(ele_dim)
    if spatial_dim == 1:
        # cells are lines in 1D
        cells = numpy.empty((ele_no, 3), dtype=int)
        cells[:, 0] = 2
        cells[:, 1] = numpy.arange(ele_no, dtype=int)
        cells[:, 2] = cells[:, 1] + 1
        cell_types = numpy.full(ele_no, 3, dtype=int)
    elif spatial_dim == 2:
        # cells are quad in 2D
        cells = numpy.empty((ele_no, 5), dtype=int)
        cells[:, 0] = 4
        cells[:, 1] = numpy.arange(0, ele_no, dtype=int)
        # Each point row has ele_dim[0] + 1 points but only ele_dim[0] cells;
        # the integer division skips the extra point at every row boundary.
        cells[:, 1] += numpy.arange(0, ele_no, dtype=int) // ele_dim[0]
        cells[:, 2] = cells[:, 1] + 1
        cells[:, 3] = cells[:, 1] + 2 + ele_dim[0]
        cells[:, 4] = cells[:, 3] - 1
        cell_types = numpy.full(ele_no, 9, dtype=int)
    else:
        # cells are hex in 3D
        cells = numpy.empty((ele_no, 9), dtype=int)
        cells[:, 0] = 8
        cells[:, 1] = numpy.arange(ele_no)
        # A point plane holds (ele_dim[0]+1)*(ele_dim[1]+1) points vs.
        # ele_dim[0]*ele_dim[1] cells; the difference per plane is
        # ele_dim[0] + ele_dim[1] + 1, skipped at each plane boundary.
        cells[:, 1] += (ele_dim[0] + ele_dim[1] + 1) * (
            numpy.arange(ele_no) // (ele_dim[0] * ele_dim[1])
        )
        cells[:, 1] += (numpy.arange(ele_no) % (ele_dim[0] * ele_dim[1])) // ele_dim[0]
        cells[:, 2] = cells[:, 1] + 1
        cells[:, 3] = cells[:, 1] + 2 + ele_dim[0]
        cells[:, 4] = cells[:, 3] - 1
        cells[:, 5] = cells[:, 1] + (1 + ele_dim[0]) * (1 + ele_dim[1])
        cells[:, 6] = cells[:, 5] + 1
        cells[:, 7] = cells[:, 5] + 2 + ele_dim[0]
        cells[:, 8] = cells[:, 7] - 1
        cell_types = numpy.full(ele_no, 12, dtype=int)
    return cells.reshape(-1), cell_types
def _generate_points(axis):
    """Build the (n, 3) point array of a structured grid from its three
    coordinate axes, in VTK's Fortran (x-fastest) ordering.
    """
    nx, ny, nz = (len(a) for a in axis)
    total = nx * ny * nz
    grid_ids = numpy.mgrid[0:nx, 0:ny, 0:nz]
    points = numpy.empty((total, 3), dtype=axis[0].dtype)
    # VTK sorts points and cells in Fortran order: the x index varies fastest.
    for dim, (coords, ids) in enumerate(zip(axis, grid_ids)):
        points[:, dim] = coords[ids.reshape(-1, order="F")]
    return points
def _read_coords(f, data_type, is_ascii, num_points):
    """Read one RECTILINEAR_GRID coordinate array of *num_points* values."""
    base_dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
    if is_ascii:
        return numpy.fromfile(f, count=num_points, sep=" ", dtype=base_dtype)
    # Binary data is big endian, see
    # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
    coords = numpy.fromfile(f, count=num_points, dtype=base_dtype.newbyteorder(">"))
    terminator = f.readline().decode("utf-8")
    if terminator != "\n":
        raise ReadError()
    return coords
def _read_points(f, data_type, is_ascii, num_points):
    """Read the POINTS section: *num_points* xyz triples of *data_type*."""
    base_dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
    if is_ascii:
        flat = numpy.fromfile(f, count=num_points * 3, sep=" ", dtype=base_dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        flat = numpy.fromfile(
            f, count=num_points * 3, dtype=base_dtype.newbyteorder(">")
        )
        if f.readline().decode("utf-8") != "\n":
            raise ReadError()
    return flat.reshape((num_points, 3))
def _read_cells(f, is_ascii, num_items):
    """Read the raw CELLS connectivity vector of *num_items* integers."""
    if is_ascii:
        return numpy.fromfile(f, count=num_items, sep=" ", dtype=int)
    connectivity = numpy.fromfile(f, count=num_items, dtype=">i4")
    # Binary payloads are followed by a single newline.
    trailing = f.readline().decode("utf-8")
    if trailing != "\n":
        raise ReadError()
    return connectivity
def _read_cell_types(f, is_ascii, num_items):
    """Read the CELL_TYPES vector: one VTK type id per cell."""
    count = int(num_items)
    if is_ascii:
        return numpy.fromfile(f, count=count, sep=" ", dtype=int)
    # binary
    types = numpy.fromfile(f, count=count, dtype=">i4")
    trailing = f.readline().decode("utf-8")
    # Sometimes, there's no newline at the end
    if trailing.strip() != "":
        raise ReadError()
    return types
def _read_scalar_field(f, num_data, split, is_ascii):
    """Read a SCALARS subsection (header tokens in *split*) of *num_data*
    values, consuming the mandatory LOOKUP_TABLE line that follows the header.
    """
    data_name = split[1]
    data_type = split[2].lower()
    try:
        num_comp = int(split[3])
    except IndexError:
        num_comp = 1
    # The standard says:
    # > The parameter numComp must range between (1,4) inclusive; [...]
    if not (0 < num_comp < 5):
        raise ReadError("The parameter numComp must range between (1,4) inclusive")
    # NOTE(review): num_comp is validated but never used below; for
    # numComp > 1 the read count presumably needs to be num_data * num_comp
    # — confirm against multi-component SCALARS files.
    dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
    lt, _ = f.readline().decode("utf-8").split()
    if lt.upper() != "LOOKUP_TABLE":
        raise ReadError()
    if is_ascii:
        data = numpy.fromfile(f, count=num_data, sep=" ", dtype=dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        dtype = dtype.newbyteorder(">")
        data = numpy.fromfile(f, count=num_data, dtype=dtype)
        line = f.readline().decode("utf-8")
        if line != "\n":
            raise ReadError()
    return {data_name: data}
def _read_field(f, num_data, split, shape, is_ascii):
    """Read a VECTORS/TENSORS subsection: *num_data* items, each of the given
    *shape* ([3] for vectors, [3, 3] for tensors).
    """
    data_name = split[1]
    data_type = split[2].lower()
    dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
    # Total scalars per item.
    # <https://stackoverflow.com/q/2104782/353337>
    k = reduce((lambda x, y: x * y), shape)
    if is_ascii:
        data = numpy.fromfile(f, count=k * num_data, sep=" ", dtype=dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        dtype = dtype.newbyteorder(">")
        data = numpy.fromfile(f, count=k * num_data, dtype=dtype)
        line = f.readline().decode("utf-8")
        if line != "\n":
            raise ReadError()
    data = data.reshape(-1, *shape)
    return {data_name: data}
def _read_fields(f, num_fields, is_ascii):
    """Read a FIELD subsection containing *num_fields* named arrays and return
    them as a name -> array dict.
    """
    data = {}
    for _ in range(num_fields):
        line = f.readline().decode("utf-8").split()
        if line[0] == "METADATA":
            # A METADATA block may precede the array header; skip it first.
            _skip_meta(f)
            name, shape0, shape1, data_type = f.readline().decode("utf-8").split()
        else:
            name, shape0, shape1, data_type = line
        shape0 = int(shape0)
        shape1 = int(shape1)
        dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type.lower()])
        if is_ascii:
            dat = numpy.fromfile(f, count=shape0 * shape1, sep=" ", dtype=dtype)
        else:
            # Binary data is big endian, see
            # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
            dtype = dtype.newbyteorder(">")
            dat = numpy.fromfile(f, count=shape0 * shape1, dtype=dtype)
            line = f.readline().decode("utf-8")
            if line != "\n":
                raise ReadError()
        # shape0 is the number of components per tuple; 1-component arrays
        # stay one-dimensional.
        if shape0 != 1:
            dat = dat.reshape((shape1, shape0))
        data[name] = dat
    return data
def _skip_meta(f):
    """Consume and discard a METADATA block; it is terminated by a blank line.

    See https://vtk.org/doc/nightly/html/IOLegacyInformationFormat.html
    """
    while f.readline().decode("utf-8").strip():
        pass
def translate_cells(data, types, cell_data_raw):
    """Convert the raw VTK connectivity vector plus cell-type vector into the
    meshio cells dict and per-cell-type cell data.
    """
    # https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html
    # Translate it into the cells dictionary.
    # `data` is a one-dimensional vector with
    # (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...
    # Collect types into bins.
    # See <https://stackoverflow.com/q/47310359/353337> for better
    # alternatives.
    bins = {u: numpy.where(types == u)[0] for u in numpy.unique(types)}
    has_polygon = meshio_to_vtk_type["polygon"] in bins
    # Deduct offsets from the cell types. This is much faster than manually
    # going through the data array. Slight disadvantage: This doesn't work for
    # cells with a custom number of points.
    numnodes = numpy.empty(len(types), dtype=int)
    if has_polygon:
        # If some polygons are in the VTK file, loop over the cells
        nbcells = len(types)
        offsets = numpy.empty(len(types), dtype=int)
        offsets[0] = 0
        for idx in range(nbcells - 1):
            numnodes[idx] = data[offsets[idx]]
            offsets[idx + 1] = offsets[idx] + numnodes[idx] + 1
        idx = nbcells - 1
        numnodes[idx] = data[offsets[idx]]
    else:
        for tpe, idx in bins.items():
            numnodes[idx] = vtk_type_to_numnodes[tpe]
        offsets = numpy.cumsum(numnodes + 1) - (numnodes + 1)
        # Sanity check: each cell's stored node count must match its type.
        if not numpy.all(numnodes == data[offsets]):
            raise ReadError()
    cells = {}
    cell_data = {}
    if has_polygon:
        # TODO: cell_data
        for idx in range(nbcells):
            nbedges = data[offsets[idx]]
            start = offsets[idx] + 1
            end = start + numnodes[idx]
            cell = data[start:end]
            # 3- and 4-node polygons are promoted to triangle/quad.
            if nbedges == vtk_type_to_numnodes[meshio_to_vtk_type["triangle"]]:
                key = "triangle"
            elif nbedges == vtk_type_to_numnodes[meshio_to_vtk_type["quad"]]:
                key = "quad"
            else:
                key = "polygon" + str(nbedges)
            if key in cells:
                cells[key] = numpy.vstack([cells[key], cell])
            else:
                cells[key] = numpy.reshape(cell, (1, -1))
    else:
        for tpe, b in bins.items():
            meshio_type = vtk_to_meshio_type[tpe]
            n = data[offsets[b[0]]]
            if not (data[offsets[b]] == n).all():
                raise ReadError()
            # Gather the n node indices following each cell's count prefix.
            indices = numpy.add.outer(offsets[b], numpy.arange(1, n + 1))
            cells[meshio_type] = data[indices]
            cell_data[meshio_type] = {
                key: value[b] for key, value in cell_data_raw.items()
            }
    return cells, cell_data
def write(filename, mesh, binary=True):
    """Write *mesh* to *filename* in legacy VTK format (binary by default)."""

    def pad(array):
        # Append a zero third column to (n, 2) arrays.
        return numpy.pad(array, ((0, 0), (0, 1)), "constant")

    if mesh.points.shape[1] == 2:
        logging.warning(
            "VTK requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        points = pad(mesh.points)
    else:
        points = mesh.points
    if mesh.point_data:
        # NOTE(review): padding assigns back into the caller's mesh.point_data
        # / mesh.cell_data, mutating the input mesh in place — confirm intended.
        for name, values in mesh.point_data.items():
            if len(values.shape) == 2 and values.shape[1] == 2:
                logging.warning(
                    "VTK requires 3D vectors, but 2D vectors given. "
                    "Appending 0 third component to {}.".format(name)
                )
                mesh.point_data[name] = pad(values)
    if mesh.cell_data:
        for t, data in mesh.cell_data.items():
            for name, values in data.items():
                if len(values.shape) == 2 and values.shape[1] == 2:
                    logging.warning(
                        "VTK requires 3D vectors, but 2D vectors given. "
                        "Appending 0 third component to {}.".format(name)
                    )
                    mesh.cell_data[t][name] = pad(mesh.cell_data[t][name])
    if not binary:
        logging.warning("VTK ASCII files are only meant for debugging.")
    with open_file(filename, "wb") as f:
        f.write("# vtk DataFile Version 4.2\n".encode("utf-8"))
        f.write("written by meshio v{}\n".format(__version__).encode("utf-8"))
        f.write(("BINARY\n" if binary else "ASCII\n").encode("utf-8"))
        f.write("DATASET UNSTRUCTURED_GRID\n".encode("utf-8"))
        # write points and cells
        _write_points(f, points, binary)
        _write_cells(f, mesh.cells, binary)
        # write point data
        if mesh.point_data:
            num_points = mesh.points.shape[0]
            f.write("POINT_DATA {}\n".format(num_points).encode("utf-8"))
            _write_field_data(f, mesh.point_data, binary)
        # write cell data
        if mesh.cell_data:
            total_num_cells = sum([len(c) for c in mesh.cells.values()])
            cell_data_raw = raw_from_cell_data(mesh.cell_data)
            f.write("CELL_DATA {}\n".format(total_num_cells).encode("utf-8"))
            _write_field_data(f, cell_data_raw, binary)
    return
def _write_points(f, points, binary):
    """Write the POINTS section for *points* (an (n, 3) numpy array)."""
    header = "POINTS {} {}\n".format(
        len(points), numpy_to_vtk_dtype[points.dtype.name]
    )
    f.write(header.encode("utf-8"))
    if binary:
        # Binary data must be big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        big_endian = points.astype(points.dtype.newbyteorder(">"))
        big_endian.tofile(f, sep="")
    else:
        # ascii
        points.tofile(f, sep=" ")
    f.write("\n".encode("utf-8"))
def _write_cells(f, cells, binary):
    """Write the CELLS and CELL_TYPES sections.

    *cells* maps meshio cell-type names (possibly "polygonN") to
    (num_cells, num_nodes) index arrays.  Each connectivity row is written
    prefixed with its node count, as the legacy VTK format requires.
    """
    total_num_cells = sum([len(c) for c in cells.values()])
    total_num_idx = sum([numpy.prod(c.shape) for c in cells.values()])
    # For each cell, the number of nodes is stored
    total_num_idx += total_num_cells
    f.write("CELLS {} {}\n".format(total_num_cells, total_num_idx).encode("utf-8"))
    if binary:
        for c in cells.values():
            n = c.shape[1]
            # Prepend the node count; legacy VTK binary is big-endian int32.
            d = numpy.column_stack([numpy.full(c.shape[0], n), c]).astype(
                numpy.dtype(">i4")
            )
            # tobytes() replaces the long-deprecated tostring(), which was
            # removed in NumPy 1.23; the produced bytes are identical.
            f.write(d.tobytes())
        f.write("\n".encode("utf-8"))
    else:
        # ascii
        for c in cells.values():
            n = c.shape[1]
            # prepend a column with the value n
            out = numpy.column_stack([numpy.full(c.shape[0], n), c])
            fmt = " ".join(["{}"] * out.shape[1])
            # join them all together as strings
            out = "\n".join([fmt.format(*row) for row in out]) + "\n"
            f.write(out.encode("utf-8"))
    # write cell types
    f.write("CELL_TYPES {}\n".format(total_num_cells).encode("utf-8"))
    if binary:
        for key in cells:
            # "polygonN" keys all map to the single generic VTK polygon type.
            if key[:7] == "polygon":
                d = numpy.full(len(cells[key]), meshio_to_vtk_type[key[:7]]).astype(
                    numpy.dtype(">i4")
                )
            else:
                d = numpy.full(len(cells[key]), meshio_to_vtk_type[key]).astype(
                    numpy.dtype(">i4")
                )
            f.write(d.tobytes())
        f.write("\n".encode("utf-8"))
    else:
        # ascii
        for key in cells:
            if key[:7] == "polygon":
                for _ in range(len(cells[key])):
                    f.write("{}\n".format(meshio_to_vtk_type[key[:7]]).encode("utf-8"))
            else:
                for _ in range(len(cells[key])):
                    f.write("{}\n".format(meshio_to_vtk_type[key]).encode("utf-8"))
    return
def _write_field_data(f, data, binary):
    """Write a FIELD section with one array per entry of *data*
    (name -> 1- or 2-dimensional numpy array).
    """
    f.write(("FIELD FieldData {}\n".format(len(data))).encode("utf-8"))
    for name, values in data.items():
        if len(values.shape) == 1:
            num_tuples = values.shape[0]
            num_components = 1
        else:
            if len(values.shape) != 2:
                raise WriteError("Only one- and two-dimensional field data supported.")
            num_tuples = values.shape[0]
            num_components = values.shape[1]
        # Field names are whitespace-delimited in the header line.
        if " " in name:
            raise WriteError(
                "VTK doesn't support spaces in field names ('{}').".format(name)
            )
        f.write(
            (
                "{} {} {} {}\n".format(
                    name,
                    num_components,
                    num_tuples,
                    numpy_to_vtk_dtype[values.dtype.name],
                )
            ).encode("utf-8")
        )
        if binary:
            # Binary payloads must be big endian in legacy VTK.
            values.astype(values.dtype.newbyteorder(">")).tofile(f, sep="")
        else:
            # ascii
            values.tofile(f, sep=" ")
            # numpy.savetxt(f, points)
        f.write("\n".encode("utf-8"))
    return
| 32.54172 | 92 | 0.564103 |
7956e7c26497cae11cb2e5e108e7847dfbedab0a | 4,111 | py | Python | botc/commands/abilities/tb/read.py | Xinverse/BOTC-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2020-06-21T17:20:17.000Z | 2020-06-21T17:20:17.000Z | botc/commands/abilities/tb/read.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2020-07-07T03:47:44.000Z | 2020-07-07T03:47:44.000Z | botc/commands/abilities/tb/read.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2022-02-18T00:42:19.000Z | 2022-02-18T00:42:19.000Z | """Read command"""
import botutils
import discord
import traceback
import json
from discord.ext import commands
from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, \
check_if_player_really_alive, check_if_can_read, PlayerParser, AbilityForbidden, \
NotAPlayer, BOTCUtils, AliveOnlyCommand, NotNight, NotDMChannel
with open('botutils/bot_text.json') as json_file:
language = json.load(json_file)
error_str = language["system"]["error"]
with open('botc/game_text.json') as json_file:
documentation = json.load(json_file)
class Read(commands.Cog, name = documentation["misc"]["abilities_cog"]):
    """BoTC in-game commands cog
    Read command - used by fortune teller
    """

    def __init__(self, client):
        self.client = client

    def cog_check(self, ctx):
        """Check performed on all commands of this cog.
        Must be a non-fleaved player to use these commands.
        """
        return check_if_is_player(ctx)  # Registered non-quit player -> NotAPlayer

    # ---------- READ COMMAND (Fortune Teller) ----------------------------------------

    @commands.command(
        pass_context = True,
        name = "read",
        hidden = False,
        brief = documentation["doc"]["read"]["brief"],
        help = documentation["doc"]["read"]["help"],
        description = documentation["doc"]["read"]["description"]
    )
    @commands.check(check_if_is_night)  # Correct phase -> NotNight
    @commands.check(check_if_dm)  # Correct channel -> NotDMChannel
    @commands.check(check_if_player_really_alive)  # Player alive -> AliveOnlyCommand
    @commands.check(check_if_can_read)  # Correct character -> RoleCannotUseCommand
    async def read(self, ctx, *, read: PlayerParser()):
        """Read command
        usage: read <player> and <player> and...
        characters: fortune teller
        """
        # Delegate the actual ability handling to the player's role object.
        player = BOTCUtils.get_player_from_id(ctx.author.id)
        await player.role.ego_self.register_read(player, read)

    @read.error
    async def read_error(self, ctx, error):
        # Centralized error handling for the read command.  Several branches
        # return silently because the corresponding check already denies use.
        emoji = documentation["cmd_warnings"]["x_emoji"]
        # Incorrect character -> RoleCannotUseCommand
        if isinstance(error, RoleCannotUseCommand):
            return
        # If it passed all the checks but raised an error in the character class
        elif isinstance(error, AbilityForbidden):
            error = getattr(error, 'original', error)
            await ctx.send(error)
        # Non-registered or quit player -> NotAPlayer
        elif isinstance(error, NotAPlayer):
            return
        # Incorrect channel -> NotDMChannel
        elif isinstance(error, NotDMChannel):
            return
        # Incorrect argument -> commands.BadArgument
        elif isinstance(error, commands.BadArgument):
            return
        # Incorrect phase -> NotNight
        elif isinstance(error, NotNight):
            try:
                await ctx.author.send(documentation["cmd_warnings"]["night_only"].format(ctx.author.mention, emoji))
            except discord.Forbidden:
                pass
        # Player not alive -> AliveOnlyCommand
        elif isinstance(error, AliveOnlyCommand):
            try:
                await ctx.author.send(documentation["cmd_warnings"]["alive_only"].format(ctx.author.mention, emoji))
            except discord.Forbidden:
                pass
        # Missing argument -> commands.MissingRequiredArgument
        elif isinstance(error, commands.MissingRequiredArgument):
            # No target given: DM the player their role's usage instructions.
            player = BOTCUtils.get_player_from_id(ctx.author.id)
            msg = player.role.ego_self.emoji + " " + player.role.ego_self.instruction + " " + player.role.ego_self.action
            try:
                await ctx.author.send(msg)
            except discord.Forbidden:
                pass
        else:
            # Unexpected error: report a generic message and log the traceback.
            try:
                raise error
            except Exception:
                await ctx.send(error_str)
                await botutils.log(botutils.Level.error, traceback.format_exc())
def setup(client):
    """discord.py extension entry point: register the Read cog on *client*."""
    client.add_cog(Read(client))
| 39.528846 | 121 | 0.633423 |
7956e87c4063087aca37349365e4c697810c154d | 9,582 | py | Python | ceph/ceph_admin/orch.py | hmaheswa/cephci | b75c1e58e1222865c81c0558ff98b3708dc4236a | [
"MIT"
] | null | null | null | ceph/ceph_admin/orch.py | hmaheswa/cephci | b75c1e58e1222865c81c0558ff98b3708dc4236a | [
"MIT"
] | null | null | null | ceph/ceph_admin/orch.py | hmaheswa/cephci | b75c1e58e1222865c81c0558ff98b3708dc4236a | [
"MIT"
] | null | null | null | """
Module that interacts with the orchestrator CLI.
Provide the interfaces to ceph orch and in turn manage the orchestration engine.
"""
from datetime import datetime, timedelta
from json import loads
from time import sleep
from ceph.ceph import ResourceNotFoundError
from utility.log import Log
from .ceph import CephCLI
from .common import config_dict_to_string
from .helper import GenerateServiceSpec
from .ls import LSMixin
from .pause import PauseMixin
from .ps import PSMixin
from .reconfig import ReconfigMixin
from .redeploy import RedeployMixin
from .remove import RemoveMixin
from .restart import RestartMixin
from .resume import ResumeMixin
from .start import StartMixin
from .stop import StopMixin
from .upgrade import UpgradeMixin
LOG = Log(__name__)
class Orch(
    LSMixin,
    PSMixin,
    ReconfigMixin,
    RedeployMixin,
    RemoveMixin,
    RestartMixin,
    StartMixin,
    StopMixin,
    UpgradeMixin,
    PauseMixin,
    ResumeMixin,
    CephCLI,
):
    """Represent ceph orch command."""

    # Sub-commands forwarded verbatim to the mixin implementations.
    direct_calls = ["ls", "ps"]

    def get_hosts_by_label(self, label: str):
        """
        Fetch host objects by label attached to them.

        Args:
            label (Str): name of the label

        Returns:
            hosts (List)
        """
        out, _ = self.shell(args=["ceph", "orch", "host", "ls", "--format=json"])
        return [node for node in loads(out) if label in node.get("labels")]

    def check_service_exists(
        self,
        service_name: str = None,
        service_type: str = None,
        timeout: int = 300,
        interval: int = 5,
    ) -> bool:
        """
        Verify the provided service has all of its daemons running.

        Args:
            service_name (Str): The name of the service to be checked.
            service_type (Str): The type of the service to be checked.
            timeout (Int): In seconds, the maximum allowed time (default=300)
            interval (int): In seconds, the polling interval time (default=5)

        Returns:
            Boolean: True if all daemons of the service are running else False.
        """
        end_time = datetime.now() + timedelta(seconds=timeout)
        check_status_dict = {
            "base_cmd_args": {"format": "json"},
            "args": {"refresh": True},
        }
        if service_name:
            check_status_dict["args"]["service_name"] = service_name
        if service_type:
            check_status_dict["args"]["service_type"] = service_type
        while end_time > datetime.now():
            sleep(interval)
            out, err = self.ls(check_status_dict)
            out = loads(out)[0]
            running = out["status"]["running"]
            count = out["status"]["size"]
            LOG.info(
                f"{running}/{count} {service_name if service_name else service_type} up... retrying"
            )
            if count == running:
                return True
        # Timed out: fetch the service events to help identify the failure.
        out, err = self.ls(check_status_dict)
        out = loads(out)
        LOG.error(
            f"{service_name if service_name else service_type} failed with \n{out[0]['events']}"
        )
        return False

    def get_role_service(self, service_name: str) -> str:
        """
        Get service info by name.

        Args:
            service_name (Str): service name

        Returns:
            service (Dict)

        Raises:
            ResourceNotFoundError: when no resource matched the provided name.
        """
        out, _ = self.ls()
        for svc in loads(out):
            if service_name in svc.get("service_name"):
                return svc
        raise ResourceNotFoundError(f"No service names matched {service_name}")

    def check_service(
        self, service_name: str, timeout: int = 300, interval: int = 5, exist=True
    ) -> bool:
        """
        Check service existence based on the exist parameter.

        If exist is set, validate its presence; otherwise wait for its removal.

        Args:
            service_name (Str): service name
            timeout (Int): timeout in seconds
            interval (Int): interval in seconds
            exist (Bool): whether the service is expected to exist

        Returns:
            Boolean: True when the observed state matches *exist* in time.
        """
        end_time = datetime.now() + timedelta(seconds=timeout)
        while end_time > datetime.now():
            sleep(interval)
            out, err = self.ls({"base_cmd_args": {"format": "json"}})
            # Bug fix: the original tested `service_name in <list of dicts>`,
            # which never matched; compare against the extracted names instead.
            found = any(
                svc.get("service_name") == service_name for svc in loads(out)
            )
            if found == exist:
                return True
            LOG.info("[%s] check for existence: %s, retrying" % (service_name, exist))
        return False

    def apply_spec(self, config) -> None:
        """
        Execute the apply_spec method using the object's service name and provided input.

        Args:
            config (Dict): Key/value pairs passed from the test suite.

        Example::

            config:
                command: apply_spec
                service: orch
                base_cmd_args:          # arguments to ceph orch
                    concise: true
                    verbose: true
                specs:
                  - service_type: host
                    attach_ip_address: true
                    labels: apply-all-labels
                    nodes:
                        - node2
                        - node3

            base_cmd_args - key/value pairs to set for base command
            specs - service specifications.
        """
        base_cmd = ["ceph", "orch"]
        if config.get("base_cmd_args"):
            base_cmd_args_str = config_dict_to_string(config.get("base_cmd_args"))
            base_cmd.append(base_cmd_args_str)
        base_cmd.append("apply -i")
        specs = config["specs"]
        spec_cls = GenerateServiceSpec(
            node=self.installer, cluster=self.cluster, specs=specs
        )
        spec_filename = spec_cls.create_spec_file()
        base_cmd.append(spec_filename)
        # /tmp is mounted into the cephadm shell so the spec file is visible.
        out, err = self.shell(
            args=base_cmd,
            base_cmd_args={"mount": "/tmp:/tmp"},
        )
        LOG.info(f"apply-spec command response :\n{out}")
        # validate services
        validate_spec_services = config.get("validate-spec-services")
        if validate_spec_services:
            self.validate_spec_services(specs=specs)
            LOG.info("Validation of service created using a spec file is completed")

    def op(self, op, config):
        """
        Execute the command ceph orch <start|stop|restart|reconfigure|redeploy> <service>.

        Args:
            config (Dict): command and service are passed from the test case.
            op (Str): operation parameters.

        Returns:
            output (Str), error (Str) returned by the command.

        Example::

            op: restart|start|stop|reconfigure|redeploy
            config:
                command: start
                base_cmd_args:
                    verbose: true
                pos_args:
                    - service_name
        """
        base_cmd = ["ceph", "orch"]
        if config.get("base_cmd_args"):
            base_cmd.append(config_dict_to_string(config["base_cmd_args"]))
        base_cmd.append(op)
        base_cmd.extend(config.get("pos_args"))
        return self.shell(args=base_cmd)

    def status(self, config):
        """
        Execute the command ceph orch status <args>.

        Args:
            config (Dict): The key/value pairs passed from the test case.

        Returns:
            output, error returned by the command.  (Annotation fixed: this
            never returned a bool.)

        Example::

            config:
                command: status
                base_cmd_args:
                    verbose: true
                    format: json | json-pretty | xml | xml-pretty | plain | yaml
                args:
                    detail: true
        """
        cmd = ["ceph", "orch"]
        if config and config.get("base_cmd_args"):
            base_cmd_args = config_dict_to_string(config["base_cmd_args"])
            cmd.append(base_cmd_args)
        cmd.append("status")
        if config and config.get("args"):
            args = config.get("args")
            if args["detail"]:
                cmd.append("--detail")
        return self.shell(args=cmd)

    def verify_status(self, op) -> bool:
        """
        Verify the pause/resume status of the orchestrator.

        Args:
            op (str): "pause" or "resume", the state to check for.

        Returns:
            Boolean: True when the orchestrator is in the requested state.
        """
        config = {"command": "status", "base_cmd_args": {"format": "json"}}
        out, _ = self.status(config)
        status = loads(out)
        if op == "pause" and status["paused"]:
            LOG.info("The orch operations are paused")
            return True
        elif op == "resume" and not status["paused"]:
            LOG.info("The orch operations are resumed")
            return True
        return False

    def validate_spec_services(self, specs) -> None:
        """
        Wait until every service described in *specs* reports all daemons up.

        Args:
            specs (List): service specifications as passed to apply_spec.
        """
        LOG.info("Validating spec services")
        for spec in specs:
            self.check_service_exists(service_type=spec["service_type"])
        # (Removed a stray trailing `return False` that contradicted -> None.)
| 29.392638 | 100 | 0.567836 |
7956e8f8d0ec0fef2a695bd6195a66b5f1e4e0e9 | 420 | py | Python | docs/python/attachments/animals.py | Benbinbin/blog-data | e98b6560253bb6a1aa35e08b4ba36d03194920d1 | [
"MIT"
] | null | null | null | docs/python/attachments/animals.py | Benbinbin/blog-data | e98b6560253bb6a1aa35e08b4ba36d03194920d1 | [
"MIT"
] | null | null | null | docs/python/attachments/animals.py | Benbinbin/blog-data | e98b6560253bb6a1aa35e08b4ba36d03194920d1 | [
"MIT"
] | null | null | null |
class Dog:
    """A dog that knows its name and barks when it hears it called."""

    def __init__(self, name):
        self.name = name

    def speak(self):
        """Make the generic dog sound; breed subclasses override this."""
        print("Woof!")

    def hear(self, words):
        """React (via speak) if this dog's name occurs in *words*."""
        if self.name in words:
            self.speak()
class Husky(Dog):
    """Husky breed: howls instead of the generic bark."""

    # Breed's region of origin.
    origin = "Siberia"

    def speak(self):
        print("Awoo!")
class Chihuahua(Dog):
    """Chihuahua breed: yips instead of the generic bark."""

    # Breed's region of origin.
    origin = "Mexico"

    def speak(self):
        print("Yip!")
class Labrador(Dog):
    """Labrador breed: inherits Dog.speak ("Woof!") unchanged."""

    # Breed's region of origin.
    origin = "Canada"
| 14 | 30 | 0.538095 |
7956ea4ffbb947ac325d8cc8e4460087f45235c4 | 15,548 | py | Python | Sizmek/Sizmek.hype-export.py | tumult/hype-export-scripts | 2cf96ceeeadd238ac25211b2003028abe75738fc | [
"MIT"
] | 27 | 2016-12-12T19:03:26.000Z | 2021-12-10T11:12:52.000Z | Sizmek/Sizmek.hype-export.py | tumult/hype-export-scripts | 2cf96ceeeadd238ac25211b2003028abe75738fc | [
"MIT"
] | 4 | 2017-05-31T10:21:39.000Z | 2020-05-05T00:28:06.000Z | Sizmek/Sizmek.hype-export.py | tumult/hype-export-scripts | 2cf96ceeeadd238ac25211b2003028abe75738fc | [
"MIT"
] | 15 | 2017-02-16T19:01:34.000Z | 2020-05-09T08:32:14.000Z | #!/usr/bin/python
# Sizmek.hype-export.py
# Export Script for Tumult Hype to produce ads for Sizmek MDX
#
# Installation, usage, and additional info:
# https://tumult.com/hype/export-scripts/
#
# MIT License
# Copyright (c) 2017 Tumult Inc.
#
import argparse
import json
import sys
import distutils.util
import os
# update info
current_script_version = 4
version_info_url = "https://static.tumult.com/hype/export-scripts/Sizmek/latest_script_version.txt" # only returns a version number
download_url = "https://tumult.com/hype/export-scripts/Sizmek/" # gives a user info to download and install
minimum_update_check_duration_in_seconds = 60 * 60 * 24 # once a day
defaults_bundle_identifier = "com.tumult.Hype2.hype-export.Sizmek"
# html insertions
insert_at_head_start = """
<meta name="ad.size" content="width=${width},height=${height}">
${EBModulesToLoad}
<script type="text/javascript" src="./EBLoader.js"></script>
"""
insert_at_head_end = """
<script>
(function () {
var thisHypeDocument = null;
var didLoadHypeDocument = false;
function preInit() {
if(EB.isInitialized()) {
init();
} else {
EB.addEventListener(EBG.EventName.EB_INITIALIZED, init);
}
}
function init() {
show();
}
function show() {
if(thisHypeDocument != null && didLoadHypeDocument == false) {
thisHypeDocument.showSceneNamed(thisHypeDocument.sceneNames()[0]);
didLoadHypeDocument = true;
}
}
function hypeDocumentLoadCallback(hypeDocument, element, event) {
thisHypeDocument = hypeDocument;
if(!EB.isInitialized() ) {
// don't load the Hype document until Sizmek EBLoader has loaded
return false;
}
didLoadHypeDocument = true;
return true;
}
if("HYPE_eventListeners" in window === false) {
window.HYPE_eventListeners = Array();
}
window.HYPE_eventListeners.push({"type":"HypeDocumentLoad", "callback":hypeDocumentLoadCallback});
window.addEventListener('load', preInit);
})();
function hypeAdExit(identifier, url) {
if(identifier != null && url != null) {
EB.clickthrough(identifier, url);
} else {
EB.clickthrough();
}
}
function hypeAdCounter(identifier) {
EB.userActionCounter(identifier);
}
function hypeAdAutoEventCounter(identifier) {
EB.automaticEventCounter(identifier);
}
function hypeAdStartTimer(identifier) {
EB.startTimer(identifier);
}
function hypeAdStopTimer(identifier) {
EB.stopTimer(identifier);
}
function hypeAdDummyInteractions() {
${dummy_interactions}
}
</script>
"""
insert_at_body_start = ""
insert_at_body_end = ""
function_name_mapping = { "hypeAdExit" : "EB.clickthrough", "hypeAdCounter" : "EB.userActionCounter", "hypeAdAutoEventCounter" : "EB.automaticEventCounter", "hypeAdStartTimer" : "EB.startTimer", "hypeAdStopTimer" : "EB.stopTimer" }
def construct_dummy_interaction(function_name, arguments):
    """Build a dummy Sizmek API call string for a Hype extra action.

    Returns e.g. 'EB.clickthrough("a","b")' for a known function name,
    or None when function_name is not in function_name_mapping.
    """
    mapped_name = function_name_mapping.get(function_name)
    if mapped_name is None:
        return None
    joined_args = ",".join(arguments)
    return "{}({})".format(mapped_name, joined_args)
class HypeURLType:
    # Enumeration of the URL categories Hype passes via --url_type.
    Unknown = 0
    HypeJS = 1           # presumably the Hype runtime JS — confirm against Hype docs
    Resource = 2         # a document resource file
    Link = 3
    ResourcesFolder = 4  # the *.hyperesources folder itself (remapped to "." below)
def main():
    """Dispatch on the command-line contract Tumult Hype uses for export scripts.

    Hype invokes this script several times per export/preview with different
    flags (--get_options, --replace_url, --modify_staging_path,
    --check_for_updates); each branch reports back via exit_with_result(),
    which prints JSON and terminates the process.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--hype_version')
    parser.add_argument('--hype_build')
    parser.add_argument('--export_uid')
    parser.add_argument('--get_options', action='store_true')
    parser.add_argument('--replace_url')
    parser.add_argument('--url_type')
    parser.add_argument('--is_reference', default="False")
    parser.add_argument('--should_preload')
    parser.add_argument('--modify_staging_path')
    parser.add_argument('--destination_path')
    parser.add_argument('--export_info_json_path')
    parser.add_argument('--is_preview', default="False")
    parser.add_argument('--check_for_updates', action='store_true')
    # parse_known_args: ignore flags added by future Hype versions
    args, unknown = parser.parse_known_args()

    ## --get_options
    ##		return arguments to be presented in the Hype UI as a dictionary:
    ##			'export_options' is a dictionary of key/value pairs that make modifications to Hype's export/preview system. Some useful ones:
    ##				'exportShouldInlineHypeJS' : boolean
    ##				'exportShouldInlineDocumentLoader' : boolean
    ##				'exportShouldUseExternalRuntime' : boolean
    ##				'exportExternalRuntimeURL' : string
    ##				'exportShouldSaveHTMLFile' : boolean
    ##				'indexTitle' : string
    ##				'exportShouldBustBrowserCaching' : boolean
    ##				'exportShouldIncludeTextContents' : boolean
    ##				'exportShouldIncludePIE' : boolean
    ##				'exportSupportInternetExplorer6789' : boolean
    ##				'initialSceneIndex' : integer
    ##			'save_options' is a dictionary of key/value pairs that for determining when/how to export. valid keys:
    ##				'file_extension' : the final extension when exported (ex. "zip")
    ##				'allows_export' : should show up in the File > Export as HTML5 menu and Advanced Export
    ##				'allows_preview' : should show up in the Preview menu, if so --is_preview True is passed into the --modify_staging_path call
    ##			'document_arguments' should be an array of keys, these will be passed to subsequent calls via --key value
    ##			'extra_actions' should be an array of dictionaries
    ##				'label': string that is the user presented name
    ##				'function': javascript function to call if this action is triggered, just the name of it
    ##				'arguments': array of dictionaries that represent arguments passed into the function
    ##					'label': string that is presented to Hype UI
    ##					'type': string that is either "String" (will be quoted and escaped) or "Expression" (passed directly to function argument as-is)
    if args.get_options:
        def export_options():
            #cdnPath = "https://secure-ds.serving-sys.com/BurstingcachedScripts/libraries/hype/" + args.hype_build
            return {
                "exportShouldInlineHypeJS" : True,
                "exportShouldInlineDocumentLoader" : True,
                #"exportShouldUseExternalRuntime" : False,
                #"exportExternalRuntimeURL" : cdnPath,
                "exportShouldSaveHTMLFile" : True,
                "exportShouldNameAsIndexDotHTML" : True,
                #"indexTitle" : "",
                "exportShouldBustBrowserCaching" : False,
                "exportShouldIncludeTextContents" : False,
                "exportShouldIncludePIE" : False,
                "exportSupportInternetExplorer6789" : False,
                "exportShouldSaveRestorableDocument" : False,
            }
        def save_options():
            return {
                "file_extension" : "zip",
                "allows_export" : True,
                "allows_preview" : True,
            }
        def extra_actions():
            return [
                {"label" : "ClickThrough", "function" : "hypeAdExit"},
                {"label" : "Custom ClickThrough", "function" : "hypeAdExit", "arguments":[{"label":"intName", "type": "String"}, {"label":"clickURL", "type": "String"}]},
                {"label" : "User Action Counter", "function" : "hypeAdCounter", "arguments":[{"label":"intName", "type": "String"}]},
                {"label" : "Automatic Event Counter", "function" : "hypeAdAutoEventCounter", "arguments":[{"label":"intName", "type": "String"}]},
                {"label" : "Start Timer", "function" : "hypeAdStartTimer", "arguments":[{"label":"intName", "type": "String"}]},
                {"label" : "Stop Timer", "function" : "hypeAdStopTimer", "arguments":[{"label":"intName", "type": "String"}]},
            ]
        options = {
            "export_options" : export_options(),
            "save_options" : save_options(),
            "extra_actions" : extra_actions(),
            "min_hype_build_version" : "574", # build number (ex "574") and *not* marketing version (ex "3.6.0")
            #"max_hype_build_version" : "10000", # build number (ex "574") and *not* marketing version (ex "3.6.0")
        }
        exit_with_result(options)

    ## --replace_url [url] --url_type [HypeURLType] --is_reference [True|False] --should_preload [None|True|False] --is_preview [True|False] --export_uid [identifier]
    ##		return a dictionary with "url", "is_reference", and optional "should_preload" keys
    ##		if HypeURLType.ResourcesFolder, you can set the url to "." so there is no .hyperesources folder and everything
    ##		is placed next to the .html file
    ##		should_preload may be None type in cases where it won't be used
    elif args.replace_url != None:
        url_info = {}
        url_info['is_reference'] = bool(distutils.util.strtobool(args.is_reference))
        if args.should_preload != None:
            url_info['should_preload'] = bool(distutils.util.strtobool(args.should_preload))
        if int(args.url_type) == HypeURLType.ResourcesFolder:
            # flatten: put resources next to index.html instead of *.hyperesources
            url_info['url'] = "."
        else:
            url_info['url'] = args.replace_url
        exit_with_result(url_info)

    ## --modify_staging_path [filepath] --destination_path [filepath] --export_info_json_path [filepath] --is_preview [True|False] --export_uid [identifier]
    ##		return True if you moved successfully to the destination_path, otherwise don't return anything and Hype will make the move
    ##		make any changes you'd like before the save is complete
    ##		for example, if you are a zip, you need to zip and write to the destination_path
    ##		or you may want to inject items into the HTML file
    ##		if it is a preview, you shouldn't do things like zip it up, as Hype needs to know where the index.html file is
    ##		export_info_json_path is a json object holding keys:
    ##			html_filename: string that is the filename for the html file which you may want to inject changes into
    ##			main_container_width: number representing the width of the document in pixels
    ##			main_container_height: number representing the height of the document in pixels
    ##			document_arguments: dictionary of key/value pairs based on what was passed in from the earlier --get_options call
    ##			extra_actions: array of dictionaries for all usages of the extra actions. There is no guarantee these all originated from this script or version.
    ##				function: string of function name (as passed in from --get_options)
    ##				arguments: array of strings
    elif args.modify_staging_path != None:
        import os
        import string
        is_preview = bool(distutils.util.strtobool(args.is_preview))

        # read export_info.json file
        export_info_file = open(args.export_info_json_path)
        export_info = json.loads(export_info_file.read())
        export_info_file.close()

        # write out EBLoader
        writeEBLoader(args.modify_staging_path)

        # determine if there is any video and then make sure this module is set to be loaded
        global insert_at_head_start
        template = string.Template(insert_at_head_start)
        if folder_contains_file_of_types(args.modify_staging_path, ["mp4", "ogv", "webm", "avi", "mov", "ogg", "m4v"]):
            modulesToLoad = '<script type="text/javascript"> EBModulesToLoad = [\'Video\']; </script>';
        else:
            modulesToLoad = '';
        insert_at_head_start = template.substitute({'width' : export_info['main_container_width'], 'height' : export_info['main_container_height'], "EBModulesToLoad" : modulesToLoad })

        # insert interactions for dummy code so it is picked up by ad parsers
        global insert_at_head_end
        template = string.Template(insert_at_head_end)
        dummy_interactions = ""
        for actionInfo in export_info['extra_actions']:
            dummy_interaction = construct_dummy_interaction(actionInfo["function"], actionInfo["arguments"])
            if dummy_interaction == None:
                continue
            dummy_interactions = dummy_interactions + "\t\t" + dummy_interaction + ";\n"
        insert_at_head_end = template.substitute({"dummy_interactions" : dummy_interactions})

        # rewrite HTML file
        # NOTE(review): .encode("utf-8") yields bytes — Python 2-era idiom; under
        # Python 3 os.path.join would reject mixing str and bytes. Confirm the
        # interpreter this runs under.
        index_path = os.path.join(args.modify_staging_path, export_info['html_filename'].encode("utf-8"))
        perform_html_additions(index_path)

        # move to final location and zip up if not a preview
        import shutil
        shutil.rmtree(args.destination_path, ignore_errors=True)
        if is_preview == True:
            shutil.move(args.modify_staging_path, args.destination_path)
            exit_with_result(True)
        else:
            zip(args.modify_staging_path, args.destination_path)
            exit_with_result(True)

    ## --check_for_updates
    ##		return a dictionary with "url", "from_version", and "to_version" keys if there is an update, otherwise don't return anything and exit
    ##		it is your responsibility to decide how often to check
    elif args.check_for_updates:
        import subprocess
        import urllib2
        last_check_timestamp = None
        try:
            # macOS `defaults` stores the last-check timestamp between runs
            last_check_timestamp = subprocess.check_output(["defaults", "read", defaults_bundle_identifier, "last_check_timestamp"]).strip()
        except:
            pass
        try:
            timestamp_now = subprocess.check_output(["date", "+%s"]).strip()
            if (last_check_timestamp == None) or ((int(timestamp_now) - int(last_check_timestamp)) > minimum_update_check_duration_in_seconds):
                subprocess.check_output(["defaults", "write", defaults_bundle_identifier, "last_check_timestamp", timestamp_now])
                request = urllib2.Request(version_info_url, headers={'User-Agent' : "Magic Browser"})
                latest_script_version = int(urllib2.urlopen(request).read().strip())
                if latest_script_version > current_script_version:
                    exit_with_result({"url" : download_url, "from_version" : str(current_script_version), "to_version" : str(latest_script_version)})
        except:
            # best-effort: network/defaults failures silently skip the update check
            pass
def writeEBLoader(folder_path):
eb_loader_script_contents = """
(function() {
document.write("<script src='" + (document.location.protocol === "https:" ? "https://secure-" : "http://") + "ds.serving-sys.com/BurstingScript/EBLoader.js'><\/script>");
})();
"""
eb_loader_path = os.path.join(folder_path, "EBLoader.js")
if os.path.exists(eb_loader_path) == False:
eb_loader_file = open(eb_loader_path, "w")
eb_loader_file.write(eb_loader_script_contents)
eb_loader_file.close()
# HTML FILE MODIFICATION
def perform_html_additions(index_path):
    """Splice the module-level insert_at_* snippets into the exported HTML file.

    NOTE(review): assumes the file always contains <head>...</head> and
    <body>...</body> tags; if one is missing, re.search returns None and the
    .end()/.start() call raises AttributeError — confirm Hype's exporter
    guarantees these tags.
    """
    index_contents = None
    with open(index_path, 'r') as target_file:
        index_contents = target_file.read()
    if index_contents == None:
        return
    import re
    if insert_at_head_start != None:
        # insert right after the opening <head ...> tag
        head_start = re.search("<head.*?>", index_contents, re.IGNORECASE).end()
        index_contents = index_contents[:head_start] + insert_at_head_start + index_contents[head_start:]
    if insert_at_head_end != None:
        # insert just before </head>
        head_end = re.search("</head", index_contents, re.IGNORECASE).start()
        index_contents = index_contents[:head_end] + insert_at_head_end + index_contents[head_end:]
    if insert_at_body_start != None:
        index_contents = index_contents[:(body_start := re.search("<body.*?>", index_contents, re.IGNORECASE).end())] + insert_at_body_start + index_contents[body_start:] if False else index_contents
    if insert_at_body_start != None:
        body_start = re.search("<body.*?>", index_contents, re.IGNORECASE).end()
        index_contents = index_contents[:body_start] + insert_at_body_start + index_contents[body_start:]
    if insert_at_body_end != None:
        body_end = re.search("</body", index_contents, re.IGNORECASE).start()
        index_contents = index_contents[:body_end] + insert_at_body_end + index_contents[body_end:]
    # write the modified document back in place
    with open(index_path, 'w') as target_file:
        target_file.write(index_contents)
# UTILITIES
def folder_contains_file_of_types(folder_path, extensions):
    """Return True if any file under folder_path (searched recursively) ends
    with one of the given extensions, compared case-insensitively.

    Fixes: removed the unused `from os import walk` import (os.walk was used
    anyway) and replaced the per-extension inner loop with a single
    str.endswith(tuple) test.
    """
    # normalize the extensions once, outside the walk
    suffixes = tuple(ext.lower() for ext in extensions)
    for dirpath, dirnames, files in os.walk(folder_path):
        for name in files:
            if name.lower().endswith(suffixes):
                return True
    return False
# communicate info back to Hype
# uses delimiter (20 equal signs) so any above printing doesn't interfere with json data
def exit_with_result(result):
    # NOTE: Python 2 print statements. Hype reads everything after the
    # "====" delimiter line as the JSON result payload, then this process exits.
    import sys
    print "===================="
    print json.dumps({"result" : result})
    sys.exit(0)
# from http://stackoverflow.com/questions/14568647/create-zip-in-python
def zip(src, dst):
    """Zip the directory tree at src into the archive file dst.

    Entries are stored relative to src so the archive contains no absolute
    paths. NOTE: the name shadows the builtin zip(); kept unchanged for
    backward compatibility with existing callers.

    Fix: the ZipFile handle is now closed via try/finally so it is released
    even when adding a file raises.
    """
    import os
    import zipfile
    zf = zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED)
    try:
        abs_src = os.path.abspath(src)
        for dirname, subdirs, files in os.walk(src):
            for filename in files:
                absname = os.path.abspath(os.path.join(dirname, filename))
                # strip the src prefix (and its trailing separator)
                arcname = absname[len(abs_src) + 1:]
                zf.write(absname, arcname)
    finally:
        zf.close()
# run only when invoked directly by Hype (not when imported)
if __name__ == "__main__":
    main()
| 38.107843 | 231 | 0.726524 |
7956ea67e59e32a27b23179ef9215810b98c7b73 | 3,182 | py | Python | astrality/filewatcher.py | JakobGM/Astrality | 72935b616f9a6a2e9254e9cd9319b525c596e8f0 | [
"MIT"
] | 111 | 2018-03-19T12:56:35.000Z | 2022-02-05T11:19:04.000Z | astrality/filewatcher.py | JakobGM/Astrality | 72935b616f9a6a2e9254e9cd9319b525c596e8f0 | [
"MIT"
] | 120 | 2018-02-22T11:23:08.000Z | 2021-03-25T22:13:47.000Z | astrality/filewatcher.py | JakobGM/Astrality | 72935b616f9a6a2e9254e9cd9319b525c596e8f0 | [
"MIT"
] | 7 | 2018-04-06T14:28:33.000Z | 2020-03-18T20:25:59.000Z | """Module for directory modification watching."""
from pathlib import Path
import logging
from sys import platform
from typing import Callable
from watchdog.events import FileModifiedEvent, FileSystemEventHandler
from watchdog.observers import Observer
class DirectoryWatcher:
    """Observes a directory tree and reports file modifications to a callback."""

    def __init__(
        self,
        directory: Path,
        on_modified: Callable[[Path], None],
    ) -> None:
        """
        Initialize a watcher which observes modifications in `directory`.

        on_modified: callable invoked with the path of each modified file
        within `directory`.
        """
        self.on_modified = on_modified
        self.watched_directory = str(directory)
        self.observer = Observer()

    def start(self) -> None:
        """Begin watching the directory (recursively) for file modifications."""
        handler = DirectoryEventHandler(self.on_modified)
        self.observer.schedule(handler, self.watched_directory, recursive=True)
        try:
            self.observer.start()
        except BaseException as error:
            log = logging.getLogger(__name__)
            include_trace = log.getEffectiveLevel() <= logging.DEBUG
            log.exception(
                'Could not start filesystem watcher.\n'
                f'Error message: "{error}".\n'
                'Set logging level to DEBUG for full stack trace.',
                exc_info=include_trace,
            )

    def stop(self) -> None:
        """Stop watching the directory."""
        if not self.observer.is_alive():
            return
        try:
            self.observer.stop()
            self.observer.join()
        except (RuntimeError, SystemError):
            # TODO: Understand exactly what join() does, and why it sometimes
            # throws RuntimeError (and SystemError on MacOS); swallowed on
            # shutdown as before.
            pass
class DirectoryEventHandler(FileSystemEventHandler):
    """Forwards watchdog modification events to a user-supplied callback."""

    def __init__(self, on_modified: Callable[[Path], None]) -> None:
        """Store the callback invoked with each modified file's path."""
        self._on_modified = on_modified

    def on_modified(self, event: FileModifiedEvent) -> None:
        """Invoke the callback for the file behind a modification event."""
        if not event.is_directory:
            self._on_modified(Path(event.src_path).absolute())
            return
        if platform != 'darwin':
            return
        # FSEvents on MacOS only supplies the directory containing the
        # modified file, so locate the most recently touched file ourselves.
        candidates = [
            candidate
            for candidate in Path(event.src_path).glob('**/*')
            if not candidate.is_dir()
        ]
        if not candidates:
            return None
        newest = max(candidates, key=lambda p: p.stat().st_mtime_ns)
        self._on_modified(newest)
| 34.215054 | 76 | 0.582652 |
7956eaeeb3503e0cf6cf7c7be20a13b781d3dba4 | 637 | py | Python | setup.py | D-Krystek/storybook | b632e0657c74f1163df2777376b2366801aaa849 | [
"MIT"
] | null | null | null | setup.py | D-Krystek/storybook | b632e0657c74f1163df2777376b2366801aaa849 | [
"MIT"
] | 2 | 2020-09-12T00:13:28.000Z | 2020-09-18T02:24:30.000Z | setup.py | D-Krystek/storybook | b632e0657c74f1163df2777376b2366801aaa849 | [
"MIT"
] | 1 | 2020-10-06T21:38:28.000Z | 2020-10-06T21:38:28.000Z | """
A collaboration in Python.
"""
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="storybook",
version="0.0.0",
description=("A collaboration in Python."),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/blakeNaccarato/storybook",
author="Blake Naccarato",
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.7",
)
| 25.48 | 73 | 0.703297 |
7956ebd0519151afc62dcf94e95f6580627b163b | 23,920 | py | Python | src/rene.py | logesh0304/Rene | 20769ce41358bfa97356c214aab9bee0c72fd08b | [
"MIT"
] | 1 | 2020-11-04T17:18:19.000Z | 2020-11-04T17:18:19.000Z | src/rene.py | logesh0304/Rene | 20769ce41358bfa97356c214aab9bee0c72fd08b | [
"MIT"
] | null | null | null | src/rene.py | logesh0304/Rene | 20769ce41358bfa97356c214aab9bee0c72fd08b | [
"MIT"
] | null | null | null | # The MIT License
#
# Copyright 2020 Logesh0304.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import collections
import os
import re
import sys
import typing
import pathlib
from pathlib import Path
import glob
from typing import *
_head=r"""
==========================================================
||\\\\\\\ ||///////// ||\ || ||/////////
|| || || ||\\ || ||
|| || || || \\ || ||
|| || ||\\\\\\\\ || \\ || ||\\\\\\\
||/////// || || \\ || ||
||\\ || || \\ || ||
|| \\ || || \\|| ||
|| \\\ ||\\\\\\\\\ || \|| ||\\\\\\\\\
==========================================================
"""
_version='v1.1.0'
_help="""
usage: rene [-glob] [ -f | -d | -a ] [[-base] <basedir>] [-pat] <pattern> [-templt] <template>
[-max <number>] [-r] [ -bf | -df ] [-p]
[-h] [-v]
-glob - match files using 'glob' instead of 'regex'
Note: You sould use glob-pattern (not regex-pattern) if this option is enabled.
Group replacement is not supported in glob, you have to use 'attributes'
-f - files only (default)
-d - directory only
-a - any
<basedir> - base directory for searching files (current directory is default)
<pattern> - regex or glob pattern for searching file (use '/' as path seperator)
<template> - template string for renaming matched files
Note: you can also use -base, -pat and -templt to specify the base directory, pattern and template.
This has use only in the case where the matching pattern is same as any of arguments.
-max - maximum number of files to be renamed (-1 is default)
-r - enables recursive-search-mode. This is used when you want to match files in subdirectories also
Note: use [^/] instead of . (matches path-seperator '/' also) in regex to match only names if recursive-search-mode is enabled
-bf - search files in breadth-first manner
-df - search files in depth-first manner
Note: The above two works only when recursive-search-mode is enabled and it is only for regex.
Using -r, -bf, -df has no effect in glob (always do recursive search)
-p - rename the file's path from base directory (only for recursive-search-mode).
-h shows this help
-v shows version of this script
-i enter into interactive mode
This is a open-source project, contribute to this project if you like.
For more details visit this project's github page, https://github.com/logesh0304/Rene
"""
class ListIncrementor:
    """A positional counter whose "digits" are drawn from the `base` list.

    `current` is a list of elements of `base`; incrementing it behaves like
    counting in base len(base), growing a new leading digit on overflow.

    Fixes:
    * `if stp:=q-1 != 0` — the walrus bound the *comparison result*
      (`stp = (q-1 != 0)`), so any remaining carry was applied as 1 instead
      of q-1. Now parenthesized as `(stp := q - 1) != 0`.
    * `current` aliased `initial`, so in-place increments corrupted the
      initial value and reset() could never restore it. Both now copy.
    """
    def __init__(self, base: List, initial: Optional[List] = None, step: int = 1):
        if not base:
            raise ValueError('base list cannot be empty')
        self.base = base
        self.step = step
        if step < 0:
            raise ValueError(f"'step'({step}) cannot be neagative")
        self.initial = [base[0]] if initial is None else initial
        # work on a copy so reset() can restore the untouched initial value
        self.current = self.initial.copy()
        self.first_el, self.last_el = base[0], base[len(base) - 1]

    def incr_by_conf(self, lst: List, step=None, idx=None):
        """Increment digit `idx` of `lst` (last digit by default) by `step`,
        carrying leftwards in place."""
        if step is None:  # if step is none, use the default step
            step = self.step
        elif step < 0:
            raise ValueError(f"'step'({step}) cannot be neagative")
        if idx is None:  # if idx is none, use the last index
            idx = len(lst) - 1
        # if the incremented index stays inside base, just replace the digit
        if (inc_idx := (self.base.index(lst[idx]) + step)) < len(self.base):
            lst[idx] = self.base[inc_idx]
        else:
            # quotient carries into the digit on the left, remainder stays here
            q, r = divmod(inc_idx, len(self.base))
            lst[idx] = self.base[r]
            if idx > 0:
                self.incr_by_conf(lst, q, idx - 1)
            else:
                # no digit on the left: grow the number by one digit
                lst.insert(0, self.base[0])
                # apply the remaining carry (q-1) to the new leading digit
                if (stp := q - 1) != 0:
                    self.incr_by_conf(lst, stp, 0)

    def incr(self, step=None):
        """Return the current value, then advance it by `step` (default self.step)."""
        to_return = self.current.copy()
        self.incr_by_conf(self.current, step)
        return to_return

    def reset(self):
        """Restore the counter to its initial value."""
        self.current = self.initial.copy()
class Incrementor:
    """Facade over the three auto-numbering styles usable in rename templates.

    Constructed with a type keyword and a raw argument string parsed from the
    template placeholder; delegates incr() to the concrete inner class.
    Invalid arguments are reported through the module-level show_error(),
    which exits the program.
    """
    NUM='num'; ALPHA='alpha'; ALNUM='alnum'
    # args :
    #   num   - initial, width=0, step
    #   alpha - initial, [case > up, lw, ul], step
    #   alnum - initial, intWidth=None, case, step, intMaxCount
    def __init__(self, incrType, arg_str):
        args, kwargs = Incrementor.__parse_args(arg_str)
        try :
            if incrType == Incrementor.NUM :
                self.incr_obj = Incrementor.NumIncrementor(*args, **kwargs)
            elif incrType == Incrementor.ALPHA :
                self.incr_obj = Incrementor.AlphaIncrementor(*args, **kwargs)
            elif incrType == Incrementor.ALNUM :
                self.incr_obj = Incrementor.AlnumIncrementor(*args, **kwargs)
            else :
                raise ValueError(f'There is no incrementor type like \'{incrType}\'')
        except TypeError as te:
            # wrong number/names of args to the concrete incrementor
            show_error(f'Invalid arguments passed to {incrType.capitalize()}Incrementor')
    def incr(self):
        # return the current value and advance the underlying incrementor
        return self.incr_obj.incr()
    @staticmethod
    # Parse args for incrementors and return args and kwargs as a list and dict.
    # Positional and keyword args may be mixed, but positional args are taken first.
    def __parse_args(arg_str: str):
        args=[]
        kwargs={}
        if arg_str :
            arg_list=re.split(r'\s+', arg_str)
            for arg in arg_list :
                if arg: # only if arg is not empty
                    if (idx:=arg.find('='))!=-1 :
                        kwargs[arg[:idx]] = arg[idx+1:]
                    else:
                        args.append(arg)
        return args, kwargs
    class NumIncrementor:
        """Numeric counter, optionally zero-padded to a minimum width."""
        # args can be int or string representation of int
        def __init__ (self, init=0, step=1, width=None):
            try :
                self.current = int(init)
                # width is calculated using init (i.e. '0001' implies width 4)
                # when not given explicitly; non-string init implies width 0
                self.min_width= int(width) if width is not None else len(init) if type(init) is str else 0
                self.incr_step=int(step)
            except ValueError:
                show_error('Invalid arguments to NumIncrementor')
            if self.min_width<0 :
                show_error('Width cannot be negative')
        def incr (self, step=None) :
            # zfill (not rjust) keeps a minus sign in front of the padding
            to_return = str(self.current).zfill(self.min_width)
            incr_step = self.incr_step if step is None else step
            self.current += incr_step
            return to_return
    class AlphaIncrementor:
        """Alphabetic counter (A, B, ... Z, AA, ...) built on ListIncrementor."""
        alpha_upper = [ chr(i) for i in range(65,91) ]   # 'A'..'Z'
        alpha_lower = [ chr(i) for i in range(97, 123) ] # 'a'..'z'
        alpha_all = alpha_upper + alpha_lower
        def __init__ (self, init: str='A', step=1, case: Optional[str]=None) :
            # if case is None, the case of `init` decides the alphabet
            if case == None :
                if init.isupper() :
                    alpha = Incrementor.AlphaIncrementor.alpha_upper
                elif init.islower() :
                    alpha = Incrementor.AlphaIncrementor.alpha_lower
                else :
                    alpha = Incrementor.AlphaIncrementor.alpha_all
            # if case is specified, `init` is normalized to that case
            elif case == 'up' :
                alpha = Incrementor.AlphaIncrementor.alpha_upper
                init = init.upper()
            elif case == "lw" :
                alpha = Incrementor.AlphaIncrementor.alpha_lower
                init = init.lower()
            elif case == 'ul' :
                alpha = Incrementor.AlphaIncrementor.alpha_all
            else :
                show_error(f'\'{case}\' is not an keyword for case')
            if not init.isalpha():
                show_error(f'{init} is not alphabetic')
            try:
                self.iter=ListIncrementor(alpha ,list(init), int(step))
            except ValueError as ve:
                # int(step) failure vs. ListIncrementor's own validation
                if str(ve).startswith('invalid literal'):
                    show_error('Invalid arguments passed to AlphaIncrementor')
                else:
                    show_error(ve)
            # alias of the underlying digit list; joined on each incr()
            self.current=self.iter.current
        def incr(self, step=None) :
            return ''.join(self.iter.incr(step))
    class AlnumIncrementor:
        """Alphanumeric counter like 'A0', 'A1', ... rolling the alpha part
        over when the integer part reaches int_max_count."""
        def __init__ (self, init='A0', step=1, case=None, intWidth=None, intMaxCount=None):
            try:
                self.incr_step = int(step)
                if self.incr_step < 0 :
                    show_error(f"'step'({step}) cannot be negative")
                # seperate alphabet and integer part (split at letter->digit boundary)
                temp_ = re.split(r'(?<=[A-Za-z])(?=\d)', init)
                if len(temp_)!=2:
                    show_error(f'{init} is not a valid initial value for AlnumIncrementor')
                # current uses AlphaIncrementor for the alphabet part
                self.current = [Incrementor.AlphaIncrementor(temp_[0], case=case),
                    int(temp_[1])]
                # intWidth is calculated using init, if it is not given
                # (i.e. '0001' implies width 4)
                self.int_min_width = len(temp_[1]) if intWidth is None else int(intWidth)
                # if max count is not given, it is 10**width (one past the
                # largest value representable in int_min_width digits)
                self.int_max_count = int('1'+('0'*self.int_min_width)) if not intMaxCount else int(intMaxCount)
            except ValueError :
                show_error("Invalid arguments passed to AlnumIncrementor")
            if self.int_min_width<0 :
                show_error('Width cannot be negative')
        def incr(self, step=None):
            to_return = ''.join(self.current[0].current)+str(self.current[1]).rjust(self.int_min_width, '0')
            incr_step = self.incr_step if step is None else step
            # increment the integer part; when it would reach the max count,
            # carry the quotient into the alpha part
            if (n_val := self.current[1]+incr_step) < self.int_max_count-1 :
                self.current[1]=n_val
            else :
                q,r = divmod(n_val, self.int_max_count)
                self.current[0].incr(q)
                self.current[1] = r
            return to_return
# attributes common for all files (filled in elsewhere once the base
# directory is known — empty placeholders here)
static_attribs={
    'base' :'',
    'base_parent' :''
}
# per-placeholder Incrementor instances used by sub_attrib(), keyed by the
# placeholder's position in the template so each keeps independent state
incrs: Dict[int, Incrementor]={}
def sub_attrib(templt: str, attribs: Optional[Dict[str, str]] = None):
    """Substitute every `<:attrib [args]:>` placeholder in `templt`.

    Placeholders naming an incrementor type (num/alpha/alnum) are replaced
    with the next value of a per-position Incrementor cached in the
    module-level `incrs` dict; any other name is looked up first in
    `attribs`, then in `static_attribs`. Unknown names abort via
    show_error().

    Fix: the mutable default argument `attribs={}` is replaced with the
    None-sentinel idiom (same behavior for all callers).
    """
    if attribs is None:
        attribs = {}
    final_str = ''
    last_pos = 0  # end position of the previous match
    # iterate over the group(1) contents of the founded matches
    for i, match in enumerate(re.finditer('<:(.+?):>', templt)):
        group = match.group(1)
        # split "name arg-string" at the first space (arg defaults to '')
        attrib, _, arg = group.partition(' ')
        attrib_val = ''
        # incrementor placeholders keep per-position state across calls
        if attrib in (Incrementor.NUM, Incrementor.ALPHA, Incrementor.ALNUM):
            if i not in incrs:
                incrs[i] = Incrementor(attrib, arg)
            attrib_val = incrs[i].incr()
        else:
            try:
                attrib_val = attribs[attrib] if attrib in attribs else static_attribs[attrib]
            except KeyError:
                show_error(f'There is no attribute like "{attrib}", please use the correct attribute')
        # replace the placeholder with its value
        final_str += templt[last_pos:match.start()] + attrib_val
        last_pos = match.end()
    # append the unmatched remaining text to final_str
    if last_pos != len(templt):
        final_str += templt[last_pos:]
    return final_str
def show_error(err, header='Error', exit_=True, confirm_before_exit=False, confirm_msg='Would you like to continue ?', exit_msg='', inverse_yn=False):
    """Print `err` (prefixed with `header`) to stderr, then optionally exit.

    When `exit_` is True: exits immediately unless `confirm_before_exit`,
    in which case the user is asked a y/n question — one answer continues,
    the other exits with `exit_msg`. `inverse_yn` swaps which answer means
    "continue".

    Fix: `inverse_yn` previously had no effect — the swapped
    (positive, negative) pair was computed but the loop compared the answer
    against the literals 'y'/'n'. The loop now uses the computed pair.
    """
    if err:
        print(header + ': ' if header else '', err, sep='', file=sys.stderr)
    if not exit_:
        return
    if not confirm_before_exit:
        sys.exit(exit_msg)
    positive, negative = ('y', 'n') if not inverse_yn else ('n', 'y')
    # ask the question until the user gives one of the two accepted answers
    while True:
        answer = input(confirm_msg + ' (y/n) :').lower()
        if answer == positive:
            break
        elif answer == negative:
            sys.exit(exit_msg)
def rename(name_map, sort=False):
    """Preview and perform the renames described by `name_map` (old Path ->
    new Path), after interactive confirmation.

    If `sort` is True the paths are renamed deepest-first so that renaming a
    directory does not invalidate the recorded paths of its children.
    Returns the number of successful renames, or None when `name_map` is
    empty.

    Fix: the depth sort previously counted '\\\\' in str(path), which only
    works on Windows; it now uses len(Path.parts), which is
    platform-independent.
    """
    if not name_map:
        print('No files matched the pattern !!')
        return None
    n = 0
    print("Preview:")
    for item, new_name in name_map.items():
        print('\t' + str(item.relative_to(base_path)) + '\t->\t' + str(new_name.relative_to(base_path)))
    show_error('', confirm_before_exit=True, confirm_msg='Confirm to rename', exit_msg='Rename cancelled !!')
    if sort:
        # deepest paths first (more components = deeper), so files inside
        # subdirectories are renamed before the directories themselves
        name_list = list(name_map.items())
        name_list.sort(key=lambda x: len(x[0].parts), reverse=True)
        name_map = dict(name_list)
    for item, new_name in name_map.items():
        try:
            item.rename(new_name)
            n += 1  # count only successful renames
        except FileExistsError as fee:
            show_error(f'File name already exixts, cannot rename : {fee.filename} -> {fee.filename2}',
                header="Warning",
                confirm_before_exit=True,
                confirm_msg='would you like to skip this file ?'
            )
        except OSError as oe:
            show_error(oe)
    return n
# returns the new-name of the given file based on the given template
def get_newname(path: Path, templt, rename_path=False):
    """Build the new name for `path` by substituting file attributes into
    `templt` via sub_attrib().

    Relies on the module-global `base_path` (set while parsing arguments).
    When `rename_path` is True, the template must describe the whole path
    relative to the base directory; otherwise the original parent path is
    prefixed to the substituted name.
    """
    attribs={}
    attribs['name']=path.stem
    attribs['full_name']=path.name
    # directories get an empty extension
    attribs['ext']=path.suffix[1:] if path.is_file() else ''
    attribs['parent']=path.parent.name
    # path of file(parent) relative to base-path; '' when directly under base
    attribs['path']='' if (_:=str(path.parent.relative_to(base_path).as_posix())) == '.' else _+'/'
    attribs['abs_path']=str(path.parent.resolve().as_posix())+'/'
    new_name=sub_attrib(templt, attribs)
    # if rename_path is True, the parent path is not prepended to the new name
    # (the template itself specifies the new path of the file)
    return new_name if rename_path else attribs['path']+new_name
# search the files in the current (base) directory using a regex pattern
def search(pat, templt, filedir='f', max_files=-1):
    """Scan the immediate children of base_path with regex `pat`.

    Returns a dict mapping each matched Path to its new Path built from
    `templt` (regex group references in the result are expanded).
    `filedir` selects files ('f'), directories ('d') or both ('a');
    `max_files` caps the number of matches (-1 means unlimited).
    """
    name_map = {}  # old path -> new path
    matched = 0
    for entry in base_path.iterdir():
        if matched == max_files:
            continue
        # honor the file/dir/any selector
        wanted = (filedir == 'a'
                  or (filedir == 'f' and entry.is_file())
                  or (filedir == 'd' and entry.is_dir()))
        if not wanted:
            continue
        match = re.fullmatch(pat, entry.name)
        if match is not None:
            name_map[entry] = base_path.joinpath(match.expand(get_newname(entry, templt)))
            matched += 1
    return name_map
# search the files recursively (i.e. also in subdirectories) using a regex pattern
# rename_path specifies whether to rename only the name of the file (False, default)
# or the entire path of the file from the base directory (True)
def recr_search(pat, templt, filedir='f', max_files=-1, s_type='bf', rename_path=False):
    """Walk base_path recursively, matching each item's POSIX-style relative
    path against regex `pat`; return a dict of old Path -> new Path.

    `s_type` 'bf' walks top-down, 'df' bottom-up; any other value raises
    ValueError. `max_files` caps the number of matches (-1 = unlimited).
    """
    name_map={}
    matched = 0
    if s_type not in ['bf', 'df'] :
        raise ValueError(f"{s_type} is not 'bf' or 'df'")
    for dir_path, dirs, files in os.walk(base_path, topdown=True if s_type=='bf' else False) :
        # choose which names to consider based on the file/dir/any selector
        p_list=files if filedir=='f' else (dirs if filedir=='d' else files+dirs)
        for item in p_list :
            path=Path(os.path.join(dir_path, item))
            # posix-path is used (/ instead of \) because \ is reserved for
            # regex replacement patterns in the template
            posixpath=path.relative_to(base_path).as_posix()
            if matched!=max_files and (match:=re.fullmatch(pat, posixpath)) != None:
                name_map[path]=base_path.joinpath(match.expand(get_newname(path, templt, rename_path)))
                matched+=1
    return name_map
# search the files using a glob pattern
def glob_search(pat, templt, filedir='f', max_files=-1, rename_path=False):
    """Match entries under the base directory with glob pattern *pat*
    (recursive '**' supported) and map each old path to its new path.

    Returns a dict {old_path: new_path}.
    """
    name_map = {}
    matched = 0
    # BUG FIX: the pattern was joined with a hard-coded '\\', which only
    # works on Windows; os.path.join uses the correct separator everywhere.
    for entry in glob.iglob(os.path.join(str(base_path), pat), recursive=True):
        if matched == max_files:  # limit reached
            break
        path = Path(entry)
        # check whether this entry kind (file/dir/any) should be renamed
        if (filedir == 'a') or (filedir == 'f' and path.is_file()) or (filedir == 'd' and path.is_dir()):
            name_map[path] = base_path.joinpath(get_newname(path, templt, rename_path))
            matched += 1
    return name_map
# default values for command line arguments
arg_def_val={
    'base' : '',  # base directory ('' means the current directory)
    'filedir' : 'f',  # what to rename: [f]iles, [d]irectories or [a]ny
    'pat' : None,  # search pattern (regex or glob); required
    'templt' : None,  # replacement-name template; required
    'max' : -1,  # maximum number of files to rename (-1 = no limit)
    'glob' : False,  # True -> treat 'pat' as a glob pattern instead of a regex
    'recr' : False,  # True -> search subdirectories recursively
    'rs_type' : 'bf', # recursive search type : breadth-first or depth-first
    'rn_path' : False  # True -> template replaces the whole relative path, not just the name
    }
def parse_args():
    """Parse sys.argv into an argval dict (see arg_def_val for the keys).

    Exits with the help/version text for -h/-v, switches to interactive
    mode for -i, and aborts with a usage message on invalid input.
    """
    args = collections.deque(sys.argv[1:])
    argval = arg_def_val.copy()
    positional = []
    try:
        while args:
            arg = args.popleft()
            if arg == '-h': sys.exit(_help)
            elif arg == '-v': sys.exit(_version)
            elif arg == '-i': return interact()
            elif arg == '-glob': argval['glob'] = True
            elif arg == '-base': argval['base'] = args.popleft()
            elif arg in ('-f', '-d', '-a'): argval['filedir'] = arg[1:]
            elif arg == '-pat': argval['pat'] = args.popleft()
            elif arg == '-templt': argval['templt'] = args.popleft()
            elif arg == '-max': argval['max'] = int(args.popleft())
            elif arg == '-r': argval['recr'] = True
            elif arg in ('-bf', '-df'): argval['rs_type'] = arg[1:]
            elif arg == '-p': argval['rn_path'] = True
            else:  # positional arguments (collected in reverse order)
                positional.insert(0, arg)
    # BUG FIX: 'except IndexError or ValueError' evaluated to 'except
    # IndexError' only, so a non-integer '-max' crashed with a traceback;
    # a tuple catches both.
    except (IndexError, ValueError):
        sys.exit('Given arguments are invalid !!\n' + _help)
    # pat and templt have priority over base when filling from positionals
    for val in positional:
        if not argval['templt']: argval['templt'] = val
        elif not argval['pat']: argval['pat'] = val
        elif not argval['base']: argval['base'] = val
        else:
            sys.exit('Given arguments are invalid !!\n' + _help)
    if not (argval['pat'] and argval['templt']):
        sys.exit('Given arguments are invalid !!\n' + _help)
    return argval
def interact():
    """Interactive mode: prompt for every option on stdin and return the
    filled argval dict (same shape as arg_def_val / parse_args)."""
    print(_head)
    print('Rene - Interactive Mode')
    argval=arg_def_val.copy()
    # help, exit
    res=input("press Enter to continue or type 'help' to display help and 'quit' to exit\n")
    if res=='help' :
        print(_help)
        return interact()
    elif res=='quit' :
        sys.exit()
    print('Note: Enter nothing for default values\n')
    # base
    if base:=input('> Base-directory (current directory is default) :') : argval['base']=base
    # otherwise the default value from arg_def_val is kept
    # is_glob
    if input('> [r]egex (default) or [g]lob (r/g) :') == 'g' : argval['glob']=True
    # pat (required -- re-prompt until non-empty)
    while not (pat:=input('> Pattern :')):
        print('This cannot be empty !!')
    argval['pat']=pat
    # templt (required -- re-prompt until non-empty)
    while not (templt:=input('> Template :')):
        print('This cannot be empty !!')
    argval['templt']=templt
    # file-dir
    if (tmp_fd:=input('Rename only,\n\t1. [f]iles (default)\n\t2. [d]irectory\n\t3. [a]ny\n> Enter (f/d/a) :')) in ('f','d','a') :
        argval['filedir']=tmp_fd
    # max (re-prompt until a valid integer or empty for the default)
    while True :
        try:
            if max_:=input('> Maximum files (-1 (default) means no limit) :') :
                argval['max']=int(max_)
            break
        except ValueError:
            print('Value should be an integer !!')
    # recr
    if input('> Enable recursive-search-mode (y/n default) :') == 'y' : argval['recr']=True
    # s_type
    if input('> Recursive search type,\n\t[b]readth-first (default) or [d]epth-first (b/d) :') == 'd' : argval['rs_type']='df'
    # from_base
    if input('> Rename path of file (y/n default) :') == 'y' : argval['rn_path']=True
    print() # prints empty line
    return argval
# base directory for all searches; reassigned in main() from the parsed arguments
base_path=Path('')
def main():
    """Program entry point: collect arguments, run the selected search
    mode (glob / recursive regex / plain regex) and rename the matches."""
    argval = parse_args() if len(sys.argv) > 1 else interact()
    global base_path
    base_path = Path(argval['base'])
    # warn when the user targets the folder this program itself lives in
    if base_path.absolute() == Path(__file__).parent :
        show_error('You are trying to rename the files in the folder where this program is running',
                    header='Warning',
                    confirm_before_exit=True,
                    confirm_msg='Are you sure about renaming the files ?',
                )
    # assigning static attributes
    static_attribs['base'] = argval['base']
    static_attribs['base_parent'] = base_path.parent.name
    try:
        if argval['glob'] :
            name_map = glob_search(argval['pat'], argval['templt'], argval['filedir'], argval['max'], argval['rn_path'])
            n = rename(name_map, True)
        elif argval['recr'] :
            # flattened the nested if/else into elif for readability
            name_map = recr_search(argval['pat'], argval['templt'], argval['filedir'], argval['max'], argval['rs_type'], argval['rn_path'])
            n = rename(name_map, True)
        else :
            name_map = search(argval['pat'], argval['templt'], argval['filedir'], argval['max'])
            n = rename(name_map)
        if n is not None:  # idiom fix: 'is not None' instead of 'not n is None'
            print('Files matched:', len(name_map))
            print('Files renamed:', n)
    except FileNotFoundError as fnfe:
        show_error(fnfe)
    except re.error as rerr:
        show_error(rerr, header='PatternError')
    except Exception as e:
        # BUG FIX: the original did 'raise e' here, which made the
        # following sys.exit unreachable; report the failure and exit
        # gracefully as evidently intended.
        sys.exit('Sorry, an error occured !!\n' + repr(e))
    input('press Enter to exit...')
if __name__ == "__main__":
main() | 42.562278 | 174 | 0.571405 |
7956ec01da3f37774c5a7d2b876352d8ebdc6938 | 541 | py | Python | tweeter.py | asa1896/GameOfLifeBot | 2ae71c466e26d94cd2821c974befc865fc83bb63 | [
"Unlicense",
"MIT"
] | null | null | null | tweeter.py | asa1896/GameOfLifeBot | 2ae71c466e26d94cd2821c974befc865fc83bb63 | [
"Unlicense",
"MIT"
] | null | null | null | tweeter.py | asa1896/GameOfLifeBot | 2ae71c466e26d94cd2821c974befc865fc83bb63 | [
"Unlicense",
"MIT"
] | null | null | null | #Script for tweeting
import tweepy as tp
from os import environ
def Tweeter(img,msg):
    """Post tweet *msg* with attached image file *img*.

    Twitter API credentials are read from environment variables
    (never hard-coded in the source).
    """
    consumer_key = environ['API_KEY']
    consumer_secret_key = environ['API_SECRET_KEY']
    access_token = environ['ACCESS_TOKEN']
    access_token_secret = environ['ACCESS_TOKEN_SECRET']
    # OAuth 1.0a handshake: register the app keys, then the user's token
    auth = tp.OAuthHandler(consumer_key,consumer_secret_key)
    auth.set_access_token(access_token, access_token_secret)
    api = tp.API(auth)
    #composing tweet
    # upload the media first, then reference its id in the status update
    media = api.media_upload(img)
    api.update_status(status=msg, media_ids=[media.media_id]) | 30.055556 | 61 | 0.750462 |
7956ed06f3461e4da7bad25fccc9d4c1cebe7320 | 148 | py | Python | AtCoder/ABC/170-179/ABC177_C1.py | sireline/PyCode | 8578467710c3c1faa89499f5d732507f5d9a584c | [
"MIT"
] | null | null | null | AtCoder/ABC/170-179/ABC177_C1.py | sireline/PyCode | 8578467710c3c1faa89499f5d732507f5d9a584c | [
"MIT"
] | null | null | null | AtCoder/ABC/170-179/ABC177_C1.py | sireline/PyCode | 8578467710c3c1faa89499f5d732507f5d9a584c | [
"MIT"
] | null | null | null | from itertools import combinations
# N is read to consume the first input line; the list length is taken from A itself
N = int(input())
# the N integers of the sequence
A = [int(n) for n in input().split()]
# sum of products over all unordered pairs, modulo 1e9+7
# NOTE(review): O(N^2) in pairs; equivalent O(N) form is
# ((sum(A)**2 - sum(a*a for a in A)) // 2) % (10**9+7)
print(sum([i*j for i,j in combinations(A, 2)])%(10**9+7)) | 29.6 | 57 | 0.648649 |
7956ee23ba2977cd409c7b196d45bbe59ad18347 | 267 | py | Python | code.py | Meenakshi0907/serversideprocessing | 070b352cdcd06c2f3f40c96e7b781c8b8671ef3d | [
"BSD-3-Clause"
] | null | null | null | code.py | Meenakshi0907/serversideprocessing | 070b352cdcd06c2f3f40c96e7b781c8b8671ef3d | [
"BSD-3-Clause"
] | null | null | null | code.py | Meenakshi0907/serversideprocessing | 070b352cdcd06c2f3f40c96e7b781c8b8671ef3d | [
"BSD-3-Clause"
] | null | null | null | import { Component } from "@angular/core";
// Angular component that displays a single student's details.
// NOTE(review): templateUrl usually points to an .html file -- confirm
// './student.saveetha.in' is really the intended template path.
@Component({
selector:'student-detail',
templateUrl:'./student.saveetha.in'
})
export class StudentComponent{
// university register number of the student
registerNumber:string;
// student's display name
name:string;
constructor(){
// hard-coded demo data; a real app would receive these via @Input or a service
this.registerNumber="212221230057";
this.name="meenakshi"
}
}
| 17.8 | 42 | 0.7603 |
7956ee31f92cc3751d400bc4b5f077cae918d325 | 4,130 | py | Python | ctm_saas_client/models/role_header.py | tadinve/ctm_python_client | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | [
"BSD-3-Clause"
] | null | null | null | ctm_saas_client/models/role_header.py | tadinve/ctm_python_client | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | [
"BSD-3-Clause"
] | null | null | null | ctm_saas_client/models/role_header.py | tadinve/ctm_python_client | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_saas_client.configuration import Configuration
class RoleHeader(object):
    """Swagger model describing a role header (name + description).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # attribute name -> swagger type
    swagger_types = {
        'name': 'str',
        'description': 'str'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'name': 'name',
        'description': 'description'
    }
    def __init__(self, name=None, description=None, _configuration=None):  # noqa: E501
        """RoleHeader - a model defined in Swagger"""  # noqa: E501
        # fall back to the default client configuration when none is supplied
        self._configuration = Configuration() if _configuration is None else _configuration
        self._name = None
        self._description = None
        self.discriminator = None
        # route non-None constructor values through the property setters
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
    @property
    def name(self):
        """Role name."""
        return self._name
    @name.setter
    def name(self, name):
        """Set the role name."""
        self._name = name
    @property
    def description(self):
        """Role description."""
        return self._description
    @description.setter
    def description(self, description):
        """Set the role description."""
        self._description = description
    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # recursively serialize nested models, lists and dicts
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value
        result = {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(RoleHeader, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, RoleHeader) and self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not (isinstance(other, RoleHeader) and self.to_dict() == other.to_dict())
| 26.818182 | 87 | 0.562712 |
7956ee5a9ebf488ff8d03d9808a5ea29f67d56b7 | 2,507 | py | Python | Python_ABC/2-8ReadWriteFile/1plainText.py | Chandler-Song/Python_Awesome | a44b8b79de7b429a00ac5798e7ecdc26c79a09ed | [
"MIT"
] | null | null | null | Python_ABC/2-8ReadWriteFile/1plainText.py | Chandler-Song/Python_Awesome | a44b8b79de7b429a00ac5798e7ecdc26c79a09ed | [
"MIT"
] | null | null | null | Python_ABC/2-8ReadWriteFile/1plainText.py | Chandler-Song/Python_Awesome | a44b8b79de7b429a00ac5798e7ecdc26c79a09ed | [
"MIT"
] | null | null | null | # Jeanette = open('Jeanette.txt')
# quote = Jeanette.read()
# print(quote)
# print(Jeanette.closed)
# Jeanette.close()
# print(Jeanette.closed)
#
#
# with open('Jeanette.txt', mode='a+') as Jeanette:
# Jeanette.write("\nFrom Jeanette\n")
# Jeanette.seek(0)
# quote = Jeanette.read()
# print(quote)
# print(Jeanette.name)
# print(Jeanette.closed)
# with open('Jeanette.txt', mode='w') as Jeanette:
# Jeanette.write("\nLook what I did!\n")
# with open('Jeanette.txt') as Jeanette:
# print(Jeanette.read())
#
# with open('Jeanette.txt', mode='w+') as Jeanette:
# Jeanette.write("\nLook what I did!\n")
# Jeanette.seek(0)
# print(Jeanette.read())
# #
# with open('Jeanette.txt', mode='r+') as Jeanette:
# print(Jeanette.tell())
# print(Jeanette.read())
# print(Jeanette.tell())
# Jeanette.seek(10)
# Jeanette.write("mess!")
# print(Jeanette.tell())
# print(Jeanette.read())
# Jeanette.seek(0)
# print(Jeanette.read())
#
# overwrite Jeanette.txt with a multi-line quote (mode 'w' truncates first)
with open('Jeanette.txt',mode='w') as Jeanette:
    Jeanette.write('''I’ll call you, and we’ll light a fire, and drink some wine, and recognise each other in the place that is ours.
Don’t wait. Don’t tell the story later. Life is so short. This stretch of sea and sand, this walk on the beach before the tide covers everything we have done.
I love you.
The three most difficult words in the world.
But what else can I say?
''')
# #
# with open('Jeanette.txt') as Jeanette:
# for line in Jeanette:
# print(line, end='')
# print()
# with open('heine.txt') as heine:
# f_content = heine.read()
# print(f_content)
#
# read the whole file back as a list of lines (each line keeps its trailing '\n')
with open('Jeanette.txt') as Jeanette:
    f_content = Jeanette.readlines()
    print(f_content)
    print()
#
# with open('Jeanette.txt') as Jeanette:
# f_content = Jeanette.readline()
# print(f_content,end='')
# f_content = Jeanette.readline()
# print(f_content,end='')
# f_content = Jeanette.readline()
# print(f_content,end='')
# with open('Jeanette.txt') as Jeanette:
# f_content = Jeanette.read(100)
# print(f_content)
# f_content = Jeanette.read(100)
# print(f_content)
# f_content = Jeanette.read(100)
# print(f_content)
# #
#
# # big file
# with open('heine.txt') as heine:
#
# size_to_read = 100
# f_content = heine.read(size_to_read)
#
# while len(f_content)>0:
# print(f_content, end='')
# f_content = heine.read(size_to_read)
#
# read the file in fixed-size chunks; '*' marks each chunk boundary in the output
with open('Jeanette.txt') as Jeanette:
    size_to_read = 10
    f_content = Jeanette.read(size_to_read)
    while len(f_content)>0:
        print(f_content, end='*')
        f_content = Jeanette.read(size_to_read)
| 24.339806 | 158 | 0.680096 |
7956ee89157f619c8401a8c47b7596ece2482a29 | 7,057 | py | Python | docusign_esign/models/user.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 58 | 2017-10-18T23:06:57.000Z | 2021-04-15T23:14:58.000Z | docusign_esign/models/user.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 49 | 2017-10-27T05:54:09.000Z | 2021-04-29T22:06:17.000Z | docusign_esign/models/user.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 49 | 2017-09-16T07:23:41.000Z | 2021-05-07T20:21:20.000Z | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class User(object):
    """Swagger model for a DocuSign user record.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # attribute name -> swagger type
    swagger_types = {
        'cell_phone_number': 'str',
        'country_code': 'str',
        'credentials': 'list[Credential]',
        'display_name': 'str',
        'email': 'str',
        'external_claims': 'list[ExternalClaim]'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'cell_phone_number': 'cellPhoneNumber',
        'country_code': 'countryCode',
        'credentials': 'credentials',
        'display_name': 'displayName',
        'email': 'email',
        'external_claims': 'externalClaims'
    }
    def __init__(self, cell_phone_number=None, country_code=None, credentials=None, display_name=None, email=None, external_claims=None):  # noqa: E501
        """User - a model defined in Swagger"""  # noqa: E501
        self._cell_phone_number = None
        self._country_code = None
        self._credentials = None
        self._display_name = None
        self._email = None
        self._external_claims = None
        self.discriminator = None
        # route non-None constructor values through the property setters
        for attr, value in (('cell_phone_number', cell_phone_number),
                            ('country_code', country_code),
                            ('credentials', credentials),
                            ('display_name', display_name),
                            ('email', email),
                            ('external_claims', external_claims)):
            if value is not None:
                setattr(self, attr, value)
    @property
    def cell_phone_number(self):
        """The user's cell phone number."""
        return self._cell_phone_number
    @cell_phone_number.setter
    def cell_phone_number(self, cell_phone_number):
        """Set the user's cell phone number."""
        self._cell_phone_number = cell_phone_number
    @property
    def country_code(self):
        """The user's country code."""
        return self._country_code
    @country_code.setter
    def country_code(self, country_code):
        """Set the user's country code."""
        self._country_code = country_code
    @property
    def credentials(self):
        """The user's credentials (list[Credential])."""
        return self._credentials
    @credentials.setter
    def credentials(self, credentials):
        """Set the user's credentials."""
        self._credentials = credentials
    @property
    def display_name(self):
        """The user's display name."""
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """Set the user's display name."""
        self._display_name = display_name
    @property
    def email(self):
        """The user's email address."""
        return self._email
    @email.setter
    def email(self, email):
        """Set the user's email address."""
        self._email = email
    @property
    def external_claims(self):
        """The user's external claims (list[ExternalClaim])."""
        return self._external_claims
    @external_claims.setter
    def external_claims(self, external_claims):
        """Set the user's external claims."""
        self._external_claims = external_claims
    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # recursively serialize nested models, lists and dicts
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value
        result = {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(User, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, User) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.352713 | 151 | 0.578433 |
7956ef4d3fdd8361a6c5fcd394edd39f37127914 | 4,176 | py | Python | asyncdynamo/async_aws_sts.py | homm/asyncdynamo | 6c1097e727fdc3e87bc0429fae1e1e5547b2eb4a | [
"Apache-2.0"
] | null | null | null | asyncdynamo/async_aws_sts.py | homm/asyncdynamo | 6c1097e727fdc3e87bc0429fae1e1e5547b2eb4a | [
"Apache-2.0"
] | 2 | 2019-04-20T15:43:28.000Z | 2019-04-30T06:59:10.000Z | asyncdynamo/async_aws_sts.py | homm/asyncdynamo | 6c1097e727fdc3e87bc0429fae1e1e5547b2eb4a | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
#
# Copyright 2012 bit.ly
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Created by Dan Frank on 2012-01-25.
Copyright (c) 2012 bit.ly. All rights reserved.
"""
import xml.sax
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
import boto.handler
from boto.sts.connection import STSConnection
from boto.sts.credentials import Credentials
from boto.exception import BotoServerError
class InvalidClientTokenIdError(BotoServerError):
    """
    Error subclass to indicate that the client's token(s) is/are invalid
    (raised by AsyncAwsSts.get_object when STS answers with HTTP 403).
    """
    pass
class AsyncAwsSts(STSConnection):
    """
    Manages AWS STS session tokens; users of AsyncDynamoDB should not need
    to worry about what goes on in here.

    Usage: keep one instance around (re-instantiating is cheap) and call
    get_session_token whenever a fresh Credentials object is needed, e.g.
    after the current session token expires.
    """
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 converter=None):
        # NOTE(review): the is_secure argument is accepted but STSConnection
        # is always initialised with True -- confirm this is intentional.
        STSConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
                               True, port, proxy, proxy_port, proxy_user,
                               proxy_pass, debug, https_connection_factory,
                               region, path, converter)
        self.http_client = AsyncHTTPClient()
    def get_session_token(self):
        """
        Request a new Credentials object (with a session token) using this
        instance's AWS keys.
        """
        return self.get_object('GetSessionToken', {}, Credentials, verb='POST')
    async def get_object(self, action, params, cls, path="/", parent=None, verb="GET"):
        """
        Issue `action` against STS and parse the XML response body into an
        instance of `cls`. Tornado-level HTTP errors are converted into the
        matching boto errors (403 -> InvalidClientTokenIdError).
        """
        parent = parent or self
        response = await self.make_request(action, params, path, verb)
        if response.error and not isinstance(response.error, HTTPError):
            raise response.error
        # convert a tornado HTTP error into the corresponding boto error
        error = response.error
        if error:
            if error.code == 403:
                raise InvalidClientTokenIdError(error.code, error.message, response.body)
            raise BotoServerError(error.code, error.message, response.body)
        instance = cls(parent)
        handler = boto.handler.XmlHandler(instance, parent)
        xml.sax.parseString(response.body, handler)
        return instance
    async def make_request(self, action, params=None, path='/', verb='GET'):
        """
        Translate boto-style parameters into a tornado HTTPRequest, sign it,
        issue it asynchronously and return the (unraised) fetch result.
        """
        request = HTTPRequest('https://%s' % self.host, method=verb)
        request.params = params if params else {}
        request.auth_path = '/'   # required by the auth handler
        request.host = self.host  # required by the auth handler
        request.port = 443
        request.protocol = self.protocol
        if action:
            request.params['Action'] = action
        if self.APIVersion:
            request.params['Version'] = self.APIVersion
        self._auth_handler.add_auth(request)  # sign the request
        return await self.http_client.fetch(request, raise_error=False)
7956ef4dcf65af8fcd2479b2bedf0e00e362ebb0 | 3,388 | py | Python | Lib/hTools2/dialogs/glyphs/anchors_rename.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 11 | 2015-01-06T15:43:56.000Z | 2019-07-27T00:35:20.000Z | Lib/hTools2/dialogs/glyphs/anchors_rename.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 2 | 2017-05-17T10:11:46.000Z | 2018-11-21T21:43:43.000Z | Lib/hTools2/dialogs/glyphs/anchors_rename.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 4 | 2015-01-10T13:58:50.000Z | 2019-12-18T15:40:14.000Z | # [h] rename anchors in selected glyphs
from mojo.roboFont import CurrentFont
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.anchors import rename_anchor
from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.messages import no_glyph_selected, no_font_open
class renameAnchorsDialog(hDialog):
'''A dialog to rename the anchors in the selected glyphs of the current font.
.. image:: imgs/glyphs/anchors-rename.png
'''
column_1 = 33
column_2 = 70
def __init__(self):
self.title = 'anchors'
self.height = self.text_height*2 + self.padding_y*4 + self.button_height
self.w = HUDFloatingWindow((self.width, self.height), self.title)
x = self.padding_x
y = self.padding_y
# old name label
self.w._old_name_label = TextBox(
(x, y, self.column_1, self.text_height),
"old",
sizeStyle=self.size_style)
x += self.column_1
# old name
self.w._old_name_value = EditText(
(x, y, self.column_2, self.text_height),
placeholder='old name',
text='',
sizeStyle=self.size_style)
x = self.padding_x
y += self.text_height + self.padding_y
# new name label
self.w._new_name_label = TextBox(
(x, y, self.column_1, self.text_height),
"new",
sizeStyle=self.size_style)
x += self.column_1
# new name
self.w._new_name_value = EditText(
(x, y, self.column_2, self.text_height),
placeholder='new name',
text='',
sizeStyle=self.size_style)
# button
x = self.padding_x
y += self.text_height + self.padding_y
self.w.button_apply = SquareButton(
(x, y, -self.padding_x, self.button_height),
"rename",
callback=self.apply_callback,
sizeStyle=self.size_style)
# open window
self.w.open()
def apply_callback(self, sender):
f = CurrentFont()
if f is not None:
glyph_names = get_glyphs(f)
if len(glyph_names) > 0:
# get parameters
old = self.w._old_name_value.get()
new = self.w._new_name_value.get()
boolstring = (False, True)
# print info
print 'renaming anchors in glyphs...\n'
print '\told name: %s' % old
print '\tnew name: %s' % new
print
print '\t',
# change anchors names
for glyph_name in glyph_names:
print glyph_name,
# rename anchor
f[glyph_name].prepareUndo('rename anchor')
has_name = rename_anchor(f[glyph_name], old, new)
f[glyph_name].performUndo()
f[glyph_name].changed()
# done
f.changed()
print
print '\n...done.\n'
# no glyph selected
else:
print no_glyph_selected
# no font open
else:
print no_font_open
| 34.927835 | 81 | 0.519185 |
7956f08474fa1dcf8e2b3050c7c0c18be8fbf740 | 818 | py | Python | src/python/pants/backend/jvm/targets/annotation_processor.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2020-08-26T03:30:31.000Z | 2020-08-26T03:30:31.000Z | src/python/pants/backend/jvm/targets/annotation_processor.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2019-07-29T16:58:21.000Z | 2019-07-29T16:58:21.000Z | src/python/pants/backend/jvm/targets/annotation_processor.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
class AnnotationProcessor(ExportableJvmLibrary):
  """A Java library containing annotation processors.
  :API: public
  """
  def __init__(self, processors=None, *args, **kwargs):
    """
    :param processors: A list of the fully qualified class names of the
      annotation processors this library exports.
    """
    super().__init__(*args, **kwargs)
    self.processors = processors
| 34.083333 | 81 | 0.732274 |
7956f0ea374f8634bc17292482a7e17b88402d21 | 2,814 | py | Python | service-mgmt-tools/sm-tools/sm_tools/sm_action.py | starlingx-staging/stx-ha | 77d4e0c27c2144e192bb1cfc3fbc40509526cc39 | [
"Apache-2.0"
] | null | null | null | service-mgmt-tools/sm-tools/sm_tools/sm_action.py | starlingx-staging/stx-ha | 77d4e0c27c2144e192bb1cfc3fbc40509526cc39 | [
"Apache-2.0"
] | null | null | null | service-mgmt-tools/sm-tools/sm_tools/sm_action.py | starlingx-staging/stx-ha | 77d4e0c27c2144e192bb1cfc3fbc40509526cc39 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
import argparse
import sqlite3
from sm_api_msg_utils import restart_service as restart_service
from sm_api_msg_utils import restart_service_safe as restart_service_safe
from sm_api_msg_utils import database_running_name as database_name
def main():
    """Entry point for the sm-* service-management tools.

    The action (manage / unmanage / restart / restart-safe) is selected by
    the name this script was invoked as; the target service comes from the
    'service <name>' subcommand.
    """
    filename = os.path.basename(sys.argv[0])
    if "sm-manage" == filename:
        action = "manage"
    elif "sm-unmanage" == filename:
        action = "unmanage"
    elif "sm-restart-safe" == filename:
        action = "restart-safe"
    else:
        action = "restart"

    try:
        parser = argparse.ArgumentParser(description='SM Action ')
        subparsers = parser.add_subparsers(help='types')

        # Service
        service_parser = subparsers.add_parser('service', help='service action')
        service_parser.set_defaults(which='service')
        service_parser.add_argument('service', help='service name')

        args = parser.parse_args()

        if args.which == 'service':
            # verify the service exists in the running SM database
            database = sqlite3.connect(database_name)
            cursor = database.cursor()
            # SECURITY FIX: the service name comes from the command line, so
            # use a parameterized query instead of string interpolation.
            cursor.execute("SELECT * FROM SERVICES WHERE NAME = ?;",
                           (args.service,))
            row = cursor.fetchone()
            database.close()
            if row is None:
                print("Given service (%s) does not exist." % args.service)
                sys.exit()

            SM_VAR_RUN_SERVICES_DIR = '/var/run/sm/services'
            # presence of this marker file means the service is unmanaged
            unmanage_file = os.path.join(SM_VAR_RUN_SERVICES_DIR,
                                         "%s.unmanaged" % args.service)

            if 'manage' == action:
                # remove the unmanaged marker, if any
                if os.path.exists(SM_VAR_RUN_SERVICES_DIR):
                    if os.path.isfile(unmanage_file):
                        os.remove(unmanage_file)
                print("Service (%s) is now being managed." % args.service)

            elif 'unmanage' == action:
                # create the unmanaged marker, creating the directory if needed
                if not os.path.exists(SM_VAR_RUN_SERVICES_DIR):
                    os.makedirs(SM_VAR_RUN_SERVICES_DIR)
                if not os.path.isfile(unmanage_file):
                    open(unmanage_file, 'w').close()
                print("Service (%s) is no longer being managed." % args.service)

            elif 'restart-safe' == action:
                restart_service_safe(args.service)
                print("Service (%s) is restarting." % args.service)

            else:
                restart_service(args.service)
                print("Service (%s) is restarting." % args.service)

        sys.exit(0)

    except KeyboardInterrupt:
        sys.exit()

    except Exception as e:
        print(e)
        sys.exit(-1)
| 31.617978 | 80 | 0.598792 |
7956f240b6125c7e493e9505da5a96427d2a8a36 | 8,291 | py | Python | sktime/transformers/dictionary_based/SAX.py | ashishpatel26/sktime | 24a79695f63bec1f6abf9f517d4e6dc792c306e7 | [
"BSD-3-Clause"
] | 1 | 2020-07-16T08:36:50.000Z | 2020-07-16T08:36:50.000Z | sktime/transformers/dictionary_based/SAX.py | ClaudiaSanches/sktime | 63e7839e80ca6d5fe5fc4f33389ec3bcacd8aa59 | [
"BSD-3-Clause"
] | null | null | null | sktime/transformers/dictionary_based/SAX.py | ClaudiaSanches/sktime | 63e7839e80ca6d5fe5fc4f33389ec3bcacd8aa59 | [
"BSD-3-Clause"
] | 1 | 2020-10-08T20:55:55.000Z | 2020-10-08T20:55:55.000Z | import sys
import numpy as np
import pandas as pd
import sktime.transformers.shapelets as shapelets
from sktime.transformers.dictionary_based.PAA import PAA
from sktime.utils.load_data import load_from_tsfile_to_dataframe as load_ts
from sktime.transformers.base import BaseTransformer
# TO DO: verify this returned pandas is consistent with sktime definition. Timestamps?
# TO DO: remove the call to normalize in shapelets, which should move to utils
class SAX(BaseTransformer):
    __author__ = "Matthew Middlehurst"
    """ SAX (Symbolic Aggregate approXimation) Transformer, as described in
    Jessica Lin, Eamonn Keogh, Li Wei and Stefano Lonardi,
    "Experiencing SAX: a novel symbolic representation of time series"
    Data Mining and Knowledge Discovery, 15(2):107-144

    Overview: for each series:
        run a sliding window across the series
        for each window
            shorten the series with PAA (Piecewise Approximate Aggregation)
            discretise the shortened series into fixed bins
            form a word from these discrete values
    by default SAX produces a single word per series (window_size=0).
    SAX returns a pandas data frame where column 0 is the histogram (sparse pd.series)
    of each series.

    Parameters
    ----------
    word_length: int, length of word to shorten window to (using PAA) (default 8)
    alphabet_size: int, number of values to discretise each value to (default to 4)
    window_size: int, size of window for sliding. If 0, uses the whole series (default to 0)
    remove_repeat_words: boolean, whether to use numerosity reduction (default False)
    save_words: boolean, whether to keep the words generated for each series
        in ``self.words`` (default False)

    Attributes
    ----------
    words: list, the words generated for each series (only filled when
        save_words=True)
    breakpoints: list, gaussian breakpoints used to discretise the PAA values
    num_insts: int, number of instances seen by the last call to transform
    num_atts: int, series length seen by the last call to transform
    """
    def __init__(self,
                 word_length=8,
                 alphabet_size=4,
                 window_size=0,
                 remove_repeat_words=False,
                 save_words=False
                 ):
        self.word_length = word_length
        self.alphabet_size = alphabet_size
        self.window_size = window_size
        self.remove_repeat_words = remove_repeat_words
        self.save_words = save_words
        # The remaining attributes are populated by transform().
        self.words = []
        self.breakpoints = []
        self.num_insts = 0
        self.num_atts = 0

    def transform(self, X):
        """Transform each series into a bag-of-words histogram.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, num_atts]
            The training input samples. If a Pandas data frame is passed, the column 0 is extracted

        Returns
        -------
        dims: Pandas data frame with first dimension in column zero; each cell
            holds a pd.Series mapping packed-word integers to their counts.

        Raises
        ------
        RuntimeError
            If alphabet_size is outside [2, 4] or word_length outside [1, 16].
        TypeError
            If X is multivariate or not in a supported format.
        """
        if self.alphabet_size < 2 or self.alphabet_size > 4:
            raise RuntimeError("Alphabet size must be an integer between 2 and 4")
        if self.word_length < 1 or self.word_length > 16:
            raise RuntimeError("Word length must be an integer between 1 and 16")

        if isinstance(X, pd.DataFrame):
            if X.shape[1] > 1:
                raise TypeError("SAX cannot handle multivariate problems yet")
            elif isinstance(X.iloc[0,0], pd.Series):
                # Nested DataFrame of Series -> plain 2d numpy array.
                X = np.asarray([a.values for a in X.iloc[:,0]])
            else:
                raise TypeError("Input should either be a 2d numpy array, or a pandas dataframe with a single column of Series objects (TSF cannot yet handle multivariate problems")

        self.num_atts = X.shape[1]
        # NOTE(review): window_size == 0 means "use the whole series", but the
        # attribute is overwritten here, so a second transform() call on data
        # of a different length reuses the first length -- confirm intended.
        if self.window_size == 0:
            self.window_size = self.num_atts
        self.breakpoints = self.generate_breakpoints()
        self.num_insts = X.shape[0]

        bags = pd.DataFrame()
        dim = []

        for i in range(self.num_insts):
            bag = {}
            lastWord = None
            words = []

            num_windows_per_inst = self.num_atts - self.window_size + 1
            # Broadcasting trick: arange(window)[None, :] + arange(n_win)[:, None]
            # yields a (n_windows, window_size) index matrix, i.e. every
            # sliding window of series i extracted at once.
            split = np.array(X[i, np.arange(self.window_size)[None, :] + np.arange(num_windows_per_inst)[:, None]])
            split = shapelets.RandomShapeletTransform.zscore(split, axis=None)  # move to utils or create new method?

            # Shorten every window to word_length values with PAA ...
            paa = PAA(num_intervals=self.word_length)
            patterns = paa.fit_transform(split)
            patterns = np.asarray([a.values for a in patterns.iloc[:, 0]])

            # ... then discretise each shortened window into a word and count it.
            for n in range(patterns.shape[0]):
                pattern = patterns[n, :]
                word = self.create_word(pattern)
                words.append(word)
                lastWord = self.add_to_bag(bag, word, lastWord)

            if self.save_words:
                self.words.append(words)

            dim.append(pd.Series(bag))

        bags[0] = dim
        return bags

    def create_word(self, pattern):
        """Discretise one PAA pattern into a packed BitWord.

        Each value becomes the index of the first breakpoint it does not
        exceed; the last breakpoint is sys.float_info.max, so every value
        falls into exactly one bin.
        """
        word = BitWord()
        for i in range(self.word_length):
            for bp in range(self.alphabet_size):
                if pattern[i] <= self.breakpoints[bp]:
                    word.push(bp)
                    break
        return word

    def add_to_bag(self, bag, word, last_word):
        """Count ``word`` in ``bag`` and return the packed value counted.

        When remove_repeat_words is set (numerosity reduction), a word equal
        to the immediately preceding one is skipped and not counted.
        """
        if self.remove_repeat_words and word.word == last_word:
            return last_word
        if word.word in bag:
            bag[word.word] += 1
        else:
            bag[word.word] = 1
        return word.word

    def generate_breakpoints(self):
        # Pre-made gaussian curve breakpoints from UEA TSC codebase.
        # The breakpoints split the standard normal distribution into
        # alphabet_size equi-probable bins; the trailing sys.float_info.max
        # acts as a catch-all upper bound for create_word().
        return {
            2: [0, sys.float_info.max],
            3: [-0.43, 0.43, sys.float_info.max],
            4: [-0.67, 0, 0.67, sys.float_info.max],
            5: [-0.84, -0.25, 0.25, 0.84, sys.float_info.max],
            6: [-0.97, -0.43, 0, 0.43, 0.97, sys.float_info.max],
            7: [-1.07, -0.57, -0.18, 0.18, 0.57, 1.07, sys.float_info.max],
            8: [-1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15, sys.float_info.max],
            9: [-1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22, sys.float_info.max],
            10: [-1.28, -0.84, -0.52, -0.25, 0.0, 0.25, 0.52, 0.84, 1.28, sys.float_info.max]
        }[self.alphabet_size]
class BitWord:
    """Bit-packed word used by dictionary based classifiers such as BOSS and BOP.

    Each letter occupies two bits of a 32-bit integer, so the class supports
    an alphabet size of at most 4 and a word length of at most 16.  Current
    literature shows little reason to go beyond this, but the class will need
    changes/expansions if that is ever required.
    """

    def __init__(self, word=0, length=0):
        self.word = word              # packed letters, newest in the low bits
        self.length = length          # number of letters currently stored
        self.bits_per_letter = 2      # 2 bits per letter -> max alphabet size 4
        self.word_space = 32          # bits available -> max word length 16

    def push(self, letter):
        """Append ``letter`` (an int in [0, 3]) to the end of the word."""
        self.word <<= self.bits_per_letter
        self.word |= letter
        self.length += 1

    def shorten(self, amount):
        """Remove the last ``amount`` letters from the word."""
        self.word = self.right_shift(self.word, amount * self.bits_per_letter)
        self.length -= amount

    def word_list(self):
        """Return the letters of the word, oldest first, as a list of ints."""
        letters = []
        extract_shift = self.word_space - self.bits_per_letter
        offset = self.word_space - self.length * self.bits_per_letter
        while offset < self.word_space:
            letters.append(self.right_shift(self.word << offset, extract_shift))
            offset += self.bits_per_letter
        return letters

    @staticmethod
    def right_shift(left, right):
        """Shift ``left`` right by ``right`` bits as an unsigned 32-bit value."""
        return (left % 0x100000000) >> right
if __name__ == "__main__":
    # Ad-hoc correctness check, not an automated test: requires a local copy
    # of the Chinatown dataset in sktime .ts format.
    # NOTE(review): hard-coded path to a developer's Windows machine -- update
    # before running locally.
    testPath="C:\\Users\\ajb\\Dropbox\\Data\\TSCProblems\\Chinatown\\Chinatown_TRAIN.ts"
    train_x, train_y = load_ts(testPath)
    print("Correctness testing for SAX using Chinatown")
    # print("First case used for testing")
    # print(train_x.iloc[0,0])
    p = SAX(window_size=24, alphabet_size=2,word_length=4,save_words=False)
    print("Test 1: window_size =0, result should be single series for each")
    x2=p.transform(train_x)
    print("Correct single series SAX for case 1: = b,a,a,b,d,d,d,b")
    print("Transform mean case 1: =")
    # NOTE(review): the name `dict` shadows the builtin; rename if this
    # script ever grows beyond a manual check.
    dict=x2.iloc[0,0]
    print(dict)
    # for x in p.words:
    #     print(x)
| 37.179372 | 181 | 0.603908 |
7956f2ede37ba77f86d5d95e1f98a47d11197074 | 5,624 | py | Python | source/conf.py | timothijoe/DI-engine-docs | e8607933e0e7ea0056aa9c95ac27bd731333310e | [
"Apache-2.0"
] | null | null | null | source/conf.py | timothijoe/DI-engine-docs | e8607933e0e7ea0056aa9c95ac27bd731333310e | [
"Apache-2.0"
] | null | null | null | source/conf.py | timothijoe/DI-engine-docs | e8607933e0e7ea0056aa9c95ac27bd731333310e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
print(sys.path)
# Regenerate the documentation diagrams before the Sphinx build, echoing the
# make output so it appears in the build log.  `with` ensures the pipe opened
# by os.popen is closed even if printing fails.
with os.popen('make -f diagrams_source.mk') as f:
    for item in f.readlines():
        # BUG FIX: the original printed the pipe object `f` on every
        # iteration instead of the line `item` that was just read.
        print(item)
print('diagrams is OK')
# -- Project information -----------------------------------------------------
project = 'DI-engine'
copyright = '2021, OpenDILab Contributors'
author = 'OpenDILab Contributors'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'enum_tools.autoenum',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'pytorch_sphinx_theme'
# html_theme_path = ["pytorch_sphinx_theme"]
html_theme = 'sphinx_rtd_theme'
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DI-enginedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DI-engine.tex', 'DI-engine Documentation', 'bao', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'DI-engine', 'DI-engine Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc, 'DI-engine', 'DI-engine Documentation', author, 'DI-engine', 'One line description of project.',
'Miscellaneous'
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 31.418994 | 116 | 0.651316 |
7956f3309d2c3fc2df50d945a4a6f2f6471545f2 | 7,879 | py | Python | doc/source/conf.py | openstack/blazar-specs | 9794657234f7d46c7d3d826b92bc37ee54a2af97 | [
"Apache-2.0"
] | 2 | 2018-10-25T08:38:11.000Z | 2019-01-28T21:52:46.000Z | doc/source/conf.py | openstack/blazar-specs | 9794657234f7d46c7d3d826b92bc37ee54a2af97 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | openstack/blazar-specs | 9794657234f7d46c7d3d826b92bc37ee54a2af97 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'openstackdocstheme',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/blazar-specs'
openstackdocs_pdf_link = True
openstackdocs_auto_name = False
openstackdocs_bug_project = 'blazar'
openstackdocs_bug_tag = ''
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Blazar Specs'
copyright = '2013-present, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['images/source/README.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ["."]
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Blazar Specs'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Blazar-Specsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# openany: Skip blank pages in generated PDFs
# oneside: Use the same page layout for both even and odd pages
'extraclassoptions': 'openany,oneside',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'doc-blazar-specs.tex', 'Blazar Specs',
'OpenStack Foundation', 'manual', True),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Blazar-specs', 'Blazar Specs', ['OpenStack Foundation'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Blazar-specs', 'Blazar Specs', 'OpenStack Foundation',
'Blazar-specs', 'Design specifications for the Blazar project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.692946 | 80 | 0.707069 |
7956f39a1144f36eacb2ef46c210d42c6a484359 | 440 | py | Python | tracpro/polls/migrations/0005_auto_20150127_1329.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 5 | 2015-07-21T15:58:31.000Z | 2019-09-14T22:34:00.000Z | tracpro/polls/migrations/0005_auto_20150127_1329.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 197 | 2015-03-24T15:26:04.000Z | 2017-11-28T19:24:37.000Z | tracpro/polls/migrations/0005_auto_20150127_1329.py | rapidpro/tracpro | a68a782a7ff9bb0ccee85368132d8847c280fea3 | [
"BSD-3-Clause"
] | 10 | 2015-03-24T12:26:36.000Z | 2017-02-21T13:08:57.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0004_auto_20150126_1417'),
]
operations = [
migrations.AlterField(
model_name='issue',
name='flow_start_id',
field=models.IntegerField(null=True),
preserve_default=True,
),
]
| 20.952381 | 49 | 0.604545 |
7956f69d564be5ce306f9cf1850011acba49142d | 130 | py | Python | comment/urls.py | AutonomousCrazyshaking/music | 0180c0ae382860f1840fcdb31355df240321dfa3 | [
"MIT"
] | null | null | null | comment/urls.py | AutonomousCrazyshaking/music | 0180c0ae382860f1840fcdb31355df240321dfa3 | [
"MIT"
] | null | null | null | comment/urls.py | AutonomousCrazyshaking/music | 0180c0ae382860f1840fcdb31355df240321dfa3 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
urlpatterns = [
path('<int:id>.html', commentView, name='comment')
] | 18.571429 | 55 | 0.653846 |
7956f81d3a6aa57a33f59feaa5ed78620192b584 | 24 | py | Python | netplotz/__init__.py | bmswens/netplotz | f3e533ef6f66cf721462243155a325c20e78d1fa | [
"MIT"
] | null | null | null | netplotz/__init__.py | bmswens/netplotz | f3e533ef6f66cf721462243155a325c20e78d1fa | [
"MIT"
] | null | null | null | netplotz/__init__.py | bmswens/netplotz | f3e533ef6f66cf721462243155a325c20e78d1fa | [
"MIT"
] | null | null | null | from .netplotz import *
| 12 | 23 | 0.75 |
7956f832ed8df2f0fc787a3abc631e1d4cfc50cd | 2,317 | py | Python | examples/bspump-lookup.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | [
"BSD-3-Clause"
] | 1 | 2020-08-20T12:56:58.000Z | 2020-08-20T12:56:58.000Z | examples/bspump-lookup.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | [
"BSD-3-Clause"
] | null | null | null | examples/bspump-lookup.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import logging
import bspump
import bspump.common
import bspump.file
import bspump.trigger
###
L = logging.getLogger(__name__)
###
class SamplePipeline(bspump.Pipeline):
	"""Example pipeline: reads a CSV file and enriches each row via a lookup.

	The pipeline waits until the slave lookup has loaded (signalled by the
	``bspump.Lookup.changed!`` event) before triggering the CSV source, and
	halts the application once the file has been fully processed.
	"""

	def __init__(self, app, pipeline_id):
		super().__init__(app, pipeline_id)
		svc = app.get_service("bspump.PumpService")
		# Enrichment data comes from the slave lookup, which replicates the
		# master lookup over HTTP (see the __main__ block of this example).
		self.Lookup = svc.locate_lookup("MyDictionarySlaveLookup")
		self.Lookup.PubSub.subscribe("bspump.Lookup.changed!", self.lookup_updated)
		self.PubSub.subscribe("bspump.pipeline.cycle_end!", self.cycle_end)
		# Guard so the "go!" trigger is published at most once.
		self.RunCountdown = 1
		self.build(
			# The CSV source only starts once the "go!" event is published.
			bspump.file.FileCSVSource(app, self, config={
				'path': './data/sample.csv',
				'delimiter': ';',
				'post': 'noop'
			}).on(bspump.trigger.PubSubTrigger(app, "go!", self.PubSub)),
			LookupTransformator(app, self, self.Lookup),
			bspump.common.PPrintSink(app, self),
		)

	def lookup_updated(self, event_name):
		# We have a lookup, so we can start the pipeline (first event only).
		if self.RunCountdown == 1:
			self.RunCountdown -= 1
			self.PubSub.publish("go!")

	def cycle_end(self, event_name, pipeline):
		# The file is processed, halt the application.
		# NOTE(review): `app` here is the module-level global created in
		# __main__, not an attribute of this pipeline -- confirm intentional.
		svc = app.get_service("bspump.PumpService")
		svc.App.stop()
class MyDictionaryLookup(bspump.DictionaryLookup):
	"""Dictionary lookup backed by a local JSON file of country names."""

	async def load(self):
		# Called only when we are master (no master_url provided); slave
		# instances replicate their data from the master over HTTP instead.
		self.set(bspump.load_json_file('./data/country_names.json'))
		return True
class LookupTransformator(bspump.Processor):
	"""Processor that replaces each event's 'Country' value with the value
	obtained from the supplied lookup."""

	def __init__(self, app, pipeline, lookup, id=None, config=None):
		super().__init__(app=app, pipeline=pipeline, id=id, config=config)
		self.Lookup = lookup

	def process(self, context, event):
		# Replace the country code in place; presumably `get` returns None
		# when the code is not in the lookup -- confirm desired behaviour.
		event['Country'] = self.Lookup.get(event['Country'])
		return event
if __name__ == '__main__':
	'''
	Run with Web API enabled:
	/bspump-lookup.py -w 0.0.0.0:8083
	'''
	app = bspump.BSPumpApplication()
	svc = app.get_service("bspump.PumpService")

	# Construct lookups (in master/slave configuration).  The master loads
	# the JSON file directly; the slave replicates from the master via the
	# Web API, so the example must be started with -w (see docstring above).
	lkp = svc.add_lookup(MyDictionaryLookup(app, "MyDictionaryMasterLookup"))
	lkps = svc.add_lookup(MyDictionaryLookup(app, "MyDictionarySlaveLookup", config={
		'master_url': 'http://localhost:8083/',
		'master_lookup_id': 'MyDictionaryMasterLookup'
	}))

	# Construct and register Pipeline
	pl = SamplePipeline(app, 'SamplePipeline')
	svc.add_pipeline(pl)

	app.run()
| 24.648936 | 82 | 0.724644 |
7956f83728d1fbf29a7cc6b06d500a2506f16918 | 373 | py | Python | source_code/2-1-beautifulsoup-basic.py | VickyMin1994/easy-scraping-tutorial | 75b7ffc79da397afa95342022c29cd72520f155f | [
"MIT"
] | 708 | 2017-12-29T05:32:34.000Z | 2022-03-25T14:29:05.000Z | source_code/2-1-beautifulsoup-basic.py | VickyMin1994/easy-scraping-tutorial | 75b7ffc79da397afa95342022c29cd72520f155f | [
"MIT"
] | 6 | 2018-01-06T07:58:31.000Z | 2020-10-26T15:57:46.000Z | source_code/2-1-beautifulsoup-basic.py | VickyMin1994/easy-scraping-tutorial | 75b7ffc79da397afa95342022c29cd72520f155f | [
"MIT"
] | 609 | 2017-12-29T10:04:20.000Z | 2022-03-23T18:32:37.000Z | from bs4 import BeautifulSoup
from urllib.request import urlopen
# The page contains Chinese text, so decode the raw bytes as UTF-8 explicitly.
html = urlopen("https://mofanpy.com/static/scraping/basic-structure.html").read().decode('utf-8')

# Parse with the lxml backend and show the first heading and paragraph.
soup = BeautifulSoup(html, features='lxml')
print(soup.h1)
print('\n', soup.p)

# Collect the href attribute of every <a> tag on the page.
# (Renamed loop variable: `l` is an ambiguous name, PEP 8 / E741.)
all_href = soup.find_all('a')
all_href = [link['href'] for link in all_href]
print('\n', all_href)
| 23.3125 | 97 | 0.718499 |
7956f8d538372cef43087e9d3f1eb30551514f50 | 8,195 | py | Python | docs/conf.py | PetrDlouhy/django-like-lookup | 78767e66c270f55b6ee59f7bbf3b94af3a5251e0 | [
"MIT"
] | 2 | 2020-01-30T07:47:29.000Z | 2021-05-30T11:42:23.000Z | docs/conf.py | PetrDlouhy/django-like-lookup | 78767e66c270f55b6ee59f7bbf3b94af3a5251e0 | [
"MIT"
] | null | null | null | docs/conf.py | PetrDlouhy/django-like-lookup | 78767e66c270f55b6ee59f7bbf3b94af3a5251e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import like_lookup
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django like lookup'
copyright = u'2020, Petr Dlouhý'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = like_lookup.__version__
# The full version, including alpha/beta/rc tags.
release = like_lookup.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-like-lookupdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-like-lookup.tex', u'Django like lookup Documentation',
u'Petr Dlouhý', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-like-lookup', u'Django like lookup Documentation',
[u'Petr Dlouhý'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-like-lookup', u'Django like lookup Documentation',
u'Petr Dlouhý', 'django-like-lookup', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.137255 | 80 | 0.718243 |
7956fa9d149cd0cbacdf01d283e213282ea63faf | 4,048 | py | Python | papers/pretty.py | RCHG/papers | e12f4917fbcedb37c3765c81b600578e7ea1f53f | [
"MIT"
] | null | null | null | papers/pretty.py | RCHG/papers | e12f4917fbcedb37c3765c81b600578e7ea1f53f | [
"MIT"
] | null | null | null | papers/pretty.py | RCHG/papers | e12f4917fbcedb37c3765c81b600578e7ea1f53f | [
"MIT"
] | null | null | null | import papers.boxea as boxea
import csv
import pickle
# import pandas as pd
class bcol:
    """ANSI terminal escape codes used to colour and style boxed output."""
    # https://stackoverflow.com/a/287944/2192272
    HEAD = '\033[95m'   # magenta, used for headings
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    WARN = '\033[93m'   # yellow, used for warnings (missing/empty)
    FAIL = '\033[91m'   # red, used for errors (corrupted)
    ENDC = '\033[0m'    # reset — terminates any colour/style run
    BOLD = '\033[1m'
    ULINE = '\033[4m'
# Here we read a CSV on every lookup, which might be a bit slow.
# There may be better options: one solution would be to add an option
# that replaces the journal name with its abbreviation directly in the
# bibtex file, instead of doing the substitution on the fly at
# visualization time.
def read_journal_abbrv(journal,
                       fabbrev='/usr/local/share/papers/journalList_dots.p'):
    """Return the abbreviated name for *journal* from a pickled lookup table.

    :param journal: full journal name to look up.
    :param fabbrev: path to the pickled {name: abbreviation} dict; defaults
        to the system-wide table (new parameter, backward compatible).
    :returns: the abbreviation, or *journal* unchanged when none is known.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original passed an open() result straight to pickle.load,
    # leaking the handle).
    with open(fabbrev, "rb") as fh:
        abbrev = pickle.load(fh)
    # dict.get replaces the redundant `in abbrev.keys()` membership test.
    return abbrev.get(journal, journal)
def read_journal_abbrv_csv(journal,
                           fname='/usr/local/share/papers/journalList_dots.csv'):
    """Look up the abbreviation for *journal* by scanning a ';'-separated CSV.

    :param journal: full journal name to look up.
    :param fname: path to the CSV table of "name;abbreviation" rows; defaults
        to the system-wide table (new parameter, backward compatible).
    :returns: the abbreviation of the first matching row, or *journal*
        unchanged when no row matches.
    """
    with open(fname, mode='r') as infile:
        reader = csv.reader(infile, delimiter=';')
        for row in reader:
            # Guard against blank lines, which csv yields as empty rows
            # (the original crashed with IndexError on row[0]).
            if row and row[0] == journal:
                return row[1]
    return journal
def read_journal_abbrv_dic(journal,
                           fname='/usr/local/share/papers/journalList_dots.csv'):
    """Look up the abbreviation for *journal* by loading the whole CSV table.

    Unlike read_journal_abbrv_csv, this builds the full {name: abbreviation}
    dict before the lookup (later duplicate rows win, as in the original).

    :param journal: full journal name to look up.
    :param fname: path to the ';'-separated CSV table; defaults to the
        system-wide table (new parameter, backward compatible).
    :returns: the abbreviation, or *journal* unchanged when none is known.
    """
    with open(fname, mode='r') as infile:
        reader = csv.reader(infile, delimiter=';')
        # Skip blank/short rows, which made the original comprehension
        # crash with IndexError.
        abbrev = {row[0]: row[1] for row in reader if len(row) > 1}
    # dict.get replaces the redundant `in abbrev.keys()` membership test.
    return abbrev.get(journal, journal)
def boxed_status(lines, fstatus, bstatus, title):
    """Render *lines* as a Unicode box with colourised status strings.

    Mutates *lines* in place: the first entry is overwritten with the top
    rule, every other entry is padded and framed, and the bottom rule is
    appended. The boxed text is then colourised by substituting *fstatus*,
    *bstatus* and *title* with ANSI-wrapped copies.

    :param lines: list of row strings; lines[0] is sacrificed for the rule.
    :param fstatus: file-status text to colourise inside the box.
    :param bstatus: bibtex-status text to colourise inside the box.
    :param title: title text to embolden inside the box.
    """
    # Box geometry =================================================
    widest = max(len(row) for row in lines)
    padding = [widest - len(row) for row in lines]
    # Top rule (replaces the first row), framed body rows, bottom rule.
    lines[0] = '+' + '-' * widest + '--+'
    for idx in range(1, len(lines)):
        lines[idx] = '| ' + lines[idx] + ' ' * padding[idx] + ' |'
    lines.append(lines[0])
    boxed = boxea.ascii_to_box(u'\n'.join(lines))
    # Colourise the file status: warn on missing/empty, otherwise blue.
    if "missing" in fstatus or "empty" in fstatus:
        fcolour = bcol.WARN
    else:
        fcolour = bcol.BLUE
    boxed = boxed.replace(fstatus, fcolour + fstatus + bcol.ENDC)
    # Colourise the bibtex status: warn on empty, fail on corrupted.
    if "empty" in bstatus:
        bcolour = bcol.WARN
    elif "corrupted" in bstatus:
        bcolour = bcol.FAIL
    else:
        bcolour = bcol.BLUE
    boxed = boxed.replace(bstatus, bcolour + bstatus + bcol.ENDC)
    return boxed.replace(title, bcol.BOLD + title + bcol.ENDC)
def boxed_list(lines_out, cname, list_entries, total_entries):
    """Render a list of entry rows as a Unicode box with a counter header.

    Mutates *lines_out* in place (header inserted at the front, footer
    appended), replaces the '<xF>' end-of-row marker in each row with the
    padding needed to align the right border, then converts the ASCII frame
    to box-drawing characters and expands the <x..> colour placeholders.

    :param lines_out: row strings, each terminated by an '<xF>' marker.
    :param cname: collection/bibliography name shown in the header.
    :param list_entries: number of entries shown.
    :param total_entries: total number of entries in the collection.
    """
    # Placeholder run used both as a width spacer and a colour marker.
    strdel= '<xBo><xBl><xE><xG><xE>'
    strname= '[bib: '+cname+']'
    maxlines = max([len(a) for a in lines_out])
    lenlines = [len(a) for a in lines_out]
    # Counter such as "[3/10<placeholders>]" shown in the header rule.
    str_number = '['+str(list_entries)+'/'+str(total_entries)+strdel+']'
    len_number = len(str_number)
    # Replace each row's '<xF>' marker with right-padding plus the border.
    for iline, oline in enumerate(lines_out):
        newstring = (maxlines-lenlines[iline])*' '+' |'
        lines_out[iline] = lines_out[iline].replace('<xF>', newstring)
    # NOTE(review): duplicates the strdel literal — presumably should be
    # len(strdel); confirm before consolidating.
    delta = len('<xBo><xBl><xE><xG><xE>')
    # Header/footer rules sized so the frame lines up with the padded rows.
    header = '\n+---'+str_number+'---'+strname+(maxlines-4-len_number-len(strname)-3)*'-'+'+'
    footer = '+-'+strdel+(maxlines-2-delta)*'-'+'+\n'
    lines_out.insert(0,header)
    lines_out.append(footer)
    output = boxea.ascii_to_box(u"\n".join(lines_out))
    # Collapse the spacer-plus-dash into a single horizontal bar, then
    # expand the remaining colour placeholders into ANSI codes.
    output = output.replace(strdel+'-','─')
    output = output.replace('<xBo>',bcol.BOLD)
    output = output.replace('<xBl>',bcol.BLUE)
    output = output.replace('<xE>' ,bcol.ENDC)
    output = output.replace('<xG>' ,bcol.GREEN)
    return output
| 34.016807 | 93 | 0.599308 |
7956fcb49b5800f29c9dffeb44cc407ce0cb5971 | 2,172 | py | Python | tests/contrib/sqlalchemy/test_mysql.py | SzySteve/dd-trace-py | 90d1d5981c72ea312c21ac04e5be47521d0f0f2e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/sqlalchemy/test_mysql.py | SzySteve/dd-trace-py | 90d1d5981c72ea312c21ac04e5be47521d0f0f2e | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:56:55.000Z | 2020-12-22T16:56:55.000Z | tests/contrib/sqlalchemy/test_mysql.py | kenferrara/dd-trace-py | 12e52e0ab804061e72b0f76214f5e4bb475ae20f | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:54:02.000Z | 2020-12-22T16:54:02.000Z | from sqlalchemy.exc import ProgrammingError
import pytest
from .mixins import SQLAlchemyTestMixin
from ..config import MYSQL_CONFIG
from ... import TracerTestCase, assert_is_measured
class MysqlConnectorTestCase(SQLAlchemyTestMixin, TracerTestCase):
"""TestCase for mysql-connector engine"""
VENDOR = 'mysql'
SQL_DB = 'test'
SERVICE = 'mysql'
ENGINE_ARGS = {'url': 'mysql+mysqlconnector://%(user)s:%(password)s@%(host)s:%(port)s/%(database)s' % MYSQL_CONFIG}
def setUp(self):
super(MysqlConnectorTestCase, self).setUp()
def tearDown(self):
super(MysqlConnectorTestCase, self).tearDown()
def check_meta(self, span):
# check database connection tags
self.assertEqual(span.get_tag('out.host'), MYSQL_CONFIG['host'])
self.assertEqual(span.get_metric('out.port'), MYSQL_CONFIG['port'])
def test_engine_execute_errors(self):
# ensures that SQL errors are reported
with pytest.raises(ProgrammingError):
with self.connection() as conn:
conn.execute('SELECT * FROM a_wrong_table').fetchall()
traces = self.tracer.writer.pop_traces()
# trace composition
self.assertEqual(len(traces), 1)
self.assertEqual(len(traces[0]), 1)
span = traces[0][0]
# span fields
assert_is_measured(span)
self.assertEqual(span.name, '{}.query'.format(self.VENDOR))
self.assertEqual(span.service, self.SERVICE)
self.assertEqual(span.resource, 'SELECT * FROM a_wrong_table')
self.assertEqual(span.get_tag('sql.db'), self.SQL_DB)
self.assertIsNone(span.get_tag('sql.rows') or span.get_metric('sql.rows'))
self.check_meta(span)
self.assertEqual(span.span_type, 'sql')
self.assertTrue(span.duration > 0)
# check the error
self.assertEqual(span.error, 1)
self.assertEqual(span.get_tag('error.type'), 'mysql.connector.errors.ProgrammingError')
self.assertTrue("Table 'test.a_wrong_table' doesn't exist" in span.get_tag('error.msg'))
self.assertTrue("Table 'test.a_wrong_table' doesn't exist" in span.get_tag('error.stack'))
| 40.981132 | 119 | 0.674954 |
7956fcfa6d566bc3d7c32d33f331a4c040855668 | 2,010 | py | Python | main.py | karipov/gpa-calculator | fd7995e35124f0b128085a360176a31ca123621e | [
"MIT"
] | null | null | null | main.py | karipov/gpa-calculator | fd7995e35124f0b128085a360176a31ca123621e | [
"MIT"
] | null | null | null | main.py | karipov/gpa-calculator | fd7995e35124f0b128085a360176a31ca123621e | [
"MIT"
] | null | null | null | import telebot
import config
import values
import dbhandler
from textparser import check_entry, parse_entry
bot = telebot.TeleBot(config.KEY)
def calculate_gpa(subject_list):
grade_list = [x[1] for x in subject_list] # no need for subject names
total_gpa = 0 # a counter
for grade in grade_list:
total_gpa += values.GRADES[grade]
return total_gpa / len(grade_list) # average GPA
@bot.message_handler(commands=['start']) # IDEA: FORCE REPLY??
def send_start(message):
bot.send_message(message.chat.id, values.ENTER_REPLY, parse_mode='HTML')
@bot.message_handler(func=lambda m: '-' in m.text, content_types=['text'])
def record_results(message):
if not check_entry(message.text):
bot.send_message(message.chat.id, values.FORMAT_REPLY)
return # no need to add the message to db as it is incorrect
subject_list = parse_entry(message.text)
subject, grade = subject_list # unpack the list to insert into function
dbhandler.create_table(message)
dbhandler.write_table(subject, grade, message)
@bot.message_handler(commands=['list'])
def send_list(message):
data = dbhandler.pull_data(message)
if not data: # warning is sent if list is empty
bot.send_message(message.chat.id, values.EMPTY_REPLY)
return
# data = [['History', 'A'], ['Math', 'A'], ['English', 'A+']]
# pair = ['History', 'A']
string = "Here are your current entries:\n"
for pair in data:
string += ' - '.join(pair)
string += '\n'
bot.send_message(message.chat.id, string)
@bot.message_handler(commands=['done'])
def send_results(message):
data = dbhandler.pull_data(message)
if not data:
bot.send_message(message.chat.id, values.EMPTY_REPLY)
return
average_GPA = str(round(calculate_gpa(data), 2)) # GPA is 2 d.p.
bot.send_message(message.chat.id, values.GPA_REPLY.format(average_GPA))
dbhandler.delete_table(message) # so user can enter new classes next time
bot.polling()
| 30 | 77 | 0.694527 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.