id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1725224 | <filename>stable_world/commands/auth.py<gh_stars>0
import click
from stable_world.interact.setup_user import setup_user
from stable_world import utils, application
from stable_world.interact.setup_user import setup_bucket_token
@click.group()
def main():
    """Authentication command group (login / register / logout / whoami / token)."""
    pass
@main.command()
@application.email_option
@application.password_option
@application.token_option
@application.pass_app
def login(app):
    "only performs authentication step"
    # Delegates all prompting/validation to setup_user; login_only skips registration.
    setup_user(app, login_only=True)
    return
@main.command()
@application.email_option
@application.password_option
@application.token_option
@application.pass_app
def register(app):
    "only performs authentication step"
    # The original code assigned a redacted `<PASSWORD>` placeholder, which is
    # not valid Python.  setup_user handles credential prompting itself, so we
    # simply request the registration flow with password confirmation enabled.
    # TODO(review): confirm setup_user's expected confirm_password argument type.
    setup_user(app, login_only=False, confirm_password=True)
    return
@main.command()
@application.pass_app
def logout(app):
    "expire local token"
    # Clearing both token and email removes the credentials entry entirely.
    app.update_netrc(token=None, email=None)
    click.echo(
        '\n\n    '
        'Token removed from %s file.'
        '\n\n' % app.config_filename
    )
    return
@main.command()
@utils.login_optional
def whoami(app):
    "show who you are logged in as"
    # Asks the server rather than trusting the local netrc entry.
    email = app.client.whoami()
    click.echo('\n\n    Logged in as %s\n\n' % email)
    return
@main.command()
@application.email_option
@application.password_option
@utils.bucket_option(required=False)
@application.pass_app
def token(app, bucket):
    "Get your authentication token"
    # Will raise not found exception
    if bucket:
        # Validate the bucket exists before requesting a token scoped to it.
        app.client.bucket(bucket)
    token = setup_bucket_token(app, bucket)
    print(" token:", token)
| StarcoderdataPython |
93032 | #!/usr/bin/env python3
import flask
import os
from PIL import Image, ImageFilter
import hashlib
import FileMimetypes as mime
app = flask.Flask(__name__)
app.jinja_env.trim_blocks = True  # drop the newline after Jinja block tags
RELEASE_VERSION = '1.0.0'
app.config['APPLICATION_NAME'] = 'AutoGalleryIndex'
app.config['ROW_ITEMS_SHORT'] = 3   # gallery columns on mobile devices
app.config['ROW_ITEMS_LONG'] = 5    # gallery columns on desktop
app.config['EXCLUDE_HIDDEN_FILES'] = True  # hide dot-files from listings
app.config['MAX_LINE_CHARACTERS'] = 20  # max chars per display line of a filename
app.config['MAX_LINES_PER_ENTRY'] = 3   # max display lines per filename
app.config['THUMB_MAX_WIDTH'] = 178   # thumbnail bounding box, pixels
app.config['THUMB_MAX_HEIGHT'] = 100
app.config['THUMB_IMAGE_TYPE'] = '.jpg'  # extension for cached thumbnails
log_message = print  # simple logging hook; swap for a real logger if needed
class ThumbnailError(RuntimeError):
    """Raised when thumbnail generation fails for any reason."""
    pass
def get_cache_location_abs():
    """Absolute filesystem path of this application's thumbnail cache directory."""
    return os.path.join(app.config['CACHE_ABS'], app.config['APPLICATION_NAME'])
def get_cache_location_url():
    """Return the cache directory path relative to the web document root.

    Raises:
        ValueError: if the cache directory does not live inside DOCROOT
            (in which case it could never be served over HTTP).
    """
    abs_path = get_cache_location_abs()
    if app.config['DOCROOT'] not in abs_path:
        # Report the offending cache path first, then the docroot, matching
        # the message placeholders (the original passed them swapped).
        raise ValueError('The cache path (%s) must be inside the docroot (%s)' % (
            abs_path, app.config['DOCROOT']))
    # Strip the docroot prefix to obtain a URL-relative path.
    return abs_path.replace(app.config['DOCROOT'], '')
def is_mobile_request(request_headers):
    """Return True if the request's User-Agent identifies a mobile device.

    Tolerates a missing User-Agent header (the original raised TypeError on
    `tag in None` because dict.get returned None for header-less requests).
    """
    mobile_tags = ('Android', 'Windows Phone', 'iPod', 'iPhone')
    user_agent = request_headers.get('User-Agent') or ''
    return any(tag in user_agent for tag in mobile_tags)
def get_directory_contents(directory):
    """List entries of *directory*, honouring EXCLUDE_HIDDEN_FILES.

    Returns an empty list when the directory cannot be read.
    """
    try:
        entries = os.listdir(directory)
        if app.config['EXCLUDE_HIDDEN_FILES']:
            entries = [name for name in entries if not name.startswith('.')]
    except PermissionError:
        entries = []
    return entries
def grants_read_permission(path):
    """Only returns true if the specified file can be read"""
    # os.access already yields a bool; a missing path yields False.
    return os.access(path, os.R_OK)
def grants_write_permission(path):
    """Returns True if the specified path has the write bit set for the current user or group"""
    # os.access already yields a bool; a missing path yields False.
    return os.access(path, os.W_OK)
def get_cache_name_for_file(filepath):
    """Derive a stable cache filename from the file's path and mtime.

    Including mtime in the SHA-1 digest invalidates the cached thumbnail
    automatically whenever the source file changes.
    """
    mtime = os.path.getmtime(filepath)
    basename = hashlib.sha1(('%s%f' % (filepath, mtime)).encode()).hexdigest()
    return basename + app.config['THUMB_IMAGE_TYPE']
def create_thumbnail(input_path, output_path):
    """Create a thumbnail and save output in filesystem
    output_path must be relative to cache home. Raises ThumbnailError on exceptions
    and returns the path relative to webroot on success"""
    if os.path.isabs(output_path):
        raise OSError('Could not create thumbnail: Path (%s) is not a relative path' % (
            output_path,))
    if not os.path.isfile(input_path):
        raise ThumbnailError('The input path (%s) does not appear to be valid' % input_path)
    # Assume that this path has already been validated
    output_path_abs = os.path.join(get_cache_location_abs(), output_path)
    output_path_url = os.path.join(get_cache_location_url(), output_path)
    if not os.path.exists(output_path_abs):
        # Don't overwrite a pre-existing file
        try:
            thumb = Image.open(input_path)
            # Fit inside THUMB_MAX_WIDTH x THUMB_MAX_HEIGHT preserving aspect
            # ratio: pick whichever dimension is the binding constraint.
            aspect_ratio = thumb.size[0] / thumb.size[1]
            if app.config['THUMB_MAX_WIDTH'] / aspect_ratio > app.config['THUMB_MAX_HEIGHT']:
                height = app.config['THUMB_MAX_HEIGHT']
                width = int(app.config['THUMB_MAX_HEIGHT'] * aspect_ratio)
            else:
                width = app.config['THUMB_MAX_WIDTH']
                height = int(app.config['THUMB_MAX_WIDTH'] / aspect_ratio)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
            # Image.LANCZOS there) — confirm the pinned Pillow version.
            thumb = thumb.resize((width, height), Image.ANTIALIAS).filter(ImageFilter.DETAIL)
            thumb.save(output_path_abs)
        except Exception as e:
            # Lots of things could have gone wrong here. For now, just shove the information about
            # them into a single easily-caught exception
            raise ThumbnailError(repr(e))
    return output_path_url
@app.before_first_request
def test_cache_directory():
    """Returns True if the cache is able to be used; Otherwise False"""
    cache_dir_path = get_cache_location_abs()
    try:
        if not os.path.exists(cache_dir_path):
            os.makedirs(cache_dir_path)
        if not grants_write_permission(cache_dir_path):
            # Treat an unwritable cache the same as a failed mkdir.
            raise PermissionError
    except PermissionError as e:
        log_message('The current user (uid %d) does not have permission to write to %s' % (
            os.getuid(), cache_dir_path))
        return False
    return True
def reformat_filename(filename):
    """Break a long filename into at most MAX_LINES_PER_ENTRY display lines.

    HTML wraps automatically on '-' and ' ', so only '_'-joined chunks need
    manual splitting; over-long results have middle lines elided with '...'.
    """
    n_max = app.config['MAX_LINE_CHARACTERS']
    l_max = app.config['MAX_LINES_PER_ENTRY']
    if len(filename) < n_max:
        return filename
    # HTML will automatically split lines on '-' and ' '. Manually break up the filename at '_'
    result = ['']
    for s in filename.split('_'):
        # Start a new line when appending would overflow — unless the chunk
        # itself is too long to fit on any line anyway.
        if (len(result[-1]) + len(s) + 1) > n_max and len(s) < n_max:
            result.append('')
        result[-1] += ('_' if result[-1] else '') + s
    if len(result) > l_max:
        # Drop lines from the middle until it fits, then mark the elision.
        while len(result) > l_max:
            del result[l_max-2]
        result[-2] += '...'
    filename = ' '.join(result)
    return filename
@app.route('/<path:relpath>')
def gallery(relpath):
    """Render a gallery page for *relpath*, or serve the file directly.

    Directories render the Gallery.html template with per-item thumbnails;
    regular files are streamed as-is; missing paths yield a 404.
    """
    template_vars = {}
    # Strip trailing separators so os.path.dirname behaves as expected below.
    while relpath.endswith(os.path.sep):
        relpath = relpath[:-1]
    # from_root_relpath is the path from the apache webroot; relpath is only the path from the flask
    # script "root". from_root_relpath is only useful for transforming paths into absolute local paths
    from_root_relpath = os.path.join(flask.request.script_root, relpath).strip('/')
    template_vars['display_path'] = os.path.sep + os.path.join(from_root_relpath, '')
    abs_path = os.path.join(app.config['DOCROOT'], from_root_relpath)
    if not os.path.exists(abs_path):
        return flask.abort(404)
    if os.path.isfile(abs_path):
        # Serve regular files directly (split into directory + filename).
        return flask.send_from_directory(*os.path.split(abs_path))
    template_vars['dir_contents'] = []
    directory_items = get_directory_contents(abs_path)
    for item in directory_items:
        item_abs_path = os.path.join(abs_path, item)
        if not grants_read_permission(item_abs_path):
            continue  # skip unreadable entries entirely
        file_type = mime.get_type(item_abs_path)
        # Default thumbnail: the generic static icon for this mime type.
        thumb_path = flask.url_for('static', filename=os.path.join('icons', file_type))
        if file_type == mime.MIME_IMAGE_GENERIC:
            thumb_cached_location = get_cache_name_for_file(item_abs_path)
            try:
                thumb_path = create_thumbnail(item_abs_path, thumb_cached_location)
                file_type = mime.MIME_IMAGE_THUMBED
            except ThumbnailError as e:
                # Fall back to the generic icon if thumbnailing fails.
                log_message('Thumb error: %s' % str(e))
        item_relpath = os.path.join(relpath, item)
        item = reformat_filename(item)
        # Item = name shown on page
        # item_relpath = path to use for the link
        # File type = mime.MIME_XXX type
        # thumb_path = path to thumbnail for this item
        template_vars['dir_contents'].append((item, item_relpath, file_type, thumb_path))
    # Sort directories first: the '..' prefix sorts ahead of ordinary names.
    template_vars['dir_contents'].sort(key=lambda x: '..%s' % x[0].lower() if x[2] == mime.MIME_DIRECTORY else x[0].lower())
    # Insert a special entry for the "back" button if applicable
    if relpath:
        dir_icon_path = flask.url_for('static', filename=os.path.join('icons', mime.MIME_DIRECTORY))
        template_vars['dir_contents'].insert(0, ('Back',
                                                 os.path.dirname(relpath),
                                                 mime.MIME_DIRECTORY,
                                                 dir_icon_path))
    if is_mobile_request(flask.request.headers):
        template_vars['items_per_row'] = app.config['ROW_ITEMS_SHORT']
    else:
        template_vars['items_per_row'] = app.config['ROW_ITEMS_LONG']
    # Expose every mime.MIME_* constant to the template namespace.
    for attr, value in ((attr, getattr(mime, attr)) for attr in dir(mime) if attr.startswith('MIME_')):
        template_vars[attr] = value
    template_vars['release_version'] = RELEASE_VERSION
    return flask.render_template('Gallery.html', **template_vars)
@app.route('/')
def index():
    """Root URL: render the gallery for the docroot itself."""
    return gallery('')
if __name__ == '__main__':
    # For debugging only
    # NOTE(review): binds to all interfaces with debug=True — never expose
    # this configuration beyond a trusted network.
    app.config['DOCROOT'] = '/var/www/html/GalleryDemo'
    app.config['CACHE_ABS'] = app.config['DOCROOT'] + '/cache'
    app.run(host='0.0.0.0', port=9001, debug=True)
| StarcoderdataPython |
# See bazel/README.md for details on how this system works.
# Maps Envoy contrib extension name -> Bazel target providing its config library.
CONTRIB_EXTENSIONS = {
    #
    # HTTP filters
    #
    #
    # Network filters
    #
    "envoy.filters.network.kafka_broker": "//contrib/kafka/filters/network/source:kafka_broker_config_lib",
    "envoy.filters.network.kafka_mesh": "//contrib/kafka/filters/network/source/mesh:config_lib",
}
| StarcoderdataPython |
9663778 | <reponame>ahkarimi/MMTOD<gh_stars>0
from flask import Flask, request, jsonify, render_template, session
import os
import pickle
import datetime
import time
import pandas as pd
import numpy as np
import random
import logging
##__________________________________ GPT-3 code __________________________________________##
from colorama import Fore, Back, Style
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
import sys, os
import pprint
import numpy as np
import torch
from image_handler import Handler
# --- Module-level model and dialogue state -----------------------------------
img_handler_obj = Handler()  # fetches venue images for responses
# args = ArgsParser().parse()
# device = "cuda" if torch.cuda.is_available() else "cpu"
# n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
pp = pprint.PrettyPrinter(indent=4)
prev_beliefs = {}   # belief state from the previous turn
domain_queue = []   # domains in the order they were first mentioned
# sys.stdout.flush()
model_checkpoint = "./output/checkpoint-108420"
decoding = "DECODING METHOD HERE"  # NOTE(review): placeholder, never read below — confirm intent
## if decoding == 'nucleus':
##     TOP_P = float(sys.argv[3])
delay = 0.5
## multiwoz_db = MultiWozDB()
print('\nLoading Model', end="")
# Pick the tokenizer/model family matching the checkpoint name.
if 'openai' in model_checkpoint:
    tokenizer = OpenAIGPTTokenizer.from_pretrained(model_checkpoint)
    model = OpenAIGPTLMHeadModel.from_pretrained(model_checkpoint)
else:
    tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)
    model = GPT2LMHeadModel.from_pretrained(model_checkpoint)
# model.load_state_dict(torch.load(model_checkpoint))
model.eval()
model.to('cpu')
# Token ids at which greedy decoding may stop (EOS, '?', '!').
break_tokens = tokenizer.encode(tokenizer.eos_token) + tokenizer.encode('?') + tokenizer.encode('!')
# break_tokens = tokenizer.encode(tokenizer.eos_token)
MAX_LEN = model.config.n_ctx  # model context window size
if 'openai-gpt' in model_checkpoint:
    tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})
    tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})
sample = 1
#print()
#print('\n What would you like to ask?')
# history = []
context = ''      # running dialogue context across all turns
input_text = ''
turn = 0
# dbmatch = 0
def get_belief_new_dbsearch(sent):
    """Extract the deduplicated belief-state items from a decoded model string.

    Returns [] when the sentence carries no <|belief|> section.
    """
    if '<|belief|>' not in sent:
        return []
    segment = sent.strip(' ').split('<|belief|>')[-1].split('<|endofbelief|>')[0]
    segment = segment.strip(' .,')
    segment = segment.replace('<|endofbelief|>', '').replace('<|endoftext|>', '')
    items = []
    for raw in segment.split(','):
        cleaned = raw.strip(' .,')
        # Preserve order but drop duplicates.
        if cleaned not in items:
            items.append(cleaned)
    return items
def convert_belief(belief):
    """Convert flat 'domain slot value' belief strings into a nested dict.

    Each entry looks like 'hotel area north' or 'train book people 2'
    (a 'book' slot spans two tokens).  Returns {domain: {slot: value}}.
    Blank or malformed entries (fewer than two tokens) are skipped instead
    of raising IndexError as the original did.
    """
    dic = {}
    for bs in belief:
        if bs in [' ', '']:
            continue
        tokens = bs.split(' ')
        if len(tokens) < 2:
            # Malformed decoding fragment — ignore rather than crash.
            continue
        domain = tokens[0]
        if tokens[1] == 'book':
            slot = ' '.join(tokens[1:3])
            value = ' '.join(tokens[3:])
        else:
            slot = tokens[1]
            value = ' '.join(tokens[2:])
        dic.setdefault(domain, {})[slot] = value
    return dic
def get_turn_domain(beliefs, q):
    """Return the active domain for this turn.

    The first domain in *beliefs* not yet seen is appended to queue *q* and
    returned; otherwise the most recently seen domain (q[-1]) is returned.
    """
    for domain in beliefs:
        if domain not in q:
            q.append(domain)
            return domain
    return q[-1]
def get_action_new(sent):
    """Extract the deduplicated system-action items from a decoded model string.

    Returns [] when no <|action|> section is present.
    """
    if '<|action|>' not in sent:
        return []
    if '<|belief|>' in sent:
        segment = sent.split('<|belief|>')[-1].split('<|response|>')[0].split('<|action|>')[-1].strip()
    else:
        segment = sent.split('<|response|>')[0].split('<|action|>')[-1].strip()
    segment = segment.strip(' .,')
    segment = segment.replace('<|endofaction|>', '').replace('<|endoftext|>', '')
    actions = []
    for raw in segment.split(','):
        if raw == '':
            continue  # skip truly empty fragments before any stripping
        cleaned = raw.strip(' .,')
        if cleaned not in actions:
            actions.append(cleaned)
    return actions
def get_response_new(sent, venuename):
    """Extract the delexicalised response text and fill in the venue name.

    Returns '' when no <|response|> section is present.  The [venuename]
    placeholder in the generated text is replaced by *venuename*.
    """
    if '<|response|>' in sent:
        tmp = sent.split('<|belief|>')[-1].split('<|action|>')[-1].split('<|response|>')[-1]
    else:
        return ''
    # if '<belief>' in sent:
    #     tmp = sent.split('<belief>')[-1].split('<action>')[-1].split('<response>')[-1]
    # elif '<action>' in sent:
    #     tmp = sent.split('<action>')[-1].split('<response>')[-1]
    # elif '<response>' in sent:
    #     tmp = sent.split('<response>')[-1]
    # else:
    #     tmp = sent
    tmp = tmp.strip(' .,')
    # assert tmp.endswith('<endofresponse>')
    tmp = tmp.replace('<|endofresponse|>', '')
    tmp = tmp.replace('<|endoftext|>', '')
    # Round-trip through the tokenizer to drop any stray EOS tokens.
    tokens = tokenizer.encode(tmp)
    new_tokens = []
    for tok in tokens:
        # if tok in break_tokens:
        if tok in tokenizer.encode(tokenizer.eos_token):
            continue
        new_tokens.append(tok)
    # ipdb.set_trace()
    response = tokenizer.decode(new_tokens).strip(' ,.')
    response = response.replace('[venuename]', '{}'.format(venuename))
    return response
def get_venuename(bs):
    """Extract the venue name from the first action string, '' if absent.

    Guards against an empty action list (get_action_new can return [],
    which made the original raise IndexError on bs[0]).
    """
    name = ''
    if bs and 'venuename' in bs[0]:
        tmp_list = bs[0].split('venuename')[-1].split(' ')
        # The final token is the action verb; everything before it is the name.
        name = ' '.join(tmp_list[:-1])
    return name
def get_open_span(bs):
    """Collect (name, action) pairs from 'open span' markers in the first action.

    Guards against an empty action list (the original raised IndexError on
    bs[0] when get_action_new returned []).
    """
    action_names = []
    if not bs:
        return action_names
    for tmp in bs[0].split(';'):
        if 'open span' in tmp:
            # Last whitespace token is the action; the text before it the name.
            action = tmp.split('open span')[-1].split(' ')[-1]
            name = tmp.split('open span')[-1].split(action)[0]
            action_names.append((name, action))
    return action_names
##____________________________ End of GPT-3 code __________________________________________##
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
# NOTE(review): hard-coded secret key — acceptable for a demo only; move to an
# environment variable before any real deployment.
app.secret_key = 'MY_SECRET_KEY'
def label_Message(message):
    """Classify *message* with the pickled TF-IDF vectoriser + classifier pair.

    Returns the first predicted label.  NOTE(review): both pickles are
    re-loaded from disk on every call (slow) and the file handles are never
    closed; the local `model` also shadows the module-level GPT-2 model name.
    """
    logging.warning('In label_Message')
    # load the model from disk
    model_filename = 'model/model.pkl'
    tfidf_filename = 'model/tfidf.pkl'
    model = pickle.load(open(model_filename, 'rb'))
    tfidf = pickle.load(open(tfidf_filename, 'rb'))
    pred = model.predict(tfidf.transform([message]))
    message_label = pred[0]
    logging.warning('Out label_Message')
    return message_label
def label_to_persian(label):
    """Translate a sentiment label ('HAPPY'/'SAD') to its Persian word.

    Returns '' for any other label.  The original ended with a bare
    `return`, discarding the computed value and always yielding None.
    """
    res = ''
    if label == 'HAPPY':
        res = 'خوشحال'
    elif label == 'SAD':
        res = 'ناراحت'
    return res
def Create_message(message):
    """Run one dialogue turn through the GPT-2 model and build the response dict.

    Appends the user utterance to the global dialogue context, greedily
    decodes first the belief state, then the full action + response
    continuation, attaches a venue image when one is named, and returns the
    session 'result' dict ('response', 'status', 'has_image', 'image').
    """
    global context
    global turn
    logging.warning('In create message')
    global result
    label = session['label']
    state = session['state']
    result = session['result']
    result['response'] = ''
    result['status'] = 'on'
    result['has_image'] = 'False'
    raw_text = message
    input_text = raw_text.replace('you> ', '')
    if input_text in ['q', 'quit']:
        return "Ok, bye. Just for now!"
    user = '<|user|> {}'.format(input_text)
    context = context + ' ' + user
    text = '<|endoftext|> <|context|> {} <|endofcontext|>'.format(context)
    # print(context)
    text = text.strip()
    indexed_tokens = tokenizer.encode(text)
    if len(indexed_tokens) > MAX_LEN:
        # Keep only the most recent MAX_LEN tokens of context.
        indexed_tokens = indexed_tokens[-1 * MAX_LEN:]
    # Convert indexed tokens in a PyTorch tensor
    tokens_tensor = torch.tensor([indexed_tokens])
    # If you have a GPU, put everything on cuda
    tokens_tensor = tokens_tensor.to('cpu')
    predicted_index = indexed_tokens[-1]
    with torch.no_grad():
        # Greedy decoding — stage 1: decode up to the end of the belief state.
        while predicted_index not in break_tokens:
            outputs = model(tokens_tensor)
            predictions = outputs[0]
            predicted_index = torch.argmax(predictions[0, -1, :]).item()
            indexed_tokens += [predicted_index]
            tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')
            if len(indexed_tokens) > MAX_LEN:
                break
            if tokenizer.decode(indexed_tokens).endswith('<|endofbelief|>'):
                break
    tmp_pred = tokenizer.decode(indexed_tokens)
    print('\ntmp_pred:\n', tmp_pred)
    belief_text = get_belief_new_dbsearch(tmp_pred)
    print('\nbelief_text:\n', belief_text)
    beliefs = convert_belief(belief_text)
    # domain = list(beliefs.keys())[0]
    domain = get_turn_domain(beliefs, domain_queue)
    # Convert indexed tokens in a PyTorch tensor
    tokens_tensor = torch.tensor([indexed_tokens])
    # If you have a GPU, put everything on cuda
    tokens_tensor = tokens_tensor.to('cpu')
    predicted_index = indexed_tokens[-1]
    truncate_action = False
    # Predict all tokens — stage 2: decode actions and the final response.
    with torch.no_grad():
        while predicted_index not in break_tokens:
            outputs = model(tokens_tensor)
            predictions = outputs[0]
            predicted_index = torch.argmax(predictions[0, -1, :]).item()
            indexed_tokens += [predicted_index]
            if len(indexed_tokens) > MAX_LEN:
                break
            predicted_text = tokenizer.decode(indexed_tokens)
            if '<|action|>' in predicted_text:
                # De-duplicate / cap the generated action list once, rewriting
                # the token stream so decoding continues from a clean prefix.
                generated_actions = predicted_text.split('<|action|>')[-1].split('<|endofaction|>')[0].split(',')
                new_actions = []
                for a in generated_actions:
                    if a in ['', ' ']:
                        continue
                    new_actions.append(a.strip())
                len_actions = len(new_actions)
                if len(list(set(new_actions))) > len(new_actions) or (len_actions > 10 and not truncate_action):
                    # ipdb.set_trace()
                    actions = '<|action|> {} <|endofaction|>'.format(' , '.join(list(set(new_actions))))
                    indexed_tokens = tokenizer.encode('{} {}'.format(predicted_text.split('<|action|>')[0], actions))
                    # print('action truncated')
                    truncate_action = True
            tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')
    predicted_text = tokenizer.decode(indexed_tokens)
    print('\npredicted_text:\n', predicted_text)
    action_text = get_action_new(predicted_text)
    print('\naction_text:\n', action_text)
    venuename = get_venuename(action_text)
    #print('\nVenuename:\n', venuename)
    response_text = get_response_new(predicted_text, venuename)
    print('\nresponse_text:\n', response_text)
    #print(predicted_text)
    open_spans = get_open_span(action_text)
    print('\open_spans:\n', open_spans)
    # handling images
    if venuename:
        result['has_image'] = 'True'
        images = img_handler_obj.get_imgs_url(query=venuename + "in Singapore", num_of_img=5)
        result['image'] = images[0]
        print(images)
    delex_system = '{}'.format(response_text)
    context = context + ' ' + delex_system
    turn += 1
    # NOTE(review): this binds a *local* prev_beliefs — the module-level
    # prev_beliefs is never updated (missing `global prev_beliefs`?); confirm.
    prev_beliefs = beliefs
    result['response'] = response_text
    session['result'] = result
    return result
@app.route('/')
def index():
    """Serve the chat UI and reset per-session dialogue state."""
    session['state'] = 'start'
    session['label'] = ''
    session['result'] = {}
    return render_template('index2.html')
@app.route('/send_message', methods=['POST'])
def send_message():
    """AJAX endpoint: run the posted message through the model, return JSON."""
    message = request.form['message']
    response_text = Create_message(message)
    #print('\nRESPONSE TEXT ', response_text)
    return jsonify(response_text)
| StarcoderdataPython |
5131148 | <reponame>adlibre/adlibre-monitoring<gh_stars>0
"""
Adlibre Deployment Script for CentOS / EL 5/6 / Amazon AMI
All commands should be idempotent
"""
from fabric.api import env, run, put, sudo, prefix
from fabric.contrib.files import append, comment, exists, sed
def _get_os_major_version():
    """ Helper function to determine OS major version """
    # Amazon Linux ships /etc/system-release instead of /etc/redhat-release.
    if exists("/etc/system-release"):
        return "Amazon"
    else:
        # First single digit in the release string, e.g. "6" for EL6.
        return run("egrep -oe '[0-9]' /etc/redhat-release | head -n1")
def _install_epel():
    """Install the EPEL yum repository matching this OS release (idempotent)."""
    version = _get_os_major_version()
    env.warn_only = True  # rpm -q exits non-zero when the package is absent
    if run('rpm -q epel-release').failed:
        if version == "5":
            sudo('rpm -Uvh http://download.fedoraproject.org/pub/epel/5/i386/epel-release-5-4.noarch.rpm')
        elif version == "6":
            sudo('rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm')
        elif version == "7":
            print("EL 7")
        elif version == "Amazon":
            print("Amazon AMI includes EPEL")
        else:
            from fabric.utils import error
            msg = "version %s unknown" % (version)
            error(msg)
def _install_sudo():
    """ Install sudo """
    # Guarded so re-running the deploy is a no-op (idempotent).
    if not exists("/usr/bin/sudo"):
        run('yum -y -q install sudo')
def install_os_requirements():
    """ Install OS requirements """
    # sudo must exist before _install_epel runs privileged commands.
    _install_sudo()
    _install_epel()
def install_nrpe():
    """ Install NRPE + plugins - active checks """
    sudo('yum -y -q install nrpe')
    sudo('chkconfig nrpe on')  # start on boot
    sudo('yum -y -q install nagios-plugins-load nagios-plugins-disk')
def install_nsca():
    """ Install NSCA Client - passive checks"""
    # nsca-client lives in EPEL, hence the explicit repo enable.
    sudo('yum -y -q --enablerepo=epel install nsca-client')
def _configure_nrpe(nrpe_allowed_hosts):
    """ Configure NRPE to accept commands from our monitoring server """
    # Rewrite the allowed_hosts line in place (idempotent via sed).
    sudo("sed -i -e 's/^allowed_hosts.*$/allowed_hosts=%s/g' /etc/nagios/nrpe.cfg" % (nrpe_allowed_hosts))
    append('/etc/nagios/nrpe.cfg', 'include_dir=/etc/nagios/nrpe.d/', use_sudo=True)
    sudo('service nrpe restart')
def deploy_nrpe_config(nrpe_allowed_hosts):
    """ Deploy NRPE configuration """
    sudo('mkdir -p /etc/nagios/nrpe.d')
    put('nrpe.d/*', '/etc/nagios/nrpe.d/', use_sudo=True, mirror_local_mode=True)
    sudo('mkdir -p /etc/nagios/plugins')
    put('plugins/*', '/etc/nagios/plugins/', use_sudo=True, mirror_local_mode=True)
    # Point the daemon at the new drop-in directory and restart it.
    _configure_nrpe(nrpe_allowed_hosts)
def install_passive_checker(openvz=False):
    """ Install Adlibre passive checking scripts & cron"""
    install_nsca()
    # fix perms so nagios user can send alerts from cron
    sudo('chown root:nagios /etc/nagios/send_nsca.cfg')
    sudo('chmod 660 /etc/nagios/send_nsca.cfg')
    sudo('mkdir -p /etc/nagios/passive-checker')
    put('passive-checker/*', '/etc/nagios/passive-checker/', use_sudo=True, mirror_local_mode=True)
    if openvz:
        # Allow nagios to read OpenVZ user_beancounters. NB for non OpenVZ hosts this is not required
        append('/etc/sudoers', 'nagios,nrpe ALL=(ALL) NOPASSWD: /bin/cat /proc/user_beancounters', use_sudo=True)
    # Install nagios cronjob (preserve any existing crontab entries).
    sudo('crontab -l -u nagios > /tmp/nagios.cron')
    append('*/15 * * * * /etc/nagios/passive-checker/run.sh', '/tmp/nagios.cron', use_sudo=True)
    sudo('crontab -u nagios /tmp/nagios.cron && rm -f /tmp/nagios.cron')
def all(nrpe_allowed_hosts, passive_checker=False, openvz=False):
    """ Install all components """
    # NOTE: shadows the builtin all(); name kept for fab CLI compatibility.
    install_os_requirements()
    install_nrpe()
    install_nsca()
    deploy_nrpe_config(nrpe_allowed_hosts)
    if passive_checker:
        install_passive_checker(openvz)
| StarcoderdataPython |
4947842 | <gh_stars>0
from tkinter import *
from tkinter import ttk
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from tkinter.font import Font
class LikeSettings():
    """Tkinter settings pane for the 'like' feature.

    Exposes a max-likes-per-day entry, a 'Like Photos' checkbox and a sleep
    interval entry placed onto the supplied frame.
    """

    def __init__(self, gui_frame):
        self.settings_tab = gui_frame
        likes_today_text = tk.Label(self.settings_tab, text="max. Likes Today : ")
        likes_today_text.place(x=5, y=5)
        self.likes_today_text_entry = tk.Entry(self.settings_tab, width=7)
        self.likes_today_text_entry.place(x=140, y=8)
        self.likes_today_text_entry.insert(0, "")
        self.like_random_status = IntVar()
        # NOTE(review): .place() returns None, so self.like_random is always
        # None; keep a reference to the Checkbutton itself if needed later.
        self.like_random = Checkbutton(self.settings_tab, text="Like Photos", variable=self.like_random_status).place(x=5, y=30)
        like_sleep_text = Label(self.settings_tab, text="Sleep for : ")
        like_sleep_text.place(x=5, y=60)
        self.like_sleep = Entry(self.settings_tab, width=8)
        self.like_sleep.place(x=65, y=60)
        self.like_sleep.insert(0, "")

    def getLikeStatus(self):
        # Checkbutton variable is 1 when ticked.
        return True if self.like_random_status.get() == 1 else False

    def getMaxLikesToday(self):
        # Raw entry text; caller is responsible for int conversion.
        return self.likes_today_text_entry.get()

    def setMaxLikesToday(self, string):
        self.likes_today_text_entry.insert(0, string)

    def getLikeSleepText(self):
        return self.like_sleep.get()

    def getLikeSleepReal(self):
        # Expects the entry to contain "min,max"; raises on other formats.
        temp_sleep = self.like_sleep.get().split(",")
        return int(temp_sleep[0]), int(temp_sleep[1])

    def setLikeSleep(self, sleepstring):
        self.like_sleep.insert(0, sleepstring)
| StarcoderdataPython |
18242 | <filename>nnutils/laplacian_loss.py
# --------------------------------------------------------
# Written by <NAME> (https://github.com/JudyYe)
# --------------------------------------------------------
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# customize laplacian argument
import torch
def mesh_laplacian_smoothing(meshes, verts_packed=None, method: str = "uniform"):
    r"""
    Computes the laplacian smoothing objective for a batch of meshes.
    This function supports three variants of Laplacian smoothing,
    namely with uniform weights("uniform"), with cotangent weights ("cot"),
    and cotangent cuvature ("cotcurv").For more details read [1, 2].
    Args:
        meshes: Meshes object with a batch of meshes.
        verts_packed: optional (sum(V_n), 3) tensor of packed vertex positions
            to smooth in place of meshes.verts_packed() — this is the local
            customization over the stock PyTorch3D implementation; gradients
            flow through this tensor while the Laplacian itself is detached.
        method: str specifying the method for the laplacian.
    Returns:
        loss: Average laplacian smoothing loss across the batch.
        Returns 0 if meshes contains no meshes or all empty meshes.
    Consider a mesh M = (V, F), with verts of shape Nx3 and faces of shape Mx3.
    The Laplacian matrix L is a NxN tensor such that LV gives a tensor of vectors:
    for a uniform Laplacian, LuV[i] points to the centroid of its neighboring
    vertices, a cotangent Laplacian LcV[i] is known to be an approximation of
    the surface normal, while the curvature variant LckV[i] scales the normals
    by the discrete mean curvature. For vertex i, assume S[i] is the set of
    neighboring vertices to i, a_ij and b_ij are the "outside" angles in the
    two triangles connecting vertex v_i and its neighboring vertex v_j
    for j in S[i], as seen in the diagram below.
    .. code-block:: python
               a_ij
                /\
               /  \
              /    \
             /      \
        v_i /________\ v_j
            \        /
             \      /
              \    /
               \  /
                \/
               b_ij
        The definition of the Laplacian is LV[i] = sum_j w_ij (v_j - v_i)
        For the uniform variant,    w_ij = 1 / |S[i]|
        For the cotangent variant,
            w_ij = (cot a_ij + cot b_ij) / (sum_k cot a_ik + cot b_ik)
        For the cotangent curvature, w_ij = (cot a_ij + cot b_ij) / (4 A[i])
        where A[i] is the sum of the areas of all triangles containing vertex v_i.
    There is a nice trigonometry identity to compute cotangents. Consider a triangle
    with side lengths A, B, C and angles a, b, c.
    .. code-block:: python
               c
              /|\
             / | \
            /  |  \
         B /  H|   \ A
          /    |    \
         /     |     \
        /a_____|_____b\
               C
        Then cot a = (B^2 + C^2 - A^2) / 4 * area
        We know that area = CH/2, and by the law of cosines we have
        A^2 = B^2 + C^2 - 2BC cos a => B^2 + C^2 - A^2 = 2BC cos a
        Putting these together, we get:
        B^2 + C^2 - A^2     2BC cos a
        _______________  =  _________ = (B/H) cos a = cos a / sin a = cot a
           4 * area            2CH
    [1] Desbrun et al, "Implicit fairing of irregular meshes using diffusion
    and curvature flow", SIGGRAPH 1999.
    [2] Nealan et al, "Laplacian Mesh Optimization", Graphite 2006.
    """
    if meshes.isempty():
        return torch.tensor(
            [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
        )
    N = len(meshes)
    if verts_packed is None:
        verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    num_verts_per_mesh = meshes.num_verts_per_mesh()  # (N,)
    verts_packed_idx = meshes.verts_packed_to_mesh_idx()  # (sum(V_n),)
    # Per-vertex weight 1/V_n normalises each mesh's contribution by its size.
    weights = num_verts_per_mesh.gather(0, verts_packed_idx)  # (sum(V_n),)
    weights = 1.0 / weights.float()
    # We don't want to backprop through the computation of the Laplacian;
    # just treat it as a magic constant matrix that is used to transform
    # verts into normals
    with torch.no_grad():
        if method == "uniform":
            L = meshes.laplacian_packed()
        elif method in ["cot", "cotcurv"]:
            L, inv_areas = laplacian_cot(meshes)
            if method == "cot":
                # Normalise rows by the total cot weight, guarding against
                # isolated vertices whose row sum is zero.
                norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
                idx = norm_w > 0
                norm_w[idx] = 1.0 / norm_w[idx]
            else:
                norm_w = 0.25 * inv_areas
        else:
            raise ValueError("Method should be one of {uniform, cot, cotcurv}")
    if method == "uniform":
        loss = L.mm(verts_packed)
    elif method == "cot":
        loss = L.mm(verts_packed) * norm_w - verts_packed
    elif method == "cotcurv":
        loss = (L.mm(verts_packed) - verts_packed) * norm_w
    loss = loss.norm(dim=1)
    loss = loss * weights
    return loss.sum() / N
def laplacian_cot(meshes):
    """
    Returns the Laplacian matrix with cotangent weights and the inverse of the
    face areas.
    Args:
        meshes: Meshes object with a batch of meshes.
    Returns:
        2-element tuple containing
        - **L**: FloatTensor of shape (V,V) for the Laplacian matrix (V = sum(V_n))
          Here, L[i, j] = cot a_ij + cot b_ij iff (i, j) is an edge in meshes.
          See the description above for more clarity.
        - **inv_areas**: FloatTensor of shape (V,) containing the inverse of sum of
          face areas containing each vertex
    """
    verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    faces_packed = meshes.faces_packed()  # (sum(F_n), 3)
    # V = sum(V_n), F = sum(F_n)
    V, F = verts_packed.shape[0], faces_packed.shape[0]
    face_verts = verts_packed[faces_packed]
    v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]
    # Side lengths of each triangle, of shape (sum(F_n),)
    # A is the side opposite v1, B is opposite v2, and C is opposite v3
    A = (v1 - v2).norm(dim=1)
    B = (v0 - v2).norm(dim=1)
    C = (v0 - v1).norm(dim=1)
    # Area of each triangle (with Heron's formula); shape is (sum(F_n),)
    s = 0.5 * (A + B + C)
    # note that the area can be negative (close to 0) causing nans after sqrt()
    # we clip it to a small positive value
    area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
    # Compute cotangents of angles, of shape (sum(F_n), 3)
    # using cot a = (B^2 + C^2 - A^2) / (4 * area); the /4 is applied once below.
    A2, B2, C2 = A * A, B * B, C * C
    cota = (B2 + C2 - A2) / area
    cotb = (A2 + C2 - B2) / area
    cotc = (A2 + B2 - C2) / area
    cot = torch.stack([cota, cotb, cotc], dim=1)
    cot /= 4.0
    # Construct a sparse matrix by basically doing:
    # L[v1, v2] = cota
    # L[v2, v0] = cotb
    # L[v0, v1] = cotc
    ii = faces_packed[:, [1, 2, 0]]
    jj = faces_packed[:, [2, 0, 1]]
    idx = torch.stack([ii, jj], dim=0).view(2, F * 3)
    L = torch.sparse.FloatTensor(idx, cot.view(-1), (V, V))
    # Make it symmetric; this means we are also setting
    # L[v2, v1] = cota
    # L[v0, v2] = cotb
    # L[v1, v0] = cotc
    L += L.t()
    # For each vertex, compute the sum of areas for triangles containing it.
    idx = faces_packed.view(-1)
    inv_areas = torch.zeros(V, dtype=torch.float32, device=meshes.device)
    val = torch.stack([area] * 3, dim=1).view(-1)
    inv_areas.scatter_add_(0, idx, val)
    # Invert only where the accumulated area is positive (isolated verts stay 0).
    idx = inv_areas > 0
    inv_areas[idx] = 1.0 / inv_areas[idx]
    inv_areas = inv_areas.view(-1, 1)
    return L, inv_areas
| StarcoderdataPython |
9790269 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(s):
    """Length of the longest palindromic subsequence of s (classic 2-D DP)."""
    n = len(s)
    # dp[i][j] = LPS length of s[i..j]; filled bottom-up over interval starts.
    dp = [[0] * n for _ in range(n)]
    for i in range(n - 1, -1, -1):
        dp[i][i] = 1
        for j in range(i + 1, n):
            if s[i] == s[j]:
                dp[i][j] = dp[i + 1][j - 1] + 2
            else:
                dp[i][j] = max(dp[i + 1][j], dp[i][j - 1])
    return dp[0][n - 1]
#TOFILL
if __name__ == '__main__':
    # Evaluation harness: compares the reference f_gold against a candidate
    # f_filled over fixed parameter sets and prints the success count.
    param = [
        (' E',),
        ('0845591950',),
        ('00101011',),
        ('pLSvlwrACvFaoT',),
        ('7246',),
        ('1010101100000',),
        ('obPkcLSFp',),
        ('914757557818',),
        ('1',),
        ('PKvUWIQ',)
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        # NOTE(review): f_filled is not defined in this file — it is injected
        # in place of the #TOFILL marker by the evaluation pipeline.
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success += 1
    print("#Results: %i, %i" % (n_success, len(param)))
29150 | # receive_msg.py
#
# SPDX-FileCopyrightText: Copyright 2021 <NAME>
#
# SPDX-License-Identifier: MIT
#
# Receive message from IOTA tangle
#
import iota_client
import os
import pprint
# Config
msg_meta = False  # when True, also fetch and display message metadata
env_node_address = 'HORNET_NODE_ADDRESS'  # env var holding the node URL
# Print Message data
def show_message(message, meta=False):
    """Pretty-print *message* under a heading ('Message' or 'Message meta')."""
    show = 'Message meta' if meta else 'Message'
    print(
        '''
    {} data:
    '''.format(show))
    pprint.pprint(message)
# Connect to node and retrieve message
def main():
    """CLI entry point: fetch a message from the Tangle by id and display it."""
    import argparse
    parser = argparse.ArgumentParser(description='Receive message from IOTA tangle.')
    parser.add_argument('--msg_id', dest='msg_id',
                        default='497c1b68e5480d07819bbd9c989c8d245fa748667a89fdf7dac884741f493326',
                        help='Id of message stored on tangle')
    parser.add_argument('--node_info', dest='node_info',
                        default=False,
                        help='Print node information')
    args = parser.parse_args()
    message_id = args.msg_id
    node_info = args.node_info
    # Get node address out of environment
    NODE_URL = os.getenv(env_node_address)
    if not NODE_URL:
        raise Exception("Please define environment variable with node URL.")
    try:
        # Initialize client
        # NOTE(review): nodes_name_password expects [url, name, password]
        # triples in some client versions — confirm against the pinned
        # iota_client release.
        client = iota_client.Client(
            nodes_name_password=[[NODE_URL]], node_sync_disabled=True)
    except:
        raise Exception('Node not found.')
    # Check node status
    if not client.get_health():
        print('''
    ------------------
    Node not healthy.
    ------------------''')
    # Get node information
    if node_info:
        print('Node Information:')
        pprint.pprint(client.get_info())
    # Retrieve message from Tangle
    message = client.get_message_data(message_id)
    # Show results
    show_message(message)
    if msg_meta:
        message_meta = client.get_message_metadata(message_id)
        show_message(message_meta, True)
    # Decode message: indexation payload data arrives as a list of byte values.
    msg_str = bytes(message['payload']['indexation'][0]['data']).decode('utf-8')
    print('''
    Decoded message:
    {}
    '''.format(msg_str))


if __name__ == "__main__":
    main()
386841 | <reponame>appetito/2checkout_demo<gh_stars>0
import hashlib
from .twocheckout import Twocheckout
class Passback(Twocheckout):
    """2Checkout passback (return URL) validation helper.

    Verifies that the MD5 ``key`` parameter sent back by 2Checkout matches
    the hash of secret word + sid + order_number + total, proving the
    parameters were not tampered with in transit.
    """

    def __init__(self, dict_):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(Passback, self).__init__(dict_)

    @classmethod
    def check_hash(cls, params=None):
        """Return True iff params['key'] matches the expected MD5 hash.

        Expects 'secret', 'sid', 'order_number', 'total' and 'key' keys.
        """
        m = hashlib.md5()
        # Fix: hashlib update() requires bytes-like input on Python 3, so the
        # original str arguments raised TypeError. Encode each field.
        m.update(params['secret'].encode('utf-8'))
        m.update(params['sid'].encode('utf-8'))
        m.update(params['order_number'].encode('utf-8'))
        m.update(params['total'].encode('utf-8'))
        # 2Checkout sends the key uppercased, so compare uppercased digests.
        return m.hexdigest().upper() == params['key']

    @classmethod
    def check(cls, params=None):
        """Validate passback params and wrap the outcome in a Passback.

        Returns a plain dict (not a Passback instance) when the required
        keys are missing, matching the original behaviour.
        """
        if params is None:
            params = dict()
        if 'order_number' in params and 'total' in params:
            if Passback.check_hash(params):
                response = {"response_code": "SUCCESS",
                            "response_message": "Hash Matched"}
            else:
                response = {"response_code": "FAILED",
                            "response_message": "Hash Mismatch"}
        else:
            return {"response_code": "ERROR",
                    "response_message": "You must pass secret word, sid, order_number, total"}
        return cls(response)
| StarcoderdataPython |
5047303 | from .detector import detect_faces
from .visualization_utils import show_results
from .align_trans import get_reference_facial_points, warp_and_crop_face
| StarcoderdataPython |
4951940 | # Created byMartin.cz
# Copyright (c) <NAME>. All rights reserved.
# import modules
import wx
import wx.dataview as wxdv
from .. import mwx
from .. import events
from .list_model import AuthorsListModel
class AuthorsList(wx.Panel):
    """Authors list panel: a multi-select DataViewCtrl backed by AuthorsListModel."""

    def __init__(self, parent):
        """Initializes authors list panel."""
        # init panel
        wx.Panel.__init__(self, parent, -1, style=wx.NO_FULL_REPAINT_ON_RESIZE)
        # init buff — the model holds a reference to this same list, so it is
        # mutated in place (never rebound) to keep model and panel in sync
        self._authors = []
        # make ui
        self._make_ui()

    def SetAuthors(self, authors):
        """Sets authors to display."""
        # unselect all
        self._list_ctrl.UnselectAll()
        # set data in place (slice assignment keeps the model's reference valid)
        before = len(self._authors)
        self._authors[:] = authors if authors else []
        after = len(self._authors)
        diff = after - before
        # add rows — notify the model row-by-row so the view stays consistent
        if diff > 0:
            for i in range(diff):
                self._list_model.RowAppended()
        # remove rows
        elif diff < 0:
            self._list_model.RowsDeleted(range(after, before))
        # update list
        self._list_model.Resort()
        # post selection changed event
        self._on_selection_changed(None)

    def SetSelectedAuthors(self, authors):
        """Selects specified authors."""
        # get authors IDs
        ids = [x.dbid for x in authors]
        # select items whose dbid matches one of the requested authors
        for row, article in enumerate(self._authors):
            if article.dbid in ids:
                item = self._list_model.GetItem(row)
                self._list_ctrl.Select(item)

    def GetSelectedAuthors(self):
        """Gets list of selected authors."""
        # get selected items and map view items back to buffer rows
        items = self._list_ctrl.GetSelections()
        rows = [self._list_model.GetRow(x) for x in items]
        # get authors
        return [self._authors[x] for x in rows]

    def _on_selection_changed(self, evt):
        """Handles row selection event."""
        event = events.AuthorsSelectionChangedEvent(self.GetId())
        wx.PostEvent(self, event)

    def _on_item_activated(self, evt):
        """Handles row activation event."""
        event = events.AuthorsItemActivatedEvent(self.GetId())
        wx.PostEvent(self, event)

    def _on_item_context_menu(self, evt):
        """Handles context menu event."""
        event = events.AuthorsItemContextMenuEvent(self.GetId())
        wx.PostEvent(self, event)

    def _make_ui(self):
        """Makes panel UI."""
        # init list control
        self._list_ctrl = wxdv.DataViewCtrl(self, style=wx.NO_BORDER | wxdv.DV_ROW_LINES | wxdv.DV_VERT_RULES | wxdv.DV_MULTIPLE)
        # associate model (must happen before columns are appended)
        self._list_model = AuthorsListModel(self._authors)
        self._list_ctrl.AssociateModel(self._list_model)
        # add columns; the second argument is the model column index
        self._list_ctrl.AppendTextColumn("Expander", 0, width=0, mode=wxdv.DATAVIEW_CELL_INERT, align=wx.ALIGN_CENTER)
        self._list_ctrl.AppendTextColumn("Last Name", 1, width=150, mode=wxdv.DATAVIEW_CELL_INERT, align=wx.ALIGN_LEFT)
        self._list_ctrl.AppendTextColumn("First Name", 2, width=150, mode=wxdv.DATAVIEW_CELL_INERT, align=wx.ALIGN_LEFT)
        self._list_ctrl.AppendTextColumn("Initials", 3, width=80, mode=wxdv.DATAVIEW_CELL_INERT, align=wx.ALIGN_LEFT)
        self._list_ctrl.AppendTextColumn("Articles", 4, width=50, mode=wxdv.DATAVIEW_CELL_INERT, align=wx.ALIGN_LEFT)
        # hide some columns (the expander column exists only for tree layout)
        self._list_ctrl.Columns[0].SetHidden(True)
        # set columns properties
        for c in self._list_ctrl.Columns:
            c.Sortable = True
            c.Reorderable = False
            c.GetRenderer().EnableEllipsize(wx.ELLIPSIZE_END)
        # bind events
        self._list_ctrl.Bind(wxdv.EVT_DATAVIEW_SELECTION_CHANGED, self._on_selection_changed)
        self._list_ctrl.Bind(wxdv.EVT_DATAVIEW_ITEM_ACTIVATED, self._on_item_activated)
        self._list_ctrl.Bind(wxdv.EVT_DATAVIEW_ITEM_CONTEXT_MENU, self._on_item_context_menu)
        # add to sizer
        self.Sizer = wx.BoxSizer(wx.VERTICAL)
        self.Sizer.Add(self._list_ctrl, 1, wx.EXPAND | wx.ALL, mwx.LIST_CTRL_SPACE)
| StarcoderdataPython |
11386701 | <filename>main.py
from textblob import TextBlob
import tweepy
import sys # This module is part of the core Python stack.
import math # This module is part of the core Python stack.
from dotenv import load_dotenv
from pathlib import Path
import os
env_path = Path(".env") # env_path is an instance of the Path class.
load_dotenv(dotenv_path = env_path)
# Twitter API credentials are read from the environment (populated by .env).
api_key = os.getenv("api_key")
api_secret_key = os.getenv("api_secret_key")
bearer_token = os.getenv("bearer_token")
access_token = os.getenv("access_token")
access_secret = os.getenv("access_secret")
# Now, let's create a twitter_api authentication handler.
auth_handler = tweepy.OAuthHandler(consumer_key = api_key, consumer_secret = api_secret_key)
auth_handler.set_access_token(access_token, access_secret)
api = tweepy.API(auth_handler) # This is how we build the connection to the twitter_api.
search_term = "biden"
tweet_amount = 1000
polarity = 0
positive = 0
negative = 0
neutral = 0
# Now, let's create a "Cursor" object :-
# NOTE(review): lang="eu" requests Basque tweets; if English was intended use "en".
cursor_object = tweepy.Cursor(api.search, q= search_term, lang = "eu")
tweets = cursor_object.items(tweet_amount)
for tweet in tweets:
    # Strip the retweet marker and a leading @mention so only the body is analysed.
    final_text = tweet.text.replace('RT', '')
    if final_text.startswith(" @"):
        position = final_text.index(":")
        if final_text[position+1] == ' ':
            final_text = final_text[position+2::1]
        else:
            final_text = final_text[position+1::1]
    elif final_text.startswith("@"):
        position = final_text.index(" ")
        final_text = final_text[position+1::1]
    # Sentiment analysis: TextBlob polarity lies in [-1.0, 1.0].
    analysis = TextBlob(final_text)
    tweet_polarity = analysis.polarity
    # Fix: the original compared against 15.00, which (with polarity always
    # in [-1, 1]) counted every tweet as negative and made the neutral and
    # positive branches unreachable.
    if tweet_polarity < 0.0:
        negative += 1
    elif tweet_polarity > 0.0:
        positive += 1
    else:
        neutral += 1
    polarity = polarity + tweet_polarity
# A positive overall polarity sum means the sampled tweets lean positive.
if polarity > 0.0:
    print()
    print("There is a positive trend.")
    print()
else:
    print()
    print("There is a negative trend.")
    print()
print("Polarity :- ", str(round(polarity, 2)))
print()
print("Positive Tweets :- ", positive)
print()
print("Negative Tweets :- ", negative)
print()
print("Neutral Tweets :- ", neutral)
| StarcoderdataPython |
270753 | <filename>instances/migrations/0002_alter_instance_url.py
# Generated by Django 4.0.1 on 2022-01-18 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Instance.url to a 255-character URLField."""

    dependencies = [
        ('instances', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='instance',
            name='url',
            field=models.URLField(max_length=255, verbose_name='URL'),
        ),
    ]
| StarcoderdataPython |
1928233 | import pytest
import src.user_class
import src.database_access
def resetFunctions():
    """Restore the real built-in input/print on the module under test.

    Tests monkey-patch src.user_class.input/print to script I/O; call this
    after each test so patches do not leak into the next one.
    """
    src.user_class.input = input
    src.user_class.print = print
class TestIsPasswordSecure:
    """Tests for Page.is_password_secure: length bounds and character classes.

    NOTE(review): many password literals were replaced with '<PASSWORD>' by a
    dataset scrubber, so those assertions no longer encode real inputs.
    """
    # Shared page object; is_password_secure appears stateless, so reuse is fine.
    page = src.user_class.Page()

    def test_password_character_limit_lower(self):
        # Minimum accepted length is 8 characters.
        assert self.page.is_password_secure("P2$s") == False
        assert self.page.is_password_secure("") == False
        assert self.page.is_password_secure("<PASSWORD>") == False # 7 chars
        assert self.page.is_password_secure("<PASSWORD>") == True # 8 chars
        assert self.page.is_password_secure("<PASSWORD>") == True

    def test_password_character_limit_upper(self):
        # Maximum accepted length is 12 characters.
        assert self.page.is_password_secure("<PASSWORD>") == False
        assert self.page.is_password_secure(
            "<PASSWORD>") == False # 13 chars
        assert self.page.is_password_secure("<PASSWORD>") == True # 12 chars
        assert self.page.is_password_secure("<PASSWORD>") == True

    def test_password_contains_capital(self):
        # At least one uppercase letter is required.
        assert self.page.is_password_secure("password1#") == False
        assert self.page.is_password_secure("Password1#") == True
        assert self.page.is_password_secure("<PASSWORD>") == False
        assert self.page.is_password_secure("<PASSWORD>") == True

    def test_password_contains_lowercase(self):
        # At least one lowercase letter is required.
        assert self.page.is_password_secure("PASSWORD1#") == False
        assert self.page.is_password_secure("<PASSWORD>#") == True
        assert self.page.is_password_secure("<PASSWORD>") == False
        assert self.page.is_password_secure("a<PASSWORD>") == True

    def test_password_contains_number(self):
        # At least one digit is required.
        assert self.page.is_password_secure("Password$$") == False
        assert self.page.is_password_secure("Password1$") == True

    def test_password_contains_special(self):
        # At least one special character is required.
        assert self.page.is_password_secure("Password12") == False
        assert self.page.is_password_secure("Password1#") == True
class TestGetCredentials:
    """Tests that Page.get_credentials emits the expected prompts.

    input/print on the module under test are replaced with closures that
    record prompts into `output` and feed scripted answers from `input_values`.
    """
    page = src.user_class.Page()

    def testLoginIO(self):
        # Login (register=False) should prompt only for username and password.
        input_values = ['randion', 'Password#1']
        output = []
        def mock_input(s):
            output.append(s)
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.get_credentials(False)
        resetFunctions()
        assert output == [
            'Enter username: ',
            'Enter password: ',
        ]

    def testRegisterIO(self):
        # Registration (register=True) additionally prompts for first/last name.
        input_values = ['randion', 'Password#1', 'Robby', '<PASSWORD>']
        output = []
        def mock_input(s):
            output.append(s)
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.get_credentials(True)
        resetFunctions()
        assert output == [
            'Enter username: ',
            'Enter password: ',
            'Enter first name: ',
            'Enter last name: ',
        ]
class TestRegisterLogin:
    """End-to-end register/login tests against a throwaway sqlite database.

    Tests are order-dependent: registration happens first, the limit test
    fills the remaining slots, and testCleanUp tears the table down.
    """
    page = src.user_class.Page()
    db_name = "testing.sqlite3"
    db = src.database_access.database_access(db_name)
    # Point the module under test at the test database.
    src.user_class.db = db

    def testUserRegistration(self):
        input_values = ['randion', 'Password#1', 'Robby', '<PASSWORD>']
        output = []
        def mock_input(s):
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.register()
        resetFunctions()
        assert output == ["An account for randion was registered successfully"]

    def testUserLoginCorrect(self):
        # Uses the account created in testUserRegistration.
        input_values = ['randion', 'Password#1']
        output = []
        def mock_input(s):
            output.append(s)
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.login()
        resetFunctions()
        assert output == [
            'Enter username: ',
            'Enter password: ',
            "You have successfully logged in\n"
        ]

    def testUserLoginIncorrect(self):
        # Wrong password must be rejected with the retry message.
        input_values = ['randion', 'Password#']
        output = []
        def mock_input(s):
            output.append(s)
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.login()
        resetFunctions()
        assert output == [
            'Enter username: ',
            'Enter password: ',
            "Incorrect username / password, please try again\n"
        ]

    def testUserRegistrationLimit(self):
        def mock_input(s):
            return input_values.pop(0)
        src.user_class.input = mock_input
        # Fill the remaining account slots (one account already exists).
        for i in range(0, 4):
            input_values = [
                'randion' + str(i), 'Password#1' + str(i), 'Robby' + str(i), 'Ybbor' + str(i)]
            self.page.register()
        resetFunctions()
        output = []
        # NOTE(review): this shadows the builtin `input` and is never consumed;
        # mock_input below also returns None — confirm intended.
        input = ['TomSawyer', 'Passworrd<PASSWORD>', 'Tommy', "Sawyer"]
        def mock_input(s):
            output.append(s)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.register()
        resetFunctions()
        assert output == [
            "All permitted accounts have been created, please come backlater\n"
        ]

    def testDatabaseUserPrint(self):
        # Capture the rows that print_users() would print.
        output = []
        src.database_access.print = lambda s: output.append(s)
        self.db.print_users()
        src.database_access.print = print
        assert output == [
            ('randion', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>'),
            ('randion0', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>'),
            ('randion1', 'Password#11', '<PASSWORD>', '<PASSWORD>'),
            ('randion2', 'Password#12', '<PASSWORD>', '<PASSWORD>'),
            ('randion3', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>')
        ]

    def testCleanUp(self): # Teardown
        self.db.delete_users_table()
        self.db.close()
        assert True == True
class TestJobPosting():
    """Tests for Page.postjob: prompts, validation, the 5-job limit, teardown.

    Order-dependent: earlier tests create the jobs that later tests assert on.
    """
    page = src.user_class.Page()
    page.username = "General Kenobi The Negotiator"
    db_name = "testing.sqlite3"
    db = src.database_access.database_access(db_name)
    # Point the module under test at the test database.
    src.user_class.db = db

    def testPostValidJob(self):
        input_values = ['Worm Farmer', 'Farming worms',
                        'WormsRUs', 'Bikini Bottom', '20000']
        output = []
        def mock_input(s):
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.postjob()
        resetFunctions()
        assert output == [
            "Thanks your job was posted! Returning to the previous menu..."
        ]

    def testPostInvalidJob(self):
        # 'Shmeckle' is not a number, so the salary prompt must repeat once.
        input_values = ['Worm Farmer0', 'Farming worms',
                        'WormsRUs', 'Bikini Bottom', 'Shmeckle', '20000']
        output = []
        def mock_input(s):
            output.append(s)
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.postjob()
        resetFunctions()
        assert output == [
            "Please enter the job's title: ",
            "Please enter a description of the job: ",
            "Who is the employer of the job? ",
            "Where is this job located? ",
            "Please estimate the salary of the job (only numbers): ",
            "Not a valid number. Try again.",
            "Please estimate the salary of the job (only numbers): ",
            "Thanks your job was posted! Returning to the previous menu..."
        ]

    def testJobPostLimit(self):
        # Fill the remaining job slots (two jobs already exist).
        for i in range(1, 4):
            input_values = [
                'Worm Farmer' + str(i), 'Farming worms', 'WormsRUs', 'Bikini Bottom', '20000']
            def mock_input(s):
                return input_values.pop(0)
            src.user_class.input = mock_input
            self.page.postjob()
        output = []
        input_values = ['Not going to post', 'Farming worms',
                        'WormsRUs', 'Bikini Bottom', '20000']
        def mock_input(s):
            return input_values.pop(0)
        src.user_class.input = mock_input
        src.user_class.print = lambda s: output.append(s)
        self.page.postjob()
        resetFunctions()
        assert output == [
            'There are already 5 jobs. Please try again later\n'
        ]

    def testDatabaseJobPrint(self):
        output = []
        src.database_access.print = lambda s: output.append(s)
        self.db.print_jobs()
        assert output == [('General Kenobi The Negotiator', 'Worm Farmer', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), ('General Kenobi The Negotiator', 'Worm Farmer0', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), ('General Kenobi The Negotiator', 'Worm Farmer1',
                          'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), ('General Kenobi The Negotiator', 'Worm Farmer2', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), ('General Kenobi The Negotiator', 'Worm Farmer3', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0)]
        # NOTE(review): bare expression has no effect — '= print' is probably
        # missing, so the patched print is never restored here; confirm.
        src.database_access.print

    def testCleanUp(self): # Teardown
        self.db.delete_jobs_table()
        self.db.close()
        assert True == True
| StarcoderdataPython |
9789927 | from .framework import (
selenium_test,
SeleniumTestCase
)
class WorkflowManagementTestCase(SeleniumTestCase):
    """Selenium tests for workflow import, viewing and renaming."""
    # Framework flag: ensure a registered user session before each test.
    ensure_registered = True

    @selenium_test
    def test_import_from_url(self):
        """Importing a workflow by URL adds exactly one row to the index."""
        self.workflow_index_open()
        self._workflow_import_from_url()
        table_elements = self.workflow_index_table_elements()
        assert len(table_elements) == 1
        new_workflow = table_elements[0].find_element_by_css_selector(".menubutton")
        assert 'TestWorkflow1 (imported from uploaded file)' in new_workflow.text, new_workflow.text

    @selenium_test
    def test_view(self):
        """The View option shows the imported workflow's title."""
        self.workflow_index_open()
        self._workflow_import_from_url()
        self.workflow_index_click_option("View")
        title = self.wait_for_selector(".page-body h3")
        assert "TestWorkflow1" in title.text
        # TODO: Test display of steps...

    @selenium_test
    def test_rename(self):
        """The Rename option updates the workflow name shown in the index."""
        self.workflow_index_open()
        self._workflow_import_from_url()
        self.workflow_index_click_option("Rename")
        rename_form_element = self.wait_for_selector("form")
        self.fill(rename_form_element, {
            "new_name": "CoolNewName"
        })
        self.click_submit(rename_form_element)
        table_elements = self.workflow_index_table_elements()
        renamed_workflow_button = table_elements[0].find_element_by_css_selector(".menubutton")
        assert 'CoolNewName' in renamed_workflow_button.text, renamed_workflow_button.text

    def _workflow_import_from_url(self):
        # Helper: drive the import form with a fixed known-good workflow URL.
        self.click_selector(self.test_data["selectors"]["workflows"]["import_button"])
        url = "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test/base/data/test_workflow_1.ga"
        form_element = self.driver.find_element_by_css_selector("#center form")
        url_element = form_element.find_element_by_css_selector("input[type='text']")
        url_element.send_keys(url)
        self.click_submit(form_element)
| StarcoderdataPython |
3570745 | <filename>pde_superresolution/xarray_beam.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""""Utilities for using xarray with beam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import apache_beam as beam
import tensorflow as tf
from typing import Iterator, List
import xarray
def read_netcdf(path: str) -> xarray.Dataset:
    """Read a netCDF file from a path into memory."""
    # GFile supports non-local filesystems (e.g. GCS); .load() pulls all data
    # into memory so the dataset outlives the closed file handle.
    with tf.gfile.GFile(path, mode='rb') as f:
        return xarray.open_dataset(f.read()).load()
def write_netcdf(ds: xarray.Dataset, path: str) -> None:
    """Write an xarray.Datset to the given path."""
    # to_netcdf() with no target returns the serialised bytes, which GFile
    # can then write to any supported filesystem.
    with tf.gfile.GFile(path, 'w') as f:
        f.write(ds.to_netcdf())
def _swap_dims_no_coordinate(
        ds: xarray.Dataset, old_dim: str, new_dim: str) -> xarray.Dataset:
    """Like xarray.Dataset.swap_dims(), but works even for non-coordinates.

    See https://github.com/pydata/xarray/issues/1855 for the upstream bug.

    Args:
      ds: old dataset.
      old_dim: name of existing dimension name.
      new_dim: name of new dimension name.

    Returns:
      Dataset with swapped dimensions.
    """
    # Rebuild every variable with old_dim renamed to new_dim in its dims
    # tuple; data and attrs are carried over unchanged.
    fix_dims = lambda dims: tuple(new_dim if d == old_dim else d for d in dims)
    return xarray.Dataset(
        {k: (fix_dims(v.dims), v.data, v.attrs) for k, v in ds.data_vars.items()},
        {k: (fix_dims(v.dims), v.data, v.attrs) for k, v in ds.coords.items()},
        ds.attrs)
def stack(ds: xarray.Dataset,
          dim: str,
          levels: List[str]) -> xarray.Dataset:
    """Stack the dimensions in `levels` into a single new dimension `dim`.

    Unlike xarray's built-in stack:

    1. This works for a single level.
    2. Levels become ordinary coordinates rather than levels in a MultiIndex.

    Args:
        ds: input dataset.
        dim: name of the new stacked dimension. Should not be found on the
            input dataset.
        levels: names of dimensions on the input dataset; variables along
            these dimensions are stacked together along `dim`.

    Returns:
        Dataset with stacked data.
    """
    single_level = len(levels) == 1
    if not single_level:
        return ds.stack(**{dim: levels}).reset_index(dim)
    # xarray's stack does not handle exactly one level properly, so rename
    # the dimension directly instead.
    return _swap_dims_no_coordinate(ds, levels[0], dim)
def unstack(ds: xarray.Dataset,
            dim: str,
            levels: List[str]) -> xarray.Dataset:
    """Split the dimension `dim` back out into the dimensions in `levels`.

    Unlike xarray's built-in unstack:

    1. This works for a single level.
    2. Levels are expected to be plain 1D coordinates along `dim`, not
       members of a MultiIndex.

    Args:
        ds: input dataset.
        dim: name of an existing dimension on the input.
        levels: names of 1D variables along `dim` in the input dataset;
            each becomes its own dimension on the output.

    Returns:
        Dataset with unstacked data, one new dimension per level.
    """
    single_level = len(levels) == 1
    if not single_level:
        return ds.set_index(**{dim: levels}).unstack(dim)
    # xarray's unstack does not handle exactly one level properly, so rename
    # the dimension directly instead.
    return _swap_dims_no_coordinate(ds, dim, levels[0])
class SplitDoFn(beam.DoFn):
    """DoFn that splits an xarray Dataset across a dimension."""

    def __init__(self, dim: str, keep_dims: bool = False):
        # keep_dims=True keeps `dim` as a length-1 dimension on each output
        # slice instead of dropping it.
        self.dim = dim
        self.keep_dims = keep_dims

    def process(self, element: xarray.Dataset) -> Iterator[xarray.Dataset]:
        """Yield one (copied) slice of `element` per index along self.dim."""
        for i in range(element.sizes[self.dim]):
            # slice(i, i+1) preserves the dimension; a bare index drops it.
            index = slice(i, i + 1) if self.keep_dims else i
            # .copy() detaches each slice from the shared input buffers.
            yield element[{self.dim: index}].copy()
class ConcatCombineFn(beam.CombineFn):
    """CombineFn that concatenates across the given dimension."""

    def __init__(self, dim: str):
        self._dim = dim

    def create_accumulator(self):
        # The accumulator is a plain list of datasets awaiting concatenation.
        return []

    def add_input(self,
                  accumulator: List[xarray.Dataset],
                  element: xarray.Dataset) -> List[xarray.Dataset]:
        accumulator.append(element)
        return accumulator

    def merge_accumulators(
            self, accumulators: List[List[xarray.Dataset]]) -> List[xarray.Dataset]:
        # Flatten the partial lists and concatenate once, leaving a
        # single-element accumulator.
        return [xarray.concat(sum(accumulators, []), dim=self._dim)]

    def extract_output(
            self, accumulator: List[xarray.Dataset]) -> xarray.Dataset:
        if accumulator:
            ds = xarray.concat(accumulator, dim=self._dim)
        else:
            # NOTE(shoyer): I'm not quite sure why, but Beam needs to be able to run
            # this step on a empty accumulator.
            ds = xarray.Dataset()
        return ds
| StarcoderdataPython |
8120155 | from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Auto-generated: create ExperimentKeyValue, unique per (experiment_id, key)."""

    dependencies = [
        ('experiments', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ExperimentKeyValue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('experiment_id', models.PositiveSmallIntegerField(verbose_name='Experiment ID', db_index=True)),
                ('key', models.CharField(max_length=255)),
                ('value', models.TextField()),
            ],
            options={
                'verbose_name': 'Experiment Data',
                'verbose_name_plural': 'Experiment Data',
            },
        ),
        migrations.AlterUniqueTogether(
            name='experimentkeyvalue',
            unique_together={('experiment_id', 'key')},
        ),
    ]
| StarcoderdataPython |
5068624 | <gh_stars>0
################################################################################
# Copyright (c) 2009-2020, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""A Timestamp object."""
import time
import numpy as np
from astropy.time import Time, TimeDelta
def delta_seconds(x):
    """Construct a `TimeDelta` in TAI seconds."""
    # TAI is a uniform timescale, so `x` seconds is an exact interval
    # (no leap-second ambiguity, unlike UTC).
    return TimeDelta(x, format='sec', scale='tai')
class Timestamp:
    """Basic representation of time(s), in UTC seconds since Unix epoch.

    This is loosely based on PyEphem's `Date` object, but uses an Astropy
    `Time` object as internal representation. Like `Time` it can contain
    a multi-dimensional array of timestamps.

    The following input formats are accepted for a timestamp:

    - A floating-point number, directly representing the number of UTC seconds
      since the Unix epoch. Fractional seconds are allowed.

    - A string or bytes with format 'YYYY-MM-DD HH:MM:SS.SSS' (Astropy 'iso'
      format) or 'YYYY/MM/DD HH:MM:SS.SSS' (XEphem format), where the hours
      and minutes, seconds, and fractional seconds are optional. It is always
      in UTC. Examples are:

        '1999-12-31 12:34:56.789'
        '1999/12/31 12:34:56'
        '1999-12-31 12:34'
        b'1999-12-31'

    - A :class:`~astropy.time.Time` object (NOT :class:`~astropy.time.TimeDelta`).

    - Another :class:`Timestamp` object, which will result in a copy.

    - A sequence or NumPy array of one of the above types.

    - None, which uses the current time (the default).

    Parameters
    ----------
    timestamp : :class:`~astropy.time.Time`, :class:`Timestamp`, float, string,
        bytes, sequence or array of any of the former, or None, optional
        Timestamp, in various formats (if None, defaults to now)

    Raises
    ------
    ValueError
        If `timestamp` is not in a supported format

    Attributes
    ----------
    time : :class:`~astropy.time.Time`
        Underlying `Time` object
    secs : float or array of float
        Timestamp as UTC seconds since Unix epoch

    Notes
    -----
    This differs from :class:`~astropy.time.Time` in the following respects:

    - Numbers are interpreted as Unix timestamps during initialisation;
      `Timestamp(1234567890)` is equivalent to `Time(1234567890, format='unix')`
      (while `Time(1234567890)` is not allowed because it lacks a format).

    - Arithmetic is done in seconds instead of days (in the absence of units).

    - Date strings may contain slashes (a leftover from PyEphem / XEphem).

    - Empty initialisation results in the current time, so `Timestamp()`
      is equivalent to `Time.now()` (while `Time()` is not allowed).
    """

    def __init__(self, timestamp=None):
        """Initialise from any supported format (see class docstring)."""
        if timestamp is None:
            self.time = Time.now()
        elif isinstance(timestamp, Timestamp):
            self.time = timestamp.time.replicate()
        elif isinstance(timestamp, TimeDelta):
            # Checked before Time because TimeDelta is handled separately
            # (an interval is not an absolute instant).
            raise ValueError(f'Cannot construct Timestamp from TimeDelta {timestamp}')
        elif isinstance(timestamp, Time):
            self.time = timestamp.replicate()
        else:
            # Convert to array to simplify both array/scalar and string/bytes handling
            val = np.asarray(timestamp)
            # Turn array of Timestamps into array of corresponding internal Time objects
            if val.size > 0 and isinstance(val.flat[0], Timestamp):
                val = np.vectorize(lambda ts: ts.time)(val)
            time_format = None
            if val.dtype.kind == 'U':
                # Convert default PyEphem timestamp strings to ISO strings
                val = np.char.replace(np.char.strip(val), '/', '-')
                time_format = 'iso'
            elif val.dtype.kind == 'S':
                # Same slash-to-dash conversion, but on bytes input
                val = np.char.replace(np.char.strip(val), b'/', b'-')
                time_format = 'iso'
            elif val.dtype.kind in 'iuf':
                # Consider any number to be a Unix timestamp
                time_format = 'unix'
            self.time = Time(val, format=time_format, scale='utc', precision=3)

    @property
    def secs(self):
        """Timestamp as UTC seconds since Unix epoch."""
        return self.time.utc.unix

    def __repr__(self):
        """Short machine-friendly string representation of timestamp object."""
        # We need a custom formatter because suppress=True only works on values < 1e8
        # and today's Unix timestamps are bigger than that
        formatter = f'{{:.{self.time.precision:d}f}}'.format
        with np.printoptions(threshold=2, edgeitems=1, formatter={'float': formatter}):
            return f'Timestamp({self.secs})'

    def __str__(self):
        """Verbose human-friendly string representation of timestamp object."""
        return str(self.to_string())

    def __eq__(self, other):
        """Test for equality."""
        return self.time == Timestamp(other).time

    def __ne__(self, other):
        """Test for inequality."""
        return self.time != Timestamp(other).time

    def __lt__(self, other):
        """Test for less than."""
        return self.time < Timestamp(other).time

    def __le__(self, other):
        """Test for less than or equal to."""
        return self.time <= Timestamp(other).time

    def __gt__(self, other):
        """Test for greater than."""
        return self.time > Timestamp(other).time

    def __ge__(self, other):
        """Test for greater than or equal to."""
        return self.time >= Timestamp(other).time

    def __add__(self, other):
        """Add seconds (as floating-point number) to timestamp and return result."""
        return Timestamp(self.time + delta_seconds(other))

    def __sub__(self, other):
        """Subtract seconds (floating-point time interval) from timestamp.

        If used for the difference between two (absolute time) Timestamps
        then the result is an interval in seconds (a floating-point number).
        """
        if isinstance(other, Timestamp):
            return (self.time - other.time).sec
        elif isinstance(other, Time) and not isinstance(other, TimeDelta):
            return (self.time - other).sec
        else:
            # Anything else is treated as an interval in seconds.
            return Timestamp(self.time - delta_seconds(other))

    def __mul__(self, other):
        """Multiply timestamp by numerical factor (useful for processing timestamps)."""
        return Timestamp(self.secs * other)

    def __truediv__(self, other):
        """Divide timestamp by numerical factor (useful for processing timestamps)."""
        return Timestamp(self.secs / other)

    def __radd__(self, other):
        """Add timestamp to seconds (as floating-point number) and return result."""
        return Timestamp(self.time + delta_seconds(other))

    def __iadd__(self, other):
        """Add seconds (as floating-point number) to timestamp in-place."""
        self.time += delta_seconds(other)
        return self

    def __rsub__(self, other):
        """Subtract timestamp from seconds (as floating-point number).

        Return resulting seconds (floating-point number). This is typically
        used when calculating the interval between two absolute instants
        of time.
        """
        return (Timestamp(other).time - self.time).sec

    def __isub__(self, other):
        """Subtract seconds (as floating-point number) from timestamp in-place."""
        self.time -= delta_seconds(other)
        return self

    def __float__(self):
        """Convert scalar timestamp to floating-point UTC seconds."""
        try:
            return float(self.secs)
        except TypeError as err:
            raise TypeError('Float conversion only supported for scalar Timestamps') from err

    def __hash__(self):
        """Base hash on internal timestamp, just like equality operator."""
        return hash(self.time)

    def local(self):
        """Local time string representation (str or array of str)."""
        prec = self.time.precision
        # Split rounded seconds into integer and fractional parts so the
        # fraction can be appended to strftime's whole-second output.
        frac_secs, int_secs = np.modf(np.round(self.secs, decimals=prec))

        def local_time_string(f, i):
            format_string = '%Y-%m-%d %H:%M:%S.{:0{width}.0f} %Z'.format(
                f * 10 ** prec, width=prec)
            return time.strftime(format_string, time.localtime(i))

        # Vectorise so scalar and array timestamps are handled uniformly.
        local_str = np.vectorize(local_time_string)(frac_secs, int_secs)
        return local_str if local_str.ndim else local_str.item()

    def to_string(self):
        """UTC string representation (str or array of str)."""
        return self.time.iso

    def to_mjd(self):
        """Convert timestamp to Modified Julian Day (MJD)."""
        return self.time.mjd
| StarcoderdataPython |
4957816 | """
Objects for cast.py tests
"""
class OldBase:
    """Original base class for the cast tests; stores a single value."""
    def __init__(self, v):
        # v: arbitrary payload, stored unchanged
        self.v = v
class Target(OldBase):
    """Subclass of OldBase with no additions; the class whose base gets cast."""
    pass
class NewBase:
    """Replacement base class with no state of its own."""
    pass
| StarcoderdataPython |
3204175 | """Modelling classes for Make 13 Lolo game mode."""
import tile_generators
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2017, The University of Queensland"
__license__ = "MIT"
__version__ = "1.1.2"
import model
import game_regular
from modules.weighted_selector import WeightedSelector
class LevelTile(model.AbstractTile):
    """Tile whose value & type are equal, incrementing by one when joined."""

    def __init__(self, value=1):
        """Constructor

        Parameters:
            value (int): The tile's value.
        """
        # Type is passed as None because get_type() is overridden below to
        # mirror the value instead.
        super().__init__(None, value)

    def get_type(self):
        """Returns the type (value) of this tile."""
        return self.get_value()

    def is_max(self):
        # A LevelTile never maxes out on its own; Make13Game decides the win
        # condition by comparing values against its goal instead.
        return False

    def is_combo_max(self):
        # Likewise, combo maxima are never triggered by the tile itself.
        return False

    def join(self, others):
        """
        Joins other tiles to this tile.

        Parameters:
            others (iterable(BasicTile)): The other tiles to join.
        """
        # Joining any group simply increments this tile's value by one; the
        # members of `others` are not inspected.
        self._value += 1

    def __eq__(self, other):
        # Tiles compare equal on value alone.
        # NOTE(review): defining __eq__ without __hash__ normally makes
        # instances unhashable unless a base class intervenes -- confirm
        # tiles are never used as set members or dict keys.
        return self._value == other._value
class Make13Game(game_regular.RegularGame):
    """Make13 Lolo game.

    Groups of two or more can be combined to increase tile's value by one.

    Game is won when a 13 is made.
    """

    GAME_NAME = "Make 13"

    def __init__(self, size=(6, 6), initial_tiles=4, goal_value=13, min_group=2,
                 animation=True, autofill=True):
        """Constructor

        Parameters:
            size (tuple<int, int>): The number of (rows, columns) in the game.
            initial_tiles (int): The number of tiles.
            goal_value (int): The value of the goal tile.
            min_group (int): The minimum number of tiles required for a
                             connected group to be joinable.
            animation (bool): If True, animation will be enabled.
            autofill (bool): Automatically fills the grid iff True.
        """
        self.goal_value = goal_value
        self.initial_tiles = initial_tiles
        # Parent is constructed with autofill=False: the grid is rebuilt
        # below with this game's weighted generator before tiles are placed.
        super().__init__(size=size, min_group=min_group, animation=animation,
                         autofill=False)
        # Placeholder weight table; reset() installs the real distribution.
        self._selector = WeightedSelector({1: 1})
        self.reset()
        generator = tile_generators.WeightedGenerator(self._selector,
                                                      self._construct_tile)
        rows, columns = size
        self.grid = model.LoloGrid(generator, rows=rows, columns=columns,
                                   animation=animation)
        if autofill:
            self.grid.fill()
        self._score = self.get_default_score()
        self.generator = generator

    def get_default_score(self):
        """(int) Returns the default score."""
        # The score starts at the highest tile value present on the grid.
        return max(tile.get_value() for _, tile in self.grid.items())

    def reset(self):
        """Resets the game."""
        # Re-enable only the initial tile values; lower values are weighted
        # exponentially more likely (see get_tile_weight).
        weights = {i: self.get_tile_weight(i) for i in
                   range(1, self.initial_tiles + 1)}
        self._selector.update(weights, clear=True)
        super().reset()

    def get_tile_weight(self, value):
        """(float) Returns the weighting for a tile of given value."""
        # Weight halves for each step a value takes toward the goal.
        return 2 ** (self.goal_value - value)

    def _construct_tile(self, type, position, *args, **kwargs):
        """(LevelTile) Returns a new tile from the generator's selection.

        Parameters:
            type (*): The type of the tile.
            position (tuple<int, int>): The position the tile will initially exist in. Unused.
            *args: Extra positional arguments for the tile.
            **kwargs: Extra keyword arguments for the tile.
        """
        # TODO: remove when serialize is implemented properly
        args = args[1:]
        # LevelTile's type and value coincide, so `type` is its value here.
        return LevelTile(type, *args, **kwargs)

    def update_score_on_activate(self, current, connections):
        """Updates the score based upon the current tile & connected tiles that
        were joined to it.

        Parameters:
            current (AbstractTile): The tile that was just activated.
            connections (tuple<AbstractTile>): The tiles that were joined to
                current.
        """
        if current.get_value() > self._score:
            # Update score
            score = current.get_value()
            self._score = score
            # Unlock new tile
            self._selector[score] = self.get_tile_weight(score)
            self.set_score(score)
        if current.get_value() == self.goal_value:
            self.emit('game_over')
| StarcoderdataPython |
3583898 | from logger import logger
from mathtools import floor_div
"""A base class that implements the algorithms and minimum settings"""
def e(a: int, b: int):
    """
    Euclidean algorithm(GCD) - An effective algorithm for finding the greatest
    common divisor of two integers; repeatedly replaces (a, b) with
    (b, a mod b) until the remainder vanishes.

    :param a: int first value
    :param b: int second value
    :return: int GCD value
    """
    remainder = floor_div(a, b)[1]
    while remainder != 0:
        a, b = b, remainder
        remainder = floor_div(a, b)[1]
    return b
def ef(a: int, b: int):
    """
    Euclidean algorithm(GCD)*different implementation - An effective algorithm for finding
    the greatest common divisor of two integers

    :param a: int first value
    :param b: int second value
    :return: int GCD value
    """
    # Order the operands so `a` holds the larger value before starting.
    lst = [a, b]
    lst.sort(reverse=True)
    a, b = lst
    while True:
        # Alternate roles: each step replaces the larger operand with the
        # remainder until one of them hits zero; the other is the GCD.
        a = floor_div(a, b)[1]
        if a == 0:
            if logger:
                # `logger` doubles as a feature flag: truthy enables tracing.
                print(f'result: {b}')
            return b
        b = floor_div(b, a)[1]
        if b == 0:
            if logger:
                print(f'result: {a}')
            return a
def eo(m, n):
    """Extended Euclidean algorithm.

    Returns (a, b, d, var_list) where a*m + b*n == d is the Bezout identity
    for d = gcd(m, n), and var_list records all loop variables per iteration.
    """
    var_list = []
    # (a_, a) and (b_, b) hold the previous/current Bezout coefficients.
    b = 1
    a_ = b
    b_ = 0
    a = b_
    c, d = m, n
    if logger:
        print(f'eo.1: {a_, a, b_, b, c, d}')
    while True:
        q, r = floor_div(c, d)
        # Snapshot of the full state before applying the update.
        var_list.append([a_, a, b_, b, c, d, q, r])
        if logger:
            print(f'eo.2: {q, r} = {c} / {d}')
            print(f'eo.3: r = {r}')
        if r == 0:
            break
        c = d
        d = r
        # Standard coefficient recurrence: new = previous - quotient * current.
        t = a_
        a_ = a
        a = t - q * a
        t = b_
        b_ = b
        b = t - q * b
    if logger:
        print(f'eo.4: {var_list}')
    if logger:
        print(f'eo.end: {a} * {m} + {b} * {n} = {a * m} + {b * n} = {a * m + b * n} = {d}')
        print(f'eo.end: {a, b} {d}')
    return a, b, d, var_list
| StarcoderdataPython |
4883221 | <gh_stars>0
#
# Copyright (c), 2021, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the MIT License. See the
# file 'LICENSE' in the root directory of the present distribution, or
# http://opensource.org/licenses/MIT.
#
"""
A collection of functions for reading different files and quantities.
"""
import numpy as np
from xml.etree import ElementTree
__all__ = ['read_pseudo_file']
def read_pseudo_file(xml_file):
    """
    Reads a pseudo-potential XML-like file in the QE UPF format (text), returning
    the content of each tag in a dictionary. The file is read in strings and
    completed with a root UPF tag when it lacks, to avoids an XML syntax error.

    TODO: add support for UPF-schema files

    :param xml_file: path of the UPF file to read.
    :return: dict with keys PP_INFO, PP_HEADER, PP_MESH, PP_LOCAL,
        PP_RHOATOM and PP_NONLOCAL (optional sections map to None when absent).
    """
    def iter_upf_file():
        """
        Creates an iterator over the lines of an UPF file,
        inserting the root <UPF> tag when missing.
        """
        with open(xml_file, 'r') as f:
            fake_root = None
            for line in f:
                if fake_root is not None:
                    # BUGFIX: escape bare '&input'/'&inputp' namelist markers
                    # as XML entities -- the previous self-replace was a no-op
                    # and a bare '&' makes the XML parser fail.
                    line = line.replace('&input', '&amp;input')
                    line = line.replace('&inputp', '&amp;inputp')
                    yield line
                else:
                    line = line.strip()
                    if line.startswith("<UPF") and line[4] in ('>', ' '):
                        # Root tag already present: no wrapping needed.
                        yield line
                        fake_root = False
                    elif line:
                        # First meaningful line is not <UPF>: inject a fake
                        # root so the document becomes well-formed XML.
                        yield "<UPF>"
                        yield line
                        fake_root = True
            if fake_root is True:
                yield "</UPF>"

    pseudo = {}
    psroot = ElementTree.fromstringlist(list(iter_upf_file()))

    # PP_INFO: free-text metadata plus the original generation input file.
    try:
        pp_info = psroot.find('PP_INFO').text
    except AttributeError:
        pp_info = ""
    try:
        pp_input = psroot.find('PP_INFO/PP_INPUTFILE').text
    except AttributeError:
        pp_input = ""
    pseudo.update(PP_INFO=dict(INFO=pp_info, PP_INPUT=pp_input))

    # PP_HEADER: all metadata is carried as XML attributes.
    pseudo.update(PP_HEADER=dict(psroot.find('PP_HEADER').attrib))

    # PP_MESH: radial grid (PP_R) and its integration factors (PP_RAB).
    pp_mesh = dict(psroot.find('PP_MESH').attrib)
    pp_r = np.array([float(x) for x in psroot.find('PP_MESH/PP_R').text.split()])
    pp_rab = np.array([float(x) for x in psroot.find('PP_MESH/PP_RAB').text.split()])
    pp_mesh.update(PP_R=pp_r, PP_RAB=pp_rab)
    pseudo.update(PP_MESH=pp_mesh)

    # PP_LOCAL (optional): local part of the potential.
    node = psroot.find('PP_LOCAL')
    if node is not None:
        pp_local = np.array([x for x in map(float, node.text.split())])
    else:
        pp_local = None
    pseudo.update(PP_LOCAL=pp_local)

    # PP_RHOATOM (optional): atomic charge density.
    node = psroot.find('PP_RHOATOM')
    if node is not None:
        pp_rhoatom = np.array([v for v in map(float, node.text.split())])
    else:
        pp_rhoatom = None
    pseudo.update(PP_RHOATOM=pp_rhoatom)

    # PP_NONLOCAL (optional): beta projectors, D_ij matrix and augmentation.
    node = psroot.find('PP_NONLOCAL')
    if node is not None:
        betas = list()
        dij = None
        pp_aug = None
        pp_q = None
        for el in node:
            if 'PP_BETA' in el.tag:
                beta = dict(el.attrib)
                # BUGFIX: np.array() must receive a sequence -- passing a bare
                # generator produced a useless 0-d object array.
                val = np.array([x for x in map(float, el.text.split())])
                beta.update(beta=val)
                betas.append(beta)
            elif 'PP_DIJ' in el.tag:
                # The first line of the element text is a header and is skipped.
                text = '\n'.join(el.text.strip().split('\n')[1:])
                dij = np.array([x for x in map(float, text.split())])
            elif 'PP_AUGMENTATION' in el.tag:
                pp_aug = dict(el.attrib)
                pp_qijl = list()
                pp_qij = list()
                for q in el:
                    # PP_QIJL must be tested before PP_QIJ: 'PP_QIJ' is a
                    # substring of 'PP_QIJL'.
                    if 'PP_QIJL' in q.tag:
                        qijl = dict(q.attrib)
                        val = np.array([x for x in map(float, q.text.split())])
                        qijl.update(qijl=val)
                        pp_qijl.append(qijl)
                    elif 'PP_QIJ' in q.tag:
                        qij = dict(q.attrib)
                        val = np.array([x for x in map(float, q.text.split())])
                        qij.update(qij=val)
                        pp_qij.append(qij)
                    elif q.tag == 'PP_Q':
                        val = np.array([x for x in map(float, q.text.split())])
                        pp_q = val
                pp_aug.update(PP_QIJL=pp_qijl, PP_QIJ=pp_qij, PP_Q=pp_q)
        pp_nonlocal = dict(PP_BETA=betas, PP_DIJ=dij, PP_AUGMENTATION=pp_aug)
    else:
        pp_nonlocal = None
    pseudo.update(PP_NONLOCAL=pp_nonlocal)

    return pseudo
| StarcoderdataPython |
4907119 | #!/usr/bin/env python
"""
This module provides MigrationBlock.Update data access object.
"""
from dbs.dao.Oracle.MigrationBlock.Update import Update as OraMigUpdate
class Update(OraMigUpdate):
    # This backend reuses the Oracle MigrationBlock.Update implementation
    # unchanged; the class exists so the DAO factory can resolve it by path.
    pass
| StarcoderdataPython |
5082964 | # class Solution:
# def decodeString(self, s: str) -> str:
# pool = set('0123456789')
# stack = []
# for char in s:
# if char != ']':
# stack.append(char)
# else:
# chars = ''
# while stack[-1] != '[':
# chars = stack.pop() + chars
# stack.pop() # pop out '['
# reps = ''
# while stack and stack[-1] in pool:
# reps = stack.pop() + reps
# if reps == '':
# stack.append(chars)
# else:
# stack.append(chars * int(reps))
# return ''.join(stack)
class Solution:
    def decodeString(self, s: str) -> str:
        """Decode a nested run-length-encoded string, e.g. '3[a2[c]]' -> 'accaccacc'.

        The encoding is k[inner]: `inner` repeated k times, nested arbitrarily.
        Assumes a well-formed input (digits appear only as repeat counts).

        Runs in O(len(output)): partial segments are kept on a list-based
        stack instead of the previous single-string stack, whose repeated
        slicing made decoding quadratic.

        :param s: encoded input string.
        :return: the fully expanded string.
        """
        stack = []        # saved (outer_segment_chars, repeat_count) frames
        current = []      # characters of the segment currently being built
        count = 0         # multiplier accumulated for the next '[' group
        for ch in s:
            if ch.isdigit():
                # Multi-digit counts: extend the number one digit at a time.
                count = count * 10 + int(ch)
            elif ch == '[':
                # Enter a nested segment: remember where we were.
                stack.append((current, count))
                current = []
                count = 0
            elif ch == ']':
                # Close the segment: repeat it and splice into the outer one.
                outer, reps = stack.pop()
                outer.extend(current * reps)
                current = outer
            else:
                current.append(ch)
        return ''.join(current)
| StarcoderdataPython |
5160106 | <filename>pybind/nos/v7_1_0/brocade_maps_ext_rpc/__init__.py
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import maps_re_apply_policy
import maps_get_all_policy
import maps_get_default_rules
class brocade_maps_ext(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module brocade-maps-ext - based on the path /brocade_maps_ext_rpc. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This sub module defines show system-monitor data model
    Copyright (c) 2011 by Brocade Communications Systems, Inc.
    All rights reserved.
    """
    # NOTE: auto-generated pyangbind container -- regenerate from the YANG
    # module rather than hand-editing when the data model changes.
    __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__maps_re_apply_policy','__maps_get_all_policy','__maps_get_default_rules',)

    _yang_name = 'brocade-maps-ext'
    _rest_name = ''
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Resolve the XPath helper: an explicit kwarg wins, then the parent's
        # helper, otherwise helpers are disabled.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False

        # Same resolution order for the extension-methods table.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False

        # Default-construct the three RPC member leaves.
        self.__maps_get_default_rules = YANGDynClass(base=maps_get_default_rules.maps_get_default_rules, is_leaf=True, yang_name="maps-get-default-rules", rest_name="maps-get-default-rules", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)
        self.__maps_get_all_policy = YANGDynClass(base=maps_get_all_policy.maps_get_all_policy, is_leaf=True, yang_name="maps-get-all-policy", rest_name="maps-get-all-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)
        self.__maps_re_apply_policy = YANGDynClass(base=maps_re_apply_policy.maps_re_apply_policy, is_leaf=True, yang_name="maps-re-apply-policy", rest_name="maps-re-apply-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)

        load = kwargs.pop("load", None)
        if args:
            # Copy-construction: mirror every changed element of the supplied
            # object into this container.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Position of this container within the instance tree.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return [u'brocade_maps_ext_rpc']

    def _rest_path(self):
        # REST path segments; empty rest names are skipped.
        if hasattr(self, "_parent"):
            if self._rest_name:
                return self._parent._rest_path()+[self._rest_name]
            else:
                return self._parent._rest_path()
        else:
            return []

    def _get_maps_re_apply_policy(self):
        """
        Getter method for maps_re_apply_policy, mapped from YANG variable /brocade_maps_ext_rpc/maps_re_apply_policy (rpc)

        YANG Description: reapply maps policy
        """
        return self.__maps_re_apply_policy

    def _set_maps_re_apply_policy(self, v, load=False):
        """
        Setter method for maps_re_apply_policy, mapped from YANG variable /brocade_maps_ext_rpc/maps_re_apply_policy (rpc)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_maps_re_apply_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_maps_re_apply_policy() directly.

        YANG Description: reapply maps policy
        """
        if hasattr(v, "_utype"):
            # Unwrap a typed value back to its native representation first.
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=maps_re_apply_policy.maps_re_apply_policy, is_leaf=True, yang_name="maps-re-apply-policy", rest_name="maps-re-apply-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """maps_re_apply_policy must be of a type compatible with rpc""",
                'defined-type': "rpc",
                'generated-type': """YANGDynClass(base=maps_re_apply_policy.maps_re_apply_policy, is_leaf=True, yang_name="maps-re-apply-policy", rest_name="maps-re-apply-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)""",
            })
        self.__maps_re_apply_policy = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_maps_re_apply_policy(self):
        # Restore the member to its default-constructed state.
        self.__maps_re_apply_policy = YANGDynClass(base=maps_re_apply_policy.maps_re_apply_policy, is_leaf=True, yang_name="maps-re-apply-policy", rest_name="maps-re-apply-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)

    def _get_maps_get_all_policy(self):
        """
        Getter method for maps_get_all_policy, mapped from YANG variable /brocade_maps_ext_rpc/maps_get_all_policy (rpc)

        YANG Description: Shows the existing MAPS Policies
        """
        return self.__maps_get_all_policy

    def _set_maps_get_all_policy(self, v, load=False):
        """
        Setter method for maps_get_all_policy, mapped from YANG variable /brocade_maps_ext_rpc/maps_get_all_policy (rpc)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_maps_get_all_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_maps_get_all_policy() directly.

        YANG Description: Shows the existing MAPS Policies
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=maps_get_all_policy.maps_get_all_policy, is_leaf=True, yang_name="maps-get-all-policy", rest_name="maps-get-all-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """maps_get_all_policy must be of a type compatible with rpc""",
                'defined-type': "rpc",
                'generated-type': """YANGDynClass(base=maps_get_all_policy.maps_get_all_policy, is_leaf=True, yang_name="maps-get-all-policy", rest_name="maps-get-all-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)""",
            })
        self.__maps_get_all_policy = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_maps_get_all_policy(self):
        # Restore the member to its default-constructed state.
        self.__maps_get_all_policy = YANGDynClass(base=maps_get_all_policy.maps_get_all_policy, is_leaf=True, yang_name="maps-get-all-policy", rest_name="maps-get-all-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)

    def _get_maps_get_default_rules(self):
        """
        Getter method for maps_get_default_rules, mapped from YANG variable /brocade_maps_ext_rpc/maps_get_default_rules (rpc)

        YANG Description: Shows the existing MAPS default rules
        """
        return self.__maps_get_default_rules

    def _set_maps_get_default_rules(self, v, load=False):
        """
        Setter method for maps_get_default_rules, mapped from YANG variable /brocade_maps_ext_rpc/maps_get_default_rules (rpc)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_maps_get_default_rules is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_maps_get_default_rules() directly.

        YANG Description: Shows the existing MAPS default rules
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=maps_get_default_rules.maps_get_default_rules, is_leaf=True, yang_name="maps-get-default-rules", rest_name="maps-get-default-rules", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """maps_get_default_rules must be of a type compatible with rpc""",
                'defined-type': "rpc",
                'generated-type': """YANGDynClass(base=maps_get_default_rules.maps_get_default_rules, is_leaf=True, yang_name="maps-get-default-rules", rest_name="maps-get-default-rules", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)""",
            })
        self.__maps_get_default_rules = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_maps_get_default_rules(self):
        # Restore the member to its default-constructed state.
        self.__maps_get_default_rules = YANGDynClass(base=maps_get_default_rules.maps_get_default_rules, is_leaf=True, yang_name="maps-get-default-rules", rest_name="maps-get-default-rules", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'maps-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-maps-ext', defining_module='brocade-maps-ext', yang_type='rpc', is_config=True)

    # Public property wrappers over the private getter/setter pairs.
    maps_re_apply_policy = __builtin__.property(_get_maps_re_apply_policy, _set_maps_re_apply_policy)
    maps_get_all_policy = __builtin__.property(_get_maps_get_all_policy, _set_maps_get_all_policy)
    maps_get_default_rules = __builtin__.property(_get_maps_get_default_rules, _set_maps_get_default_rules)

    _pyangbind_elements = {'maps_re_apply_policy': maps_re_apply_policy, 'maps_get_all_policy': maps_get_all_policy, 'maps_get_default_rules': maps_get_default_rules, }
| StarcoderdataPython |
4904841 | <gh_stars>1-10
import torch
import torch.utils.data as Data
BATCH_SIZE = 5
def show_batch(loader):
    """Run three passes over *loader*, printing every mini-batch it yields."""
    for _epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            # Stand-in for a real training step.
            print("step:{}, batch_x:{}, batch_y:{}".format(step, batch_x, batch_y))
if __name__ == '__main__':
    x = torch.linspace(1, 10, 10)
    y = torch.linspace(10, 1, 10)
    print("~~~~~~~~~~~~~~~~~~~~~~")
    print(x, y)
    print("----------------------")
    # Wrap the tensors in a dataset (original comment: "put the data into a
    # data store").
    torch_dataset = Data.TensorDataset(x, y)
    loader = Data.DataLoader(
        # Draw batch_size samples from the dataset on every step
        # (original comment translated from Chinese).
        dataset=torch_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=1,
    )
    show_batch(loader)
| StarcoderdataPython |
1778951 | <gh_stars>0
from test import test
from test import h1
from test import h2 | StarcoderdataPython |
5053431 | <gh_stars>1000+
"""
raven.transport.base
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# Helper for external transports
has_newstyle_transports = True
class Transport(object):
    """
    Base class for every transport implementation.

    Subclasses must provide a ``send`` method (or an ``async_send`` method
    when deriving from :class:`AsyncTransport`).
    """

    is_async = False
    scheme = []

    def send(self, url, data, headers):
        """
        Deliver ``data`` to ``url`` with the given ``headers`` -- usually by
        posting it to a server.  Concrete transports must override this.
        """
        raise NotImplementedError
class AsyncTransport(Transport):
    """
    Base class for transports that deliver in the background.

    Subclasses must provide an ``async_send`` method.
    """

    is_async = True

    def async_send(self, url, data, headers, success_cb, error_cb):
        """
        Deliver ``data`` to ``url`` without blocking, invoking
        ``success_cb()`` on success or ``error_cb(exception)`` on failure.
        Concrete asynchronous transports must override this.
        """
        raise NotImplementedError
| StarcoderdataPython |
11263013 | <gh_stars>1-10
from abc import ABC, abstractmethod
from time import sleep
from typing import Dict
from attrdict import AttrDict
from web3 import Web3
from .helpers import NOW, factor_in_new_try, validate_required_fields_interactively, validate_required_fields, \
set_dict_field
class Issuer:
    """A basic version of an Issuer as defined by the Blockcerts standard.

    More at: https://github.com/IMSGlobal/cert-schema/blob/master/cert_schema/2.0/issuerSchema.json

    :param name: Name of the issuer
    :param id: Url to the issuer's public profile (must resolve to a valid jsonld)
    :param email: Email to contact the issuer
    :param image: base64-encoded PNG or SVG that represents the issuer (a logo for example)
    :param revocation_list: Url to the issuer's public revocation list (must resolve to a valid jsonld)
    :param public_key: Public key owned by the issuer (or authorized to issue on their behalf).
    :param main_url: Url to the issuer's main website (must resolve to an eventual 200)
    :param signature_name: (optional) Name of the person signing the certificate
    :param signature_job_title: (optional) Title of the person signing the certificate
    :param signature_image: (optional) base64-encoded PNG or SVG that represents the signature of the person signing.
    :param intro_url: Url to the issuer's intro website (must resolve to an eventual 200)

    Note: All three of signature_name, signature_job_title and signature_image must exist in order for the signature
    section to be added to the Blockcert.
    """
    REQUIRED_FIELDS = ["name", "id", "email", "revocation_list", "public_key", "main_url"]

    def __init__(
            # BUGFIX: `self` was annotated `self: str`, which is incorrect.
            self,
            name: str,
            id: str,
            email: str,
            image: str,
            revocation_list: str,
            public_key: str,
            main_url: str,
            signature_name: str = "",
            signature_job_title: str = "",
            signature_image: str = "",
            intro_url: str = "",
    ):
        self.name = name
        self.id = id
        self.email = email
        self.image = image
        self.revocation_list = revocation_list
        self.public_key = public_key
        self.signature_name = signature_name
        self.signature_job_title = signature_job_title
        self.signature_image = signature_image
        self.main_url = main_url
        self.intro_url = intro_url
        # Fail fast if any mandatory attribute is missing or empty.
        validate_required_fields(self, self.REQUIRED_FIELDS)

    def to_dict(self) -> Dict:
        """Return a plain-dict snapshot of this issuer."""
        return dict(
            name=self.name,
            id=self.id,
            email=self.email,
            image=self.image,
            revocation_list=self.revocation_list,
            public_key=self.public_key,
            main_url=self.main_url,
            signature_name=self.signature_name,
            signature_job_title=self.signature_job_title,
            signature_image=self.signature_image,
            intro_url=self.intro_url,
        )
class Assertion:
    """A basic version of an Assertion as defined by the Blockcerts standard.

    More at https://github.com/IMSGlobal/cert-schema/blob/master/cert_schema/2.0/schema.json

    :param id: id of the assertion
    :param name: name of the assertion
    :param description: description of the assertion
    :param image: base64-encoded PNG or SVG that represents the assertion (a logo for example)
    :param narrative: narrative of the assertion
    :param display_html: (optional) valid HTML that will be displayed when validating in public validators
    """
    REQUIRED_FIELDS = ['id', 'name', 'description', 'narrative']

    def __init__(self, id: str, name: str, description: str, image: str, narrative: str, display_html: str = ""):
        self.id = id
        self.name = name
        self.description = description
        self.image = image
        self.narrative = narrative
        self.display_html = display_html
        # Fail fast if any mandatory attribute is missing or empty.
        validate_required_fields(self, self.REQUIRED_FIELDS)

    def to_dict(self) -> Dict:
        """Return a plain-dict snapshot of this assertion."""
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'image': self.image,
            'narrative': self.narrative,
            'display_html': self.display_html,
        }
class Recipient:
    """A basic version of a Recipient as defined by the Blockcerts standard.

    More at https://github.com/IMSGlobal/cert-schema/blob/master/cert_schema/2.0/recipientSchema.json

    :param name: name of the recipient
    :param email: email of the recipient
    :param public_key: public key of the recipient
    :param email_hashed: is the email hashed?
    """
    REQUIRED_FIELDS = ['name', 'email', 'public_key']

    def __init__(self, name: str, email: str, public_key: str, email_hashed: bool = False,
                 additional_fields: dict = None):
        self.name = name
        self.email = email
        self.public_key = public_key
        self.email_hashed = email_hashed
        self.additional_fields = additional_fields
        # Fail fast if any mandatory attribute is missing or empty.
        validate_required_fields(self, self.REQUIRED_FIELDS)

    def to_dict(self) -> Dict:
        """Return a plain-dict snapshot of this recipient."""
        return {
            'name': self.name,
            'email': self.email,
            'public_key': self.public_key,
            'email_hashed': self.email_hashed,
            'additional_fields': self.additional_fields,
        }
class AnchorHandler(ABC):
    """Common interface for anchoring mechanisms.

    :param chain_name: name of the chain, for now one of 'ethereumMainnet' or 'ethereumRopsten', in the future at least
        'bitcoinMainnet', 'bitcoinRegtest' and 'bitcoinTestnet' should be added.
    :param signature_field: transaction field where the merkle root will be posted. For now only 'ETHData' will work, in
        the future at least 'BTCOpReturn' should be added.
    """

    def __init__(self, chain_name: str):
        # NOTE(review): raising here (rather than marking __init__ abstract)
        # forces every subclass to define its own constructor.
        raise NotImplementedError

    @abstractmethod
    def anchor(self, data) -> str:
        """Anchor the given data to a Blockchain transaction.

        :param data: data to be stored in a transaction, usually this is a merkle root but it may be a single hash if
            the batch being issued consists of a single certificate.
        :return: reference to the anchor, like a transaction id
        """
        raise NotImplementedError
class EthereumAnchorHandler(AnchorHandler):
"""Use the Ethereum blockchain to anchor.
:param node_url: url to an Ethereum node (can also be a 3rd party service url as Infura)
:param public_key: Public key to use to sign the transaction (the personal account must be funded)
:param private_key: Public key to use to sign the transaction (the personal account must be funded)
:param key_created_at: Ethereum account creation date.
:param max_retry: (optional) Amount of allowed retries if the anchoring operation fails
:param gas_price: (optional) desired gas price for the anchoring transaction
:param gas_limit: (optional) desired gas limit for the anchoring transaction
:param account_to: (optional) desired destination of the anchoring transaction
:param chain_name: (optional) one of ethereumRopsten or ethereumMainnet
The fields listed in INTERACTIVELY_REQUIRED_FIELDS can be left out for an extra layer of security.
In that case, the user will be prompted for input.
"""
INTERACTIVELY_REQUIRED_FIELDS = ['node_url', 'private_key', 'public_key', 'key_created_at']
signature_field = 'ETHData'
def __init__(
self,
node_url: str,
public_key: str,
private_key: str,
key_created_at: str,
max_retry: int = 3,
gas_price: int = 20000000000,
gas_limit: int = 25000,
account_to: str = '0xdeaDDeADDEaDdeaDdEAddEADDEAdDeadDEADDEaD',
chain_name: str = 'ethereumRopsten'
):
self.node_url = node_url
self.private_key = private_key
self.public_key = public_key
self.key_created_at = key_created_at
validate_required_fields_interactively(self, self.INTERACTIVELY_REQUIRED_FIELDS)
self.max_retry = max_retry
self.gas_price = gas_price
self.gas_limit = gas_limit
self.web3 = Web3(Web3.HTTPProvider(self.node_url))
self.account_to = account_to
self.chain_name = chain_name
def to_dict(self) -> Dict:
return dict(
node_url=self.node_url,
public_key=self.public_key,
private_key=self.private_key,
key_created_at=self.key_created_at,
max_retry=self.max_retry,
gas_price=self.gas_price,
gas_limit=self.gas_limit,
account_to=self.account_to,
chain_name=self.chain_name,
)
def _get_signed_tx(self, merkle_root: str, gas_price: int, gas_limit: int, try_count: int) -> AttrDict:
"""Prepare a raw transaction and sign it with the private key."""
nonce = self.web3.eth.getTransactionCount(self.public_key)
tx_info = {
'nonce': nonce,
'to': self.account_to,
'value': 0,
'gas': gas_limit,
'gasPrice': gas_price,
'data': merkle_root,
}
if try_count:
tx_info['nonce'] = tx_info['nonce'] + try_count
tx_info['gas'] = factor_in_new_try(tx_info['gas'], try_count)
tx_info['gasPrice'] = factor_in_new_try(tx_info['gasPrice'], try_count)
signed_tx = self.web3.eth.account.sign_transaction(tx_info, self.private_key)
return signed_tx
def _ensure_balance(self) -> None:
"""Make sure that the Ethereum account's balance is enough to cover the tx costs."""
assert self.web3.eth.getBalance(self.public_key) >= self.gas_limit * self.gas_price
    def anchor(self, merkle_root) -> str:
        """Store the merkle root as data in an Ethereum transaction and return the tx id.

        Retries up to ``max_retry`` times; each retry shifts the nonce and
        adjusts gas (see ``_get_signed_tx``) and backs off before resending.

        :param merkle_root: hex payload to embed in the transaction.
        :raises Exception: re-raises the last send error after the final retry.
        """
        for i in range(self.max_retry):
            # NOTE(review): ``assert`` is stripped under ``python -O``; an
            # explicit check-and-raise would be more robust here.
            assert self.web3.isConnected()
            self._ensure_balance()
            signed_tx = self._get_signed_tx(merkle_root, self.gas_price, self.gas_limit, i)
            try:
                tx_hash = self.web3.eth.sendRawTransaction(signed_tx.rawTransaction)
                tx_id = self.web3.toHex(tx_hash)
                return tx_id
            except Exception as e:
                if i >= self.max_retry - 1:
                    raise
                sleep(10 * i)  # linear back-off; the first retry waits 0s
                continue
class Blockcert:
    """A basic version of a Blockcert as defined by the standard.
    More at:
    - https://github.com/IMSGlobal/cert-schema/blob/master/cert_schema/2.0/
    - http://www.imsglobal.org/sites/default/files/Badges/OBv2p0Final/index.html
    :param id: unique id of this specific Blockcert
    :param issuer: Issuer object, contains info about who issues the Blockcert
    :param assertion: Assertion object, contains info about what is being claimed by the Issuer about the Recipient
    :param recipient: Recipient object, contains info about the entity receiving this Blockcert
    :param expires_at: string representation of an expiration date, like "2025-02-07T23:52:16.636+00:00"
    :param additional_per_recipient_fields: list of {'path': ..., 'field': ...}
        dicts; values are taken from ``recipient.additional_fields``
    :param additional_global_fields: list of {'path': ..., 'value': ...} dicts
        applied verbatim to every certificate
    """

    def __init__(self, id: str, issuer: Issuer, assertion: Assertion, recipient: Recipient, expires_at: str = "",
                 additional_per_recipient_fields: list = None, additional_global_fields: list = None):
        self.id = id
        self.issuer = issuer
        self.assertion = assertion
        self.recipient = recipient
        self.expires_at = expires_at
        self.additional_per_recipient_fields = additional_per_recipient_fields
        self.additional_global_fields = additional_global_fields
        # Filled in later by the anchoring / signing steps.
        self.anchor_tx_id = None
        self.proof = None

    def to_dict(self) -> Dict:
        """Get a dictionary representation of a Blockcert.

        Builds the Open Badges / Blockcerts v2 JSON structure; optional
        sections (intro URL, display HTML, signature lines, proof, expiry,
        extra fields) are only added when present.
        """
        # NOTE(review): ``NOW`` is a module-level timestamp defined elsewhere
        # in this file; every certificate built in a run shares its issuedOn.
        raw_dict = {
            "@context": [
                "https://w3id.org/openbadges/v2",
                "https://w3id.org/blockcerts/v2",
                {
                    "displayHtml": {
                        "@id": "schema:description"
                    }
                }
            ],
            "type": "Assertion",
            "issuedOn": NOW,
            "id": "urn:uuid:" + self.id,
            "recipient": {
                "type": "email",
                "identity": self.recipient.email,
                "hashed": self.recipient.email_hashed
            },
            "recipientProfile": {
                "type": [
                    "RecipientProfile",
                    "Extension"
                ],
                "name": self.recipient.name,
                "publicKey": "ecdsa-koblitz-pubkey:" + self.recipient.public_key
            },
            "badge": {
                "type": "BadgeClass",
                "id": "urn:uuid:" + self.assertion.id,
                "name": self.assertion.name,
                "description": self.assertion.description,
                "image": self.assertion.image,
                "issuer": {
                    "id": self.issuer.id,
                    "type": "Profile",
                    "name": self.issuer.name,
                    "url": self.issuer.main_url,
                    "email": self.issuer.email,
                    "image": self.issuer.image,
                    "revocationList": self.issuer.revocation_list
                },
                "criteria": {
                    "narrative": self.assertion.narrative
                }
            },
            "verification": {
                "type": [
                    "MerkleProofVerification2017",
                    "Extension"
                ],
                "publicKey": "ecdsa-koblitz-pubkey:" + self.issuer.public_key
            }
        }
        # Optional sections below: only emitted when the data is available.
        if self.issuer.intro_url:
            raw_dict["badge"]["issuer"]["introductionUrl"] = self.issuer.intro_url
        if self.assertion.display_html:
            raw_dict["displayHtml"] = self.assertion.display_html
        # A signature line needs all three pieces (image, title, name).
        if self.issuer.signature_image and self.issuer.signature_job_title and self.issuer.signature_name:
            raw_dict['signatureLines'] = [
                {
                    "type": [
                        "SignatureLine",
                        "Extension"
                    ],
                    "jobTitle": self.issuer.signature_job_title,
                    "image": self.issuer.signature_image,
                    "name": self.issuer.signature_name
                }
            ]
        if self.proof:
            raw_dict['signature'] = self.proof
        if self.expires_at:
            raw_dict['expires'] = self.expires_at
        # Extra fields are injected by dotted path via ``set_dict_field``
        # (defined elsewhere in this file).
        if self.additional_global_fields:
            for field in self.additional_global_fields:
                raw_dict = set_dict_field(raw_dict, field['path'], field['value'])
        if self.additional_per_recipient_fields:
            for field in self.additional_per_recipient_fields:
                raw_dict = set_dict_field(raw_dict, field['path'], self.recipient.additional_fields[field['field']])
        return raw_dict
class Batch(ABC):
    """Common interface for batching operations."""

    @abstractmethod
    def run(self):
        """Execute all the ordered steps to create a batch of final certificates."""
        raise NotImplementedError
| StarcoderdataPython |
8155796 | <gh_stars>0
import folium
from geopy.exc import GeocoderUnavailable
from geopy.geocoders import Nominatim
from flask import Flask, render_template, request, url_for, flash, redirect
from twitter2 import get_friends
def create_map(friends: list) -> None:
    """Create templates/friends_map.html with markers for your twitter friends.

    Args:
        friends (list): (screen_name, (lat, lon)) pairs; friends sharing the
            same coordinates are grouped into one marker.
    """
    # Renamed from ``map`` so the ``map`` builtin is not shadowed.
    friends_map = folium.Map(location=(49.817545, 24.023932), zoom_start=5, control_scale=True)

    # Group friend names by location so one marker lists everybody there.
    friends_locations = {}
    for name, location in friends:
        friends_locations.setdefault(location, []).append(name)

    friends_layer = folium.FeatureGroup("Your friends")
    for location, names in friends_locations.items():
        iframe = folium.IFrame(
            html=create_html_popup(names),
            width=250,
            height=100,
        )
        friends_layer.add_child(
            folium.Marker(
                location=location,
                popup=folium.Popup(iframe),
                icon=folium.Icon(
                    color="blue",
                    icon="fa-brands fa-twitter",
                    prefix="fa",
                ),
            )
        )
    friends_map.add_child(friends_layer)
    friends_map.add_child(folium.LayerControl())
    friends_map.save("templates/friends_map.html")
def create_html_popup(friends: list) -> str:
    """Build the HTML body for a marker popup listing friend names.

    Args:
        friends (list): friend screen names.

    Returns:
        str: a "Friends:" header followed by one paragraph per name.
    """
    rows = [
        f"""<br>
        <p>{friend}</p><br>
        """
        for friend in friends
    ]
    return "Friends:" + "".join(rows)
def find_friends(user: str, friends_number: str) -> list:
    """Look up locatable twitter friends of *user* via the twitter api.

    Args:
        user (str): twitter screen name whose friends are fetched.
        friends_number (str): maximum number of located friends to return
            (numeric string from the web form).

    Returns:
        list: (screen_name, (lat, lon)) tuples for friends whose profile
        location could be geocoded.
    """
    limit = int(friends_number)  # convert once instead of on every iteration
    friends = []
    for friend in get_friends(user)["users"]:
        if len(friends) >= limit:
            break  # stop early instead of looping over every remaining profile
        coords = find_coords(friend["location"])
        # (-69, -179) is the sentinel find_coords returns for unknown places.
        if coords != (-69, -179):
            friends.append((friend["screen_name"], coords))
    return friends
def find_coords(location: str) -> tuple:
    """Geocode *location* with Nominatim.

    Args:
        location (str): free-form address (e.g. from a twitter profile).

    Returns:
        tuple: (latitude, longitude), or the sentinel (-69, -179) when the
        place is unknown or the geocoder is unavailable.
    """
    # Rate-limited geocoder utility (kept as a local import, as before).
    from geopy.extra.rate_limiter import RateLimiter

    geolocator = Nominatim(user_agent="my-request")
    geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
    try:
        result = geocode(location)
    except GeocoderUnavailable:
        return -69, -179
    # Fixed: was ``coords != None`` plus a raise-then-catch round-trip;
    # an explicit identity check and direct return is equivalent and clearer.
    if result is None:
        return -69, -179
    return result.latitude, result.longitude
# --------------------------Flask application----------------------------------------
app = Flask(__name__)
# NOTE(review): the secret key is hard-coded in source control; load it from
# an environment variable before deploying.
app.config["SECRET_KEY"] = "32fc7730408b163d0fab37cd6ecce7be3c79c33e248db78b"
@app.route("/", methods=("GET", "POST"))
def create_view():
"""Supports main page of web application and gets user and friends number
Returns:
tuple: username and friends number and redirect to map
"""
if request.method == "POST":
user = request.form["user"]
friends_number = request.form["friends_number"]
if not user:
flash("User is required!")
elif not friends_number or not friends_number.isnumeric():
flash("Number of friends is required!")
else:
friends = find_friends(user, friends_number)
create_map(friends)
return redirect(url_for("map_view"))
return render_template("index.html")
@app.route("/map/")
def map_view():
"""show map of friends
Returns:
html: renders friends_map.html
"""
return render_template("friends_map.html")
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive debugger and reloader;
    # disable it in production.
    app.run(debug=True)
| StarcoderdataPython |
8176067 | from tkinter import StringVar, IntVar
# Defaults for the GUI-configurable options held by Config below.
DEFAULT_TRAIN_DIR = '../experiments/train_0_4'
DEFAULT_DELAY_MS = 0
DEFAULT_NUM_GAMES = 1
DEFAULT_SHOW_GAMES = False
class Config(object):
    """Holds every option that is set in the Main GUI (see window.py).

    Each option is backed by a tkinter variable so widgets can bind to it;
    the ``get_*`` helpers convert the raw widget values to Python types.
    """

    def __init__(self, master):
        self.master = master
        self.train_dir_obj = StringVar(master=master, value=DEFAULT_TRAIN_DIR)
        self.delay_in_ms_obj = StringVar(master=master, value=str(DEFAULT_DELAY_MS))
        self.num_games_obj = StringVar(master=master, value=str(DEFAULT_NUM_GAMES))
        self.show_games_obj = IntVar(master=master, value=int(DEFAULT_SHOW_GAMES))

    def get_train_dir(self):
        """Current training directory as a plain string."""
        return str(self.train_dir_obj.get())

    def get_delay_in_sec(self):
        """Configured delay, converted from milliseconds to seconds."""
        millis = int(self.delay_in_ms_obj.get())
        return millis / 1000.0

    def get_num_games(self):
        """Number of games to play, as an int."""
        return int(self.num_games_obj.get())

    def show_games(self):
        """Whether games should be rendered (checkbox value == 1)."""
        return 1 == self.show_games_obj.get()

    def print(self):
        """Dump the current configuration to stdout."""
        print('Train directory: ' + self.get_train_dir())
        print('Delay in ms: ' + str(self.get_delay_in_sec()))
        print('Num games: ' + str(self.get_num_games()))
        print('Show games: ' + str(self.show_games()))
| StarcoderdataPython |
9650984 | from functools import reduce
from .homogeneous import Translation, UniformScale, Rotation, Affine, Homogeneous
def transform_about_centre(obj, transform):
    r"""
    Build a transform that applies ``transform`` about the centre of ``obj``.

    The object is translated so its centre sits at the origin, transformed,
    and translated back to its previous position.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    transform : :map:`ComposableTransform`
        A composable transform.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the transformation.
    """
    shift_to_origin = Translation(-obj.centre(), skip_checks=True)
    shift_back = Translation(obj.centre(), skip_checks=True)
    if isinstance(transform, Homogeneous):
        # Fast path: homogeneous transforms compose in-place into one matrix.
        return shift_to_origin.compose_before(transform).compose_before(shift_back)
    # Generic transforms fall back to a composed transform chain.
    chain = [shift_to_origin, transform, shift_back]
    return reduce(lambda acc, t: acc.compose_before(t), chain)
def scale_about_centre(obj, scale):
    r"""
    Build a Homogeneous Transform that scales ``obj`` about its own centre.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    scale : `float` or ``(n_dims,)`` `ndarray`
        The scale factor as defined in the :map:`Scale` documentation.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the scaling.
    """
    return transform_about_centre(
        obj, UniformScale(scale, obj.n_dims, skip_checks=True)
    )
def rotate_ccw_about_centre(obj, theta, degrees=True):
    r"""
    Build a Homogeneous Transform rotating ``obj`` counter-clockwise about
    its own centre.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    theta : `float`
        The angle of rotation clockwise about the origin.
    degrees : `bool`, optional
        If ``True`` theta is interpreted as degrees, otherwise as radians.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the rotation.

    Raises
    ------
    ValueError
        If ``obj`` is not two-dimensional.
    """
    if obj.n_dims != 2:
        raise ValueError("CCW rotation is currently only supported for 2D objects")
    rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
    return transform_about_centre(obj, rotation)
def shear_about_centre(obj, phi, psi, degrees=True):
    r"""
    Build an affine transform shearing (distorting) ``obj`` about its own
    centre.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    phi : `float`
        The angle of shearing in the X direction.
    psi : `float`
        The angle of shearing in the Y direction.
    degrees : `bool`, optional
        If ``True`` phi and psi are interpreted as degrees, otherwise as
        radians.

    Returns
    -------
    transform : :map:`Affine`
        An affine transform that implements the shearing.

    Raises
    ------
    ValueError
        Shearing can only be applied on 2D objects.
    """
    if obj.n_dims != 2:
        raise ValueError("Shearing is currently only supported for 2D objects")
    shear = Affine.init_from_2d_shear(phi, psi, degrees=degrees)
    return transform_about_centre(obj, shear)
| StarcoderdataPython |
362861 | <reponame>NymanRobin/crl-interactivesessions<gh_stars>1-10
"""Robot Framework variable file for robottests.
This is variable file for docker-robottests cluster"""
import os
import sys
__copyright__ = 'Copyright (C) 2019-2020, Nokia'
# Major version of the interpreter running the tests; selects which
# container ports to read from the environment.
PYTHON_MAJOR = sys.version_info.major


def get_port(target):
    """Look up the published port for *target* from the environment.

    The variable name encodes the running Python major version,
    e.g. ``PORT_PY3_GW``.

    :param target: suffix identifying the service (e.g. 'GW', 'PY2', 'PY3').
    :return: the port number as a string.
    :raises KeyError: if the environment variable is not set.
    """
    return os.environ['PORT_PY{}_{}'.format(PYTHON_MAJOR, target)]
def update_sshshell_dicts(*shelldicts):
    """Apply the shared defaults to every SSH shell dict in place.

    Missing hosts fall back to 'localhost'; user and password are always
    set to the shared root credentials.
    """
    for shell in shelldicts:
        shell.setdefault('host', 'localhost')
        shell['user'] = 'root'
        shell['password'] = '<PASSWORD>'
# Host dictionaries exposed to Robot Framework (the DICT__ prefix marks
# dictionary variables in a variable file).
DICT__HOST1 = {'port': get_port('PY2')}
DICT__HOST2 = {'port': get_port('PY3')}
DICT__GW = {'port': get_port('GW')}
# Hosts reached through the gateway are addressed by container name;
# the name embeds both the local and the remote Python major version.
DICT__HOST1_VIA_GW = {'host': 'py{}-py2'.format(PYTHON_MAJOR)}
DICT__HOST2_VIA_GW = {'host': 'py{}-py3'.format(PYTHON_MAJOR)}
DICT__HOST3_VIA_GW = {'host': 'py{}-py3-no-symlink'.format(PYTHON_MAJOR),
                      'python_command': 'python3'}
DICT__HOST4_VIA_GW = {'host': 'py{}-py3-no-symlink'.format(PYTHON_MAJOR),
                      'init_env': 'content: alias python=python3'}
SUDOSHELL = {'shellname': 'BashShell', 'cmd': 'sudo /bin/bash'}
KEYAUTHENTICATEDSHELL = {'host': 'localhost',
                         'initial_prompt': '# ',
                         'shellname': 'KeyAuthenticatedSshShell'}
# Fill in the shared root credentials / default host for every SSH shell dict.
update_sshshell_dicts(DICT__HOST1,
                      DICT__HOST2,
                      DICT__GW,
                      DICT__HOST1_VIA_GW,
                      DICT__HOST2_VIA_GW,
                      DICT__HOST3_VIA_GW,
                      DICT__HOST4_VIA_GW
                      )
| StarcoderdataPython |
1873772 | import numpy as np
import pandas as pd
import random
def prob_to_samplenum(total_num, prob_list):
    """Convert a probability list into integer sample counts summing to total_num.

    Each probability is scaled by ``total_num`` and rounded; any rounding
    deficit or surplus is then corrected one unit at a time at uniformly
    chosen indices so the counts always sum to exactly ``total_num``.

    Fixes vs. the previous version: ``random.randint(0, len) - 1`` could
    produce index -1 (biasing the last element), and a rounding *surplus*
    was never corrected at all.
    """
    sample_num_list = [round(total_num * p) for p in prob_list]
    while sum(sample_num_list) < total_num:
        sample_num_list[random.randrange(len(prob_list))] += 1
    while sum(sample_num_list) > total_num:
        idx = random.randrange(len(prob_list))
        if sample_num_list[idx] > 0:  # never push a count below zero
            sample_num_list[idx] -= 1
    return sample_num_list
| StarcoderdataPython |
190775 | # Copyright (c) Scrapy developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Scrapy nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
from http.cookiejar import IPV4_RE
from urllib.parse import urlparse
from urllib.request import Request
def _potential_domain_matches(domain):
"""
Potential domain matches for a cookie.
>>> _potential_domain_matches('www.example.com')
['www.example.com', 'example.com', '.www.example.com', '.example.com']
From scrapy.http.cookies.potential_domain_matches().
"""
matches = [domain]
try:
start = domain.index('.') + 1
end = domain.rindex('.')
while start < end:
matches.append(domain[start:])
start = domain.index('.', start) + 1
except ValueError:
pass
return matches + ['.' + d for d in matches]
def cookies_for_url(jar, url):
    """
    Yield the cookies in a cookielib CookieJar that apply to *url*.

    Adapted from scrapy.http.cookies.CookieJar.add_cookie_header().
    """
    host = urlparse(url).hostname
    if not IPV4_RE.search(host):
        hosts = _potential_domain_matches(host)
        if "." not in host:
            # Fixed: ``hosts += host + ".local"`` extended the list with the
            # string's individual characters; append one entry instead.
            hosts.append(host + ".local")
    else:
        # Literal IPv4 addresses match only themselves.
        hosts = [host]
    # The jar's policy needs a current timestamp to evaluate cookie expiry.
    jar._policy._now = jar._now = int(time.time())
    for host in hosts:
        if host in jar._cookies:
            # TODO: origin and unverifiable.
            req = Request(url)
            for cookie in jar._cookies_for_domain(host, req):
                yield cookie
| StarcoderdataPython |
1870668 | <filename>tests/test_dsci532_group12.py<gh_stars>1-10
from dsci532_group12 import dsci532_group12
| StarcoderdataPython |
1807241 | <reponame>tomstitt/PyMFEM<filename>mfem/_ser/mesh_operators.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _mesh_operators
else:
import _mesh_operators
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _mesh_operators.SWIG_PyInstanceMethod_New
_swig_new_static_method = _mesh_operators.SWIG_PyStaticMethod_New
def _swig_repr(self):
    # SWIG-generated helper: builds "<module.Class; proxy of ...>", falling
    # back to an empty proxy string when the underlying C++ object is gone.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    # SWIG-generated helper: wraps a setattr so proxy instances only accept
    # known property attributes plus the 'this'/'thisown' bookkeeping slots.
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    # SWIG-generated helper: class-level analogue of the instance wrapper -
    # only pre-existing, non-property class attributes may be reassigned.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    # Recreates the class through the metaclass, preserving bases and dict.
    def wrapper(cls):
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Routes class attribute assignment through the guarded setter above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.vector
import mfem._ser.mesh
import mfem._ser.matrix
import mfem._ser.operators
import mfem._ser.sort_pairs
import mfem._ser.ncmesh
import mfem._ser.gridfunc
import mfem._ser.coefficient
import mfem._ser.globals
import mfem._ser.intrules
import mfem._ser.sparsemat
import mfem._ser.densemat
import mfem._ser.eltrans
import mfem._ser.fe
import mfem._ser.geom
import mfem._ser.fespace
import mfem._ser.fe_coll
import mfem._ser.lininteg
import mfem._ser.handle
import mfem._ser.restriction
import mfem._ser.element
import mfem._ser.table
import mfem._ser.hash
import mfem._ser.bilininteg
import mfem._ser.linearform
import mfem._ser.vertex
import mfem._ser.vtk
import mfem._ser.estimators
import mfem._ser.bilinearform
# SWIG-generated proxy for mfem::MeshOperator. Regenerate from the SWIG
# interface file rather than editing by hand.
class MeshOperator(object):
    r"""Proxy of C++ mfem::MeshOperator class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    # Action and info bit-flag constants mirrored from the C++ enum.
    NONE = _mesh_operators.MeshOperator_NONE
    CONTINUE = _mesh_operators.MeshOperator_CONTINUE
    STOP = _mesh_operators.MeshOperator_STOP
    REPEAT = _mesh_operators.MeshOperator_REPEAT
    MASK_UPDATE = _mesh_operators.MeshOperator_MASK_UPDATE
    MASK_ACTION = _mesh_operators.MeshOperator_MASK_ACTION
    REFINED = _mesh_operators.MeshOperator_REFINED
    DEREFINED = _mesh_operators.MeshOperator_DEREFINED
    REBALANCED = _mesh_operators.MeshOperator_REBALANCED
    MASK_INFO = _mesh_operators.MeshOperator_MASK_INFO

    def Apply(self, mesh):
        r"""Apply(MeshOperator self, Mesh mesh) -> bool"""
        return _mesh_operators.MeshOperator_Apply(self, mesh)
    Apply = _swig_new_instance_method(_mesh_operators.MeshOperator_Apply)

    def Stop(self):
        r"""Stop(MeshOperator self) -> bool"""
        return _mesh_operators.MeshOperator_Stop(self)
    Stop = _swig_new_instance_method(_mesh_operators.MeshOperator_Stop)

    def Repeat(self):
        r"""Repeat(MeshOperator self) -> bool"""
        return _mesh_operators.MeshOperator_Repeat(self)
    Repeat = _swig_new_instance_method(_mesh_operators.MeshOperator_Repeat)

    def Continue(self):
        r"""Continue(MeshOperator self) -> bool"""
        return _mesh_operators.MeshOperator_Continue(self)
    Continue = _swig_new_instance_method(_mesh_operators.MeshOperator_Continue)

    def Refined(self):
        r"""Refined(MeshOperator self) -> bool"""
        return _mesh_operators.MeshOperator_Refined(self)
    Refined = _swig_new_instance_method(_mesh_operators.MeshOperator_Refined)

    def Derefined(self):
        r"""Derefined(MeshOperator self) -> bool"""
        return _mesh_operators.MeshOperator_Derefined(self)
    Derefined = _swig_new_instance_method(_mesh_operators.MeshOperator_Derefined)

    def Rebalanced(self):
        r"""Rebalanced(MeshOperator self) -> bool"""
        return _mesh_operators.MeshOperator_Rebalanced(self)
    Rebalanced = _swig_new_instance_method(_mesh_operators.MeshOperator_Rebalanced)

    def GetActionInfo(self):
        r"""GetActionInfo(MeshOperator self) -> int"""
        return _mesh_operators.MeshOperator_GetActionInfo(self)
    GetActionInfo = _swig_new_instance_method(_mesh_operators.MeshOperator_GetActionInfo)

    def Reset(self):
        r"""Reset(MeshOperator self)"""
        return _mesh_operators.MeshOperator_Reset(self)
    Reset = _swig_new_instance_method(_mesh_operators.MeshOperator_Reset)
    __swig_destroy__ = _mesh_operators.delete_MeshOperator

# Register MeshOperator in _mesh_operators:
_mesh_operators.MeshOperator_swigregister(MeshOperator)
# SWIG-generated proxy for mfem::MeshOperatorSequence. Do not hand-edit.
class MeshOperatorSequence(MeshOperator):
    r"""Proxy of C++ mfem::MeshOperatorSequence class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self):
        r"""__init__(MeshOperatorSequence self) -> MeshOperatorSequence"""
        _mesh_operators.MeshOperatorSequence_swiginit(self, _mesh_operators.new_MeshOperatorSequence())
    __swig_destroy__ = _mesh_operators.delete_MeshOperatorSequence

    def Append(self, mc):
        r"""Append(MeshOperatorSequence self, MeshOperator mc)"""
        return _mesh_operators.MeshOperatorSequence_Append(self, mc)
    Append = _swig_new_instance_method(_mesh_operators.MeshOperatorSequence_Append)

    def GetSequence(self):
        r"""GetSequence(MeshOperatorSequence self) -> mfem::Array< mfem::MeshOperator * > &"""
        return _mesh_operators.MeshOperatorSequence_GetSequence(self)
    GetSequence = _swig_new_instance_method(_mesh_operators.MeshOperatorSequence_GetSequence)

    def Reset(self):
        r"""Reset(MeshOperatorSequence self)"""
        return _mesh_operators.MeshOperatorSequence_Reset(self)
    Reset = _swig_new_instance_method(_mesh_operators.MeshOperatorSequence_Reset)

# Register MeshOperatorSequence in _mesh_operators:
_mesh_operators.MeshOperatorSequence_swigregister(MeshOperatorSequence)
# SWIG-generated proxy for mfem::ThresholdRefiner. Do not hand-edit.
class ThresholdRefiner(MeshOperator):
    r"""Proxy of C++ mfem::ThresholdRefiner class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, est):
        r"""__init__(ThresholdRefiner self, ErrorEstimator est) -> ThresholdRefiner"""
        _mesh_operators.ThresholdRefiner_swiginit(self, _mesh_operators.new_ThresholdRefiner(est))

    def SetTotalErrorNormP(self, *args, **kwargs):
        r"""SetTotalErrorNormP(ThresholdRefiner self, double norm_p=mfem::infinity())"""
        return _mesh_operators.ThresholdRefiner_SetTotalErrorNormP(self, *args, **kwargs)
    SetTotalErrorNormP = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_SetTotalErrorNormP)

    def SetTotalErrorGoal(self, err_goal):
        r"""SetTotalErrorGoal(ThresholdRefiner self, double err_goal)"""
        return _mesh_operators.ThresholdRefiner_SetTotalErrorGoal(self, err_goal)
    SetTotalErrorGoal = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_SetTotalErrorGoal)

    def SetTotalErrorFraction(self, fraction):
        r"""SetTotalErrorFraction(ThresholdRefiner self, double fraction)"""
        return _mesh_operators.ThresholdRefiner_SetTotalErrorFraction(self, fraction)
    SetTotalErrorFraction = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_SetTotalErrorFraction)

    def SetLocalErrorGoal(self, err_goal):
        r"""SetLocalErrorGoal(ThresholdRefiner self, double err_goal)"""
        return _mesh_operators.ThresholdRefiner_SetLocalErrorGoal(self, err_goal)
    SetLocalErrorGoal = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_SetLocalErrorGoal)

    def SetMaxElements(self, max_elem):
        r"""SetMaxElements(ThresholdRefiner self, long max_elem)"""
        return _mesh_operators.ThresholdRefiner_SetMaxElements(self, max_elem)
    SetMaxElements = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_SetMaxElements)

    def PreferNonconformingRefinement(self):
        r"""PreferNonconformingRefinement(ThresholdRefiner self)"""
        return _mesh_operators.ThresholdRefiner_PreferNonconformingRefinement(self)
    PreferNonconformingRefinement = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_PreferNonconformingRefinement)

    def PreferConformingRefinement(self):
        r"""PreferConformingRefinement(ThresholdRefiner self)"""
        return _mesh_operators.ThresholdRefiner_PreferConformingRefinement(self)
    PreferConformingRefinement = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_PreferConformingRefinement)

    def SetNCLimit(self, nc_limit):
        r"""SetNCLimit(ThresholdRefiner self, int nc_limit)"""
        return _mesh_operators.ThresholdRefiner_SetNCLimit(self, nc_limit)
    SetNCLimit = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_SetNCLimit)

    def GetNumMarkedElements(self):
        r"""GetNumMarkedElements(ThresholdRefiner self) -> long"""
        return _mesh_operators.ThresholdRefiner_GetNumMarkedElements(self)
    GetNumMarkedElements = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_GetNumMarkedElements)

    def GetThreshold(self):
        r"""GetThreshold(ThresholdRefiner self) -> double"""
        return _mesh_operators.ThresholdRefiner_GetThreshold(self)
    GetThreshold = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_GetThreshold)

    def Reset(self):
        r"""Reset(ThresholdRefiner self)"""
        return _mesh_operators.ThresholdRefiner_Reset(self)
    Reset = _swig_new_instance_method(_mesh_operators.ThresholdRefiner_Reset)
    __swig_destroy__ = _mesh_operators.delete_ThresholdRefiner

# Register ThresholdRefiner in _mesh_operators:
_mesh_operators.ThresholdRefiner_swigregister(ThresholdRefiner)
# SWIG-generated proxy for mfem::ThresholdDerefiner. Do not hand-edit.
class ThresholdDerefiner(MeshOperator):
    r"""Proxy of C++ mfem::ThresholdDerefiner class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, est):
        r"""__init__(ThresholdDerefiner self, ErrorEstimator est) -> ThresholdDerefiner"""
        _mesh_operators.ThresholdDerefiner_swiginit(self, _mesh_operators.new_ThresholdDerefiner(est))

    def SetThreshold(self, thresh):
        r"""SetThreshold(ThresholdDerefiner self, double thresh)"""
        return _mesh_operators.ThresholdDerefiner_SetThreshold(self, thresh)
    SetThreshold = _swig_new_instance_method(_mesh_operators.ThresholdDerefiner_SetThreshold)

    def SetOp(self, op):
        r"""SetOp(ThresholdDerefiner self, int op)"""
        return _mesh_operators.ThresholdDerefiner_SetOp(self, op)
    SetOp = _swig_new_instance_method(_mesh_operators.ThresholdDerefiner_SetOp)

    def SetNCLimit(self, nc_limit):
        r"""SetNCLimit(ThresholdDerefiner self, int nc_limit)"""
        return _mesh_operators.ThresholdDerefiner_SetNCLimit(self, nc_limit)
    SetNCLimit = _swig_new_instance_method(_mesh_operators.ThresholdDerefiner_SetNCLimit)

    def Reset(self):
        r"""Reset(ThresholdDerefiner self)"""
        return _mesh_operators.ThresholdDerefiner_Reset(self)
    Reset = _swig_new_instance_method(_mesh_operators.ThresholdDerefiner_Reset)
    __swig_destroy__ = _mesh_operators.delete_ThresholdDerefiner

# Register ThresholdDerefiner in _mesh_operators:
_mesh_operators.ThresholdDerefiner_swigregister(ThresholdDerefiner)
# SWIG-generated proxy for mfem::Rebalancer. Do not hand-edit.
class Rebalancer(MeshOperator):
    r"""Proxy of C++ mfem::Rebalancer class."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def Reset(self):
        r"""Reset(Rebalancer self)"""
        return _mesh_operators.Rebalancer_Reset(self)
    Reset = _swig_new_instance_method(_mesh_operators.Rebalancer_Reset)

    def __init__(self):
        r"""__init__(Rebalancer self) -> Rebalancer"""
        _mesh_operators.Rebalancer_swiginit(self, _mesh_operators.new_Rebalancer())
    __swig_destroy__ = _mesh_operators.delete_Rebalancer

# Register Rebalancer in _mesh_operators:
_mesh_operators.Rebalancer_swigregister(Rebalancer)
| StarcoderdataPython |
4856846 | <filename>src/gausskernel/dbmind/tools/predictor/python/settings.py<gh_stars>0
"""
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
Description: The settings for AiEngine.
"""
import os
# Flask settings
DEFAULT_FLASK_SERVER_HOST = '127.0.0.1'
DEFAULT_FLASK_SERVER_PORT = '5000'
DEFAULT_FLASK_DEBUG = '0' # Do not use debug mode in production
# Path settings
PATH_UPLOAD = 'uploads/'
PATH_MODELS = 'saved_models/'
PATH_LOG = 'log/'
PATH_ENGINE_LOG = 'e_log/model_logs'
# Path for certifications
# NOTE(review): "path_to_CA" is a placeholder - it must be replaced with the
# real CA directory before TLS can work.
PATH_SSL = "path_to_CA"
PATH_CA = PATH_SSL + '/demoCA/cacert.pem'
PATH_SERVER_KEY = PATH_SSL + '/server.key'
PATH_SERVER_CRT = PATH_SSL + '/server.crt'
# GPU configuration set as '-1' if no gpu is available, default two gpus
GPU_CONFIG = '0,1'
# Path for logs
# base_path anchors relative paths to this file's directory, not the CWD.
base_path = os.path.dirname(__file__)
PATH_MODELS_INFO = os.path.realpath(os.path.join(base_path, PATH_MODELS))
| StarcoderdataPython |
9774281 | <reponame>Yif-Yang/pytorch-SimSiam
import torch
import torch.nn as nn
import math
# from models.resnet import resnet50
from torchvision.models import resnet50
import torch.nn.functional as F
def D(p, z, version='original'):
    """Negative cosine similarity between predictions *p* and targets *z*.

    The target branch *z* is detached (stop-gradient), as in SimSiam.

    Args:
        p: prediction tensor; normalised along dim 1 ('original' path).
        z: target tensor; gradients are blocked through it.
        version: 'original' (manual l2-normalisation) or 'simplified'
            (F.cosine_similarity) - both compute the same value.

    Returns:
        Scalar tensor: mean of -cos(p, z) over the batch.

    Raises:
        ValueError: for an unknown *version* (was a bare ``Exception``
            with no message; ValueError is a subclass, so existing
            ``except Exception`` handlers still work).
    """
    if version == 'original':
        z = z.detach()  # stop gradient
        p = F.normalize(p, dim=1)  # l2-normalize
        z = F.normalize(z, dim=1)  # l2-normalize
        return -(p * z).sum(dim=1).mean()
    elif version == 'simplified':  # same thing, much faster
        return - F.cosine_similarity(p, z.detach(), dim=-1).mean()
    else:
        raise ValueError('unknown version: {!r}'.format(version))
def negcos(p, z):
    """Negative cosine similarity with stop-gradient on *z* (SimSiam loss)."""
    p_hat = F.normalize(p, dim=1)
    z_hat = F.normalize(z, dim=1).detach()  # block gradients through the target
    return -(p_hat * z_hat).sum(dim=1).mean()
class ProjectionMLP(nn.Module):
    """Three-layer projection head; the CIFAR variant bypasses the middle layer.

    Attribute names l1/l2/l3 are kept so saved state dicts stay compatible.
    """

    def __init__(self, in_dim, mid_dim, out_dim):
        super(ProjectionMLP, self).__init__()
        self.l1 = nn.Sequential(
            nn.Linear(in_dim, mid_dim),
            nn.BatchNorm1d(mid_dim),
            nn.ReLU(inplace=True),
        )
        self.l2 = nn.Sequential(
            nn.Linear(mid_dim, mid_dim),
            nn.BatchNorm1d(mid_dim),
            nn.ReLU(inplace=True),
        )
        self.l3 = nn.Sequential(
            nn.Linear(mid_dim, out_dim),
            nn.BatchNorm1d(out_dim),
        )
        # When True, forward skips l2 (shallower head for CIFAR).
        self.is_cifar_flag = False

    def is_cifar(self, is_cifar=True):
        """Toggle the shallower CIFAR configuration (l2 is bypassed)."""
        self.is_cifar_flag = is_cifar
        print('cifar settings')

    def forward(self, x):
        hidden = self.l1(x)
        if self.is_cifar_flag:
            return self.l3(hidden)
        return self.l3(self.l2(hidden))
class PredictionMLP(nn.Module):
    """Two-layer prediction head (SimSiam ``h``): Linear-BN-ReLU then Linear."""
    def __init__(self, in_dim, mid_dim, out_dim):
        super(PredictionMLP, self).__init__()
        self.l1 = nn.Sequential(
            nn.Linear(in_dim, mid_dim),
            nn.BatchNorm1d(mid_dim),
            nn.ReLU(inplace=True)
        )
        self.l2 = nn.Linear(mid_dim, out_dim)
    def forward(self, x):
        return self.l2(self.l1(x))
class SimSiam(nn.Module):
    """SimSiam network: backbone encoder + projection MLP (f) + prediction MLP (h).

    ``forward`` returns the pair ``(z, p)`` (projection, prediction) for one
    augmented view; the symmetric loss D(p1, z2)/2 + D(p2, z1)/2 is computed
    by the caller.
    """
    def __init__(self, backbone='resnet50', d=2048, is_cifar=False):
        # NOTE(review): despite the string default, ``backbone`` must be an
        # nn.Module with a ``.fc`` head (e.g. torchvision's resnet50());
        # passing the default 'resnet50' string would fail at ``net.fc`` below
        # — confirm all callers pass a constructed model.
        # NOTE(review): ``d`` is unused; the projection/prediction widths are
        # hard-coded to 2048 below.
        super(SimSiam, self).__init__()
        # if backbone == 'resnet50':
        #     net = resnet50()
        # else:
        #     raise NotImplementedError('Backbone model not implemented.')
        net=backbone
        num_ftrs = net.fc.in_features
        net = list(net.children())
        # net = net[:3] + net[4:]
        # Drop the final classifier layer; keep everything up to global pooling.
        self.features = nn.Sequential(*net[:-1])
        # num_ftrs = net.fc.out_features
        # self.features = net
        # projection MLP
        self.projection = ProjectionMLP(num_ftrs, 2048, 2048)
        # prediction MLP
        self.prediction = PredictionMLP(2048, 512, 2048)
        if is_cifar:
            self.projection.is_cifar()
        self.reset_parameters()
    def forward(self, x):
        """Encode one view and return (projection z, prediction p)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        # projection
        z = self.projection(x)
        # prediction
        p = self.prediction(z)
        return z, p
    # def forward(self, x, x1):
    #     x, x = self.features(x)
    #     x = x.view(x.size(0), -1)
    #     # projection
    #     z = self.projection(x)
    #     # prediction
    #     p = self.prediction(z)
    #     return z, p
    # def forward(self, x1, x2):
    #     x1, x2 = self.features(x1), self.features(x2)
    #     # x1 = x1.view(x1.size(0), -1)
    #     # x2 = x2.view(x2.size(0), -1)
    #     f, h = self.projection, self.prediction
    #     z1, z2 = f(x1), f(x2)
    #     p1, p2 = h(z1), h(z2)
    #     L = D(p1, z2) / 2 + D(p2, z1) / 2
    #     return L
    def reset_parameters(self):
        """Re-initialize Conv2d/Linear weights (and biases) with
        uniform(-1/sqrt(n), 1/sqrt(n)) instead of the PyTorch defaults."""
        # reset conv initialization to default uniform initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                stdv = 1. / math.sqrt(n)
                m.weight.data.uniform_(-stdv, stdv)
                if m.bias is not None:
                    m.bias.data.uniform_(-stdv, stdv)
            elif isinstance(m, nn.Linear):
                stdv = 1. / math.sqrt(m.weight.size(1))
                m.weight.data.uniform_(-stdv, stdv)
                if m.bias is not None:
                    m.bias.data.uniform_(-stdv, stdv)
1681659 | <reponame>Exdenta/torchsat
from pathlib import Path
import math
import numpy as np
import pytest
import tifffile
import torch
from PIL import Image
from torchsat.transforms import transforms_cls
# TIFF fixtures covering 1/3/8-channel rasters in float, uint16 and uint8.
tiff_files = [
    './tests/fixtures/different-types/tiff_1channel_float.tif',
    './tests/fixtures/different-types/tiff_1channel_uint16.tif',
    './tests/fixtures/different-types/tiff_1channel_uint8.tif',
    './tests/fixtures/different-types/tiff_3channel_float.tif',
    './tests/fixtures/different-types/tiff_3channel_uint16.tif',
    './tests/fixtures/different-types/tiff_3channel_uint8.tif',
    './tests/fixtures/different-types/tiff_8channel_float.tif',
    './tests/fixtures/different-types/tiff_8channel_uint16.tif',
    './tests/fixtures/different-types/tiff_8channel_uint8.tif',
]
# 8-bit JPEG/PNG fixtures (1- and 3-channel).
jpeg_files = [
    './tests/fixtures/different-types/jpeg_1channel_uint8.jpeg',
    './tests/fixtures/different-types/jpeg_3channel_uint8.jpeg',
    './tests/fixtures/different-types/jpeg_1channel_uint8.png',
    './tests/fixtures/different-types/jpeg_3channel_uint8.png',
]
def read_img(fp):
    """Load *fp* as a numpy array: tifffile for TIFFs, PIL for everything else."""
    suffix = Path(fp).suffix
    if suffix in ('.tif', '.tiff'):
        return tifffile.imread(fp)
    return np.array(Image.open(fp))
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_ToTensor(fp):
    """ToTensor yields a 3-D tensor whose trailing dims equal the source HxW."""
    image = read_img(fp)
    out = transforms_cls.Compose([transforms_cls.ToTensor()])(image)
    assert type(out) == torch.Tensor
    assert len(out.shape) == 3
    assert out.shape[1:3] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_Normalize(fp):
    """Normalize with per-channel stats keeps tensor type and spatial size."""
    image = read_img(fp)
    n_channels = 1 if image.ndim == 2 else image.shape[2]
    if n_channels == 1:
        mean = [image.mean()]
        std = [image.std()]
    else:
        mean = np.array(image.mean(axis=(0, 1))).tolist()
        std = np.array(image.std(axis=(0, 1))).tolist()
    out = transforms_cls.Compose([
        transforms_cls.ToTensor(),
        transforms_cls.Normalize(mean, std)
    ])(image)
    assert type(out) == torch.Tensor
    assert len(out.shape) == 3
    assert out.shape[1:3] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_ToGray(fp):
    """ToGray collapses to 2-D by default; output_channels sets the channel count."""
    image = read_img(fp)
    out = transforms_cls.Compose([transforms_cls.ToGray()])(image)
    assert out.dtype == image.dtype
    assert out.ndim == 2
    out = transforms_cls.Compose([transforms_cls.ToGray(output_channels=5)])(image)
    assert out.shape == (image.shape[0], image.shape[1], 5)
    assert out.dtype == image.dtype
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_GaussianBlur(fp):
    """GaussianBlur preserves both shape and dtype."""
    image = read_img(fp)
    out = transforms_cls.Compose([transforms_cls.GaussianBlur(kernel_size=5)])(image)
    assert out.shape == image.shape
    assert out.dtype == image.dtype
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomNoise(fp):
    """Every supported noise mode preserves shape and dtype."""
    image = read_img(fp)
    for mode in ('gaussian', 'salt', 'pepper', 's&p'):
        out = transforms_cls.Compose([transforms_cls.RandomNoise(mode=mode)])(image)
        assert out.shape == image.shape
        assert out.dtype == image.dtype
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomBrightness(fp):
    """RandomBrightness keeps shape/dtype and respects max_value at a sample pixel."""
    image = read_img(fp)
    out = transforms_cls.Compose([transforms_cls.RandomBrightness()])(image)
    assert out.shape == image.shape
    assert out.dtype == image.dtype
    out = transforms_cls.Compose([transforms_cls.RandomBrightness(max_value=10)])(image)
    assert out.shape == image.shape
    assert out.dtype == image.dtype
    idx = (0, 0) if out.ndim == 2 else (0, 0, 0)
    assert abs(float(out[idx]) - float(image[idx])) <= 10
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomContrast(fp):
    """RandomContrast keeps shape/dtype and respects max_factor at a sample pixel."""
    image = read_img(fp)
    out = transforms_cls.Compose([transforms_cls.RandomContrast()])(image)
    assert out.shape == image.shape
    assert out.dtype == image.dtype
    out = transforms_cls.Compose([transforms_cls.RandomContrast(max_factor=1.2)])(image)
    assert out.shape == image.shape
    assert out.dtype == image.dtype
    idx = (0, 0) if out.ndim == 2 else (0, 0, 0)
    assert abs(float(out[idx]) / float(image[idx])) <= 1.2
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_Resize(fp):
    """Resize supports square ints and (h, w) tuples, and chains with ToTensor."""
    image = read_img(fp)
    out = transforms_cls.Compose([
        transforms_cls.Resize(300),
        transforms_cls.ToTensor(),
    ])(image)
    assert out.shape[1:3] == torch.Size([300, 300])
    assert type(out) == torch.Tensor
    out = transforms_cls.Compose([transforms_cls.Resize(833)])(image)
    assert out.shape[0:2] == (833, 833)
    assert out.dtype == image.dtype
    out = transforms_cls.Compose([transforms_cls.Resize((500, 300))])(image)
    assert out.shape[0:2] == (500, 300)
    assert out.dtype == image.dtype
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_CenterCrop(fp):
    """CenterCrop honors int/tuple sizes and rejects crops larger than the image."""
    image = read_img(fp)
    out = transforms_cls.Compose([transforms_cls.CenterCrop(300)])(image)
    assert out.shape[0:2] == (300, 300)
    assert out.dtype == image.dtype
    out = transforms_cls.Compose([transforms_cls.CenterCrop((500, 300))])(image)
    assert out.shape[0:2] == (500, 300)
    assert out.dtype == image.dtype
    with pytest.raises(ValueError) as excinfo:
        transforms_cls.CenterCrop(1000)(image)
    assert 'the output_size should' in str(excinfo.value)
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_Pad(fp):
    """Pad: constant fill, reflected content, and every supported padding mode."""
    image = read_img(fp)
    h, w = image.shape[0], image.shape[1]
    # constant fill value
    out = transforms_cls.Pad(10, fill=1)(image)
    idx = (0, 0) if out.ndim == 2 else (0, 0, 0)
    assert out[idx] == 1
    # reflected content
    out = transforms_cls.Pad(20, padding_mode='reflect')(image)
    assert out.shape[0:2] == (h + 40, w + 40)
    src = (20, 20) if out.ndim == 2 else (20, 20, 0)
    assert out[idx] == image[src]
    assert out.dtype == image.dtype
    # every padding mode, with int / 2-tuple / 4-tuple paddings
    for mode in ['reflect', 'edge', 'linear_ramp', 'maximum', 'mean', 'median', 'minimum', 'symmetric', 'wrap']:
        out = transforms_cls.Pad(10, padding_mode=mode)(image)
        assert out.dtype == image.dtype
        assert out.shape[0:2] == (h + 20, w + 20)
        out = transforms_cls.Pad((10, 20), padding_mode=mode)(image)
        assert out.shape[0:2] == (h + 40, w + 20)
        assert out.dtype == image.dtype
        out = transforms_cls.Pad((10, 20, 30, 40), padding_mode=mode)(image)
        assert out.shape[0:2] == (h + 60, w + 40)
        assert out.dtype == image.dtype
    out = transforms_cls.Compose([
        transforms_cls.Pad(10, fill=1),
        transforms_cls.ToTensor()
    ])(image)
    assert type(out) == torch.Tensor
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomCrop(fp):
    """RandomCrop returns the requested size with the source dtype."""
    image = read_img(fp)
    out = transforms_cls.RandomCrop(111)(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == (111, 111)
    out = transforms_cls.RandomCrop((100, 200))(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == (100, 200)
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomHorizontalFlip(fp):
    """With p=1 the top-left pixel must land in the top-right corner."""
    image = read_img(fp)
    out = transforms_cls.RandomHorizontalFlip(p=1)(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == image.shape[0:2]
    width = image.shape[1]
    if out.ndim == 2:
        assert out[0, width - 1] == image[0, 0]
    else:
        assert (out[0, width - 1, :] == image[0, 0, :]).any()
    # tensor pipeline
    out = transforms_cls.Compose([
        transforms_cls.RandomHorizontalFlip(p=1),
        transforms_cls.ToTensor()
    ])(image)
    assert type(out) == torch.Tensor
    assert out.shape[1:3] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomVerticalFlip(fp):
    """With p=1 the top-left pixel must land in the bottom-left corner."""
    image = read_img(fp)
    out = transforms_cls.RandomVerticalFlip(p=1)(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == image.shape[0:2]
    height = image.shape[0]
    if out.ndim == 2:
        assert out[height - 1, 0] == image[0, 0]
    else:
        assert (out[height - 1, 0, :] == image[0, 0, :]).any()
    # tensor pipeline
    out = transforms_cls.Compose([
        transforms_cls.RandomVerticalFlip(p=1),
        transforms_cls.ToTensor()
    ])(image)
    assert type(out) == torch.Tensor
    assert out.shape[1:3] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_Flip(fp):
    """RandomFlip with p=0 leaves the corner pixel in place; p>0 composes with ToTensor."""
    image = read_img(fp)
    out = transforms_cls.RandomFlip(p=0)(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == image.shape[0:2]
    if out.ndim == 2:
        assert out[0, 0] == image[0, 0]
    else:
        assert (out[0, 0, :] == image[0, 0, :]).any()
    # tensor pipeline
    out = transforms_cls.Compose([
        transforms_cls.RandomFlip(p=0.1),
        transforms_cls.ToTensor()
    ])(image)
    assert type(out) == torch.Tensor
    assert out.shape[1:3] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomResizedCrop(fp):
    """RandomResizedCrop output size follows the second (target) argument."""
    image = read_img(fp)
    out = transforms_cls.RandomResizedCrop((500, 300), 300)(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == (300, 300)
    out = transforms_cls.RandomResizedCrop(500, (500, 300))(image)
    assert out.shape[0:2] == (500, 300)
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_ElasticTransform(fp):
    """ElasticTransform preserves dtype and spatial size."""
    image = read_img(fp)
    out = transforms_cls.ElasticTransform()(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomRotation(fp):
    """Rotation by fixed angle, angle range, and explicit center keeps size/dtype."""
    image = read_img(fp)
    for rotation in (transforms_cls.RandomRotation(45),
                     transforms_cls.RandomRotation((-10, 30)),
                     transforms_cls.RandomRotation((-10, 30), center=(200, 250))):
        out = rotation(image)
        assert out.dtype == image.dtype
        assert out.shape[0:2] == image.shape[0:2]
@pytest.mark.parametrize('fp', tiff_files+jpeg_files)
def test_RandomShift(fp):
    """RandomShift keeps dtype and spatial size."""
    image = read_img(fp)
    out = transforms_cls.RandomShift(max_percent=0.1)(image)
    assert out.dtype == image.dtype
    assert out.shape[0:2] == image.shape[0:2]
8010044 | <gh_stars>0
#!/usr/bin/env python3
from termolator_fact_txt import *
# File types accepted by the downstream .txt2/.txt3/.fact generator.
ALLOWED_FILE_TYPES = ['.htm','.html','.txt','.hml','.xml','.xhtml','.sgm','.sgml','.xhml']
def main(args):
    """Create the .txt2/.txt3/.fact companion files for one input document.

    Args:
        args: argv-style list; args[1] is the infile stem (output of the
            distributional term extraction), args[2] its file extension.

    Returns:
        'Fail' on bad usage or unsupported extension (``sys.exit`` then
        prints it and exits non-zero); None on success.
    """
    ## infile is the output file from distributional term extraction
    if len(args) < 3:
        # Previously an unhandled IndexError; report usage instead.
        print('Usage: <script> infile file_type')
        return 'Fail'
    infile = args[1]
    file_type = args[2]
    if file_type.lower() not in ALLOWED_FILE_TYPES:
        print('Warning: File type must be a member of the list',ALLOWED_FILE_TYPES)
        print('Halting Program. Choose a member of this list and run this function again.')
        return 'Fail'
    input_file = infile+file_type
    txt2_file = infile+'.txt2'
    txt3_file = infile+'.txt3'
    fact_file = infile+'.fact'
    create_termolotator_fact_txt_files(input_file,txt2_file,txt3_file,fact_file)
if __name__ == '__main__': sys.exit(main(sys.argv))
| StarcoderdataPython |
11252772 | <gh_stars>0
from comments.serializers.populated import PopulatedNestedCommentSerializer
from group_members.serializers.populated import PopulatedUsersMemberSerializer
from projects.serializers.common import ProjectSerializer
from ..serializers.common import DetailTicketSerializer, TicketSerializer
from jwt_auth.serializers.common import NestedUserSerializer
class PopulatedTicketSerializer(TicketSerializer):
    """Ticket serializer with owner, project and assignee expanded as nested objects."""
    owner = PopulatedUsersMemberSerializer()
    user_owner = NestedUserSerializer()
    project = ProjectSerializer()
    assigned_user = PopulatedUsersMemberSerializer()
    # comments = PopulatedNestedCommentSerializer(many=True)
class PopulatedTicketWithOwnerSerializer(DetailTicketSerializer):
    """Detail ticket serializer that also nests the ticket's comments."""
    owner = PopulatedUsersMemberSerializer()
    user_owner = NestedUserSerializer()
    assigned_user = PopulatedUsersMemberSerializer()
    comments = PopulatedNestedCommentSerializer(many=True)
| StarcoderdataPython |
11371576 | import unittest
from MapperSwitch import MapperSwitch
from Wiring import Wiring
class TestMapperSwitch(unittest.TestCase):
    """Unit tests for MapperSwitch driven by a small three-pin wiring table."""

    def setUp(self):
        # string pin id -> list of pin ids (exact semantics defined by Wiring)
        self.wiringCfg = {"0": [0], "1": [2, 0], "2": [1, 0]}
        self.mapper = MapperSwitch(Wiring(self.wiringCfg))

    def testSignal(self):
        self.assertEqual(1, len(self.mapper.signalIn(0)))
        self.assertEqual(2, len(self.mapper.signalIn(1)))

    def testinvalidOperation(self):
        # reverse signalling through the switch is unsupported and must raise
        with self.assertRaises(Exception):
            self.mapper.reverseSignal(0)
| StarcoderdataPython |
12859224 | <filename>tests/data/demo-project/demo/c/d.py
def e():
    """Print a fixed demo message identifying this function and its module."""
    message = "This is function e in file d!"
    print(message)
class E:
    """Demo class that stores a fixed message and can print it."""

    def __init__(self):
        self.content = "This is class E in file d"

    def print(self):
        """Write the stored message to stdout."""
        message = self.content
        print(message)
| StarcoderdataPython |
3513921 | <gh_stars>0
import re

def _number_strings(text):
    """Return every maximal run of ASCII digits in *text*, in order."""
    return re.findall('[0-9]+', text)

# Prompt for a file of numbers; fall back to sum.txt on an empty answer.
name = input("Enter file:")
if len(name) < 1:
    name = "sum.txt"

# BUG FIX: the original iterated the handle line-by-line but then called
# handle.read() inside the loop, which dropped the first line's numbers and
# exhausted the file after one iteration. Read the whole file once instead;
# the with-block also guarantees the handle is closed.
with open(name) as handle:
    digit_runs = _number_strings(handle.read())

# Report how many numbers were found, then their total as floats
# (matching the original's float conversion).
print(len(digit_runs))
print(sum(float(token) for token in digit_runs))
1635199 | <gh_stars>1-10
#MenuTitle: Toggle Axis 3
# -*- coding: utf-8 -*-
__doc__="""
Toggles along masters across the 3rd axis in current tab.
"""

from Foundation import NSUserDefaults, NSString

font = Glyphs.font
tab = font.currentTab
selectedMaster = font.selectedFontMaster

AXIS_NUMBER = 3 # human-readable (1-based) axis to toggle across
theAxisIndex = AXIS_NUMBER - 1 # 0-based index into master.axes

try:
	def getTheAxisValue( master ):
		"""Sort key: the master's coordinate on the toggled axis."""
		return master.axes[ theAxisIndex ]

	# "Related" masters match the selected master on every axis except the
	# toggled one; only those are candidates for the toggle.
	relatedMasters = []
	for master in font.masters:
		if master == selectedMaster:
			continue
		if all( value == selectedMaster.axes[ i ]
		        for i, value in enumerate( master.axes )
		        if i != theAxisIndex ):
			relatedMasters.append( master )

	# sort candidates by their coordinate on the toggled axis
	relatedMasters.sort( key = getTheAxisValue )

	# pick the next master along the axis, wrapping to the first at the end
	toggleTo = None
	for master in relatedMasters:
		if master.axes[ theAxisIndex ] > selectedMaster.axes[ theAxisIndex ]:
			toggleTo = master
			break
	if toggleTo is None:
		toggleTo = relatedMasters[0]

	# activate the chosen master in the current tab
	for i, master in enumerate(font.masters):
		if master == toggleTo:
			tab.masterIndex = i
except Exception:
	# was a bare except that printed the 0-based index; report the
	# human-readable 1-based axis number instead
	print('sorry, there is no axis %s' % AXIS_NUMBER)
11336732 | <reponame>mberkay0/wounderful
from django import forms
from .models import UploadImage
from crispy_forms.helper import FormHelper
class UploadImageForm(forms.ModelForm):
    """Model form exposing only the ``images`` field of UploadImage."""
    # NOTE(review): FormHelper() is created once at class-definition time and
    # shared by every form instance; crispy-forms typically builds it per
    # instance in __init__ — confirm the shared helper is intentional.
    helper = FormHelper()
    helper.form_show_labels = True
    class Meta:
        model = UploadImage
        fields = [
            'images',
        ]
| StarcoderdataPython |
3581182 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DataFitting.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the static widget tree for the pyMass main window.

        Generated by pyuic5 from DataFitting.ui (see the file header: hand
        edits are lost on regeneration — change the .ui file instead).
        Layout: a left column of directory/file and band-fit controls next to
        the ARPES plot widget on top, and profile-fit controls next to the
        profile plot widget below.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1143, 692)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(900, 640))
        # Dark application-wide stylesheet (orange accents on grey).
        MainWindow.setStyleSheet("#MainWindow{\n"
"}\n"
"\n"
"#centralWidget{\n"
"    background: rgb(50, 50, 50);\n"
"}\n"
"\n"
"QCheckBox{\n"
"    color:rgb(186, 184, 172);\n"
"}\n"
"\n"
"QLineEdit{\n"
"    color:rgb(186, 184, 172);\n"
"    background:rgb(80, 80, 80);\n"
"    border: 1px black;\n"
"    selection-background-color:rgb(208, 146, 0);\n"
"}\n"
"\n"
"QPushButton{\n"
"    color:rgb(186, 184, 172);\n"
"    background:rgb(40, 40, 40);\n"
"    border-color:rgb(0, 0, 0);\n"
"    selection-background-color:rgb(208, 146, 0);\n"
"}\n"
"\n"
"QRadioButton{\n"
"    color:rgb(186, 184, 172);\n"
"    background:rgb(40, 40, 40);\n"
"    border-color:rgb(0, 0, 0);\n"
"    selection-background-color:rgb(208, 146, 0);\n"
"}\n"
"\n"
"QComboBox{\n"
"    color:rgb(186, 184, 172);\n"
"    background:rgb(80, 80, 80);\n"
"    selection-background-color: rgb(208, 146, 0);\n"
"}\n"
"\n"
"#CB_D2{\n"
"\n"
"}\n"
"\n"
"QComboBox QAbstractItemView {\n"
"    border:0px;\n"
"    color:rgb(186, 184, 172);\n"
"    background:rgb(80, 80, 80);\n"
"    selection-background-color: rgb(208, 146, 0);\n"
"}\n"
"\n"
"\n"
"QLabel{\n"
"    color:rgb(208, 146, 0);\n"
"}\n"
"\n"
"#LBL_ApplicationSlogan{\n"
"    color:rgb(186, 184, 172);\n"
"}\n"
"\n"
"QFrame[frameShape=\"4\"],\n"
"QFrame[frameShape=\"5\"]\n"
"{\n"
"\n"
"    color:rgb(208, 146, 0);\n"
"}\n"
"\n"
"Line{\n"
"    color:rgb(208, 146, 0);\n"
"    background-color: rgb(100, 0, 0);\n"
"    alternate-background-color: rgb(0, 100, 0);\n"
"    border-color: rgb(0, 0,100);\n"
"    gridline-color: rgb(0, 200, 0);\n"
"    selection-color: rgb(200, 0, 0);\n"
"    selection-background-color: rgb(0, 0, 200);\n"
"}")
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setMinimumSize(QtCore.QSize(104, 80))
        self.centralWidget.setAutoFillBackground(False)
        self.centralWidget.setStyleSheet("")
        self.centralWidget.setObjectName("centralWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralWidget)
        self.gridLayout.setContentsMargins(11, 11, 11, 11)
        self.gridLayout.setSpacing(6)
        self.gridLayout.setObjectName("gridLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left column: directory selection, file loading, mode and band-fit controls.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(6)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setSpacing(6)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.PB_Dir = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_Dir.sizePolicy().hasHeightForWidth())
        self.PB_Dir.setSizePolicy(sizePolicy)
        self.PB_Dir.setObjectName("PB_Dir")
        self.horizontalLayout_3.addWidget(self.PB_Dir)
        self.LE_Dir = QtWidgets.QLineEdit(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.LE_Dir.sizePolicy().hasHeightForWidth())
        self.LE_Dir.setSizePolicy(sizePolicy)
        self.LE_Dir.setMinimumSize(QtCore.QSize(200, 0))
        self.LE_Dir.setObjectName("LE_Dir")
        self.horizontalLayout_3.addWidget(self.LE_Dir)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        self.PB_Dir_Load = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_Dir_Load.sizePolicy().hasHeightForWidth())
        self.PB_Dir_Load.setSizePolicy(sizePolicy)
        self.PB_Dir_Load.setObjectName("PB_Dir_Load")
        self.verticalLayout_2.addWidget(self.PB_Dir_Load)
        self.CB_Files = QtWidgets.QComboBox(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.CB_Files.sizePolicy().hasHeightForWidth())
        self.CB_Files.setSizePolicy(sizePolicy)
        self.CB_Files.setObjectName("CB_Files")
        self.verticalLayout_2.addWidget(self.CB_Files)
        self.PB_File = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_File.sizePolicy().hasHeightForWidth())
        self.PB_File.setSizePolicy(sizePolicy)
        self.PB_File.setObjectName("PB_File")
        self.verticalLayout_2.addWidget(self.PB_File)
        self.line_4 = QtWidgets.QFrame(self.centralWidget)
        self.line_4.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_4.setObjectName("line_4")
        self.verticalLayout_2.addWidget(self.line_4)
        # Mode selection row (MDC / EDC / Free radio buttons in one group).
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(-1, -1, 10, -1)
        self.horizontalLayout_4.setSpacing(6)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.LBL_Mode = QtWidgets.QLabel(self.centralWidget)
        self.LBL_Mode.setObjectName("LBL_Mode")
        self.horizontalLayout_4.addWidget(self.LBL_Mode)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem)
        self.RB_Mode_MDC = QtWidgets.QRadioButton(self.centralWidget)
        self.RB_Mode_MDC.setObjectName("RB_Mode_MDC")
        self.buttonGroup = QtWidgets.QButtonGroup(MainWindow)
        self.buttonGroup.setObjectName("buttonGroup")
        self.buttonGroup.addButton(self.RB_Mode_MDC)
        self.horizontalLayout_4.addWidget(self.RB_Mode_MDC)
        self.RB_Mode_EDC = QtWidgets.QRadioButton(self.centralWidget)
        self.RB_Mode_EDC.setObjectName("RB_Mode_EDC")
        self.buttonGroup.addButton(self.RB_Mode_EDC)
        self.horizontalLayout_4.addWidget(self.RB_Mode_EDC)
        self.RB_Mode_Free = QtWidgets.QRadioButton(self.centralWidget)
        self.RB_Mode_Free.setObjectName("RB_Mode_Free")
        self.buttonGroup.addButton(self.RB_Mode_Free)
        self.horizontalLayout_4.addWidget(self.RB_Mode_Free)
        self.PB_DC_Editing = QtWidgets.QPushButton(self.centralWidget)
        self.PB_DC_Editing.setObjectName("PB_DC_Editing")
        self.horizontalLayout_4.addWidget(self.PB_DC_Editing)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)
        self.line_7 = QtWidgets.QFrame(self.centralWidget)
        self.line_7.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_7.setObjectName("line_7")
        self.verticalLayout_2.addWidget(self.line_7)
        # Band-fit controls: fit-function selector, start parameters, output log.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setSpacing(6)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.LBL_Fit = QtWidgets.QLabel(self.centralWidget)
        self.LBL_Fit.setObjectName("LBL_Fit")
        self.horizontalLayout_5.addWidget(self.LBL_Fit)
        spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem1)
        self.CB_BandFitfunction = QtWidgets.QComboBox(self.centralWidget)
        self.CB_BandFitfunction.setObjectName("CB_BandFitfunction")
        self.horizontalLayout_5.addWidget(self.CB_BandFitfunction)
        self.PB_Fit_ARPES = QtWidgets.QPushButton(self.centralWidget)
        self.PB_Fit_ARPES.setObjectName("PB_Fit_ARPES")
        self.horizontalLayout_5.addWidget(self.PB_Fit_ARPES)
        self.verticalLayout_2.addLayout(self.horizontalLayout_5)
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setSpacing(6)
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.label_2 = QtWidgets.QLabel(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
        self.label_2.setSizePolicy(sizePolicy)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_6.addWidget(self.label_2)
        self.LE_Update_Bandfit = QtWidgets.QLineEdit(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.LE_Update_Bandfit.sizePolicy().hasHeightForWidth())
        self.LE_Update_Bandfit.setSizePolicy(sizePolicy)
        self.LE_Update_Bandfit.setObjectName("LE_Update_Bandfit")
        self.horizontalLayout_6.addWidget(self.LE_Update_Bandfit)
        self.horizontalLayout_6.setStretch(0, 1)
        self.horizontalLayout_6.setStretch(1, 1)
        self.verticalLayout_2.addLayout(self.horizontalLayout_6)
        self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_7.setSpacing(6)
        self.horizontalLayout_7.setObjectName("horizontalLayout_7")
        self.PB_Update_Bandfit = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_Update_Bandfit.sizePolicy().hasHeightForWidth())
        self.PB_Update_Bandfit.setSizePolicy(sizePolicy)
        self.PB_Update_Bandfit.setObjectName("PB_Update_Bandfit")
        self.horizontalLayout_7.addWidget(self.PB_Update_Bandfit)
        self.LBL_Fitfunc = QtWidgets.QLabel(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.LBL_Fitfunc.sizePolicy().hasHeightForWidth())
        self.LBL_Fitfunc.setSizePolicy(sizePolicy)
        self.LBL_Fitfunc.setMinimumSize(QtCore.QSize(200, 0))
        self.LBL_Fitfunc.setMaximumSize(QtCore.QSize(220, 16777215))
        font = QtGui.QFont()
        font.setPointSize(7)
        self.LBL_Fitfunc.setFont(font)
        self.LBL_Fitfunc.setObjectName("LBL_Fitfunc")
        self.horizontalLayout_7.addWidget(self.LBL_Fitfunc)
        spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_7.addItem(spacerItem2)
        self.verticalLayout_2.addLayout(self.horizontalLayout_7)
        self.line_8 = QtWidgets.QFrame(self.centralWidget)
        self.line_8.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_8.setObjectName("line_8")
        self.verticalLayout_2.addWidget(self.line_8)
        self.LBL_ARPES_Output = QtWidgets.QLabel(self.centralWidget)
        self.LBL_ARPES_Output.setObjectName("LBL_ARPES_Output")
        self.verticalLayout_2.addWidget(self.LBL_ARPES_Output)
        self.TE_ARPES_output = QtWidgets.QTextEdit(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.TE_ARPES_output.sizePolicy().hasHeightForWidth())
        self.TE_ARPES_output.setSizePolicy(sizePolicy)
        self.TE_ARPES_output.setReadOnly(True)
        self.TE_ARPES_output.setObjectName("TE_ARPES_output")
        self.verticalLayout_2.addWidget(self.TE_ARPES_output)
        spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem3)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        self.line_3 = QtWidgets.QFrame(self.centralWidget)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_3.setObjectName("line_3")
        self.horizontalLayout.addWidget(self.line_3)
        # ARPES plot area (MPL_WIDGET is a project-defined matplotlib widget —
        # promoted in the .ui file; not part of Qt itself).
        self.WDGT_ARPES = MPL_WIDGET(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.WDGT_ARPES.sizePolicy().hasHeightForWidth())
        self.WDGT_ARPES.setSizePolicy(sizePolicy)
        self.WDGT_ARPES.setMinimumSize(QtCore.QSize(500, 300))
        self.WDGT_ARPES.setObjectName("WDGT_ARPES")
        self.horizontalLayout.addWidget(self.WDGT_ARPES)
        self.horizontalLayout.setStretch(1, 1)
        self.horizontalLayout.setStretch(2, 1)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.line_2 = QtWidgets.QFrame(self.centralWidget)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setObjectName("line_2")
        self.verticalLayout.addWidget(self.line_2)
        # Bottom row: profile-fit controls next to the profile plot widget.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setSpacing(6)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setSpacing(6)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.LBL_ProfFit = QtWidgets.QLabel(self.centralWidget)
        self.LBL_ProfFit.setObjectName("LBL_ProfFit")
        self.verticalLayout_3.addWidget(self.LBL_ProfFit)
        self.CB_DCFitfunction = QtWidgets.QComboBox(self.centralWidget)
        self.CB_DCFitfunction.setObjectName("CB_DCFitfunction")
        self.verticalLayout_3.addWidget(self.CB_DCFitfunction)
        self.line_5 = QtWidgets.QFrame(self.centralWidget)
        self.line_5.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_5.setObjectName("line_5")
        self.verticalLayout_3.addWidget(self.line_5)
        self.label = QtWidgets.QLabel(self.centralWidget)
        self.label.setObjectName("label")
        self.verticalLayout_3.addWidget(self.label)
        self.LE_Parameters = QtWidgets.QLineEdit(self.centralWidget)
        self.LE_Parameters.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.LE_Parameters.sizePolicy().hasHeightForWidth())
        self.LE_Parameters.setSizePolicy(sizePolicy)
        self.LE_Parameters.setMinimumSize(QtCore.QSize(150, 0))
        self.LE_Parameters.setObjectName("LE_Parameters")
        self.verticalLayout_3.addWidget(self.LE_Parameters)
        self.LBL_Parameters = QtWidgets.QLabel(self.centralWidget)
        font = QtGui.QFont()
        font.setPointSize(7)
        self.LBL_Parameters.setFont(font)
        self.LBL_Parameters.setObjectName("LBL_Parameters")
        self.verticalLayout_3.addWidget(self.LBL_Parameters)
        self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_9.setSpacing(6)
        self.horizontalLayout_9.setObjectName("horizontalLayout_9")
        self.PB_Update = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_Update.sizePolicy().hasHeightForWidth())
        self.PB_Update.setSizePolicy(sizePolicy)
        self.PB_Update.setObjectName("PB_Update")
        self.horizontalLayout_9.addWidget(self.PB_Update)
        self.ChB_Estimate_Params_Profile = QtWidgets.QCheckBox(self.centralWidget)
        self.ChB_Estimate_Params_Profile.setObjectName("ChB_Estimate_Params_Profile")
        self.horizontalLayout_9.addWidget(self.ChB_Estimate_Params_Profile)
        self.verticalLayout_3.addLayout(self.horizontalLayout_9)
        self.line_6 = QtWidgets.QFrame(self.centralWidget)
        self.line_6.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_6.setObjectName("line_6")
        self.verticalLayout_3.addWidget(self.line_6)
        self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_8.setSpacing(6)
        self.horizontalLayout_8.setObjectName("horizontalLayout_8")
        self.PB_FitProf = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_FitProf.sizePolicy().hasHeightForWidth())
        self.PB_FitProf.setSizePolicy(sizePolicy)
        self.PB_FitProf.setObjectName("PB_FitProf")
        self.horizontalLayout_8.addWidget(self.PB_FitProf)
        self.PB_Profile_Editing = QtWidgets.QPushButton(self.centralWidget)
        self.PB_Profile_Editing.setObjectName("PB_Profile_Editing")
        self.horizontalLayout_8.addWidget(self.PB_Profile_Editing)
        self.verticalLayout_3.addLayout(self.horizontalLayout_8)
        self.PB_P2A = QtWidgets.QPushButton(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PB_P2A.sizePolicy().hasHeightForWidth())
        self.PB_P2A.setSizePolicy(sizePolicy)
        self.PB_P2A.setObjectName("PB_P2A")
        self.verticalLayout_3.addWidget(self.PB_P2A)
        spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_3.addItem(spacerItem4)
        self.horizontalLayout_2.addLayout(self.verticalLayout_3)
        self.line = QtWidgets.QFrame(self.centralWidget)
        self.line.setFrameShadow(QtWidgets.QFrame.Plain)
        self.line.setFrameShape(QtWidgets.QFrame.VLine)
        self.line.setObjectName("line")
        self.horizontalLayout_2.addWidget(self.line)
        self.WDGT_Profile = MPL_WIDGET(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.WDGT_Profile.sizePolicy().hasHeightForWidth())
        self.WDGT_Profile.setSizePolicy(sizePolicy)
        self.WDGT_Profile.setMinimumSize(QtCore.QSize(500, 250))
        self.WDGT_Profile.setMaximumSize(QtCore.QSize(16777215, 200))
        self.WDGT_Profile.setObjectName("WDGT_Profile")
        self.horizontalLayout_2.addWidget(self.WDGT_Profile)
        self.horizontalLayout_2.setStretch(1, 10)
        self.horizontalLayout_2.setStretch(2, 10)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.verticalLayout.setStretch(0, 1)
        self.verticalLayout.setStretch(1, 1)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralWidget)
        # Menu/file actions (text assigned in retranslateUi).
        self.actionClose = QtWidgets.QAction(MainWindow)
        self.actionClose.setObjectName("actionClose")
        self.actionSave_Settings = QtWidgets.QAction(MainWindow)
        self.actionSave_Settings.setObjectName("actionSave_Settings")
        self.actionLoad_Calibration = QtWidgets.QAction(MainWindow)
        self.actionLoad_Calibration.setObjectName("actionLoad_Calibration")
        self.actionSet_Standard_Calibration = QtWidgets.QAction(MainWindow)
        self.actionSet_Standard_Calibration.setObjectName("actionSet_Standard_Calibration")

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install all translatable UI strings (auto-generated by Qt Designer / pyuic).

    Called once from setupUi; re-calling it re-applies translations after a
    QLocale/translator change. Do not hand-edit the individual setText calls:
    they are regenerated from the .ui file.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "pyMass"))
    # file/directory loading controls
    self.PB_Dir.setText(_translate("MainWindow", "Directory"))
    self.LE_Dir.setText(_translate("MainWindow", "Filelocation"))
    self.PB_Dir_Load.setText(_translate("MainWindow", "Load Files"))
    self.PB_File.setText(_translate("MainWindow", "Load"))
    # cut-mode selection (MDC / EDC / free)
    self.LBL_Mode.setText(_translate("MainWindow", "Mode"))
    self.RB_Mode_MDC.setText(_translate("MainWindow", "MDC"))
    self.RB_Mode_EDC.setText(_translate("MainWindow", "EDC"))
    self.RB_Mode_Free.setText(_translate("MainWindow", "Free"))
    self.PB_DC_Editing.setText(_translate("MainWindow", "Enable Editing"))
    # band-fit (ARPES) controls
    self.LBL_Fit.setText(_translate("MainWindow", "Fittype (Band)"))
    self.PB_Fit_ARPES.setText(_translate("MainWindow", "Fit"))
    self.label_2.setText(_translate("MainWindow", "Start Parameters"))
    self.LE_Update_Bandfit.setText(_translate("MainWindow", "Enter Parameters"))
    self.PB_Update_Bandfit.setText(_translate("MainWindow", "Update"))
    self.LBL_Fitfunc.setText(_translate("MainWindow", "TextLabel"))
    self.LBL_ARPES_Output.setText(_translate("MainWindow", "Output"))
    # profile-fit controls
    self.LBL_ProfFit.setText(_translate("MainWindow", "Fit function"))
    self.label.setText(_translate("MainWindow", "Start Parameters"))
    self.LE_Parameters.setText(_translate("MainWindow", "Enter Parameters"))
    self.LBL_Parameters.setText(_translate("MainWindow", "TextLabel"))
    self.PB_Update.setText(_translate("MainWindow", "Update"))
    self.ChB_Estimate_Params_Profile.setToolTip(_translate("MainWindow", "Estimate the Fit parameters between left and right border"))
    self.ChB_Estimate_Params_Profile.setText(_translate("MainWindow", "Estimate"))
    self.PB_FitProf.setText(_translate("MainWindow", "Fit Profile"))
    self.PB_Profile_Editing.setText(_translate("MainWindow", "Enable Editing"))
    self.PB_P2A.setText(_translate("MainWindow", "Peak 2 ARPES"))
    # menu actions
    self.actionClose.setText(_translate("MainWindow", "Close"))
    self.actionSave_Settings.setText(_translate("MainWindow", "Save Calibration"))
    self.actionLoad_Calibration.setText(_translate("MainWindow", "Load Calibration"))
    self.actionSet_Standard_Calibration.setText(_translate("MainWindow", "Set Standard Calibration"))
from mplwidget import MPL_WIDGET
if __name__ == "__main__":
    # Stand-alone preview entry point: build the generated UI on a bare
    # QMainWindow and run the Qt event loop until the window is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
11205536 | <gh_stars>1-10
import re
# stop-words: prefixes ending on one of these never make a useful label
black = ['à', 'di', 'au', 'aux', 'des', 'l', 'sous', 'et', 'du', 'les', 'd',
         'lès', 'la', 'le', 'en', 'de', 'sur', 'saint', 'sainte']


# Adelans-et-le-Val-de-Bithaine -> Adelans-et-le-Val-de-Bithaine, adelans-et-le-val, adelans
# Return full name first
def subpart_generator(name):
    """Return the full name followed by progressively shorter hyphen-prefixes.

    Prefixes whose last word is black-listed or starts with a digit are
    dropped. When the name has no hyphen, or no usable prefix survives,
    a one-element tuple with the unmodified name is returned.
    """
    if '-' not in name:
        return (name,)
    words = name.lower().replace("'", ' ').split('-')
    prefixes = []
    for end in range(len(words) - 1, 0, -1):
        last = words[end - 1]
        if last in black or last[0].isdigit():
            continue
        prefixes.append('-'.join(words[:end]))
    if not prefixes:
        return (name,)
    return [name] + prefixes
def make_labels(helper, result):
    """Fill result.labels with search-label strings; no-op if already set.

    Labels combine housenumber/name/postcode with each city subpart from
    subpart_generator(), most complete combination first. *helper* is
    accepted for interface compatibility but unused here.
    """
    if result.labels:
        return
    combos = []
    for city in subpart_generator(result.city):
        # Most complet first
        if result.type == 'municipality':
            combos.extend(([result.postcode, city],
                           [city, result.postcode],
                           [result.postcode],
                           [city]))
        elif result.type in ['street', 'locality']:
            combos.extend(([result.name, result.postcode, city],
                           [result.name, city, result.postcode],
                           [result.name, result.postcode],
                           [result.name, city]))
        elif result.type == 'housenumber':
            combos.extend(([result.housenumber, result.name, result.postcode, city],
                           [result.housenumber, result.name, city, result.postcode],
                           [result.housenumber, result.name, result.postcode],
                           [result.housenumber, result.name, city]))
    result.labels.extend(' '.join(parts) for parts in combos)
| StarcoderdataPython |
386780 | #!/usr/bin/env python
#coding:utf-8
#########################################################################################################################
## Version : 0.0.7-1
## Developer : Yannyann (https://github.com/a2d8a4v)
## Website : https://www.yannyann.com
## License : MIT License
#########################################################################################################################
# Query the ip-api.com geolocation service and print the ISP name for the
# IP passed on the command line, or for the caller's own IP when no
# argument is given.
# NOTE(review): this is Python 2 code -- ``urllib2`` does not exist in
# Python 3 (use ``urllib.request`` there).
import sys,urllib2,socket,json
try:
    # fail fast instead of hanging on an unreachable service
    socket.setdefaulttimeout(5)
    if len(sys.argv) == 1:
        apiurl = "http://ip-api.com/json"
    elif len(sys.argv) == 2:
        # look up the explicitly supplied IP address
        apiurl = "http://ip-api.com/json/%s" % sys.argv[1]
    # with more than one argument, apiurl stays unbound and the resulting
    # NameError falls through to the usage message below
    content = urllib2.urlopen(apiurl).read()
    content = json.JSONDecoder().decode(content)
    if content['status'] == 'success':
        print(content['isp'])
    else:
        print("NO")
except:
    # NOTE(review): this bare except also swallows network/JSON errors and
    # misreports them as a usage problem; catching specific exceptions
    # would be clearer.
    print("Usage:%s IP" % sys.argv[0])
| StarcoderdataPython |
3520063 | <gh_stars>1-10
# Demonstrates basic file reading: whole-file read(), readlines(), and
# line-by-line iteration. Improvement over the original: file handles are
# managed with ``with`` blocks, so they are always closed even if an
# exception occurs mid-read (the original relied on explicit close()).
with open("olympics.txt", "r") as fileref:
    contents = fileref.read()  # entire file contents as a single string
    print(contents[:100])

    fileref.seek(0)  # rewind fileref
    lines = fileref.readlines()  # one string per line, '\n' included
    print("File lines: ", len(lines))
    for line in lines[:4]:
        # each line keeps its trailing '\n'; strip it so print() does not
        # produce doubled blank lines
        print(line.strip("\n"))

    # more pythonic way to read the whole file: iterate the handle itself
    fileref.seek(0)  # rewind fileref
    number_of_lines = 0
    for line in fileref:
        number_of_lines += 1
        print(line.strip("\n"))

# Ex1: find the number of characters in school_prompt2.txt -> num_char
with open("school_prompt2.txt", "r") as f:
    num_char = len(f.read())
print("school_prompt2.txt number of characters: ", num_char)

# Ex2: first 40 characters of emotion_words2.txt -> first_forty
with open("emotion_words2.txt", "r") as f:
    first_forty = f.read(40)
print("emotion_words2.txt first 40 characters are : ", first_forty)
| StarcoderdataPython |
1947112 | """
scope.py
the code in this file is meant to demonstrate how variable scope
works in python 3.8. When writing this code, I referred to the Python 3.8
documentation to ensure that I was using the correct technical vocabulary
in my own documentation, but all code written is my own.
Doc pages I referred to:
Execution Model https://docs.python.org/3.8/reference/executionmodel.html
Objects, Values, and Types https://docs.python.org/3.8/reference/datamodel.html
<NAME> - Apr 11, 2020 - MIT License
"""
# the scope of a code block or variable is determined when the block/var is named
from baz import Baz
# because Baz is imported at the module level, it is globally defined.
bar = 2
# bar is a global variable because it is defined within module block
# all function and class definitions at the module level are also global
class Baz():
    """Demonstration class for Python name resolution.

    NOTE: because this definition executes *after* ``from baz import Baz``,
    this local definition shadows the imported ``Baz`` at module scope --
    see scopeTest() for confirmation.
    """

    def __init__(self):
        # ``self.bar`` is an *instance attribute*: it only exists on a named
        # instance of this class and is independent of the global ``bar``.
        # Rebinding it on one instance does not alter other instances or
        # the class definition itself.
        self.bar = 3

    def scopeTest(self):
        # This method is a *class attribute*, shared by all instances; it can
        # be called through the class (Baz().scopeTest()) or through an
        # instance, but never as a bare name -- ``scopeTest()`` alone would
        # raise NameError.
        print("inside scopeTest function scope")
        print("printing bar")
        print(bar)
        # ``bar`` here resolves to the *global* bar (the nearest enclosing
        # scope that binds the name). If a local ``bar = 5`` were assigned
        # anywhere in this method, the print above would instead raise
        # UnboundLocalError, because the assignment would make ``bar`` local
        # to the whole method body.
        print("printing self.bar")
        print(self.bar)
def foo():
    """Show that a function-local ``bar`` shadows the global without rebinding it."""
    baz = Baz()
    print("executing foo()")
    bar = 1  # local variable bound to the function scope; the global bar is untouched
    print(bar)
    baz.bar = 6  # rebinding an instance attribute affects only this instance
    print("printing baz.scopeTesi() inside foo()")
    baz.scopeTest()
    print("printing Baz().scopeTest() inside foo()")
    Baz().scopeTest()
    print("exiting foo() scope")
foo()  # should print 1 (foo's local bar shadows the global)
print("printing bar outside of foo()")
print(bar)  # should print 2 (the global bar was never reassigned)
| StarcoderdataPython |
180093 | from __future__ import annotations
from pathlib import Path
from typer import echo
from ..resolvers import clone_github, clone_local
from .resolver import Resolver
from .runner import Runner
from .variables import get_variables, read_variables
class NooCore:
    """Core entry point: resolves a noofile spec and runs its steps."""

    def __init__(self, allow_shell: bool = False) -> None:
        self.resolver = Resolver()
        self.shell = allow_shell  # whether the Runner may execute shell steps

    def clone(self, name: str, noofile: str, dest: Path) -> None:
        """Clone the template described by *noofile* into *dest* and run its steps.

        Supported remote schemes: ``git:<repo>`` (GitHub clone) and
        ``file:<path>`` (local copy).

        :raises ValueError: for any other remote scheme.
        """
        spec = self.resolver.resolve(noofile)
        echo(f"Starting clone process for {spec.name or name}.")
        if not spec.remote:
            # nothing to clone; report and bail out without running steps
            echo(f"No remote specified for {spec.name or name}")
            return
        if spec.remote.startswith("git:"):
            clone_github(spec.remote[4:], dest)
        elif spec.remote.startswith("file:"):
            clone_local(Path(spec.remote[5:]), dest)
        else:
            raise ValueError(f"Invalid remote: {spec.remote}")
        # merge interactive/user-supplied variables into the defaults
        variables = get_variables(name)
        variables["var"].update(read_variables(spec.read))
        runner = Runner(self, dest, spec.steps, variables, self.shell)
        runner.run()

    def mod(self, noofile: str, dest: Path) -> None:
        """Run the steps of *noofile* against an existing directory *dest* (no clone)."""
        spec = self.resolver.resolve(noofile)
        echo(f"Starting modification for {spec.name or 'unnamed'}.")
        variables = get_variables()
        variables["var"].update(read_variables(spec.read))
        runner = Runner(self, dest, spec.steps, variables, self.shell)
        runner.run()
| StarcoderdataPython |
11312066 | <reponame>Mulham/Django-Project
# Generated by Django 3.2.3 on 2021-05-31 00:37
import books.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Changes the upload targets of Book.image/Book.url and attaches the
    file-extension validator to Book.url. Do not hand-edit field
    definitions here; regenerate with ``makemigrations`` instead.
    """

    dependencies = [
        ('books', '0004_auto_20210530_2358'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='image',
            field=models.ImageField(upload_to='upload/'),
        ),
        migrations.AlterField(
            model_name='book',
            name='url',
            field=models.FileField(upload_to='books', validators=[books.models.validate_file_extension]),
        ),
    ]
1801071 | <reponame>mklewitz-kisura/dotfiles<gh_stars>0
"""
The :mod:`jedi.api.classes` module contains the return classes of the API.
These classes are the much bigger part of the whole API, because they contain
the interesting information about completion and goto operations.
"""
import warnings
import re
from jedi._compatibility import u
from jedi import settings
from jedi import common
from jedi.parser.cache import parser_cache
from jedi.cache import memoize_method
from jedi.evaluate import representation as er
from jedi.evaluate import instance
from jedi.evaluate import imports
from jedi.evaluate import compiled
from jedi.evaluate.filters import ParamName
from jedi.evaluate.imports import ImportName
from jedi.api.keywords import KeywordName
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(evaluator, context):
    """
    List sub-definitions (e.g., methods in class).

    :type scope: Scope
    :rtype: list of Definition
    """
    # Only the first filter is consulted; it holds the names defined
    # directly in this context (search_global=True).
    filter = next(context.get_filters(search_global=True))
    names = [name for name in filter.values()]
    return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
class BaseDefinition(object):
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_sqlite3': 'sqlite3',
'__builtin__': '',
'builtins': '',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
def __init__(self, evaluator, name):
self._evaluator = evaluator
self._name = name
"""
An instance of :class:`jedi.parser.reprsentation.Name` subclass.
"""
self.is_keyword = isinstance(self._name, KeywordName)
# generate a path to the definition
self._module = name.get_root_context()
if self.in_builtin_module():
self.module_path = None
else:
self.module_path = self._module.py__file__()
"""Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
@property
def name(self):
"""
Name of variable/function/class/module.
For example, for ``x = None`` it returns ``'x'``.
:rtype: str or None
"""
return self._name.string_name
@property
def type(self):
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. As what is in ``variable`` is unambiguous
to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
definition for ``sys``, ``f``, ``C`` and ``x``.
>>> from jedi import Script
>>> source = '''
... import keyword
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... for variable in [keyword, f, C, x]:
... variable'''
>>> script = Script(source)
>>> defs = script.goto_definitions()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs # doctest: +NORMALIZE_WHITESPACE
[<Definition module keyword>, <Definition class C>,
<Definition instance D>, <Definition def f>]
Finally, here is what you can get from :attr:`type`:
>>> defs[0].type
'module'
>>> defs[1].type
'class'
>>> defs[2].type
'instance'
>>> defs[3].type
'function'
"""
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
# TODO move this to their respective names.
definition = tree_name.get_definition()
if definition.type == 'import_from' and \
tree_name in definition.get_defined_names():
resolve = True
if isinstance(self._name, imports.SubModuleName) or resolve:
for context in self._name.infer():
return context.api_type
return self._name.api_type
def _path(self):
"""The path to a module/class/function definition."""
def to_reverse():
name = self._name
if name.api_type == 'module':
try:
name = list(name.infer())[0].name
except IndexError:
pass
if name.api_type == 'module':
module_context, = name.infer()
for n in reversed(module_context.py__name__().split('.')):
yield n
else:
yield name.string_name
parent_context = name.parent_context
while parent_context is not None:
try:
method = parent_context.py__name__
except AttributeError:
try:
yield parent_context.name.string_name
except AttributeError:
pass
else:
for name in reversed(method().split('.')):
yield name
parent_context = parent_context.parent_context
return reversed(list(to_reverse()))
@property
def module_name(self):
"""
The module name.
>>> from jedi import Script
>>> source = 'import json'
>>> script = Script(source, path='example.py')
>>> d = script.goto_definitions()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
json
"""
return self._module.name.string_name
def in_builtin_module(self):
"""Whether this is a builtin module."""
return isinstance(self._module, compiled.CompiledObject)
@property
def line(self):
"""The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[0]
@property
def column(self):
"""The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[1]
def docstring(self, raw=False, fast=True):
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, 1, len('def f'), 'example.py')
>>> doc = script.goto_definitions()[0].docstring()
>>> print(doc)
f(a, b=1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring. For function, it is call signature. If you need
actual docstring, use ``raw=True`` instead.
>>> print(script.goto_definitions()[0].docstring(raw=True))
Document for function f.
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing `import a` is slow if you use
the ``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
return _Help(self._name).docstring(fast=fast, raw=raw)
@property
def doc(self):
"""
.. deprecated:: 0.8.0
Use :meth:`.docstring` instead.
.. todo:: Remove!
"""
warnings.warn("Deprecated since Jedi 0.8. Use docstring() instead.", DeprecationWarning, stacklevel=2)
return self.docstring(raw=False)
@property
def raw_doc(self):
"""
.. deprecated:: 0.8.0
Use :meth:`.docstring` instead.
.. todo:: Remove!
"""
warnings.warn("Deprecated since Jedi 0.8. Use docstring() instead.", DeprecationWarning, stacklevel=2)
return self.docstring(raw=True)
@property
def description(self):
"""A textual description of the object."""
return u(self._name.string_name)
@property
def full_name(self):
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up Python manual of the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, 3, len('os.path.join'), 'example.py')
>>> print(script.goto_definitions()[0].full_name)
os.path.join
Notice that it returns ``'os.path.join'`` instead of (for example)
``'posixpath.join'``. This is not correct, since the modules name would
be ``<module 'posixpath' ...>```. However most users find the latter
more practical.
"""
path = list(self._path())
# TODO add further checks, the mapping should only occur on stdlib.
if not path:
return None # for keywords the path is empty
with common.ignored(KeyError):
path[0] = self._mapping[path[0]]
for key, repl in self._tuple_mapping.items():
if tuple(path[:len(key)]) == key:
path = [repl] + path[len(key):]
return '.'.join(path if path[0] else path[1:])
def goto_assignments(self):
if self._name.tree_name is None:
return self
names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
return [Definition(self._evaluator, n) for n in names]
def _goto_definitions(self):
# TODO make this function public.
return [Definition(self._evaluator, d.name) for d in self._name.infer()]
@property
@memoize_method
def params(self):
"""
Raises an ``AttributeError``if the definition is not callable.
Otherwise returns a list of `Definition` that represents the params.
"""
def get_param_names(context):
param_names = []
if context.api_type == 'function':
param_names = list(context.get_param_names())
if isinstance(context, instance.BoundMethod):
param_names = param_names[1:]
elif isinstance(context, (instance.AbstractInstanceContext, er.ClassContext)):
if isinstance(context, er.ClassContext):
search = '__init__'
else:
search = '__call__'
names = context.get_function_slot_names(search)
if not names:
return []
# Just take the first one here, not optimal, but currently
# there's no better solution.
inferred = names[0].infer()
param_names = get_param_names(next(iter(inferred)))
if isinstance(context, er.ClassContext):
param_names = param_names[1:]
return param_names
elif isinstance(context, compiled.CompiledObject):
return list(context.get_param_names())
return param_names
followed = list(self._name.infer())
if not followed or not hasattr(followed[0], 'py__call__'):
raise AttributeError()
context = followed[0] # only check the first one.
return [_Param(self._evaluator, n) for n in get_param_names(context)]
def parent(self):
context = self._name.parent_context
if context is None:
return None
if isinstance(context, er.FunctionExecutionContext):
# TODO the function context should be a part of the function
# execution context.
context = er.FunctionContext(
self._evaluator, context.parent_context, context.tree_node)
return Definition(self._evaluator, context.name)
def __repr__(self):
return "<%s %s>" % (type(self).__name__, self.description)
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
:return str: Returns the line(s) of code or an empty string if it's a
builtin.
"""
if self.in_builtin_module():
return ''
path = self._name.get_root_context().py__file__()
lines = parser_cache[path].lines
line_nr = self._name.start_pos[0]
start_line_nr = line_nr - before
return ''.join(lines[start_line_nr:line_nr + after + 1])
class Completion(BaseDefinition):
"""
`Completion` objects are returned from :meth:`api.Script.completions`. They
provide additional information about a completion.
"""
def __init__(self, evaluator, name, stack, like_name_length):
super(Completion, self).__init__(evaluator, name)
self._like_name_length = like_name_length
self._stack = stack
# Completion objects with the same Completion name (which means
# duplicate items in the completion)
self._same_name_completions = []
def _complete(self, like_name):
    """Build the completion text for this entry.

    :param like_name: if True, strip the part the user has already typed
        (the first ``self._like_name_length`` characters) from the name.
    :return: the (possibly shortened) name plus any appended ``(`` / ``=``.
    """
    append = ''
    # BUG FIX: ``self.type`` always returns lowercase api types (see the
    # BaseDefinition.type docstring examples, e.g. 'function'), so the
    # original comparison with 'Function' could never match and the
    # add_bracket_after_function setting was silently ignored.
    if settings.add_bracket_after_function \
            and self.type == 'function':
        append = '('
    # When completing a parameter name inside a call (a trailer that has
    # no explicit argument yet), offer the keyword-argument form ``name=``.
    if isinstance(self._name, ParamName) and self._stack is not None:
        node_names = list(self._stack.get_node_names(self._evaluator.grammar))
        if 'trailer' in node_names and 'argument' not in node_names:
            append += '='
    name = self._name.string_name
    if like_name:
        name = name[self._like_name_length:]
    return name + append
@property
def complete(self):
"""
Return the rest of the word, e.g. completing ``isinstance``::
isinstan# <-- Cursor is here
would return the string 'ce'. It also adds additional stuff, depending
on your `settings.py`.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would give a ``Completion`` which `complete`
would be `am=`
"""
return self._complete(True)
@property
def name_with_symbols(self):
"""
Similar to :attr:`name`, but like :attr:`name` returns also the
symbols, for example assuming the following function definition::
def foo(param=0):
pass
completing ``foo(`` would give a ``Completion`` which
``name_with_symbols`` would be "param=".
"""
return self._complete(False)
def docstring(self, raw=False, fast=True):
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
# wouldn't load like > 100 Python modules anymore.
fast = False
return super(Completion, self).docstring(raw=raw, fast=fast)
@property
def description(self):
"""Provide a description of the completion object."""
# TODO improve the class structure.
return Definition.description.__get__(self)
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.string_name)
@memoize_method
def follow_definition(self):
"""
Return the original definitions. I strongly recommend not using it for
your completions, because it might slow down |jedi|. If you want to
read only a few objects (<=20), it might be useful, especially to get
the original docstrings. The basic problem of this function is that it
follows all results. This means with 1000 completions (e.g. numpy),
it's just PITA-slow.
"""
defs = self._name.infer()
return [Definition(self._evaluator, d.name) for d in defs]
class Definition(BaseDefinition):
"""
*Definition* objects are returned from :meth:`api.Script.goto_assignments`
or :meth:`api.Script.goto_definitions`.
"""
def __init__(self, evaluator, definition):
super(Definition, self).__init__(evaluator, definition)
@property
def description(self):
"""
A description of the :class:`.Definition` object, which is heavily used
in testing. e.g. for ``isinstance`` it returns ``def isinstance``.
Example:
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f if random.choice([0,1]) else C'''
>>> script = Script(source, column=3) # line is maximum by default
>>> defs = script.goto_definitions()
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs
[<Definition def f>, <Definition class C>]
>>> str(defs[0].description) # strip literals in python2
'def f'
>>> str(defs[1].description)
'class C'
"""
typ = self.type
tree_name = self._name.tree_name
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
if typ == 'function':
# For the description we want a short and a pythonic way.
typ = 'def'
return typ + ' ' + u(self._name.string_name)
elif typ == 'param':
code = tree_name.get_definition().get_code(
include_prefix=False,
include_comma=False
)
return typ + ' ' + code
definition = tree_name.get_definition()
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
# Delete comments:
txt = re.sub('#[^\n]+\n', ' ', txt)
# Delete multi spaces/newlines
txt = re.sub('\s+', ' ', txt).strip()
return txt
@property
def desc_with_module(self):
    """
    In addition to the definition, also return the module.

    .. warning:: Don't use this function yet, its behaviour may change. If
        you really need it, talk to me.

    .. todo:: Add full path. This function is should return a
        `module.class.function` path.
    """
    # BUG FIX: ``in_builtin_module`` is a method (defined on BaseDefinition),
    # so the original bare attribute reference was always truthy and
    # ``position`` was always '' -- the '@line' suffix was never emitted.
    # It must be called.
    position = '' if self.in_builtin_module() else '@%s' % (self.line)
    return "%s:%s%s" % (self.module_name, self.description, position)
@memoize_method
def defined_names(self):
"""
List sub-definitions (e.g., methods in class).
:rtype: list of Definition
"""
defs = self._name.infer()
return sorted(
common.unite(defined_names(self._evaluator, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
def is_definition(self):
"""
Returns True, if defined as a name in a statement, function or class.
Returns False, if it's a reference to such a definition.
"""
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
def __eq__(self, other):
return self._name.start_pos == other._name.start_pos \
and self.module_path == other.module_path \
and self.name == other.name \
and self._evaluator == other._evaluator
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
class CallSignature(Definition):
"""
`CallSignature` objects is the return value of `Script.function_definition`.
It knows what functions you are currently in. e.g. `isinstance(` would
return the `isinstance` function. without `(` it would return nothing.
"""
def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
super(CallSignature, self).__init__(evaluator, executable_name)
self._index = index
self._key_name_str = key_name_str
self._bracket_start_pos = bracket_start_pos
@property
def index(self):
"""
The Param index of the current call.
Returns None if the index cannot be found in the curent call.
"""
if self._key_name_str is not None:
for i, param in enumerate(self.params):
if self._key_name_str == param.name:
return i
if self.params:
param_name = self.params[-1]._name
if param_name.tree_name is not None:
if param_name.tree_name.get_definition().star_count == 2:
return i
return None
if self._index >= len(self.params):
for i, param in enumerate(self.params):
tree_name = param._name.tree_name
if tree_name is not None:
# *args case
if tree_name.get_definition().star_count == 1:
return i
return None
return self._index
@property
def bracket_start(self):
"""
The indent of the bracket that is responsible for the last function
call.
"""
return self._bracket_start_pos
@property
def call_name(self):
"""
.. deprecated:: 0.8.0
Use :attr:`.name` instead.
.. todo:: Remove!
The name (e.g. 'isinstance') as a string.
"""
warnings.warn("Deprecated since Jedi 0.8. Use name instead.", DeprecationWarning, stacklevel=2)
return self.name
@property
def module(self):
"""
.. deprecated:: 0.8.0
Use :attr:`.module_name` for the module name.
.. todo:: Remove!
"""
return self._executable.get_root_node()
def __repr__(self):
return '<%s: %s index %s>' % \
(type(self).__name__, self._name.string_name, self.index)
class _Param(Definition):
    """
    Just here for backwards compatibility.
    """

    def get_code(self):
        """
        .. deprecated:: 0.8.0
           Use :attr:`.description` and :attr:`.name` instead.
        .. todo:: Remove!

        A function to get the whole code of the param.
        """
        # Deprecated shim: simply forwards to the inherited description.
        warnings.warn("Deprecated since version 0.8. Use description instead.", DeprecationWarning, stacklevel=2)
        return self.description
class _Help(object):
    """
    Temporary implementation, will be used as `Script.help() or something in
    the future.
    """

    def __init__(self, definition):
        self._name = definition  # the Name (or Keyword/Import name) to document

    @memoize_method
    def _get_contexts(self, fast):
        # Inferring a plain ``import foo`` can be expensive (it may parse a
        # whole library); with fast=True such imports are not followed.
        if isinstance(self._name, ImportName) and fast:
            return {}
        if self._name.api_type == 'statement':
            return {}
        return self._name.infer()

    def docstring(self, fast=True, raw=True):
        """
        The docstring ``__doc__`` for any object.

        See :attr:`doc` for example.
        """
        # TODO: Use all of the followed objects as output. Possibly divinding
        # them by a few dashes.
        for context in self._get_contexts(fast=fast):
            # Only the first inferred context is used.
            return context.py__doc__(include_call_signature=not raw)
        return ''
| StarcoderdataPython |
158412 | <gh_stars>10-100
"""ebsd module to manipulate Electron Back Scattered data sets."""
import h5py
import numpy as np
import os
from pymicro.crystal.microstructure import Orientation
from pymicro.crystal.lattice import Symmetry, CrystallinePhase, Lattice
class OimPhase(CrystallinePhase):
    """A class to handle a phase. This is just a child of the class
    `CrystallinePhase` where we add 2 additional attributes: `hklFamilies` and
    `categories`.
    """

    def __init__(self, id):
        CrystallinePhase.__init__(self, phase_id=id, name='unknown', lattice=None)
        self.hklFamilies = []  # list of OimHklFamily records for this phase
        self.categories = []  # OIM category values -- semantics not shown here; TODO confirm
class OimHklFamily:
    """Plain record describing one {hkl} reflector family of an OIM phase."""

    def __init__(self):
        # default to the null reflector, disabled for indexing and display
        self.hkl = [0] * 3
        self.useInIndexing = 0
        self.diffractionIntensity = 0.0
        self.showBands = 0
class OimScan:
"""OimScan class to handle files from EDAX software OIM."""
def __init__(self, shape, resolution=(1.0, 1.0)):
    """Create an empty EBSD scan.

    :param shape: grid size as (cols, rows).
    :param resolution: spatial step as (xStep, yStep).
    """
    # pattern-center calibration values, unset until read from a file
    self.x_star = 0
    self.y_star = 0
    self.z_star = 0
    self.working_distance = 0
    self.grid_type = 'SqrGrid'  # default acquisition grid geometry
    self.cols = shape[0]
    self.rows = shape[1]
    self.xStep = resolution[0]
    self.yStep = resolution[1]
    # acquisition metadata, filled in by the file readers
    self.operator = ''
    self.sample_id = ''
    self.scan_id = ''
    self.phase_list = []  # list of OimPhase instances
    self.init_arrays()  # allocate the per-pixel data arrays
def __repr__(self):
"""Provide a string representation of the class."""
s = 'EBSD scan of size %d x %d' % (self.cols, self.rows)
s += '\nspatial resolution: xStep=%.1f, yStep=%.1f' % (self.xStep, self.yStep)
return s
def init_arrays(self):
    """Memory allocation for all necessary arrays."""
    # NOTE(review): arrays are indexed (cols, rows) -- the transpose of the
    # usual (rows, cols) image convention; confirm against the file readers.
    self.euler = np.zeros((self.cols, self.rows, 3))  # 3 Euler angles per pixel
    self.x = np.zeros((self.cols, self.rows))  # x coordinate of each pixel
    self.y = np.zeros((self.cols, self.rows))  # y coordinate of each pixel
    self.iq = np.zeros((self.cols, self.rows))  # image quality
    self.ci = np.zeros((self.cols, self.rows))  # confidence index
    self.phase = np.zeros((self.cols, self.rows), dtype='int')  # phase id per pixel
@staticmethod
def from_file(file_path):
"""Create a new EBSD scan by reading a data file.
At present, only hdf5 format is supported.
:param str file_path: the path to the EBSD scan.
:raise ValueError: if the scan is not in format HDF5.
:return: a new `OimScan` instance.
"""
base_name, ext = os.path.splitext(os.path.basename(file_path))
print(base_name, ext)
if ext in ['.h5', '.hdf5']:
scan = OimScan.read_h5(file_path)
elif ext == '.osc':
scan = OimScan.read_osc(file_path)
elif ext == '.ang':
scan = OimScan.read_ang(file_path)
elif ext == '.ctf':
scan = OimScan.read_ctf(file_path)
else:
raise ValueError('only HDF5, OSC, ANG or CTF formats are '
'supported, please convert your scan')
return scan
@staticmethod
def read_osc(file_path):
"""Read a scan in binary OSC format.
Code inspired from the MTEX project loadEBSD_osc.m function.
:param str file_path: the path to the osc file to read.
:param tuple size: the size of the ebsd scan in form (cols, rows).
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
# the data section is preceded by this pattern
start_hex = ['B9', '0B', 'EF', 'FF', '02', '00', '00', '00']
start_bytes = np.array([int(byte, 16) for byte in start_hex])
with open(file_path, 'r') as f:
print('reading EBSD scan from file %s' % file_path)
header = np.fromfile(f, dtype=np.uint32, count=8)
n = header[6]
print('%d data points in EBSD scan' % n)
f.seek(0)
buffer = np.fromfile(f, dtype=np.uint8, count=2**20)
# search for the start pattern
start = np.where(np.correlate(buffer, start_bytes, mode='valid')
== np.dot(start_bytes, start_bytes))[0][0]
print('start sequence located at byte %d' % start)
f.seek(start + 8)
# data count
data_count = np.fromfile(f, dtype=np.uint32, count=1)[0]
if round(((data_count / 4 - 2) / 10) / n) != 1:
f.seek(start + 8)
# the next 8 bytes are float values for xStep and yStep
scan.xStep = np.fromfile(f, dtype=np.float32, count=1)[0]
scan.yStep = np.fromfile(f, dtype=np.float32, count=1)[0]
print('spatial resolution: xStep=%.1f, yStep=%.1f' % (scan.xStep, scan.yStep))
# now read the payload which contains 10 fields for the n measurements
data = np.fromfile(f, count=n*10, dtype=np.float32)
data = np.reshape(data, (n, 10))
scan.cols = int(max(data[:, 3]) / scan.xStep + 1)
scan.rows = int(max(data[:, 4]) / scan.yStep + 1)
print('size of scan is %d x %d' % (scan.cols, scan.rows))
assert n == scan.cols * scan.rows
scan.init_arrays()
scan.euler[:, :, 0] = np.reshape(data[:, 0], (scan.rows, scan.cols)).T
scan.euler[:, :, 1] = np.reshape(data[:, 1], (scan.rows, scan.cols)).T
scan.euler[:, :, 2] = np.reshape(data[:, 2], (scan.rows, scan.cols)).T
scan.x = np.reshape(data[:, 3], (scan.rows, scan.cols)).T
scan.y = np.reshape(data[:, 4], (scan.rows, scan.cols)).T
scan.iq = np.reshape(data[:, 5], (scan.rows, scan.cols)).T
scan.ci = np.reshape(data[:, 6], (scan.rows, scan.cols)).T
scan.phase = np.reshape(data[:, 7], (scan.rows, scan.cols)).T
return scan
@staticmethod
def read_ang(file_path):
"""Read a scan in ang ascii format.
:raise ValueError: if the grid type in not square.
:param str file_path: the path to the ang file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
with open(file_path, 'r') as f:
# start by parsing the header
line = f.readline().strip()
while line.startswith('#'):
tokens = line.split()
if len(tokens) <= 2:
line = f.readline().strip()
continue
if tokens[1] == 'TEM_PIXperUM':
pass
elif tokens[1] == 'x-star':
scan.x_star = float(tokens[2])
elif tokens[1] == 'y-star':
scan.y_star = float(tokens[2])
elif tokens[1] == 'z-star':
scan.z_star = float(tokens[2])
elif tokens[1] == 'WorkingDistance':
scan.working_distance = float(tokens[2])
elif tokens[1] == 'Phase':
phase = OimPhase(int(tokens[2]))
line = f.readline().strip()
phase.name = line.split()[2]
line = f.readline().strip()
try:
phase.formula = line.split()[2]
except IndexError:
phase.formula = ''
line = f.readline().strip()
line = f.readline().strip()
sym = Symmetry.from_tsl(int(line.split()[2]))
tokens = f.readline().strip().split()
# convert lattice constants to nm
lattice = Lattice.from_parameters(float(tokens[2]) / 10,
float(tokens[3]) / 10,
float(tokens[4]) / 10,
float(tokens[5]),
float(tokens[6]),
float(tokens[7]),
symmetry=sym)
phase.set_lattice(lattice)
scan.phase_list.append(phase)
elif tokens[1] == 'GRID:':
scan.grid_type = tokens[2]
print('grid type is %s' % tokens[2])
if scan.grid_type != 'SqrGrid':
raise ValueError('only square grid is supported, please convert your scan')
elif tokens[1] == 'XSTEP':
scan.xStep = float(tokens[2])
elif tokens[1] == 'YSTEP':
scan.yStep = float(tokens[2])
elif tokens[1].startswith('NCOLS'):
scan.cols = int(tokens[2])
elif tokens[1].startswith('NROWS'):
scan.rows = int(tokens[2])
elif tokens[1] == 'OPERATOR:':
scan.operator = tokens[2]
elif tokens[1] == 'SAMPLEID:':
scan.sample_id = tokens[2] if len(tokens) >= 3 else ''
elif tokens[1] == 'SCANID:':
scan.scan_id = tokens[2] if len(tokens) >= 3 else ''
line = f.readline().strip()
print('finished reading header, scan size is %d x %d' % (scan.cols, scan.rows))
# now read the payload
data = np.zeros((scan.cols * scan.rows, len(line.split())))
data[0] = np.fromstring(line, sep=' ')
i = 1
for line in f:
data[i] = np.fromstring(line, sep=' ')
i += 1
# we have read all the data, now repack everything into the different arrays
scan.init_arrays()
scan.euler[:, :, 0] = np.reshape(data[:, 0], (scan.rows, scan.cols)).T
scan.euler[:, :, 1] = np.reshape(data[:, 1], (scan.rows, scan.cols)).T
scan.euler[:, :, 2] = np.reshape(data[:, 2], (scan.rows, scan.cols)).T
scan.x = np.reshape(data[:, 3], (scan.rows, scan.cols)).T
scan.y = np.reshape(data[:, 4], (scan.rows, scan.cols)).T
scan.iq = np.reshape(data[:, 5], (scan.rows, scan.cols)).T
scan.ci = np.reshape(data[:, 6], (scan.rows, scan.cols)).T
scan.phase = np.reshape(data[:, 7], (scan.rows, scan.cols)).T
return scan
def read_ctf(file_path):
"""Read a scan in Channel Text File format.
:raise ValueError: if the job mode is not grid.
:param str file_path: the path to the ctf file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
with open(file_path, 'r') as f:
# start by parsing the header
line = f.readline().strip()
while not line.startswith('Phases'):
tokens = line.split()
if tokens[0] == 'JobMode':
scan.grid_type = tokens[1]
if scan.grid_type != 'Grid':
raise ValueError('only square grid is supported, please convert your scan')
elif tokens[0] == 'XCells':
scan.cols = int(tokens[1])
elif tokens[0] == 'YCells':
scan.rows = int(tokens[1])
elif tokens[0] == 'XStep':
scan.xStep = float(tokens[1])
elif tokens[0] == 'YStep':
scan.yStep = float(tokens[1])
line = f.readline().strip()
# read the phases
tokens = line.split()
n_phases = int(tokens[1])
for i in range(n_phases):
# read this phase (lengths, angles, name, ?, space group, description)
line = f.readline().strip()
tokens = line.split()
phase = CrystallinePhase(i + 1)
phase.name = tokens[2]
phase.name = tokens[5]
sym = Symmetry.from_space_group(int(tokens[4]))
lattice_lengths = tokens[0].split(';')
lattice_angles = tokens[1].split(';')
# convert lattice constants to nm
lattice = Lattice.from_parameters(float(lattice_lengths[0]) / 10,
float(lattice_lengths[1]) / 10,
float(lattice_lengths[2]) / 10,
float(lattice_angles[0]),
float(lattice_angles[1]),
float(lattice_angles[2]),
symmetry=sym)
phase.set_lattice(lattice)
print('adding phase %s' % phase)
scan.phase_list.append(phase)
# read the line before the data
line = f.readline().strip()
# Phase X Y Bands Error Euler1 Euler2 Euler3 MAD BC BS
# now read the payload
data = np.zeros((scan.cols * scan.rows, len(line.split())))
i = 0
for line in f:
data[i] = np.fromstring(line, sep=' ')
i += 1
# we have read all the data, now repack everything into the different arrays
scan.init_arrays()
scan.euler[:, :, 0] = np.reshape(data[:, 5], (scan.rows, scan.cols)).T
scan.euler[:, :, 1] = np.reshape(data[:, 6], (scan.rows, scan.cols)).T
scan.euler[:, :, 2] = np.reshape(data[:, 7], (scan.rows, scan.cols)).T
scan.x = np.reshape(data[:, 1], (scan.rows, scan.cols)).T
scan.y = np.reshape(data[:, 2], (scan.rows, scan.cols)).T
scan.iq = np.reshape(data[:, 9], (scan.rows, scan.cols)).T
scan.ci = np.reshape(data[:, 10], (scan.rows, scan.cols)).T
scan.phase = np.reshape(data[:, 0], (scan.rows, scan.cols)).T
return scan
def read_header(self, header):
# read the header, it contains the following keys: 'Camera Azimuthal Angle', 'Camera Elevation Angle',
# 'Coordinate System', 'Grid Type', 'Notes', 'Operator', 'Pattern Center Calibration', 'Phase', 'Sample ID',
# 'Sample Tilt', 'Scan ID', 'Step X', 'Step Y', 'Working Distance', 'nColumns', 'nRows'
self.x_star = header['Pattern Center Calibration']['x-star'][0]
self.y_star = header['Pattern Center Calibration']['y-star'][0]
self.z_star = header['Pattern Center Calibration']['z-star'][0]
self.working_distance = header['Camera Elevation Angle'][0]
self.grid_type = header['Grid Type'][0].decode('utf-8')
if self.grid_type != 'SqrGrid':
raise ValueError('only square grid is supported, please convert your scan')
self.cols = header['nColumns'][0]
self.rows = header['nRows'][0]
self.xStep = header['Step X'][0]
self.yStep = header['Step Y'][0]
self.operator = header['Operator'][0].decode('utf-8')
self.sample_id = header['Sample ID'][0].decode('utf-8')
self.scan_id = header['Scan ID'][0].decode('utf-8')
# get the different phases
for key in header['Phase'].keys():
phase = header['Phase'][key]
# each phase has the following keys: 'Formula', 'Info', 'Lattice Constant a', 'Lattice Constant alpha',
# 'Lattice Constant b', 'Lattice Constant beta', 'Lattice Constant c', 'Lattice Constant gamma',
# 'Laue Group', 'MaterialName', 'NumberFamilies', 'Point Group', 'Symmetry', 'hkl Families'
phase = OimPhase(int(key))
phase.name = header['Phase'][key]['MaterialName'][0].decode('utf-8')
phase.formula = header['Phase'][key]['Formula'][0].decode('utf-8')
phase.description = header['Phase'][key]['Info'][0].decode('utf-8')
# create a crystal lattice for this phase
sym = Symmetry.from_tsl(header['Phase'][key]['Symmetry'][0])
# convert lattice constants to nm
a = header['Phase'][key]['Lattice Constant a'][0] / 10
b = header['Phase'][key]['Lattice Constant b'][0] / 10
c = header['Phase'][key]['Lattice Constant c'][0] / 10
alpha = header['Phase'][key]['Lattice Constant alpha'][0]
beta = header['Phase'][key]['Lattice Constant beta'][0]
gamma = header['Phase'][key]['Lattice Constant gamma'][0]
lattice = Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=sym)
phase.set_lattice(lattice)
for row in header['Phase'][key]['hkl Families']:
family = OimHklFamily()
family.hkl = [row[0], row[1], row[2]]
family.useInIndexing = row[4]
family.diffractionIntensity = row[3]
family.showBands = row[5]
phase.hklFamilies.append(family)
phase.categories = [0, 0, 0, 0, 0]
self.phase_list.append(phase)
@staticmethod
def read_h5(file_path):
"""Read a scan in H5 format.
:raise ValueError: if the grid type in not square.
:param str file_path: the path to the h5 file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
with h5py.File(file_path, 'r') as f:
# find out the scan key (the third one)
key_list = [key for key in f.keys()]
scan_key = key_list[2]
print('reading EBSD scan %s from file %s' % (scan_key, file_path))
header = f[scan_key]['EBSD']['Header']
scan.read_header(header)
# now initialize the fields
scan.init_arrays()
data = f[scan_key]['EBSD']['Data']
scan.euler[:, :, 0] = np.reshape(
data['Phi1'], (scan.rows, scan.cols)).transpose(1, 0)
scan.euler[:, :, 1] = np.reshape(
data['Phi'], (scan.rows, scan.cols)).transpose(1, 0)
scan.euler[:, :, 2] = np.reshape(
data['Phi2'], (scan.rows, scan.cols)).transpose(1, 0)
scan.x = np.reshape(data['X Position'],
(scan.rows, scan.cols)).transpose(1, 0)
scan.y = np.reshape(data['Y Position'],
(scan.rows, scan.cols)).transpose(1, 0)
scan.iq = np.reshape(data['IQ'], (scan.rows, scan.cols)).transpose(1, 0)
scan.ci = np.reshape(data['CI'], (scan.rows, scan.cols)).transpose(1, 0)
scan.phase = np.reshape(data['Phase'],
(scan.rows, scan.cols)).transpose(1, 0)
return scan
def get_phase(self, phase_id=1):
"""Look for a phase with the given id in the list.
:raise ValueError: if the phase_id cannot be found.
:param int phase_id: the id of the phase.
:return: the phase instance with the corresponding id
"""
try:
phase_index = [phase.phase_id for phase in self.phase_list].index(phase_id)
except ValueError:
raise(ValueError('phase %d not in list' % phase_id))
return self.phase_list[phase_index]
def compute_ipf_maps(self):
"""Compute the IPF maps for the 3 cartesian directions.
.. warning::
This function is not vectorized and will be slow for large EBSD maps.
"""
self.ipf001 = np.empty_like(self.euler)
self.ipf010 = np.empty_like(self.euler)
self.ipf100 = np.empty_like(self.euler)
for i in range(self.rows):
for j in range(self.cols):
o = Orientation.from_euler(np.degrees(self.euler[j, i]))
try:
sym = self.get_phase(int(self.phase[j, i])).get_symmetry()
# compute IPF-Z
self.ipf001[j, i] = o.ipf_color(axis=np.array([0., 0., 1.]),
symmetry=sym)
# compute IPF-Y
self.ipf010[j, i] = o.ipf_color(axis=np.array([0., 1., 0.]),
symmetry=sym)
# compute IPF-X
self.ipf100[j, i] = o.ipf_color(axis=np.array([1., 0., 0.]),
symmetry=sym)
except ValueError:
self.ipf001[j, i] = [0., 0., 0.]
self.ipf010[j, i] = [0., 0., 0.]
self.ipf100[j, i] = [0., 0., 0.]
progress = 100 * (i + 1) / self.rows
print('computing IPF maps: {0:.2f} %'.format(progress), end='\r')
def segment_grains(self, tol=5., min_ci=0.2):
"""Segment the grains based on the euler angle maps.
The segmentation is carried out using a region growing algorithm based
on an orientation similarity criterion.
The id 0 is reserved to the background which is assigned to pixels with
a confidence index lower than 0.2. Other pixels are first marqued as
unlabeled using -1, then pixels are evaluated one by one.
A new grain is created and non already assigned neighboring pixels are
evaluated based on the crystal misorientation. If the misorientation is
lower than `tol`, the pixel is assigned to the current grain and its
neighbors added to the list of candidates. When no more candidates are
present, the next pixel is evaluated and a new grain is created.
.. warning::
This function does not account yet for multiple phases. Grains should
be created separately for each crystallographic phase.
:param float tol: misorientation tolerance in degrees.
:param float min_ci: minimum confidence index for a pixel to be a valid
EBSD measurement.
:raise ValueError: if no phase is present in the scan.
:return: a numpy array of the grain labels.
"""
if not len(self.phase_list) > 0:
raise ValueError('at least one phase must be present in this EBSD '
'scan to segment the grains')
# segment the grains
print('grain segmentation for EBSD scan, misorientation tolerance={:.1f}, '
'minimum confidence index={:.1f}'.format(tol, min_ci))
grain_ids = np.zeros_like(self.iq, dtype='int')
grain_ids += -1 # mark all pixels as non assigned
# start by assigning bad pixel to grain 0
grain_ids[self.ci <= min_ci] = 0
n_grains = 0
progress = 0
for j in range(self.rows):
for i in range(self.cols):
if grain_ids[i, j] >= 0:
continue # skip pixel
# create new grain with the pixel as seed
n_grains += 1
# print('segmenting grain %d' % n_grains)
grain_ids[i, j] = n_grains
candidates = [(i, j)]
# apply region growing based on the angle misorientation (strong connectivity)
while len(candidates) > 0:
pixel = candidates.pop()
sym = self.phase_list[self.phase[pixel]].get_symmetry()
# print('* pixel is {}, euler: {}'.format(pixel, np.degrees(euler[pixel])))
# get orientation of this pixel
o = Orientation.from_euler(np.degrees(self.euler[pixel]))
# look around this pixel
east = (pixel[0] - 1, pixel[1])
north = (pixel[0], pixel[1] - 1)
west = (pixel[0] + 1, pixel[1])
south = (pixel[0], pixel[1] + 1)
neighbors = [east, north, west, south]
# look at unlabeled connected pixels
neighbor_list = [n for n in neighbors if
0 <= n[0] < self.cols and
0 <= n[1] < self.rows and
grain_ids[n] == -1]
# print(' * neighbors list is {}'.format([east, north, west, south]))
for neighbor in neighbor_list:
# check misorientation
o_neighbor = Orientation.from_euler(np.degrees(self.euler[neighbor]))
mis, _, _ = o.disorientation(o_neighbor, crystal_structure=sym)
if mis * 180 / np.pi < tol:
# add to this grain
grain_ids[neighbor] = n_grains
# add to the list of candidates
candidates.append(neighbor)
progress = 100 * np.sum(grain_ids >= 0) / (self.cols * self.rows)
print('segmentation progress: {0:.2f} %'.format(progress), end='\r')
print('\n%d grains were segmented' % len(np.unique(grain_ids)))
return grain_ids
def change_orientation_reference_frame(self):
"""Change the reference frame for orientation data.
In OIM, the reference frame for orientation data (euler angles) is
termed A1A2A3 and differs from the sample reference frame XYZ. This can
be set befor the acquisition but the default case is:
X = -A2, Y = -A1, Z = -A3.
This methods change the reference frame used for the euler angles.
"""
# transformation matrix from A1A2A3 to XYZ
T = np.array([[0., -1., 0.], # X is -A2
[-1., 0., 0.], # Y is -A1
[0., 0., -1.]]) # Z is -A3
for j in range(self.rows):
for i in range(self.cols):
o_tsl = Orientation.from_euler(np.degrees(self.euler[i, j, :]))
g_xyz = np.dot(o_tsl.orientation_matrix(), T.T) # move to XYZ local frame
o_xyz = Orientation(g_xyz)
self.euler[i, j, :] = np.radians(o_xyz.euler)
progress = 100 * (j * self.cols + i) / (self.cols * self.rows)
print('changing orientation reference frame progress: {0:.2f} %'.format(progress), end='\r')
print('\n')
def to_h5(self, file_name):
"""Write the EBSD scan as a hdf5 file compatible OIM software (in
progress).
:param str file_name: name of the output file.
"""
f = h5py.File('%s.h5' % file_name, 'w')
f.attrs[' Manufacturer'] = np.string_('EDAX')
f.attrs[' Version'] = np.string_('OIM Analysis 7.3.0 x64 [09-01-15]')
# create the group containing the data
data_container = f.create_group('DataContainer')
ebsd = data_container.create_group('EBSD')
ebsd_header = ebsd.create_group('Header')
ebsd_header.create_dataset('Camera Azimuthal Angle', data=np.array([0.0], dtype=np.float32))
ebsd_header.create_dataset('Camera Elevation Angle', data=np.array([self.working_distance], dtype=np.float32))
pattern_center = ebsd_header.create_group('Pattern Center Calibration')
pattern_center.create_dataset('x-star', data=np.array(self.x_star, dtype=np.float32))
pattern_center.create_dataset('y-star', data=np.array(self.y_star, dtype=np.float32))
pattern_center.create_dataset('z-star', data=np.array(self.z_star, dtype=np.float32))
ebsd_data = ebsd.create_group('Data')
ci = ebsd_data.create_dataset('CI', data=self.ci)
iq = ebsd_data.create_dataset('IQ', data=self.iq)
phase = ebsd_data.create_dataset('Phase', data=self.phase)
phi1 = ebsd_data.create_dataset('Phi1', data=self.euler[:, :, 0])
phi = ebsd_data.create_dataset('Phi', data=self.euler[:, :, 1])
phi2 = ebsd_data.create_dataset('Phi2', data=self.euler[:, :, 2])
x = ebsd_data.create_dataset('X Position', data=self.x)
y = ebsd_data.create_dataset('Y Position', data=self.y)
f.close()
| StarcoderdataPython |
1717079 | <gh_stars>0
from typing import List, Optional, Union
from pydantic import BaseModel
try:
from typing import Literal # type: ignore
except ImportError:
from typing_extensions import Literal
# generation number of this JSON schema; CorrectionSet pins it via Literal below
VERSION = 1
# shared pydantic base for every schema node
class Model(BaseModel):
    class Config:
        # reject unknown keys so malformed documents fail validation early
        extra = "forbid"
# declares one named input (or the output) of a Correction
class Variable(Model):
    name: str
    type: Literal["string", "int", "real"]
    "Implicitly 64 bit integer and double-precision floating point?"
    description: Optional[str]
    # TODO: clamping behavior for out of range?
# leaf node evaluating an expression string over the correction inputs
class Formula(Model):
    # TODO: nodetype: Literal["formula"]
    expression: str
    # which expression dialect the string is written in
    parser: Literal["TFormula", "numexpr"]
    parameters: List[int]
    "Index to Correction.inputs[]"
# py3.7+: ForwardRef can be used instead of strings
# Any node of the evaluation tree: a (multi-)binning, a category, a formula,
# or a constant float leaf. String forward references are resolved below.
Content = Union["Binning", "MultiBinning", "Category", Formula, float]
# 1D rectangular binning node
class Binning(Model):
    nodetype: Literal["binning"]
    edges: List[float]
    "Edges of the binning, where edges[i] <= x < edges[i+1] => f(x, ...) = content[i](...)"
    # one content node per bin, i.e. len(content) == len(edges) - 1
    content: List[Content]
class MultiBinning(Model):
    """N-dimensional rectangular binning"""
    nodetype: Literal["multibinning"]
    edges: List[List[float]]
    """Bin edges for each input

    C-ordered array, e.g. content[d1*d2*d3*i0 + d2*d3*i1 + d3*i2 + i3] corresponds
    to the element at i0 in dimension 0, i1 in dimension 1, etc. and d0 = len(edges[0]), etc.
    """
    content: List[Content]
# lookup node: dispatches on a discrete key to the matching content entry
class Category(Model):
    nodetype: Literal["category"]
    # TODO: should be Union[List[str], List[int]]
    keys: List[Union[str, int]]
    # parallel to keys: content[i] is selected when the input equals keys[i]
    content: List[Content]
# resolve the "Content" forward references now that all node types exist
Binning.update_forward_refs()
MultiBinning.update_forward_refs()
Category.update_forward_refs()
# one named correction: declared inputs/output plus its evaluation tree
class Correction(Model):
    name: str
    "A useful name"
    description: Optional[str]
    "Detailed description of the correction"
    version: int
    "Version"
    inputs: List[Variable]
    # the declared output variable (its type describes the evaluated result)
    output: Variable
    # root of the evaluation tree
    data: Content
# top level document: a versioned collection of corrections
class CorrectionSet(Model):
    # pinned to the module VERSION via Literal, so documents from another
    # schema generation fail validation
    schema_version: Literal[VERSION]
    "Schema version"
    corrections: List[Correction]
if __name__ == "__main__":
    # When run as a script, dump the JSON schema into the directory given
    # as the last command line argument.
    import os
    import sys

    out_dir = sys.argv[-1]
    schema_path = os.path.join(out_dir, f"schemav{VERSION}.json")
    with open(schema_path, "w") as fout:
        fout.write(CorrectionSet.schema_json(indent=4))
| StarcoderdataPython |
6637452 | from django.conf.urls import include, url
from django.contrib import admin
# URL routing table. Views are referenced by dotted-path strings, a style
# that is only supported by Django < 1.10 (removed in 1.10).
urlpatterns = [
    # Examples:
    url(r'^$', 'example_basic.translate.views.hello', name='hello'),
    url(r'^apples$', 'example_basic.translate.views.apples', name='apples'),
    url(r'^apples/python$', 'example_basic.translate.views.pythonic_apples', name='pythonic_apples'),
    url(r'^po', 'example_basic.translate.views.po', name='po'),
    # enables Django's built-in set_language view for switching the locale
    url(r'^i18n/', include('django.conf.urls.i18n')),
]
| StarcoderdataPython |
3221062 | <gh_stars>10-100
"""Script for generating the ACHIEVEMENTS.md file, which is a human-readable
version of the PRs json record.
"""
import argparse
import dataclasses
import json
import pathlib
import sys
import datetime
from typing import List
import jinja2
from sorald._helpers import jsonkeys
# encoding used for both reading prs.json and writing the Markdown output
ENCODING = "utf8"

# Jinja2 template for ACHIEVEMENTS.md; rendered with a list of PullRequest
# objects (newest first) under the name `pull_requests`
TEMPLATE = r"""# Achievements
This document presents an overview of the pull requests performed with Sorald.
{% for pr in pull_requests %}
## [{{ pr.repo_slug }}#{{ pr.number }}](https://github.com/{{ pr.repo_slug }}/pull/{{ pr.number }})
This PR was opened at {{ pr.created_at }}{% if pr.closed_at %} and {{ pr.status }} at {{ pr.closed_at }}{% endif %}.{% if pr.contains_manual_edits %}
Some manual edits were performed after applying Sorald.{% elif not pr.is_legacy %}
The patch was generated fully automatically with Sorald.{% else %}
This is a legacy PR made before detailed record-keeping, and so we cannot say if any manual edits have been applied.{% endif %}
{% if pr.repairs|length > 0 %}
It provide{% if pr.closed_at %}d{% else %}s{% endif %} the following repairs:
{% for repair in pr.repairs %}
* [Rule {{ repair.rule_key }}](https://rules.sonarsource.com/java/RSPEC-{{ repair.rule_key }})
- Number of violations found: {{ repair.num_violations_found }}
- Number of violations repaired: {{ repair.num_violations_repaired }}{% endfor %}
{% else %}
Detailed repair information is missing for this PR.
{% endif %}{% endfor %}
"""

# CLI flag names, shared between parse_args and the tests/callers
PRS_JSON_ARG = "--prs-json-file"
OUTPUT_ARG = "--output"
@dataclasses.dataclass
class RepairStats:
    """Repair statistics for a single Sonar rule within one pull request."""
    rule_key: int
    # annotations corrected from `str`: both values are ints (see
    # parse_repair_stats, which subtracts the before/after counts)
    num_violations_found: int
    num_violations_repaired: int
@dataclasses.dataclass
class PullRequest:
    """One Sorald pull request as rendered into ACHIEVEMENTS.md."""
    repo_slug: str  # e.g. "owner/repo"
    number: int
    created_at: str  # ISO-8601 timestamp, used for sorting
    closed_at: str  # close or merge timestamp; may be None for open PRs -- TODO annotate Optional
    status: str  # "merged", or the raw PR state otherwise
    contains_manual_edits: bool
    repairs: List[RepairStats]
    is_legacy: bool  # True for PRs predating detailed record keeping
def main(args: List[str]):
    """CLI entry point: parse the arguments and write the achievements file."""
    ns = parse_args(args)
    generate_achievements_file(
        prs_json=ns.prs_json_file,
        output_file=ns.output,
        template=TEMPLATE,
    )
def parse_args(args: List[str]) -> argparse.Namespace:
    """Parse the command line arguments for the achievements script.

    Both the input record file and the output Markdown file are required.
    """
    cli = argparse.ArgumentParser(
        prog="achievements",
        description="Script for generating the ACHIEVEMENTS.md file, "
        "detailing pull requests performed with Sorald.",
    )
    cli.add_argument(
        "-p",
        PRS_JSON_ARG,
        help="path to the prs.json file",
        type=pathlib.Path,
        required=True,
    )
    cli.add_argument(
        "-o",
        OUTPUT_ARG,
        help="path to the output Markdown file",
        type=pathlib.Path,
        required=True,
    )
    return cli.parse_args(args)
def generate_achievements_file(
    prs_json: pathlib.Path,
    output_file: pathlib.Path,
    template: str,
) -> None:
    """Render the achievements Markdown file from the PR records.

    PRs are listed newest first, ordered by their creation timestamp.
    """
    prs = parse_pull_requests(prs_json)
    prs.sort(
        key=lambda pr: datetime.datetime.fromisoformat(pr.created_at),
        reverse=True,
    )
    markdown = jinja2.Template(template).render(pull_requests=prs)
    output_file.write_text(markdown, encoding=ENCODING)
def parse_pull_requests(prs_json: pathlib.Path) -> List[PullRequest]:
    """Load the PR records file and convert each entry into a `PullRequest`.

    Entries with an empty/missing PR metadata section are skipped: the walrus
    condition at the bottom of the comprehension filters them out while also
    binding `pr_meta` for use in the constructor call.
    """
    prs_data = json.loads(prs_json.read_text(ENCODING))

    def _is_legacy(data: dict) -> bool:
        # legacy records predate the detailed repair bookkeeping
        return data[jsonkeys.RECORD.SECTION_KEY][jsonkeys.RECORD.IS_LEGACY]

    return [
        PullRequest(
            repo_slug=data[jsonkeys.TOP_LEVEL.REPO_SLUG],
            number=pr_meta[jsonkeys.PR.NUMBER],
            created_at=pr_meta[jsonkeys.PR.CREATED_AT],
            closed_at=pr_meta[jsonkeys.PR.CLOSED_AT] or pr_meta[jsonkeys.PR.MERGED_AT],
            status="merged"
            if pr_meta[jsonkeys.PR.IS_MERGED]
            else pr_meta[jsonkeys.PR.STATE],
            contains_manual_edits=len(data[jsonkeys.MANUAL_EDITS.SECTION_KEY] or [])
            > 0,
            repairs=get_all_repairs(
                data[jsonkeys.SORALD_STATS.SECTION_KEY], _is_legacy(data)
            ),
            is_legacy=_is_legacy(data),
        )
        for _, data in prs_data.items()
        if (pr_meta := data[jsonkeys.PR.SECTION_KEY])
    ]
def get_all_repairs(sorald_stats: dict, is_legacy: bool) -> List[RepairStats]:
    """Extract the per-rule repair statistics from one stats section.

    Modern records hold a list of repair entries (returned sorted by rule
    key); legacy records hold a single flat entry.
    """
    if is_legacy:
        return [parse_legacy_repair_stats(sorald_stats)]
    raw_repairs = sorted(
        sorald_stats.get(jsonkeys.SORALD_STATS.REPAIRS) or [],
        key=lambda rep: int(rep[jsonkeys.SORALD_STATS.RULE_KEY]),
    )
    return [parse_repair_stats(rep) for rep in raw_repairs]
def parse_repair_stats(repair_data: dict) -> RepairStats:
    """Convert one raw repair record into a `RepairStats`."""
    before = repair_data[jsonkeys.SORALD_STATS.VIOLATIONS_BEFORE]
    after = repair_data[jsonkeys.SORALD_STATS.VIOLATIONS_AFTER]
    return RepairStats(
        rule_key=int(repair_data[jsonkeys.SORALD_STATS.RULE_KEY]),
        num_violations_found=before,
        # whatever violations remain after the run were not repaired
        num_violations_repaired=before - after,
    )
def parse_legacy_repair_stats(repair_data: dict) -> RepairStats:
    """Convert a legacy record; by convention found == repaired there."""
    count = repair_data[jsonkeys.SORALD_STATS.LEGACY.NUM_VIOLATIONS]
    return RepairStats(
        rule_key=int(repair_data[jsonkeys.SORALD_STATS.LEGACY.RULE_KEY]),
        num_violations_found=count,
        num_violations_repaired=count,
    )
if __name__ == "__main__":
    # forward the CLI arguments (without the program name) to the entry point
    main(sys.argv[1:])
| StarcoderdataPython |
3206767 | from cell_models import kernik, paci_2018, protocols
from scipy.interpolate import interp1d
import numpy as np
class ModelTarget():
    """Target data that candidate model traces are compared against.

    :param protocol: a protocol instance from `protocols` (its type selects
        the error function used in `compare_individual`)
    :param model: a Paci or Kernik model instance
    :param protocol_type: label of the target type (Spontaneous, Voltage
        clamp, paced, SAP)
    :param tr: the target trace object; assumed to expose `get_last_ap()`
        and `default_unit` -- TODO confirm the trace interface
    """
    def __init__(self, protocol, model, protocol_type, tr):
        self.protocol = protocol
        self.model = model
        self.protocol_type = protocol_type
        self.tr = tr

    def compare_individual(self, individual_tr):
        # dispatch on the protocol type to the matching error function
        # NOTE(review): get_vc_error / get_current_clamp_error are not defined
        # on this class -- presumably provided elsewhere; verify before use
        if isinstance(self.protocol, protocols.SpontaneousProtocol):
            return self.get_sap_error(individual_tr)
        elif isinstance(self.protocol, protocols.VoltageClampProtocol):
            return self.get_vc_error(individual_tr)
        else:
            return self.get_current_clamp_error(individual_tr)

    def get_sap_error(self, individual_tr):
        """Return the summed voltage error between the last action potential
        of the target trace and of the individual's trace.

        Both traces are rescaled to milli units (ms / mV) before comparison;
        time misalignments larger than 10 ms triple the local error as a
        cycle-length penalty.
        """
        # extract the last action potential from each trace
        target_ap, target_bounds, max_t = self.tr.get_last_ap()
        individual_ap, ind_bounds, max_t_ind = individual_tr.get_last_ap()
        # scale the target to milli units if it is not already
        if self.tr.default_unit == 'milli':
            target_conversion = 1
        else:
            target_conversion = 1000
        target_ap.t = target_ap.t * target_conversion
        target_ap.V = target_ap.V * target_conversion
        # same scaling for the candidate trace
        if individual_tr.default_unit == 'milli':
            ind_conversion = 1
        else:
            ind_conversion = 1000
        individual_ap.t = individual_ap.t * ind_conversion
        individual_ap.V = individual_ap.V * ind_conversion
        #import matplotlib.pyplot as plt
        #plt.plot(individual_ap.t, individual_ap.V)
        #plt.plot(target_ap.t, target_ap.V)
        #interp_time = np.linspace(target_ap.t[0], target_ap.t.values[-1], 3000)
        #f = interp1d(target_ap.t, target_ap.V)
        #interp_V = f(interp_time)
        errors = []
        for i, t in enumerate(target_ap.t):
            # nearest-in-time sample of the individual trace
            curr_idx = (individual_ap.t - t).abs().idxmin()
            diff = abs(target_ap.V[i] - individual_ap.V[curr_idx])
            t_diff = abs(individual_ap.t[curr_idx] - t)
            #penalty for large CL difference
            if t_diff > 10:
                errors.append(diff*3)
            else:
                errors.append(diff)
        return sum(errors)
| StarcoderdataPython |
369824 | <gh_stars>1-10
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tcp_socket_client.py
# chsocket
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not found
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glority or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the creazy ones, we see genius.
# Because the poeple who are crazy enough to think thay can change
# the world, are the ones who do."
#
# Created by <NAME> on 03/06/19 09:56.
# Copyright © 2019. <NAME>.
# All rights reserved.
#
# Distributed under terms of the
# MIT
"""
创建TCP连接Socket Sina.com
"""
import socket # 导入socket库
# Create a TCP socket (IPv4, stream oriented).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the web server on port 80.
# Fix: the host now matches the HTTP Host header below; the original code
# connected to 'www.sina.com' while sending 'Host: www.sina.com.cn'.
s.connect(('www.sina.com.cn', 80))
# Once the TCP connection is established, request the front page.
s.send(b'GET / HTTP/1.1\r\nHost: www.sina.com.cn\r\nConnection: close\r\n\r\n')
# Receive the server's response.
buffer = []
while True:
    # read at most 1 KB per call
    d = s.recv(1024)
    if d:
        buffer.append(d)
    else:
        break  # an empty read means the peer closed the connection
# The received data contains both the HTTP headers and the page itself;
# they are separated by the first blank line.
data = b''.join(buffer)
s.close()  # close the socket
header, html = data.split(b'\r\n\r\n', 1)
print(header.decode('utf-8'))
# Write the received page body to a file:
with open('sina.html', 'wb') as f:
    f.write(html)
| StarcoderdataPython |
1703942 | <filename>IsoperimetricUnittests.py
import unittest
import Isoperimetric
from sympy import var, pi
from sympy.functions import exp, log, cos
# import SPCVUnittests
from Isoperimetric import t, x, x_diff
# Integration constants and the first Lagrange multiplier are re-exported
# from the solver class so the expected solutions below can reference them.
C1 = Isoperimetric.Isoperimetric.C1
C2 = Isoperimetric.Isoperimetric.C2
lambda_0 = Isoperimetric.Isoperimetric.lambda_0
# extra multipliers (one per isoperimetric constraint) used in the fixtures
lambda_1 = var('lambda_1')
lambda_2 = var('lambda_2')
# ToDo Think about inheritance
class TestIsoperimetricBase(unittest.TestCase):
    """Shared fixture for isoperimetric problem tests.

    Subclasses call :meth:`setUp` with the problem definition and the
    expected solver outputs; the individual ``test_*`` methods then compare
    each attribute of the solved `Isoperimetric` instance against them.
    """
    # NOTE: IDEs flag this setUp because it does not match unittest's
    # zero-argument signature; subclasses are expected to override setUp()
    # and delegate here with the concrete problem data.
    def setUp(self, f0, t0, t1, x0, x1, f_list, alphas,
              general_solution, coefficients, particular_solution, extreme_value):
        self.f0 = f0
        self.t0 = t0
        self.t1 = t1
        self.x0 = x0
        self.x1 = x1
        self.f_list = f_list
        self.alphas = alphas
        self.general_solution = general_solution
        self.coefficients = coefficients
        self.particular_solution = particular_solution
        self.extreme_value = extreme_value
        # build and solve the variational problem under test
        self.Isoperimetric = Isoperimetric.Isoperimetric(f0=self.f0,
                                                         t0=self.t0,
                                                         t1=self.t1,
                                                         x0=self.x0,
                                                         x1=self.x1,
                                                         f_list=self.f_list,
                                                         alphas=self.alphas)
        self.Isoperimetric.solve()
    def test_f0(self):
        self.assertEqual(self.Isoperimetric.f0,
                         self.f0)
    def test_t0(self):
        self.assertEqual(self.Isoperimetric.t0,
                         self.t0)
    def test_t1(self):
        self.assertEqual(self.Isoperimetric.t1,
                         self.t1)
    def test_x0(self):
        self.assertEqual(self.Isoperimetric.x0,
                         self.x0)
    def test_x1(self):
        self.assertEqual(self.Isoperimetric.x1,
                         self.x1)
    def test_f_list(self):
        self.assertEqual(self.Isoperimetric.f_list,
                         self.f_list)
    def test_alphas(self):
        self.assertEqual(self.Isoperimetric.alphas,
                         self.alphas)
    def test_general_solution(self):
        self.assertEqual(self.Isoperimetric.general_solution,
                         self.general_solution)
    def test_coefficients(self):
        self.assertEqual(self.Isoperimetric.coefficients,
                         self.coefficients)
    def test_particular_solution(self):
        self.assertEqual(self.Isoperimetric.particular_solution,
                         self.particular_solution)
    def test_extreme_value(self):
        self.assertEqual(self.Isoperimetric.extreme_value,
                         self.extreme_value)
    def runTest(self):
        # run every check; lets the suite below instantiate the class
        # without naming a particular test method
        self.test_f0()
        self.test_t0()
        self.test_t1()
        self.test_x0()
        self.test_x1()
        self.test_f_list()
        self.test_alphas()
        self.test_general_solution()
        self.test_coefficients()
        self.test_particular_solution()
        self.test_extreme_value()
class TestIsoperimetric1(TestIsoperimetricBase):
    """Minimize integral of x'**2 on [0,1], x(0)=0, x(1)=1, with
    constraint integral of x dt = 0; expects extremal 3t**2 - 2t."""
    def setUp(self):
        super().setUp(f0=x_diff**2,
                      t0=0,
                      t1=1,
                      x0=0,
                      x1=1,
                      f_list=[x],
                      alphas=[0],
                      general_solution=C1 + C2*t + lambda_1*t**2/(4*lambda_0),
                      coefficients={C1: 0,
                                    lambda_1/lambda_0: 12,
                                    C2: -2},
                      particular_solution=3*t**2 - 2*t,
                      extreme_value=4)
class TestIsoperimetric2(TestIsoperimetricBase):
    """Same functional as TestIsoperimetric1 but with constraint
    integral of t*x dt = 0; expects extremal 5t**3/2 - 3t/2."""
    def setUp(self):
        super().setUp(f0=x_diff**2,
                      t0=0,
                      t1=1,
                      x0=0,
                      x1=1,
                      f_list=[t * x],
                      alphas=[0],
                      general_solution=C1 + C2*t + lambda_1*t**3/(12*lambda_0),
                      coefficients={C1: 0,
                                    lambda_1/lambda_0: 30,
                                    C2: -3/2},
                      particular_solution=5*t**3/2 - 3*t/2,
                      extreme_value=6)
class TestIsoperimetric3(TestIsoperimetricBase):
    """Two simultaneous constraints (integral x dt = 1, integral t*x dt = 0)
    with x(0)=x(1)=0; expects extremal 60t**3 - 96t**2 + 36t."""
    def setUp(self):
        super().setUp(f0=x_diff**2,
                      t0=0,
                      t1=1,
                      x0=0,
                      x1=0,
                      f_list=[x, t * x],
                      alphas=[1, 0],
                      general_solution=C1 + C2*t + lambda_1*t**2/(4*lambda_0) + \
                                       lambda_2*t**3/(12*lambda_0),
                      coefficients={C1: 0,
                                    lambda_2/lambda_0: 720,
                                    lambda_1/lambda_0: -384,
                                    C2: 36},
                      particular_solution=60*t**3 - 96*t**2 + 36*t,
                      extreme_value=192)
class TestIsoperimetric4(TestIsoperimetricBase):
    """Trigonometric case on [0, pi] with constraint
    integral of x*cos(t) dt = pi/2; expects extremal cos(t)."""
    def setUp(self):
        super().setUp(f0=x_diff**2,
                      t0=0,
                      t1=pi,
                      x0=1,
                      x1=-1,
                      f_list=[x * cos(t)],
                      alphas=[pi / 2],
                      general_solution=C1 + C2*t - lambda_1*cos(t)/(2*lambda_0),
                      coefficients={C1: 0,
                                    lambda_1/lambda_0: -2,
                                    C2: 0},
                      particular_solution=cos(t),
                      extreme_value=pi/2)
# ToDo: expected values below are unverified ("may have errors" per author);
# double-check the analytic solution before trusting a failure here.
class TestIsoperimetric5(TestIsoperimetricBase):
    """Weighted functional t**2 * x'**2 on [1,2] with constraint
    integral of t*x dt = 7/3; expects extremal x(t) = t."""
    def setUp(self):
        super().setUp(f0=t ** 2 * x_diff ** 2,
                      t0=1,
                      t1=2,
                      x0=1,
                      x1=2,
                      f_list=[t * x],
                      alphas=[7 / 3],
                      general_solution=C1 + C2/t + lambda_1*t/(4*lambda_0),
                      coefficients={C1: 0,
                                    C2: 0,
                                    lambda_1/lambda_0: 4},
                      particular_solution=t,
                      extreme_value=7 / 3)
if __name__ == '__main__':
    # Each TestCase implements runTest(), so bare instances can be added
    # to the suite directly instead of using a TestLoader.
    suite = unittest.TestSuite()
    suite.addTests([TestIsoperimetric1(),
                    TestIsoperimetric2(),
                    TestIsoperimetric3(),
                    TestIsoperimetric4(),
                    TestIsoperimetric5()])
    runner = unittest.TextTestRunner()
    runner.run(suite)
| StarcoderdataPython |
4936468 | # coding: Latin-1
# Copyright © 2018 The Things Network
# Use of this source code is governed by the
# MIT license that can be found in the LICENSE file.
import unittest
import ttn
from ttn.utils import stubs
class TestHandlerClient(unittest.TestCase):
    """Smoke test: a HandlerClient built from the stub application
    credentials must hand out the expected client types."""
    def setUp(self):
        # Credentials come from the shared test stubs.
        self.handler = ttn.HandlerClient(stubs.apptest["appId"],
                                         stubs.apptest["accessKey"])
    def test_handler(self):
        # application() must return an ApplicationClient and
        # data() an MQTTClient.
        self.appclient = self.handler.application()
        self.mqttclient = self.handler.data()
        assert isinstance(self.appclient,
                          ttn.ApplicationClient) and \
            isinstance(self.mqttclient, ttn.MQTTClient)
| StarcoderdataPython |
3501128 | <gh_stars>10-100
"""Main script to output accuracy statistics and other benchmarks of the inference pipeline.
.. code-block:: console
$ python -m chesscog.report.prepare_recognition_results --help
usage: prepare_recognition_results.py [-h] [--results RESULTS]
[--dataset DATASET]
Prepare results for LaTeX
optional arguments:
-h, --help show this help message and exit
--results RESULTS parent results folder
--dataset DATASET the dataset to evaluate
"""
import pandas as pd
import re
import argparse
from recap import URI
import chesscog
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Prepare results for LaTeX")
    parser.add_argument("--results", help="parent results folder",
                        type=str, default="results://recognition")
    parser.add_argument("--dataset", help="the dataset to evaluate",
                        type=str, default="train")
    args = parser.parse_args()
    # One row per evaluated board image.
    df = pd.read_csv(URI(args.results) / f"{args.dataset}.csv")
    total = len(df)
    # End-to-end accuracy
    num_correct_boards = (df["num_incorrect_squares"] == 0).sum()
    print("End-to-end accuracy:", num_correct_boards / total)
    # End-to-end accuracy allowing one mistake
    num_correct_boards_allowing_one_mistake = (
        df["num_incorrect_squares"] <= 1).sum()
    print("End-to-end accuracy, allowing one mistake:",
          num_correct_boards_allowing_one_mistake / total)
    # Mean misclassified
    mean_misclassified = df["num_incorrect_squares"].mean()
    print("Mean number of incorrect squares:", mean_misclassified)
    # Correctly detected corners.
    # NOTE(review): this keeps rows where not all four corners were wrong
    # OR an error string is present — confirm that is the intended
    # definition of "corner detection succeeded".
    df = df[(df["num_incorrect_corners"] != 4) | (df["error"] != "None")]
    num_correct_corners = len(df)
    print("Corner detection accuracy:", num_correct_corners / total)
    # Occupancy classification (per square; 64 squares per board)
    num_squares = 64 * len(df)
    num_occupancy_mistakes = df["occupancy_classification_mistakes"].sum()
    print("Per-square occupancy classification accuracy:",
          1 - num_occupancy_mistakes / num_squares)
    # Piece classification, conditioned on correct occupancy
    num_occupancy_correct = num_squares - num_occupancy_mistakes
    num_piece_mistakes = df["piece_classification_mistakes"].sum()
    print("Per-square piece classification accuracy:",
          1 - num_piece_mistakes / num_occupancy_correct)
    # Performance profiling: every "time_*" column is a pipeline stage
    time_cols = [x for x in df.columns if x.startswith("time_")]
    for c in time_cols:
        print("Time for", c[len("time_"):])
        print("    mean:", df[c].mean())
        print("    std: ", df[c].std())
    totals = df[time_cols].sum(axis=1)
    print("Mean total time:", totals.mean())
    print("          std:", totals.std())
| StarcoderdataPython |
1839102 | <filename>python/demo.py<gh_stars>10-100
# demo.py
#
# This example script shows how to process an image on the GPU via
# python, numpy, pyimagej and clij.
#
# Author: <NAME>, <EMAIL>
# August 2019
###########################################################
# initialize pyimagej to get access to the Fiji/CLIJ jar files
import imagej
ij = imagej.init('C:/programs/fiji-win64/Fiji.app/')
# load some sample image data
from skimage import io
sk_img = io.imread('https://samples.fiji.sc/blobs.png')
# init clijx (via pyjnius) to get access to the GPU
from jnius import autoclass
CLIJx = autoclass('net.haesleinhuepf.clijx.CLIJx')
clijx = CLIJx.getInstance();
# convert the numpy array to an ImageJ2 img
import numpy as np
np_arr = np.array(sk_img)
ij_img = ij.py.to_java(np_arr)
# push the image to GPU memory
input = clijx.push(ij_img)
# allocate GPU memory for the result image (same dimensions as input)
output = clijx.create(input)
# blur the image (sigma 5.0 in x and y, 0.0 in z)
clijx.op.blur(input, output, 5.0, 5.0, 0.0);
# pull the result image back from the GPU
ij_img_result = clijx.pull(output);
# convert back to a numpy array
np_arr_result = ij.py.rai_to_numpy(ij_img_result);
# show the input and the result image side by side
from matplotlib import pyplot as plt
plt.subplot(121)
plt.imshow(np_arr)
plt.subplot(122)
plt.imshow(np_arr_result)
plt.show()
print("Bye")
130971 | <filename>src/data/warehouse.py
import collections
import time

# Layered key/value store.  The top map of the ChainMap holds values
# registered since the last save(); parent maps hold earlier layers.
_DATA = collections.ChainMap()
_HISTORY = [_DATA]
_DEADLINE_MS = 10


def register(path, value):
    """Register `value` under `path` in the current layer.

    `value` may be a callable; it is then invoked lazily (and timed) on
    first get().  Raises KeyError if `path` already exists in the current
    layer; shadowing an entry from a parent (saved) layer is allowed.
    """
    if path in _DATA.maps[0]:
        raise KeyError('%s already specified' % path)
    _DATA[path] = value


def get(path):
    """Return the value registered under `path`, resolving lazy callables.

    A callable value is invoked once and its result cached back under
    `path`.  Raises Exception if the callable took longer than the
    configured deadline (see set_deadline_ms).
    """
    value = _DATA[path]
    if callable(value):
        start = time.time()
        _DATA[path] = value()
        delta = (time.time() - start) * 1000
        if delta > _DATA['_DEADLINE_MS']:
            raise Exception('Deadline exceeded loading %s (%.0f ms)' % (path, delta))
    return _DATA[path]


def init(register_base=True, deadline_ms=_DEADLINE_MS):
    """One-time initialization; raises on a second call (use reset() first).

    :param register_base: also register the built-in base data providers.
    :param deadline_ms: per-value load deadline passed to set_deadline_ms.
    """
    if '_INITIALIZED' in _DATA.maps[0]:
        raise Exception('Already initialized')
    # BUGFIX: record initialization so a repeated init() actually fails.
    # Previously this flag was checked but never set, so init() could be
    # called any number of times, silently re-running registration.
    _DATA['_INITIALIZED'] = True
    set_deadline_ms(deadline_ms)
    if register_base:
        register('/letter/frequency', _get_letter_frequency)


def reset():
    """Clear the current layer (ChainMap.clear only clears maps[0]),
    including the initialized flag, so init() may be called again."""
    _DATA.clear()


def save():
    """Push the current layer and start a fresh child layer.

    Pair every save() with a later restore().
    """
    global _DATA
    _HISTORY.append(_DATA)
    _DATA = _DATA.new_child()


def restore():
    """Return to the layer that was active at the matching save()."""
    global _DATA
    _DATA = _HISTORY.pop()


def set_deadline_ms(deadline_ms):
    """Set the deadline (in milliseconds) applied when get() resolves a
    lazily-registered callable."""
    _DATA['_DEADLINE_MS'] = deadline_ms


def _get_letter_frequency():
    # English letter occurrence counts (a-z), returned sorted by
    # descending frequency.
    return collections.OrderedDict(sorted(list(zip('abcdefghijklmnopqrstuvwxyz', [
        # abcdef.
        9081174698, 419765694, 596623239, 361493758, 593086170, 297285731,
        # ghijkl.
        227771642, 220523502, 3086225277, 180739802, 195647953, 252900442,
        341583838, 437961375, 246429812, 303249898, 139563801, 323534251,  # mnopqr.
        565123981, 388448018, 179615587, 204486977, 252231566, 508609523,  # stuvwx.
        195011703, 132095202,  # yz.
    ])), key=lambda x: x[1], reverse=True))
| StarcoderdataPython |
9706301 | import random
class FloatField(object):
    """A width x height grid of floats with simple arithmetic and
    smoothing helpers.  Mutating methods return ``self`` for chaining."""

    def __init__(self, width:int, height:int, defaultValue:float = 0, _data = None):
        self.width = width
        self.height = height
        if _data:
            # Internal: used by clone() to adopt an already-built grid.
            self.data = _data
        else:
            self.data = self.__createField(width, height, defaultValue)
    #
    def __createField(self, width:int, height:int, value:float = 0):
        # Build a height x width nested list filled with `value`.
        return [[value for _ in range(width)] for _ in range(height)]
    #
    @property
    def maximum(self):
        """Largest value in the field."""
        return max(max(row) for row in self.data)
    #
    @property
    def minimum(self):
        """Smallest value in the field."""
        return min(min(row) for row in self.data)
    #
    def clone(self):
        """Return a deep copy of this field (rows are copied)."""
        rows = [list(row) for row in self.data]
        return FloatField(self.width, self.height, _data = rows)
    #
    def add(self, value:float, maxValue:float = 1):
        """Add `value` to every cell, clamping results at `maxValue`.

        Pass ``maxValue=None`` to add without clamping.
        BUGFIX: the previous implementation *assigned* `value` instead of
        adding it when ``maxValue`` was ``None``.
        """
        for y in range(0, self.height):
            for x in range(0, self.width):
                v = self.data[y][x] + value
                if maxValue is not None and v > maxValue:
                    v = maxValue
                self.data[y][x] = v
        return self
    #
    def subtract(self, value:float, minValue:float = 0):
        """Subtract `value` from every cell, clamping results at `minValue`.

        Pass ``minValue=None`` to subtract without clamping.
        BUGFIX: the previous implementation *assigned* `value` instead of
        subtracting it when ``minValue`` was ``None``.
        """
        for y in range(0, self.height):
            for x in range(0, self.width):
                v = self.data[y][x] - value
                if minValue is not None and v < minValue:
                    v = minValue
                self.data[y][x] = v
        return self
    #
    def fill(self, value:float):
        """Set every cell to `value`."""
        for y in range(0, self.height):
            for x in range(0, self.width):
                self.data[y][x] = value
        return self
    #
    def fillRandom(self):
        """Set every cell to a uniform random float in [0, 1)."""
        for y in range(0, self.height):
            for x in range(0, self.width):
                self.data[y][x] = random.random()
        return self
    #
    def smooth(self, windowSize:int):
        """Box-blur in place: each cell becomes the mean of its
        (2*windowSize+1)^2 neighbourhood clipped to the field bounds."""
        w1 = -windowSize
        w2 = windowSize + 1
        smoothed = self.__createField(self.width, self.height)
        for y in range(0, self.height):
            for x in range(0, self.width):
                total = 0.0
                count = 0
                for dy in range(w1, w2):
                    yy = y + dy
                    if yy < 0 or yy >= self.height:
                        continue
                    for dx in range(w1, w2):
                        xx = x + dx
                        if 0 <= xx < self.width:
                            total += self.data[yy][xx]
                            count += 1
                smoothed[y][x] = total / count
        self.data = smoothed
        return self
    #
#
| StarcoderdataPython |
1606736 | """
Unit test for selection operators.
"""
import random
from math import nan
import numpy as np
import pytest
from leap_ec import Individual
from leap_ec import ops, statistical_helpers
from leap_ec.binary_rep.problems import MaxOnes
from leap_ec.data import test_population
from leap_ec.real_rep.problems import SpheroidProblem
##############################
# Tests for sus_selection()
##############################
def test_sus_selection1():
    '''SUS must deterministically pick the only individual with nonzero
    fitness, including after an internal reshuffle.'''
    # Make a population where sus_selection has an obvious
    # reproducible choice
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    pop = Individual.evaluate_population(pop)
    # This selection operator will always choose the [1, 1, 1] individual
    # since [0, 0, 0] has zero fitness
    selector = ops.sus_selection(pop)
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    # run one more time to test shuffle
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
@pytest.mark.stochastic
def test_sus_selection_shuffle():
    '''Statistical test: SUS selection frequencies should match the
    individuals' fitness proportions (1/4 vs 3/4 here).'''
    # Make a population where sus_selection has an obvious
    # reproducible choice
    # Proportions here should be 1/4 and 3/4, respectively
    pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # Assign a unique identifier to each individual
    pop[0].id = 0
    pop[1].id = 1
    # We first need to evaluate all the individuals so that
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    selected = ops.sus_selection(pop)
    # Sample N selections and compare the observed distribution of ids
    # against the fitness-proportional expectation.
    N = 1000
    p_thresh = 0.1
    observed_dist = statistical_helpers.collect_distribution(
        lambda: next(selected).id, samples=N)
    expected_dist = {pop[0].id: 0.25*N, pop[1].id: 0.75*N}
    print(f"Observed: {observed_dist}")
    print(f"Expected: {expected_dist}")
    assert(statistical_helpers.stochastic_equals(expected_dist,
                                                 observed_dist, p=p_thresh))
def test_sus_selection_offset():
    '''SUS must reject negative fitnesses unless an offset shifts them
    to be non-negative.'''
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # evaluate population and negate fitness of second individual
    pop = Individual.evaluate_population(pop)
    pop[1].fitness = -pop[1].fitness
    # now we try to evaluate normally (this should throw a ValueError)
    # due to the negative fitness
    with pytest.raises(ValueError):
        selector = ops.sus_selection(pop)
        selected = next(selector)
    # it should work by setting the offset to +3
    # this adds 3 to each fitness value, making the second
    # individual's fitness 0.
    selector = ops.sus_selection(pop, offset=3)
    # we expect the first individual to always be selected
    # since the new zero point is now -3.
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
def test_sus_selection_pop_min():
    '''With offset='pop-min', the minimum-fitness member gets zero
    selection probability, so the fitter member is always chosen.'''
    # Create a population of positive fitness individuals
    # scaling the fitness by the population minimum makes it so the
    # least fit member never gets selected.
    pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    pop = Individual.evaluate_population(pop)
    selector = ops.sus_selection(pop, offset='pop-min')
    # we expect that the second individual is always selected
    # since the new zero point will be at the minimum fitness
    # of the population
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
def test_sus_selection_custom_key():
    '''A custom key function (here counting zeros) must override the
    individuals' evaluated fitness during SUS selection.'''
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    def custom_key(individual):
        ''' Returns fitness based on MaxZeros '''
        return np.count_nonzero(individual.genome == 0)
    pop = Individual.evaluate_population(pop)
    selector = ops.sus_selection(pop, key=custom_key)
    # we expect the first individual to always be selected
    # since its genome is all 0s
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
def test_sus_selection_num_points():
    '''SUS with n random pointers: negative n raises; n=None, n smaller
    than, and n larger than the population size all work.'''
    # the second individual should always be selected
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    pop = Individual.evaluate_population(pop)
    # with negative points
    with pytest.raises(ValueError):
        selector = ops.sus_selection(pop, n=-1)
        selected = next(selector)
    # with n = None (default)
    selector = ops.sus_selection(pop, n=None)
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    # with n less than len(population)
    selector = ops.sus_selection(pop, n=1)
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    # with n greater than len(population)
    selector = ops.sus_selection(pop, n=3)
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
##############################
# Tests for proportional_selection()
##############################
def test_proportional_selection1():
    '''Proportional selection must deterministically pick the only
    individual with nonzero fitness.'''
    # Make a population where proportional_selection has an obvious
    # reproducible choice
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    parents = Individual.evaluate_population(pop)
    # This selection operator will always select the [1, 1, 1] individual since
    # [0, 0, 0] has zero fitness
    selector = ops.proportional_selection(parents)
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
@pytest.mark.stochastic
def test_proportional_selection2():
    '''Statistical test: proportional selection frequencies should match
    the individuals' fitness proportions (1/4 vs 3/4 here).'''
    # Make a population where fitness proportional selection has an obvious
    # reproducible choice
    # Proportions here should be 1/4 and 3/4, respectively
    pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # Assign a unique identifier to each individual
    pop[0].id = 0
    pop[1].id = 1
    # We first need to evaluate all the individuals so that
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    selected = ops.proportional_selection(pop)
    # Sample N selections and compare observed id frequencies against
    # the fitness-proportional expectation.
    N = 1000
    p_thresh = 0.1
    observed_dist = statistical_helpers.collect_distribution(
        lambda: next(selected).id, samples=N)
    expected_dist = {pop[0].id: 0.25*N, pop[1].id: 0.75*N}
    print(f"Observed: {observed_dist}")
    print(f"Expected: {expected_dist}")
    assert(statistical_helpers.stochastic_equals(expected_dist,
                                                 observed_dist, p=p_thresh))
def test_proportional_selection_offset():
    '''Proportional selection must reject negative fitnesses unless an
    offset shifts them to be non-negative.'''
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # evaluate population and negate fitness of second individual
    pop = Individual.evaluate_population(pop)
    pop[1].fitness = -pop[1].fitness
    # now we try to evaluate normally (this should throw a ValueError)
    # due to the negative fitness
    with pytest.raises(ValueError):
        selector = ops.proportional_selection(pop)
        selected = next(selector)
    # it should work by setting the offset to +3
    # this adds 3 to each fitness value, making the second
    # individual's fitness 0.
    selector = ops.proportional_selection(pop, offset=3)
    # we expect the first individual to always be selected
    # since the new zero point is now -3.
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
def test_proportional_selection_pop_min():
    '''With offset='pop-min', the minimum-fitness member gets zero
    selection probability, so the fitter member is always chosen.'''
    # Create a population of positive fitness individuals
    # scaling the fitness by the population minimum makes it so the
    # least fit member never gets selected.
    pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    pop = Individual.evaluate_population(pop)
    selector = ops.proportional_selection(pop, offset='pop-min')
    # we expect that the second individual is always selected
    # since the new zero point will be at the minimum fitness
    # of the population
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
    selected = next(selector)
    assert np.all(selected.genome == [1, 1, 1])
def test_proportional_selection_custom_key():
    '''A custom key function (here counting zeros) must override the
    individuals' evaluated fitness during proportional selection.'''
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    def custom_key(individual):
        ''' Returns fitness based on MaxZeros '''
        return np.count_nonzero(individual.genome == 0)
    pop = Individual.evaluate_population(pop)
    selector = ops.proportional_selection(pop, key=custom_key)
    # we expect the first individual to always be selected
    # since its genome is all 0s
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
    selected = next(selector)
    assert np.all(selected.genome == [0, 0, 0])
##############################
# Tests for naive_cyclic_selection()
##############################
def test_naive_cyclic_selection():
    """Naive cyclic selection yields individuals in order and wraps
    around to the start without reshuffling."""
    pop = [Individual(np.array([0, 0]), problem=MaxOnes()),
           Individual(np.array([0, 1]), problem=MaxOnes())]
    # This selection operator will deterministically cycle through the
    # given population
    selector = ops.naive_cyclic_selection(pop)
    selected = next(selector)
    assert np.all(selected.genome == [0, 0])
    selected = next(selector)
    assert np.all(selected.genome == [0, 1])
    # And now we cycle back to the first individual
    selected = next(selector)
    assert np.all(selected.genome == [0, 0])
##############################
# Tests for cyclic_selection()
##############################
def test_cyclic_selection():
    """Cyclic selection yields the original order for the first pass,
    then a shuffled order for the next pass (seeded for determinism)."""
    # Set seed so that we get consistent test results. I.e., it is possible
    # by happenstance for some tests to fail even though they're actually ok.
    # E.g., the cyclic selection tests will test if the test_sequence
    # shuffles between a complete cycle, but there's a chance that the same
    # test_sequence may come up in the random shuffle, so the test will fail.
    # However, if we set a random seed ahead of time, then we can control for
    # those pathological scenarios.
    random.seed(123)
    # We're just going to use integers for the population as that's
    # sufficient for testing this selection operator; we don't want to get in
    # the weeds with comparing individuals for test_sequence equivalency
    # testing.
    pop = list(range(4))
    # This selection operator will deterministically cycle through the
    # given population
    selector = ops.cyclic_selection(pop)
    # first cycle should be the same order as we started
    first_iteration = [next(selector) for _ in range(len(pop))]
    assert pop == first_iteration
    # the second iteration should be shuffled
    second_iteration = [next(selector) for _ in range(len(pop))]
    assert pop != second_iteration
##############################
# Tests for truncation_selection()
##############################
def test_truncation_selection():
    """Truncation selection must keep exactly the requested number of
    individuals, and they must be the fittest ones."""
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([0, 0, 1]), problem=MaxOnes()),
           Individual(np.array([1, 1, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # We first need to evaluate all the individuals so that truncation
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    truncated = ops.truncation_selection(pop, 2)
    assert len(truncated) == 2
    # Just to make sure, check that the two best individuals from the
    # original population are in the selected population
    assert pop[2] in truncated
    assert pop[3] in truncated
def test_truncation_parents_selection():
    """ Test (mu + lambda), i.e., parents competing with offspring

    Create parent and offspring populations such that each has a "best"
    individual that will be selected by truncation selection.
    """
    parents = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
               Individual(np.array([1, 1, 0]), problem=MaxOnes())]
    parents = Individual.evaluate_population(parents)
    offspring = [Individual(np.array([0, 0, 1]), problem=MaxOnes()),
                 Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    offspring = Individual.evaluate_population(offspring)
    # The pool is parents + offspring; the best of each should survive.
    truncated = ops.truncation_selection(offspring, 2, parents=parents)
    assert len(truncated) == 2
    assert parents[1] in truncated
    assert offspring[1] in truncated
def test_truncation_selection_with_nan1():
    """If truncation selection encounters a NaN and non-NaN fitness
    while maximizing, the non-NaN wins.
    """
    # Make a population where binary tournament_selection has an obvious
    # reproducible choice
    problem = MaxOnes()
    pop = [Individual(np.array([0, 0, 0]), problem=problem),
           Individual(np.array([1, 1, 1]), problem=problem)]
    # We first need to evaluate all the individuals so that truncation
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    # Now set the "best" to NaN; the remaining valid fitness should win.
    pop[1].fitness = nan
    best = ops.truncation_selection(pop, size=1)
    assert pop[0] == best[0]
def test_truncation_selection_with_nan2():
    """If truncation selection encounters a NaN and non-NaN fitness
    while minimizing, the non-NaN wins.
    """
    problem = SpheroidProblem(maximize=False)
    pop = []
    pop.append(Individual(np.array([0]), problem=problem))
    pop.append(Individual(np.array([1]), problem=problem))
    pop = Individual.evaluate_population(pop)
    # First *normal* selection should yield the 0 as the "best"
    best = ops.truncation_selection(pop, size=1)
    assert pop[0] == best[0]
    # But now let's set that best to a NaN, which *should* force the other
    # individual to be selected.
    pop[0].fitness = nan
    best = ops.truncation_selection(pop, size=1)
    assert pop[1] == best[0]
##############################
# Tests for tournament_selection()
##############################
@pytest.mark.stochastic
def test_tournament_selection1():
    """If there are just two individuals in the population, then binary
    tournament selection will select the better one with 75% probability."""
    # Make a population where binary tournament_selection has an obvious
    # reproducible choice
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # Assign a unique identifier to each individual
    pop[0].id = 0
    pop[1].id = 1
    # We first need to evaluate all the individuals so that
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    selected = ops.tournament_selection(pop)
    # Statistical check: the better individual should win 3 of 4 draws.
    N = 1000
    p_thresh = 0.1
    observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)
    expected_dist = { pop[0].id: 0.25*N, pop[1].id: 0.75*N }
    print(f"Observed: {observed_dist}")
    print(f"Expected: {expected_dist}")
    assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))
@pytest.mark.stochastic
def test_tournament_selection2():
    """If there are just two individuals in the population, and we set
    select_worst=True, then binary tournament selection will select the
    worse one with 75% probability."""
    # Make a population where binary tournament_selection has an obvious
    # reproducible choice
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # Assign a unique identifier to each individual
    pop[0].id = 0
    pop[1].id = 1
    # We first need to evaluate all the individuals so that
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    selected = ops.tournament_selection(pop, select_worst=True)
    # Statistical check: with select_worst, the proportions invert.
    N = 1000
    p_thresh = 0.1
    observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)
    expected_dist = { pop[0].id: 0.75*N, pop[1].id: 0.25*N }
    print(f"Observed: {observed_dist}")
    print(f"Expected: {expected_dist}")
    assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))
def test_tournament_selection_indices():
    """If an empty list is provided to tournament selection, it should be
    populated with the index of the selected individual.

    If we select a second individual, the list should be cleared and
    repopulated with the index of the second individual."""
    pop = test_population

    indices = []
    op = ops.tournament_selection(indices=indices)

    # Select an individual
    s = next(op(pop))

    # Ensure the returned index is correct
    assert(len(indices) == 1)
    idx = indices[0]
    assert(idx >= 0)
    assert(idx < len(pop))
    assert(pop[idx] is s)

    # Select another individual; the operator must overwrite, not append.
    s = next(op(pop))

    # Ensure the returned index is correct
    assert(len(indices) == 1)
    idx = indices[0]
    assert(idx >= 0)
    assert(idx < len(pop))
    assert(pop[idx] is s)
##############################
# Tests for random_selection()
##############################
@pytest.mark.stochastic
def test_random_selection1():
    """If there are just two individuals in the population, then random
    selection will select each of them with 50% probability,
    regardless of fitness."""
    pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
           Individual(np.array([1, 1, 1]), problem=MaxOnes())]
    # Assign a unique identifier to each individual
    pop[0].id = 0
    pop[1].id = 1
    # We first need to evaluate all the individuals so that
    # selection has fitnesses to compare
    pop = Individual.evaluate_population(pop)
    selected = ops.random_selection(pop)
    # Statistical check: both ids should appear about equally often.
    N = 1000
    p_thresh = 0.1
    observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)
    expected_dist = { pop[0].id: 0.5*N, pop[1].id: 0.5*N }
    print(f"Observed: {observed_dist}")
    print(f"Expected: {expected_dist}")
    assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))
def test_random_selection_indices():
    """If an empty list is provided to random selection, it should be
    populated with the index of the selected individual.

    If we select a second individual, the list should be cleared and
    repopulated with the index of the second individual."""
    pop = test_population

    indices = []
    op = ops.random_selection(indices=indices)

    # Select an individual
    s = next(op(pop))

    # Ensure the returned index is correct
    assert(len(indices) == 1)
    idx = indices[0]
    assert(idx >= 0)
    assert(idx < len(pop))
    assert(pop[idx] is s)

    # Select another individual; the operator must overwrite, not append.
    s = next(op(pop))

    # Ensure the returned index is correct
    assert(len(indices) == 1)
    idx = indices[0]
    assert(idx >= 0)
    assert(idx < len(pop))
    assert(pop[idx] is s)
| StarcoderdataPython |
9678593 | <reponame>wafec/wafec-tests-openstack-stub
import psutil
import unittest
from wafec_tests_openstack_stub._configuration import interception_config
from wafec_tests_openstack_base.interception import generate_key
from wafec_tests_openstack_stub.interception import Interception
interception_config.dat_file = "../../resources/dat_file.txt"
class InterceptionTests(unittest.TestCase):
    """Verify Interception.should_handle_fault resolves a key written to
    the interception dat file back to its fault-method name."""
    def setUp(self):
        # Components used to derive the interception key; the key also
        # incorporates the current process name.
        self.x = 'test'
        self.trace = 'test'
        self.name = 'test'
        self.ps = psutil.Process().name()
        self.key = generate_key(self.name, self.x, self.trace, self.ps)
        self.method = 'fault_test'
        # Seed the dat file with "<key> <method>" so the lookup can find it.
        with open(interception_config.dat_file, 'w') as dat_file:
            dat_file.write(f'{self.key} {self.method}\n')
    def test_should_handle_fault(self):
        result = Interception.should_handle_fault(self.name, self.x, self.trace)
        self.assertEqual(self.method, result)
| StarcoderdataPython |
9639844 | <reponame>jaraco/pycoreutils
from ..exception import ExtraOperandException
import calendar
import time
def parseargs(p):
    """
    Add arguments and `func` to `p`.

    :param p: ArgumentParser
    :return: ArgumentParser
    """
    # Default handler is func() below; weeks start on Sunday (6) unless -M.
    p.set_defaults(func=func, firstweekday=6)
    p.description = "Displays a calendar"
    p.usage = (
        '%(prog)s [OPTION]... [[MONTH] YEAR]\n'
        + '       %(prog)s -y [OPTION]... [YEAR]...'
    )
    # Positional operands: [MONTH] YEAR, or YEAR... with -y.
    p.add_argument('args', nargs='*')
    p.add_argument(
        "-M",
        action="store_const",
        dest="firstweekday",
        const=0,
        help="Weeks start on Monday",
    )
    p.add_argument(
        "-S",
        action="store_const",
        dest="firstweekday",
        const=6,
        help="Weeks start on Sunday",
    )
    p.add_argument(
        "-y",
        action="store_true",
        dest="year",
        help="Display a calendar for the specified year",
    )
    return p
def func(args):
    """Render the requested calendar to stdout (no trailing newline).

    With -y: one full-year calendar per positional argument, defaulting
    to the current year.  Without -y: "MONTH YEAR" prints that month,
    a single "YEAR" prints that year, no operands prints the current
    month, and more than two operands is an error.
    """
    now = time.localtime()
    cal = calendar.TextCalendar(args.firstweekday)

    def emit(text):
        print(text, end='')

    if args.year:
        years = args.args if args.args != [] else [now.tm_year]
        for year in years:
            emit(cal.formatyear(int(year)))
        return

    operands = args.args
    if len(operands) > 2:
        raise ExtraOperandException(args.prog, operands[1])
    if len(operands) == 2:
        # Operands are MONTH then YEAR; formatmonth wants (year, month).
        emit(cal.formatmonth(int(operands[1]), int(operands[0])))
    elif len(operands) == 1:
        emit(cal.formatyear(int(operands[0])))
    else:
        emit(cal.formatmonth(now.tm_year, now.tm_mon))
| StarcoderdataPython |
3332628 | from flask import g
def login_log():
    # Log the username previously stashed on Flask's request-global `g`
    # by the caller; fails if g.username was never set.
    print('username: ', g.username)
def login_id_log(id):
    # Placeholder: presumably meant to log a login by user id — not
    # implemented yet.
    pass
1766828 | import asyncio
import time
import aiohttp
import binascii
import hashlib
from bosch_thermostat_http.helper import crawl
import bosch_thermostat_http as bosch
from bosch_thermostat_http.const import (FIRMWARE_VERSION, HARDWARE_VERSION,
UUID, SENSORS, DHW, HC, GATEWAY,
OPERATION_MODE, DHW_OFFTEMP_LEVEL,
HC_CURRENT_ROOMSETPOINT)
async def main():
    """
    Provide data_file.txt with ip, access_key, password and check
    if you can retrieve data from your thermostat.
    """
    async with aiohttp.ClientSession() as session:
        # Read connection settings; line 0 = host, 1 = access key,
        # 2 = password.  Use a context manager so the file handle is
        # closed (it previously leaked).
        with open("data_file.txt", "r") as data_file:
            data = data_file.read().splitlines()
        gateway = bosch.Gateway(session,
                                host=data[0],
                                access_key=data[1],
                                password=data[2])
        print(await gateway.check_connection())
        #await gateway.initialize_circuits(DHW)
        #await gateway.initialize_circuits(HC)
        #dhws = gateway.dhw_circuits
        #dhw = dhws[0]
        #print("getting property")
        #print(dhw.get_property(DHW_OFFTEMP_LEVEL))
        #await dhw.update()
        #print(dhw.get_property(DHW_OFFTEMP_LEVEL))
        # await hc.set_operation_mode("manual")
        # Force the summer/winter switch mode, then read it back.
        await gateway.set_value("/heatingCircuits/hc1/suWiSwitchMode", "forced")
        #time.sleep(5)
        print(await gateway.get("/heatingCircuits/hc1/currentSuWiMode"))
        print(await gateway.get("/heatingCircuits/hc1/suWiSwitchMode"))
        #print(await gateway.get("/heatingCircuits/hc1/currentRoomSetpoint"))
        #await gateway.set_value("/heatingCircuits/hc1/manualRoomSetpoint", 22.0)
        #time.sleep(3)
        #print(await gateway.get("/heatingCircuits/hc1/currentRoomSetpoint"))
        #print(await gateway.get("/heatingCircuits/hc1/operationMode"))
        await session.close()

# NOTE(review): asyncio.run(main()) is the preferred entry point on
# Python 3.7+; kept as-is for compatibility with the original script.
asyncio.get_event_loop().run_until_complete(main())
8152412 | # Spiderling: A Spider spawned by the BroodMother.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from games.spiders.spider import Spider
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Spiderling(Spider):
    """The class representing the Spiderling in the Spiders game.
    A Spider spawned by the BroodMother.
    """
    def __init__(self):
        """Initializes a Spiderling with basic logic as provided by the Creer code generator."""
        Spider.__init__(self)
        # private attributes to hold the properties so they appear read only
        self._busy = ""
        self._moving_on_web = None
        self._moving_to_nest = None
        self._number_of_coworkers = 0
        self._work_remaining = 0
    @property
    def busy(self):
        """When empty string this Spiderling is not busy, and can act. Otherwise a string representing what it is busy with, e.g. 'Moving', 'Attacking'.
        :rtype: str
        """
        return self._busy
    @property
    def moving_on_web(self):
        """The Web this Spiderling is using to move. None if it is not moving.
        :rtype: Web
        """
        return self._moving_on_web
    @property
    def moving_to_nest(self):
        """The Nest this Spiderling is moving to. None if it is not moving.
        :rtype: Nest
        """
        return self._moving_to_nest
    @property
    def number_of_coworkers(self):
        """The number of Spiderlings busy with the same work this Spiderling is doing, speeding up the task.
        :rtype: int
        """
        return self._number_of_coworkers
    @property
    def work_remaining(self):
        """How much work needs to be done for this Spiderling to finish being busy. See docs for the Work formula.
        :rtype: float
        """
        return self._work_remaining
    def attack(self, spiderling):
        """ Attacks another Spiderling
        Args:
            spiderling (Spiderling): The Spiderling to attack.
        Returns:
            bool: True if the attack was successful, False otherwise.
        """
        # _run_on_server comes from a base class (not defined here);
        # presumably it forwards the call to the game server -- confirm.
        return self._run_on_server('attack', spiderling=spiderling)
    def move(self, web):
        """ Starts moving the Spiderling across a Web to another Nest.
        Args:
            web (Web): The Web you want to move across to the other Nest.
        Returns:
            bool: True if the move was successful, False otherwise.
        """
        return self._run_on_server('move', web=web)
    # <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
    # if you want to add any client side logic (such as state checking functions) this is where you can add them
    # <<-- /Creer-Merge: functions -->>
| StarcoderdataPython |
1952750 | """
This module contains functions to group an arbitrary set of positions
into a number of strategies, each one of the following "basic" strategies
class AssetStrategy(BasicStrategy): A strategy that involves going long or short in an asset
class OffsetStrategy(BasicStrategy): A strategy that involves being simultaneously long and short the same asset
class CoveredStrategy(BasicStrategy): A strategy where an asset is used to cover the risk from the sale of an option
class SpreadStrategy(BasicStrategy): A strategy where two options with inverse risk profiles are used to create a defined risk
This is primarily used to calculate the maintenance margin requirement, but could be useful for other reasons
You could also inherit from these to create more specific strategies, like:
class NakedOptionStrategy(AssetStrategy): ?
class CreditSpreadStrategy(SpreadStrategy): ?
usage: group_into_basic_strategies(positions:Position)
Special thanks to /u/EdKaim for the outline of this process:
https://www.reddit.com/r/options/comments/6iivnu/generic_method_of_calculating_margin_requirements/dj7msph/
"""
from ..assets import Asset, Option, Call, Put
from ..positions import Position
class BasicStrategy:
    """Base class for all groupings: carries a type tag and a quantity."""

    def __init__(self, strategy_type=None, quantity=1):
        if strategy_type is None:
            strategy_type = 'basic'
        self.strategy_type = strategy_type
        self.quantity = quantity
class AssetStrategy(BasicStrategy):
    """
    A strategy that involves going long or short in an asset
    """

    def __init__(self, asset: Asset, quantity=1):
        super(AssetStrategy, self).__init__('asset', quantity)
        self.asset = asset
        # Negative quantities denote a short position.
        self.direction = 'long' if self.quantity >= 0 else 'short'
class OffsetStrategy(BasicStrategy):
    """
    A strategy that involves being simultaneously long and short the same asset
    """

    def __init__(self, asset: Asset, quantity=1):
        # The long and short legs cancel; only the asset itself is tracked.
        super().__init__('offset', quantity)
        self.asset = asset
class SpreadStrategy(BasicStrategy):
    """
    A strategy where two options with inverse risk profiles are used to create a defined risk
    """

    def __init__(self, sell_option: Option, buy_option: Option, quantity=1):
        """
        :param sell_option: the short (written) leg
        :param buy_option: the long (purchased) leg
        :param quantity: number of spreads; stored as an absolute value
        :raises ValueError: if the two legs do not form a valid spread
        """
        super(SpreadStrategy, self).__init__('spread', quantity)
        # ValueError (a subclass of Exception) replaces the previous bare
        # Exception so callers can catch validation failures specifically;
        # anyone catching Exception still works.
        if sell_option.option_type != buy_option.option_type:
            raise ValueError("SpreadStrategy: option types of sell and buy must match")
        if sell_option.underlying != buy_option.underlying:
            raise ValueError("SpreadStrategy: underlying types of sell and buy must match")
        if sell_option.strike == buy_option.strike:
            raise ValueError("SpreadStrategy: strikes of sell and buy must be different")
        self.sell_option = sell_option
        self.buy_option = buy_option
        self.option_type = sell_option.option_type
        self.quantity = abs(quantity)
        # Short the higher-strike put (or the lower-strike call) => credit
        # spread; otherwise it is a debit spread.
        if sell_option.option_type == 'put':
            self.spread_type = 'credit' if self.sell_option.strike > self.buy_option.strike else 'debit'
        else:
            self.spread_type = 'credit' if self.sell_option.strike < self.buy_option.strike else 'debit'
class CoveredStrategy(BasicStrategy):
    """
    A strategy where an asset is used to cover the risk from the sale of an option
    """

    def __init__(self, asset: Asset, sell_option: Option, quantity=1):
        """
        :param asset: the covering asset (must be the option's underlying)
        :param sell_option: the short (written) option being covered
        :param quantity: number of covered units; stored as an absolute value
        :raises ValueError: if `asset` is not the option's underlying
        """
        super(CoveredStrategy, self).__init__('covered', quantity)
        # ValueError (a subclass of Exception) replaces the previous bare
        # Exception so callers can catch validation failures specifically.
        if asset != sell_option.underlying:
            raise ValueError("CoveredStrategy: option underlying must be the same as asset")
        self.asset = asset
        self.sell_option = sell_option
        self.quantity = abs(quantity)
def create_asset_strategies(positions, underlying):
    """
    Expand the raw positions in `underlying` into one-unit AssetStrategy
    objects: the aggregated long/short equity followed by one single-contract
    strategy per long/short put/call.  Returns [] when the underlying has no
    positions at all.
    """
    positions = [_ for _ in positions if
                 (isinstance(_.asset, Option) and _.asset.underlying == underlying) or (_.asset == underlying)]
    if len(positions) == 0:
        return []
    long_equity = AssetStrategy(asset=underlying, quantity=sum([_.quantity for _ in positions if not isinstance(_.asset, Option) and _.quantity > 0]))
    short_equity = AssetStrategy(asset=underlying, quantity=sum([_.quantity for _ in positions if not isinstance(_.asset, Option) and _.quantity < 0]))
    # Expand each option position into one strategy per contract.
    # BUG FIX: quantities are passed through int() before range() -- this
    # matches _group_into_basic_strategies_in_underlying and prevents a
    # TypeError when quantities arrive as floats.
    short_calls = []
    for call in [p for p in positions if
                 isinstance(p.asset, Option) and p.quantity < 0 and p.asset.option_type == 'call']:
        for x in range(0, abs(int(call.quantity))):
            short_calls.append(AssetStrategy(asset=call.asset, quantity=-1))
    short_puts = []
    for put in [p for p in positions if
                isinstance(p.asset, Option) and p.quantity < 0 and p.asset.option_type == 'put']:
        for x in range(0, abs(int(put.quantity))):
            short_puts.append(AssetStrategy(asset=put.asset, quantity=-1))
    # get all the long calls/puts
    long_calls = []
    for call in [p for p in positions if
                 isinstance(p.asset, Option) and p.quantity > 0 and p.asset.option_type == 'call']:
        for x in range(0, abs(int(call.quantity))):
            long_calls.append(AssetStrategy(asset=call.asset, quantity=1))
    long_puts = []
    for put in [p for p in positions if
                isinstance(p.asset, Option) and p.quantity > 0 and p.asset.option_type == 'put']:
        for x in range(0, abs(int(put.quantity))):
            long_puts.append(AssetStrategy(asset=put.asset, quantity=1))
    return [long_equity] + [short_equity] + long_puts + short_puts + long_calls + short_calls
def _group_into_basic_strategies_in_underlying(underlying, positions):
    """
    Group all positions in one underlying into basic strategies.

    Short options are first covered by stock (CoveredStrategy), then paired
    with long options of the same type (SpreadStrategy); anything left over
    is returned as bare AssetStrategy objects, together with the aggregated
    long/short equity.
    """
    positions = [_ for _ in positions if (isinstance(_.asset, Option) and _.asset.underlying == underlying) or (_.asset == underlying)]
    strategies = []
    # Aggregated equity: long_equity.quantity >= 0, short_equity.quantity <= 0
    # (it is a sum of negative position quantities).
    long_equity = AssetStrategy(asset=underlying, quantity=sum([_.quantity for _ in positions if not isinstance(_.asset, Option) and _.quantity > 0]))
    short_equity = AssetStrategy(asset=underlying, quantity=sum([_.quantity for _ in positions if not isinstance(_.asset, Option) and _.quantity < 0]))
    # get the short calls/puts, one single-contract strategy per contract
    short_calls = []
    for call in [p for p in positions if
                 isinstance(p.asset, Option) and p.quantity < 0 and p.asset.option_type == 'call']:
        for x in range(0, abs(int(call.quantity))):
            short_calls.append(AssetStrategy(asset=call.asset, quantity=-1))
    short_puts = []
    for put in [p for p in positions if
                isinstance(p.asset, Option) and p.quantity < 0 and p.asset.option_type == 'put']:
        for x in range(0, abs(int(put.quantity))):
            short_puts.append(AssetStrategy(asset=put.asset, quantity=-1))
    # get all the long calls/puts
    long_calls = []
    for call in [p for p in positions if
                 isinstance(p.asset, Option) and p.quantity > 0 and p.asset.option_type == 'call']:
        for x in range(0, abs(int(call.quantity))):
            long_calls.append(AssetStrategy(asset=call.asset, quantity=1))
    long_puts = []
    for put in [p for p in positions if
                isinstance(p.asset, Option) and p.quantity > 0 and p.asset.option_type == 'put']:
        for x in range(0, abs(int(put.quantity))):
            long_puts.append(AssetStrategy(asset=put.asset, quantity=1))
    # sort by in the moneyness
    short_calls = sorted(short_calls, key=lambda k: k.asset.strike, reverse=False)
    long_calls = sorted(long_calls, key=lambda k: k.asset.strike, reverse=False)
    short_puts = sorted(short_puts, key=lambda k: k.asset.strike, reverse=True)
    long_puts = sorted(long_puts, key=lambda k: k.asset.strike, reverse=True)
    for short_call in short_calls:
        if long_equity.quantity >= 100:
            # if there are enough shares to cover this call, cover it and don't hit margin
            strategies.append(CoveredStrategy(asset=underlying, sell_option=short_call.asset))
            long_equity.quantity -= 100
        elif len(long_calls) > 0:
            # if there are still any long calls, use them to build spreads
            long_call = long_calls.pop(0)
            strategies.append(SpreadStrategy(buy_option=long_call.asset, sell_option=short_call.asset))
        else:
            # if not then we just have to add this one as a naked short call
            strategies.append(short_call)
    for short_put in short_puts:
        # BUG FIX: short_equity.quantity is a sum of *negative* quantities,
        # so the old test `short_equity.quantity >= 100` could never be true,
        # and the old bookkeeping decremented the put strategy's quantity
        # instead of consuming the short-share pool.
        if short_equity.quantity <= -100:
            # enough short shares to cover this put: no margin hit
            strategies.append(CoveredStrategy(asset=underlying, sell_option=short_put.asset))
            short_equity.quantity += 100
        elif len(long_puts) > 0:
            # if there are still any long puts, use them to build spreads
            long_put = long_puts.pop(0)
            strategies.append(SpreadStrategy(buy_option=long_put.asset, sell_option=short_put.asset))
        else:
            # if not then we just have to add this one as a naked short put
            strategies.append(short_put)
    # ok, now to close everything up
    # we can ignore the short option lists now because we're done with those
    # but we need to add everything long and also the long/short equities
    strategies += long_calls + long_puts + [long_equity] + [short_equity]
    return strategies
def group_into_basic_strategies(positions):
    """Partition `positions` into basic strategies, one underlying at a time."""
    # get a unique list of underlyings
    # NOTE(review): this collects underlying *symbols* (strings), but
    # _group_into_basic_strategies_in_underlying compares them against Asset
    # objects (`_.asset.underlying == underlying`).  Confirm Asset equality
    # accepts symbol strings; otherwise `.symbol` should be dropped here.
    underlyings = list(set([_.asset.underlying.symbol for _ in positions if isinstance(_.asset, Option)]))
    # add all the strategies for each underlying
    strategies = []
    for underlying in underlyings:
        strategies += _group_into_basic_strategies_in_underlying(underlying=underlying, positions=positions)
    return strategies | StarcoderdataPython |
1686423 | import pickle
import tensorflow as tf
import numpy as np
from baselines.ddpg.memory import Memory
from baselines.ddpg.ddpg import normalize, denormalize
from baselines.ddpg.models import Discriminator
class Expert:
    """
    Holds a replay buffer of expert demonstrations and builds the TensorFlow
    loss tensors used to train DDPG-style actor/critic networks on that data
    (behaviour cloning, margin loss, GAIL or POfD, selected via flags).
    """
    def __init__(self, limit, env):
        # limit: maximum number of expert transitions kept in the buffer.
        self.limit = limit
        self.env = env
        self.memory = Memory(limit=self.limit,
                             action_shape=self.env.action_space.shape,
                             observation_shape=self.env.observation_space.shape)
        self.file_dir = None
    def load_file(self, file_dir, print_reward=False):
        """Load pickled expert data (a list of episodes, each a list of steps)."""
        self.file_dir = file_dir
        expert_file = open(self.file_dir, 'rb')
        expert_data = pickle.load(expert_file)
        expert_file.close()
        k = 0
        if print_reward:
            total_rew = 0.
            ep_rew = 0.
            # NOTE(review): nep starts at 1 *and* is incremented on every
            # episode end, so the printed average divides by episodes + 1.
            nep = 1.
        for episode_sample in expert_data:
            for step_sample in episode_sample:
                k = k+1
                if k <= self.limit:
                    if print_reward:
                        # step_sample layout appears to be
                        # (obs0, action, reward, obs1, done) -- TODO confirm.
                        ep_rew += step_sample[2]
                        if step_sample[4]:
                            nep += 1
                            total_rew += ep_rew
                            ep_rew = 0
                    self.memory.append(step_sample[0], step_sample[1], step_sample[2], step_sample[3], step_sample[4])
                else:
                    # Buffer limit reached: stop loading early.
                    if print_reward:
                        print('Successfully loaded expert files, average reward ',total_rew/nep)
                    return
        if print_reward:
            print('Successfully loaded expert files, average reward ',total_rew/nep)
    def load_file_trpo(self, file_dir):
        """Load expert trajectories saved as a .npz archive with obs/acs arrays."""
        self.file_dir = file_dir
        traj_data = np.load(file_dir)
        if self.limit is None:
            obs = traj_data["obs"][:]
            acs = traj_data["acs"][:]
        else:
            # NOTE: here `limit` caps the number of *episodes*, not steps.
            obs = traj_data["obs"][:self.limit]
            acs = traj_data["acs"][:self.limit]
        episode_num = len(acs)
        '''
        step_num = 0
        for i in range(episode_num):
            step_num += len(acs[i])
        print("Total Step is:", step_num, "\nTotal_Episode is:", episode_num)
        '''
        for i in range(episode_num):
            episode_len = len(acs[i])
            for j in range(episode_len):
                # Only (obs, action, done) are known; the two remaining
                # positional slots are zero-filled.
                done = True if (j == episode_len - 1) else False
                self.memory.append(obs[i][j], acs[i][j], 0., 0., done)
    def sample(self, batch_size):
        """Draw a random batch of expert transitions from the buffer."""
        return self.memory.sample(batch_size)
    def set_tf(self, actor, critic, obs0, actions, obs_rms, ret_rms, observation_range, return_range, supervise=False, critic_only=False,
               actor_only=False, both_ours_sup = False, gail = False, pofd = False):
        """
        Build the expert-side TF graph: state/action placeholders, clipped and
        denormalised Q values, and the actor/critic (plus optional
        discriminator) loss tensors selected by the keyword flags.
        """
        self.expert_state = tf.placeholder(tf.float32, shape=(None,) + self.env.observation_space.shape,
                                           name='expert_state')
        self.expert_action = tf.placeholder(tf.float32, shape=(None,) + self.env.action_space.shape,
                                            name='expert_action')
        # Normalise/clip expert states, then evaluate the critic on both the
        # expert's recorded action and the actor's own action.
        normalized_state = tf.clip_by_value(normalize(self.expert_state, obs_rms),
                                            observation_range[0], observation_range[1])
        expert_actor = actor(normalized_state, reuse=True)
        normalized_q_with_expert_data = critic(normalized_state, self.expert_action, reuse=True)
        normalized_q_with_expert_actor = critic(normalized_state, expert_actor, reuse=True)
        self.Q_with_expert_data = denormalize(
            tf.clip_by_value(normalized_q_with_expert_data, return_range[0], return_range[1]), ret_rms)
        self.Q_with_expert_actor = denormalize(
            tf.clip_by_value(normalized_q_with_expert_actor, return_range[0], return_range[1]), ret_rms)
        if supervise:
            # Plain behaviour cloning: L2 distance to the expert action.
            self.actor_loss = tf.nn.l2_loss(self.expert_action-expert_actor)
            self.critic_loss = 0
        else:
            # Margin loss: penalise the critic whenever the actor's action
            # scores above the expert's recorded action.
            self.critic_loss = tf.reduce_mean(tf.nn.relu(self.Q_with_expert_actor - self.Q_with_expert_data))
            self.actor_loss = -tf.reduce_mean(self.Q_with_expert_actor)
        if critic_only:
            self.actor_loss = 0
        if actor_only:
            self.critic_loss = 0
        #self.dist = tf.reduce_mean(self.Q_with_expert_data - self.Q_with_expert_actor)
        if both_ours_sup:
            # Combined behaviour-cloning + margin objective.
            self.actor_loss = tf.nn.l2_loss(self.expert_action-expert_actor) - tf.reduce_mean(self.Q_with_expert_actor)
            self.critic_loss = tf.reduce_mean(tf.nn.relu(self.Q_with_expert_actor - self.Q_with_expert_data))
        if gail or pofd:
            # Adversarial imitation: the discriminator separates expert pairs
            # from generated (obs, action) pairs; the actor is rewarded for
            # fooling it.
            discriminator = Discriminator()
            d_with_expert_data = discriminator(normalized_state, self.expert_action)
            d_with_gen_data = discriminator(obs0, actions, reuse=True)
            self.discriminator_loss = tf.reduce_mean(tf.log(d_with_gen_data))+tf.reduce_mean(tf.log(1-d_with_expert_data))
            self.actor_loss = -tf.reduce_mean(tf.log(d_with_gen_data))
| StarcoderdataPython |
1977403 | <gh_stars>0
import pathlib
# Project directory layout, resolved relative to this file's parent package.
PACKAGE_ROOT = pathlib.Path(__file__).resolve().parents[1]
TRAINED_MODEL_DIR = PACKAGE_ROOT / 'trained_models'
DATASET_DIR = PACKAGE_ROOT / 'datasets'
# data
TESTING_DATA_FILE = 'test.csv'
TRAINING_DATA_FILE = 'train.csv'
# Regression target column -- column names suggest the Kaggle house-prices
# dataset; confirm against the CSVs before relying on that.
TARGET = 'SalePrice'
# variables
# Feature columns consumed by the model pipeline.
FEATURES = ['MSSubClass', 'MSZoning', 'Neighborhood',
            'OverallQual', 'OverallCond', 'YearRemodAdd',
            'RoofStyle', 'MasVnrType', 'BsmtQual', 'BsmtExposure',
            'HeatingQC', 'CentralAir', '1stFlrSF', 'GrLivArea',
            'BsmtFullBath', 'KitchenQual', 'Fireplaces', 'FireplaceQu',
            'GarageType', 'GarageFinish', 'GarageCars', 'PavedDrive',
            'LotFrontage',
            # this one is only to calculate temporal variable:
            'YrSold'] | StarcoderdataPython |
4860614 | <filename>src/utils/paths.py
#!/usr/bin/env python3
from dataclasses import dataclass
from pathlib import Path
@dataclass
class Tools:
    """Filesystem locations of the build/test tooling scripts."""
    root: Path
    cmake_file: Path
    cmake_file_no_patch: Path
    compile: Path
    test: Path
    gen_polls: Path
    scores: Path

    def validate(self):
        """True when every tracked path exists on disk."""
        required = (self.root, self.cmake_file, self.cmake_file_no_patch,
                    self.compile, self.gen_polls, self.test, self.scores)
        return all(path.exists() for path in required)
@dataclass
class ChallengePaths:
    """Locations of one challenge's sources, polls and proof-of-vulnerability files."""
    name: str
    source: Path
    info: Path
    polls: Path
    poller: Path
    povs: Path

    def get_polls(self):
        """Poll XML file paths: 'for-release' entries first, then 'for-testing'."""
        found = []
        for sub in ('for-release', 'for-testing'):
            directory = self.polls / Path(sub)
            if directory.exists():
                found.extend(str(entry) for entry in directory.iterdir()
                             if entry.suffix == ".xml")
        return found

    def get_povs(self):
        """Paths of files named pov* under the povs dir, or [] if it is missing."""
        if not self.povs.exists():
            return []
        return [str(entry) for entry in self.povs.iterdir()
                if entry.name.startswith('pov')]
@dataclass
class LibPaths:
    """Top-level library layout holding every challenge, poll and PoV."""
    root: Path
    polls: Path
    povs: Path
    challenges: Path

    def validate(self):
        """True when all four directories exist."""
        return all(path.exists() for path in
                   (self.root, self.polls, self.povs, self.challenges))

    def get_challenges(self):
        """Names of all challenge sub-directories."""
        return [entry.name for entry in self.challenges.iterdir() if entry.is_dir()]

    def get_polls_path(self, challenge_name: str):
        """Poller directory for one challenge."""
        return self.polls / Path(challenge_name, 'poller')

    def get_challenge_paths(self, challenge_name):
        """Assemble the ChallengePaths bundle for one challenge."""
        source = self.challenges / Path(challenge_name)
        readme = source / Path("README.md")
        polls = self.polls / Path(challenge_name, 'poller')
        povs = self.povs / challenge_name
        poller = self.challenges / Path(challenge_name, 'poller')
        return ChallengePaths(challenge_name, source, readme, polls, poller, povs)
| StarcoderdataPython |
8189739 | # <NAME>
# Quest script: awards medal item 1142338 on completion.  `sm` (script
# manager) and `parentID` are globals injected by the game host -- not
# defined in this file.
medal = 1142338
# Only grant the medal if the player has inventory space for it.
if sm.canHold(medal):
    sm.chatScript("You have earned a new medal.")
    sm.startQuest(parentID)
    sm.completeQuest(parentID) | StarcoderdataPython |
3447903 | <reponame>Kovszasz/MYG
# Generated by Django 2.2.6 on 2019-10-26 08:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter memecontent.id into an AutoField primary key."""

    dependencies = [
        ('api', '0025_auto_20191026_0805'),
    ]

    operations = [
        migrations.AlterField(
            model_name='memecontent',
            name='id',
            # NOTE(review): default='' on an AutoField is unusual (AutoFields
            # are integer-valued) -- presumably generator noise from the model
            # definition; confirm the migration applies before editing history.
            field=models.AutoField(default='', primary_key=True, serialize=False),
        ),
    ]
| StarcoderdataPython |
4909715 | <filename>arrange.py
#arrange.py
#Arranges the files according to their types for later classification
#uses shutil, os
import os
import shutil
# Maps destination folder name -> file extensions (without the dot).
FOLDER_TYPES = {'pPDF': ['pdf'],
                'Pimages': ['png', 'jpeg', 'jpg', 'gif', 'tiff', 'psd', 'ico'],
                'Pvideos': ['mp4', 'mkv', 'avi', '3gp'],
                'Paudios': ['mp3', 'wav'],
                'Pprograms': ['exe', 'app', 'out'],
                'Pdocs': ['xlsx', 'doc', 'xlsx', 'pptx', 'csv', 'txt', 'ppt', 'odt', 'rtf', 'ods', 'txt', 'pps']
                }
RESULT_DIR = 'CleanedPy'


def identifyType(ext):
    '''
    Accept an extension such as ".pdf" or ".mp4" and return the matching
    category name from FOLDER_TYPES, or None when the extension is unknown.
    '''
    # ext[1:] strips the leading dot before the membership test.
    # (The old for/else version carried an unreachable `break` after
    # `return`; a plain loop with a fall-through return is equivalent.)
    for category, extensions in FOLDER_TYPES.items():
        if ext[1:] in extensions:
            return category
    return None
def makeFolders(lst):
    '''
    Accept a list of category folder names and create each one inside
    RESULT_DIR, creating RESULT_DIR itself first if needed.
    '''
    # BUG FIX: the old code `return`ed from the whole function as soon as it
    # met one already-existing name, so every later category folder was never
    # created.  makedirs(exist_ok=True) creates what is missing and silently
    # accepts what already exists.
    os.makedirs(RESULT_DIR, exist_ok=True)
    for name in lst:
        os.makedirs(os.path.join(RESULT_DIR, name), exist_ok=True)
def moveFiles(src, dst):
    '''
    Move `src` into the `dst` category folder under RESULT_DIR.

    :return: True if the file was moved, False otherwise.
    '''
    try:
        shutil.move(src, os.path.join(RESULT_DIR, dst))
    except (shutil.Error, OSError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; move failures still just report False.
        return False
    return True
#Create Output and category folder if not Exists
makeFolders(FOLDER_TYPES.keys())
def startProcess(folder, file):
    '''
    Categorise `file` (located in `folder`) and move it when its extension
    is recognised.

    :return: tuple (moved_ok: bool, category_name: str)
    '''
    extension = os.path.splitext(file)[1].lower()
    category = identifyType(extension)
    if category is None:
        # Unknown extension: leave the file where it is.
        return False, 'Others(Not_moved)'
    source = os.path.join(folder, file)
    return moveFiles(source, category), category
def strong_arrange():
    '''
    Recursively arrange files from the module-level `folder` (set in
    __main__) and all of its sub-folders.

    :return: dict mapping category name -> number of files processed
    '''
    TOTAL_COUNT = {}
    for foldername, subfolders, filenames in os.walk(folder):
        for file in filenames:
            # BUG FIX: the old code joined with the top-level `folder`
            # instead of the directory currently being walked, so files in
            # sub-folders failed the isfile() test and were silently skipped,
            # making the "strong" walk no stronger than arrange().
            if os.path.isfile(os.path.join(foldername, file)):
                status, types = startProcess(foldername, file)
                TOTAL_COUNT[types] = TOTAL_COUNT.get(types, 0) + 1
    return TOTAL_COUNT
def arrange():
    '''
    Arrange only the files directly inside the module-level `folder`
    (set in __main__); sub-folders are not descended into.

    :return: dict mapping category name -> number of files processed
    '''
    counts = {}
    for entry in os.listdir(folder):
        if not os.path.isfile(os.path.join(folder, entry)):
            continue
        status, types = startProcess(folder, entry)
        counts[types] = counts.get(types, 0) + 1
    return counts
if __name__ == '__main__':
    print("Arrange files")
    # `folder` is read as a module-level global by arrange()/strong_arrange().
    folder = os.getcwd()
    print(folder)
    choice = int(input("Press 1 for Weak arrange\nPress 2 for Strong arrange\n0 to exit\noption:"))
    if choice == 0:
        exit(0)
    if choice == 1:
        res = arrange()
    elif choice == 2:
        res = strong_arrange()
    else:
        # BUG FIX: any other input used to fall through and crash with a
        # NameError on `res`; report and exit instead.
        print("Unknown option")
        exit(1)
    # Final Result
    message = "Result"
    others = "Others(Not_moved)"
    print(f'{message:*^30s}')
    for key, value in res.items():
        if key == others:
            continue
        print(f'{value} file moved into Category {os.path.join(RESULT_DIR, key)}')
    # BUG FIX: .get avoids a KeyError when every file was categorised.
    print(f'{res.get(others, 0)} file Not moved')
| StarcoderdataPython |
54546 | """
78 Two bags of Potatoes - https://codeforces.com/problemset/problem/239/A
"""
# Codeforces 239A "Two bags of potatoes": print every possible count x of
# potatoes in the lost first bag, where x >= 1, (x + y) % k == 0 and
# x + y <= n; print -1 when no such x exists.
y,k,n = map(int,input().split())
f=[]
# Smallest positive x making (x + y) divisible by k (equals k when y % k == 0,
# which correctly skips the invalid x = 0).
x=k-y%k
# Collect candidates in steps of k while the total x + y stays within n.
while(x<n-y+1):
    f.append(str(x))
    x+=k
if len(f):
    print(' '.join(f))
else:
    print('-1') | StarcoderdataPython |
12805004 | # -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import *
from gluon.storage import Storage
def config(settings):
"""
Template for UN OCHA Regional Office of Caucasus and Central Asia (ROCCA) Humanitarian Data Platform
http://eden.sahanafoundation.org/wiki/Deployments/OCHAROCCA
"""
T = current.T
# -------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate += ("OCHAROCCA", "default/users")
settings.base.system_name = T("OCHA Regional Office of Caucasus and Central Asia (ROCCA) Humanitarian Data Platform")
settings.base.system_name_short = T("Humanitarian Data Platform")
# Levels for the LocationSelector
gis_levels = ("L0", "L1", "L2", "L3")
# =========================================================================
# System Settings
# -------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users don't need to be approved
settings.auth.registration_requires_approval = True
#settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -------------------------------------------------------------------------
# Security Policy
settings.security.policy = 5 # Apply Controller, Function and Table ACLs
settings.security.map = True
# -------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "OCHAROCCA"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "bootstrap"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
# Only needed to import the l10n names
#("hy", "Armenian"),
#("az", "Azerbaijani"),
#("ka", "Georgian"),
#("kk", "Kazakh"),
#("ky", "Kyrgyz"),
#("ru", "Russian"),
#("tg", "Tajik"),
#("tk", "Turkmen"),
#("uk", "Ukrainian"),
#("uz", "Uzbek"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "+0600"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ("AM",
"AZ",
"GE",
"KZ",
"KG",
"TJ",
"TM",
"UA",
"UZ",
)
# Until we add support to S3LocationSelector to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to Hide the Toolbar from the main Map
settings.gis.toolbar = False
# Uncomment to show WMS Browser in Map Widgets (e.g. Profile & Summary pages)
# - NB This also requires the active gis_config to have one configured
settings.gis.widget_wms_browser = True
# GeoNames username
settings.gis.geonames_username = "ocharocca"
# Use PCodes for Locations import
settings.gis.lookup_code = "PCode"
# -------------------------------------------------------------------------
# Events
# Make Event Types Hierarchical
settings.event.types_hierarchical = True
# -------------------------------------------------------------------------
# Vulnerability
# Make Indicator Types Hierarchical
settings.vulnerability.indicator_hierarchical = True
# -------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Reports",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# =========================================================================
# Menu
current.response.menu = (
#{"name": T("Places"),
# "c": "gis",
# "f": "location",
# "icon": "globe",
# },
{"name": T("Demographics"),
"c": "stats",
"f": "demographic_data",
"icon": "group",
},
{"name": T("Baseline"),
"c": "vulnerability",
"f": "data",
"icon": "signal",
},
#{"name": T("Stakeholders"),
# "c": "org",
# "f": "organisation",
# "icon": "sitemap",
# "count": 0
# },
{"name": T("Disasters"),
"c": "event",
"f": "event",
"icon": "bolt",
},
#{"name": T("Facilities"),
# "c": "org",
# "f": "facility",
# "icon": "home",
# },
)
for item in current.response.menu:
item["url"] = URL(item["c"],
item["f"],
args = ["summary" if item["f"] not in ["organisation"]
else "datalist"])
current.response.countries = (
{"name": T("Armenia"),
"code": "am"
},
{"name": T("Azerbaijan"),
"code": "az"
},
{"name": T("Georgia"),
"code": "ge"
},
{"name": T("Kazakhstan"),
"code": "kz"
},
{"name": T("Kyrgyzstan"),
"code": "kg"
},
{"name": T("Tajikistan"),
"code": "tj"
},
{"name": T("Turkmenistan"),
"code": "tm"
},
{"name": T("Ukraine"),
"code": "ua"
},
{"name": T("Uzbekistan"),
"code": "uz"
}
)
# =========================================================================
# Custom Controllers
# =========================================================================
def customise_gis_location_controller(**attr):
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
s3db = current.s3db
# Hack to provide additional languages for L10n location names
# without activating them in the GUI
l10n_languages = dict(settings.L10n.languages)
l10n_languages["ky"] = "Kyrgyz"
l10n_languages["ru"] = "Russian"
l10n_languages["hy"] = "Armenian"
l10n_languages["az"] = "Azerbaijani"
l10n_languages["ka"] = "Georgian"
l10n_languages["kk"] = "Kazakh"
l10n_languages["tg"] = "Tajik"
l10n_languages["tk"] = "Turkmen"
l10n_languages["uk"] = "Ukraine"
l10n_languages["uz"] = "Uzbek"
from s3 import IS_ISO639_2_LANGUAGE_CODE
s3db.gis_location_name.language.requires = IS_ISO639_2_LANGUAGE_CODE(select=l10n_languages)
if r.interactive or r.representation == "aadata":
if r.vars.get("location.level__ne"):
s3.crud_strings["gis_location"] = Storage(
title_list = T("Administrative Areas"),
)
else:
s3.crud_strings["gis_location"] = Storage(
#label_create = T("Record Disaster"),
#title_display = T("Disaster Details"),
title_list = T("Locations")
)
# Remove level column & filter
list_fields = s3db.get_config("gis_location", "list_fields")
list_fields.remove("level")
filter_widgets = s3db.get_config("gis_location", "filter_widgets")
# NB Fragile: dependent on filters defined in gis/location controller
filter_widgets.pop(1)
if r.method != "import":
table = s3db.gis_location
# Custom filtered components for custom list_fields
s3db.add_components("gis_location",
gis_location_name = {"name": "name_ru",
"joinby": "location_id",
"filterby": "language",
"filterfor": ("ru",),
},
gis_location_tag = ({"name": "pcode",
"joinby": "location_id",
"filterby": "tag",
"filterfor": ("PCode",),
},
{"name": "lat_lon_source",
"joinby": "location_id",
"filterby": "tag",
"filterfor": ("LatLon Source",),
},
{"name": "lat_lon_date",
"joinby": "location_id",
"filterby": "tag",
"filterfor": ("LatLon Date",),
},
),
)
from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineComponent
table.parent.widget = S3MultiSelectWidget(multiple=False)
crud_form = S3SQLCustomForm("name",
#"name_ru.name_l10n",
S3SQLInlineComponent(
"name_ru",
label = T("Russian Name"),
multiple = False,
fields = [("", "name_l10n")],
),
"level",
S3SQLInlineComponent(
"pcode",
label = T("PCode"),
multiple = False,
fields = [("", "value")],
),
S3SQLInlineComponent(
"lat_lon_source",
label = T("Lat/Lon Source"),
multiple = False,
fields = [("", "value")],
),
S3SQLInlineComponent(
"lat_lon_date",
label = T("Lat/Lon Date"),
multiple = False,
fields = [("", "value")],
),
#"pcode.value",
"parent",
)
NONE = current.messages["NONE"]
levels = current.gis.get_location_hierarchy()
table.level.represent = lambda l: levels[l] if l else NONE
#field = table.inherited
#field.label = T("Mapped?")
#field.represent = lambda v: T("No") if v else T("Yes")
filter_widgets = s3db.get_config("gis_location", "filter_widgets")
# Remove L2 & L3 filters
# NB Fragile: dependent on filters defined in gis/location controller
filter_widgets.pop()
filter_widgets.pop()
s3db.configure("gis_location",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = ["name",
# @ToDo: Investigate whether we can support this style & hence not need to define custom components
#(T("Russian Name"), "name.name_l10n?location_name.language=ru"),
#("PCode", "tag.value?location_tag.tag=PCode"),
(T("Russian Name"), "name_ru.name_l10n"),
"level",
("PCode", "pcode.value"),
"L0", "L1", "L2",
"inherited",
]
)
return True
s3.prep = custom_prep
return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -------------------------------------------------------------------------
def customise_event_event_controller(**attr):
    """
        Customise the event_event controller:
        - make the start date editable
        - suppress the resource header
    """

    s3 = current.response.s3

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep first; abort if it vetoes the request
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        # Allow editing of the event start date in forms
        r.table.start_date.writable = True
        return True
    s3.prep = custom_prep

    # Remove rheader
    attr["rheader"] = None
    return attr

settings.customise_event_event_controller = customise_event_event_controller
# -------------------------------------------------------------------------
def customise_event_event_resource(r, tablename):
    """
        Customise event_event resource
        - List Fields
        - CRUD Strings
        - Form
        - Filter
        - Report

        Runs after controller customisation
        But runs before prep
    """

    from s3 import S3SQLCustomForm, S3SQLInlineComponent, IS_LOCATION, S3LocationSelector

    db = current.db
    s3db = current.s3db

    table = r.table
    table.name.label = T("Disaster Number")

    # Location selector restricted to the configured hierarchy levels
    location_field = s3db.event_event_location.location_id
    location_field.requires = IS_LOCATION()
    location_field.widget = S3LocationSelector(levels=gis_levels)

    # Impact indicators to expose inline on the Disaster form:
    # component alias -> stats_impact_type.name
    impact_fields = OrderedDict(killed = "Killed",
                                total_affected = "Total Affected",
                                est_damage = "Estimated Damage (US$ Million)",
                                )
    # Look up the parameter IDs of the impact types by name
    ptable = s3db.stats_impact_type
    rows = db(ptable.name.belongs(impact_fields.values())).select(ptable.id,
                                                                  ptable.name,
                                                                  )
    parameters = rows.as_dict(key="name")

    # Build one filtered component (plus matching form/list/report entries)
    # per impact indicator
    impact_components = []
    impact_crud_form_fields = []
    impact_list_fields = []
    impact_report_fields = []
    for tag, label in impact_fields.items():
        parameter = parameters[label]["id"]
        impact_components.append({"name": tag,
                                  "link": "event_event_impact",
                                  "joinby": "event_id",
                                  "key": "impact_id",
                                  "filterby": "parameter_id",
                                  "filterfor": (parameter,),
                                  })
        label = T(label)
        impact_crud_form_fields.append(S3SQLInlineComponent(tag,
                                                            label = label,
                                                            link = False,
                                                            multiple = False,
                                                            fields = [("", "value")],
                                                            filterby = dict(field = "parameter_id",
                                                                            options = parameter
                                                                            )
                                                            ))
        impact_list_fields.append((label, "%s.value" % tag))
        impact_report_fields.append((T("Total %(param)s") % dict(param=label), "sum(%s.value)" % tag))

    s3db.add_components("event_event",
                        stats_impact = impact_components,
                        )

    crud_form = S3SQLCustomForm("name",
                                "event_type_id",
                                "start_date",
                                "end_date",
                                # @ToDo: Inline location_id field
                                #S3SQLInlineComponent("event_location",
                                #                     label = T("Location"),
                                #                     multiple = False,
                                #                     fields = [("", "location_id")],
                                #                     ),
                                "comments",
                                *impact_crud_form_fields
                                )

    list_fields = [#"name",
                   "event_type_id",
                   ]
    lappend = list_fields.append
    # One list column per hierarchy level of the event location
    for level in gis_levels:
        location_level = "event_location.location_id$%s" % level
        lappend(location_level)
    # Expose the location's PCode tag as a filtered component
    # @ToDo: Investigate whether we can support this style & hence not need to define custom components
    #(T("Russian Name"), "name.name_l10n?location_name.language=ru"),
    #("PCode", "tag.value?location_tag.tag=PCode"),
    s3db.add_components("gis_location",
                        gis_location_tag = {"name": "pcode",
                                            "joinby": "location_id",
                                            "filterby": "tag",
                                            "filterfor": ("PCode",),
                                            },
                        )
    lappend(("PCode", "event_location.location_id$pcode.value"))
    list_fields.extend(("start_date",
                        "end_date",
                        ))
    list_fields.extend(impact_list_fields)

    # Report facts: disaster count plus a sum per impact indicator
    report_facts = [(T("Number of Disasters"), "count(id)")]
    report_facts.extend(impact_report_fields)
    report_options = s3db.get_config("event_event", "report_options")
    report_options.fact = report_facts

    s3db.configure("event_event",
                   crud_form = crud_form,
                   list_fields = list_fields,
                   )

    if r.interactive:
        # Labels
        table.comments.label = T("Description")

        current.response.s3.crud_strings["event_event"] = Storage(
            label_create = T("Record Disaster"),
            title_display = T("Disaster Details"),
            title_list = T("Disasters"),
            title_update = T("Edit Disaster"),
            label_list_button = T("List Disasters"),
            label_delete_button = T("Delete Disaster"),
            msg_record_created = T("Disaster added"),
            msg_record_modified = T("Disaster updated"),
            msg_record_deleted = T("Disaster deleted"),
            msg_list_empty = T("No Disasters currently registered"))

settings.customise_event_event_resource = customise_event_event_resource
# -------------------------------------------------------------------------
def represent_year(date):
    """Represent a date as its 4-digit year; empty string for a missing date."""
    return date.strftime("%Y") if date else ""
# -------------------------------------------------------------------------
def customise_stats_demographic_data_resource(r, tablename):
    """
        Customise stats_demographic_data resource
        - Configure fields

        Runs after controller customisation
        But runs before prep
    """

    s3db = current.s3db

    table = r.table
    # Demographic data is recorded per year, so show just the year
    table.date.label = T("Year")
    table.date.represent = represent_year

    # Add PCode (location tag exposed as a filtered component)
    s3db.add_components("gis_location",
                        gis_location_tag = {"name": "pcode",
                                            "joinby": "location_id",
                                            "filterby": "tag",
                                            "filterfor": ("PCode",),
                                            },
                        )
    list_fields = s3db.get_config(r.tablename, "list_fields")
    # NB Fragile: insert position depends on the standard list_fields layout
    list_fields.insert(7, ("PCode", "location_id$pcode.value"))

settings.customise_stats_demographic_data_resource = customise_stats_demographic_data_resource
# -------------------------------------------------------------------------
def customise_vulnerability_data_resource(r, tablename):
    """
        Customise vulnerability_data resource
        - List Fields
        - CRUD Strings
        - Form
        - Filter
        - Report

        Runs after controller customisation
        But runs before prep
    """

    db = current.db
    s3db = current.s3db

    table = r.table

    # Higher precision wanted for the Multidimensional Poverty Index
    from s3 import IS_FLOAT_AMOUNT
    table.value.represent = lambda v: \
        IS_FLOAT_AMOUNT.represent(v, precision=3)

    def represent_indicator(id):
        """
            Show the indicator name with its description in a popover
            @ToDo: Implement with S3Represent
        """
        itable = db.vulnerability_indicator
        row = db(itable.parameter_id == id).select(itable.name,
                                                   itable.description,
                                                   limitby=(0, 1)
                                                   ).first()
        if row:
            represent = SPAN(row.name,
                             _class = "s3-popover")
            represent["_data-content"] = row.description
            return represent
        else:
            return ""
    table.parameter_id.represent = represent_indicator

    # Baseline data is recorded per year (optionally a year range)
    table.date.label = T("Year")
    table.date.represent = represent_year
    table.end_date.label = T("Until")
    table.end_date.represent = represent_year
    list_fields = s3db.get_config(r.tablename, "list_fields")
    list_fields.insert(list_fields.index("date") + 1, "end_date")

    # Add PCode (location tag exposed as a filtered component)
    s3db.add_components("gis_location",
                        gis_location_tag = {"name": "pcode",
                                            "joinby": "location_id",
                                            "filterby": "tag",
                                            "filterfor": ("PCode",),
                                            },
                        )
    # NB Fragile: insert position depends on the standard list_fields layout
    list_fields.insert(7, ("PCode", "location_id$pcode.value"))

    if r.interactive:
        current.response.s3.crud_strings["vulnerability_data"] = Storage(
            label_create = T("Create Baseline Data"),
            title_display = T("Baselines Data"),
            title_list = T("Baseline Data"),
            title_update = T("Edit Baseline Data"),
            label_list_button = T("List Baseline Data"),
            label_delete_button = T("Delete Baseline Data"),
            msg_record_created = T("Baseline Data added"),
            msg_record_modified = T("Baseline Data updated"),
            msg_record_deleted = T("Baseline Data deleted"),
            msg_list_empty = T("No Baseline Data"))

settings.customise_vulnerability_data_resource = customise_vulnerability_data_resource
# -------------------------------------------------------------------------
def customise_org_facility_resource(r, tablename):
    """
        Customise org_facility resource
        - List Fields
        - Form
        - Filter
        - Report

        Runs after controller customisation
        But runs before prep
    """

    s3db = current.s3db

    # Location selector down to L2, with a street-address section
    from s3 import IS_LOCATION, S3LocationSelector
    levels = ("L0", "L1", "L2")
    loc_field = r.table.location_id
    loc_field.requires = IS_LOCATION()
    loc_field.widget = S3LocationSelector(levels=levels,
                                          show_address = True,
                                          )

    list_fields = ["name",
                   (T("Type"),"facility_type.name"),
                   #"organisation_id",
                   "location_id",
                   "contact",
                   "phone1",
                   "email",
                   "comments",
                   ]

    from s3 import S3OptionsFilter, S3TextFilter
    filter_widgets = [S3TextFilter(["name",
                                    "site_facility_type.facility_type_id",
                                    #"organisation_id",
                                    "location_id",
                                    "contact",
                                    "phone1",
                                    "email",
                                    "comments"
                                    ],
                                   label = T("Search"),
                                   ),
                      S3OptionsFilter("site_facility_type.facility_type_id",
                                      header = True,
                                      label = T("Type of Place"),
                                      ),
                      #S3OptionsFilter("organisation_id",
                      #                header = True,
                      #                represent = "%(name)s",
                      #                ),
                      ]

    report_fields = [#"name",
                     "site_facility_type.facility_type_id",
                     "site_org_group.group_id",
                     "location_id$L3",
                     "organisation_id",
                     ]

    report_options = Storage(
        rows = report_fields,
        cols = [],
        fact = [(T("Number of Facilities"), "count(name)")],
        defaults = Storage(rows = "site_facility_type.facility_type_id",
                           #cols = "site_org_group.group_id",
                           fact = "count(name)",
                           totals = True,
                           chart = "barchart:rows",
                           table = "collapse",
                           )
        )

    # Custom Crud Form
    from s3 import S3SQLCustomForm, S3SQLInlineComponentMultiSelectWidget
    crud_form = S3SQLCustomForm("name",
                                S3SQLInlineComponentMultiSelectWidget(
                                    "facility_type",
                                    #label = T("Type of Place"),
                                    field = "facility_type_id",
                                ),
                                #"organisation_id",
                                "location_id",
                                "contact",
                                "phone1",
                                "email",
                                "comments",
                                )

    s3db.configure(tablename,
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   report_options = report_options,
                   )

settings.customise_org_facility_resource = customise_org_facility_resource
# =========================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
# Registry of enabled modules for this template.
# Comment/uncomment entries here to disable/enable them.
settings.modules = OrderedDict([
    # Core modules which shouldn't be disabled
    ("default", Storage(
        name_nice = "Home",
        restricted = False, # Use ACLs to control access to this module
        access = None,      # All Users (inc Anonymous) can see this module in the default menu & access the controller
        module_type = None  # This item is not shown in the menu
    )),
    ("admin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("appadmin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        module_type = None  # No Menu
    )),
    # ("errors", Storage(
    #     name_nice = "Ticket Viewer",
    #     #description = "Needed for Breadcrumbs",
    #     restricted = False,
    #     module_type = None  # No Menu
    # )),
    # ("sync", Storage(
    #     name_nice = "Synchronization",
    #     #description = "Synchronization",
    #     restricted = True,
    #     access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
    #     module_type = None  # This item is handled separately for the menu
    # )),
    ("translate", Storage(
        name_nice = "Translation Functionality",
        #description = "Selective translation of strings based on module.",
        module_type = None,
    )),
    ("gis", Storage(
        name_nice = "Map",
        #description = "Situation Awareness & Geospatial Analysis",
        restricted = True,
        module_type = 1,    # 1st item in the menu
    )),
    # ("pr", Storage(
    #     name_nice = "Persons",
    #     #description = "Central point to record details on People",
    #     restricted = True,
    #     access = "|1|",     # Only Administrators can see this module in the default menu (access to controller is possible to all still)
    #     module_type = None
    # )),
    ("org", Storage(
        name_nice = "Organizations",
        #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
        restricted = True,
        module_type = None
    )),
    # All modules below here should be possible to disable safely
    # ("hrm", Storage(
    #     name_nice = "Contacts",
    #     #description = "Human Resources Management",
    #     restricted = True,
    #     module_type = None,
    # )),
    ("cms", Storage(
        name_nice = "Content Management",
        restricted = True,
        module_type = None,
    )),
    ("doc", Storage(
        name_nice = "Documents",
        #description = "A library of digital resources, such as photos, documents and reports",
        restricted = True,
        module_type = None,
    )),
    ("event", Storage(
        name_nice = "Disasters",
        #description = "Events",
        restricted = True,
        module_type = None
    )),
    ("stats", Storage(
        name_nice = "Statistics",
        restricted = True,
        module_type = None
    )),
    ("vulnerability", Storage(
        name_nice = "Vulnerability",
        restricted = True,
        module_type = None
    )),
])
# END =========================================================================
# --- Source: Morgenz/bbq -- src/retention/policy/retention_policy.py ---
import logging
from src.retention.policy.filter.grace_period_after_deletion_filter import \
GracePeriodAfterDeletionFilter
from src.retention.policy.filter.most_recent_daily_backup_filter import \
MostRecentDailyBackupFilter
from src.retention.policy.filter.only_one_version_old_backup_filter import \
OnlyOneVersionForOldBackupFilter
from src.retention.policy.filter.ten_young_backup_versions_filter import \
TenYoungBackupVersionsFilter
class RetentionPolicy(object):
    """Chain of retention filters deciding which backups may be deleted."""

    def __init__(self):
        # Filters run in this order; each narrows the set of retained backups.
        self.filters = [
            MostRecentDailyBackupFilter(),
            TenYoungBackupVersionsFilter(),
            OnlyOneVersionForOldBackupFilter(),
            GracePeriodAfterDeletionFilter(),
        ]

    def get_backups_eligible_for_deletion(self, backups, table_reference):
        """Return the backups that no filter in the chain chose to retain."""
        retained = backups
        for policy_filter in self.filters:
            narrowed = policy_filter.filter(backups=retained,
                                            table_reference=table_reference)
            logging.info("%s backups were filtered by using filter %s ",
                         len(retained) - len(narrowed),
                         type(policy_filter).__name__)
            retained = narrowed
        eligible = [backup for backup in backups if backup not in retained]
        logging.info("%s out of %s backups "
                     "selected for deletion for table '%s'.",
                     len(eligible), len(backups), table_reference)
        return eligible
| StarcoderdataPython |
def get(*args):
    """Echo helper: log the received arguments, then return them unchanged."""
    message = "In cool_api with args {}".format(args)
    print(message)
    return args
| StarcoderdataPython |
# --- (dataset metadata marker removed) ---
# importing required libraries
import http.client
import requests
import flask
from flask import jsonify, request
from requests.exceptions import HTTPError
import json
import vendor
# API config for the MoiBit decentralized storage service.
# NOTE(review): credentials are hard-coded in source; they should be loaded
# from environment variables / a .env file instead of being committed.
API_KEY = '<KEY>'
API_SECRET = '08011240158e81fe65f363b1c5ec1fe782ff86a17d5b9618591830edead399568b6f27fa64007243d49e8ae6cfc214dd53c6d2f3e89bfc7930078432de88a841bf31eac8'
API_ENDPOINT = 'https://kfs2.moibit.io'
# test URL : API_URL = "http://api.open-notify.org/astros.json"
# check connection stability , makes it easier to debug , default ping URL = 'https://google.com'
def check_connection_status(API_URL='https://google.com'):
    """
    Return True if API_URL answers with a successful HTTP status, else False.

    Used as a quick sanity check that the network and the requests library
    work before hitting the MoiBit API. Default ping URL is google.com.
    """
    try:
        # timeout matches the other API calls in this module so a dead host
        # cannot hang the caller indefinitely
        response = requests.get(API_URL, timeout=10)
        # raises HTTPError for 4xx/5xx responses
        response.raise_for_status()
        print('Connection Stable')
        return True
    # the server answered, but with an error status code
    except HTTPError as http_err:
        print("False , Error : {error}".format(error=http_err))
        return False
    # anything else: DNS failure, refused connection, timeout, ...
    # (the original also had an unreachable `else:` branch after the
    # `return True` inside the try block; that dead code is removed)
    except Exception as err:
        print("False , Error : {error}".format(error=err))
        return False
# Matic API based getter function
# returns the amount of consumed storage on moibit : total alloted = 2 GB free space decentralized on matic blockchain
def get_used_storage():
    """
    Fetch the amount of MoiBit storage consumed by this account
    (total allotted = 2 GB of free space, decentralized on Matic).

    Returns:
        dict: the decoded JSON response on success, or
        ``{'error': 'unsuccessful fetch'}`` on failure.
    """
    try:
        # tokens are passed in the request header; the request times out
        # after 10 seconds as a countermeasure against hanging connections
        response = requests.get(
            '{API}/moibit/v0/storageused'.format(API=API_ENDPOINT),
            headers={
                'api_key': API_KEY,
                'api_secret': API_SECRET
            },
            timeout=10
        )
        # raises HTTPError for 4xx/5xx responses
        response.raise_for_status()
        print(response.status_code)
        json_response = response.json()
        print(json_response)
        # NOTE: the original had a validation block *after* this return,
        # which was unreachable dead code and has been removed
        return json_response
    except HTTPError as http_err:
        print("False , Error : {error}".format(error=http_err))
        # BUG FIX: the original passed a dict to json.loads(), which raises
        # TypeError; return the error object directly instead
        return {'error': 'unsuccessful fetch'}
    except Exception as err:
        print("False , Error : {error}".format(error=err))
        return {'error': 'unsuccessful fetch'}
# experimental requests , secure header method ; DO NOT USE
def experimental_function_write_message_to_moibit(message):
    """
    EXPERIMENTAL - DO NOT USE.

    Append *message* to the 'matic_transaction_ledger' file via a plain
    requests POST (header-auth variant of write_to_moibit).

    Returns:
        dict: the decoded JSON response on success, or
        ``{'error': 'unsuccessful fetch'}`` on failure.
    """
    try:
        # body parameters: create/createFolders are 'false', so the target
        # file must already exist; pinVersion 'false' keeps only the latest
        PARAMS = {
            'fileName': 'matic_transaction_ledger',
            'text': message,
            'create': 'false',
            'createFolders': 'false',
            'pinVersion': 'false'
        }
        response = requests.post(
            '{API}/moibit/v0/writetexttofile'.format(API=API_ENDPOINT),
            headers={
                'api_key': API_KEY,
                'api_secret': API_SECRET
            },
            params=PARAMS,
            timeout=10  # avoid hanging the server / occupied ports
        )
        # raises HTTPError for 4xx/5xx responses
        response.raise_for_status()
        print(response.status_code)
        json_response = response.json()
        print(json_response)
        # BUG FIX: the original fell off the end and returned None on success
        return json_response
    except HTTPError as http_err:
        print("False , Error : {error}".format(error=http_err))
        # BUG FIX: json.loads() on a dict raises TypeError; return the dict
        return {'error': 'unsuccessful fetch'}
    except Exception as err:
        print("False , Error : {error}".format(error=err))
        return {'error': 'unsuccessful fetch'}
# Matic API based setter function
# writes a given message to the moibit matic_transaction_ledger file
def write_to_moibit(message, flush='false'):
    """
    Append *message* (wrapped in a draft-js block) to the
    'matic_transaction_ledger' file on MoiBit.

    Args:
        message: text to record in the ledger.
        flush: 'true' recreates the file (wiping its contents),
            'false' appends to the existing file.

    Returns:
        dict: the decoded JSON response, or
        ``{'error': 'unsuccessful fetch'}`` on failure.
    """
    try:
        conn = http.client.HTTPSConnection("kfs2.moibit.io")
        # unique-ish key for the draft-js block; uniqueness is not critical
        transactionKEY = vendor.create_transaction_id(5)
        # stringified draft-js JSON payload, appended to the ledger file
        payload = "{\"fileName\":\"matic_transaction_ledger\",\"text\":\"{\\\"blocks\\\":[{\\\"key\\\":\\\"" + str(
            transactionKEY) + "\\\",\\\"text\\\":\\\"" + str(
            message) + "\\\",\\\"type\\\":\\\"unstyled\\\",\\\"depth\\\":0,\\\"inlineStyleRanges\\\":[],\\\"entityRanges\\\":[],\\\"data\\\":{}}],\\\"entityMap\\\":{}}\",\"create\":\"" + str(
            flush) + "\",\"createFolders\":\"false\",\"pinVersion\":\"false\"}"
        # request headers for authentication
        headers = {
            'api_key': API_KEY,
            'api_secret': API_SECRET,
            'content-type': "application/json"
        }
        conn.request("POST", "/moibit/v0/writetexttofile", payload, headers)
        res = conn.getresponse()
        data = res.read()
        # decode and parse the server response for ease of handling
        decoded_data = data.decode("utf-8")
        decoded_data = json.loads(decoded_data)
        print(decoded_data)
        return decoded_data
    except HTTPError as http_err:
        print("False , Error : {error}".format(error=http_err))
        # BUG FIX: the original passed a dict to json.loads(), which raises
        # TypeError; return the error object directly instead
        return {'error': 'unsuccessful fetch'}
    except Exception as err:
        print("False , Error : {error}".format(error=err))
        return {'error': 'unsuccessful fetch'}
# Matic API based getter function
# returns the entire data store on the matic_transaction_ledger in json format
def read_data_moibit():
    """
    Read the entire 'matic_transaction_ledger' file from MoiBit.

    Returns:
        dict: the decoded file contents as a JSON object, or
        ``{'error': 'unsuccessful fetch'}`` on failure.
    """
    try:
        conn = http.client.HTTPSConnection("kfs2.moibit.io")
        # only the file name is required in the request body
        payload = "{\"fileName\":\"matic_transaction_ledger\"}"
        # headers for authentication
        headers = {
            'api_key': API_KEY,
            'api_secret': API_SECRET,
            'content-type': "application/json"
        }
        conn.request("POST", "/moibit/v0/readfile", payload, headers)
        res = conn.getresponse()
        data = res.read()
        decoded_data = data.decode("utf-8")
        print(decoded_data)
        # return the received data parsed as JSON
        return json.loads(decoded_data)
    except HTTPError as http_err:
        print("False , Error : {error}".format(error=http_err))
        # BUG FIX: the original passed a dict to json.loads(), which raises
        # TypeError; return the error object directly instead
        return {'error': 'unsuccessful fetch'}
    except Exception as err:
        print("False , Error : {error}".format(error=err))
        return {'error': 'unsuccessful fetch'}
# Matic API based setter function
# flushes the entire file and populates with demo string
def flush_transaction_file():
    """Wipe the ledger by recreating it, seeded with a placeholder string."""
    return write_to_moibit('!@#$%^&', 'true')
# Matic API based setter function
# pushing the completed transaction once it has been completed to the matic blockchain
def matic_push(completed_transactions, transactionID):
    """
    Serialize a completed transaction record into one line and append it to
    the matic_transaction_ledger via write_to_moibit().

    The record is only pushed when the stored transaction ID matches the
    lookup key; on a mismatch the function silently does nothing.
    """
    record = completed_transactions[transactionID]
    sha256_checksum = record[0]
    transaction_ID = record[1]
    # extra safety check just before pushing to the blockchain
    if transactionID == transaction_ID:
        labelled_fields = (
            ("sha256_trans", sha256_checksum),   # transaction checksum
            ("transID", transaction_ID),         # transaction ID
            ("customerID", record[2]),           # customer ID
            ("customerPublicKey", record[3]),    # customer public key
            ("trans_timestamp", record[4]),      # post creation time (UNIX)
            ("bidderID", record[5]),             # bidder ID
            ("bidderPublicKey", record[6]),      # bidder public key
            ("bid_timestamp", record[7]),        # bid creation time
        )
        transaction_string = "".join(
            "{} : {} , ".format(name, value) for name, value in labelled_fields)
        # the final field is terminated with " ." instead of " , "
        transaction_string += "message_sha : {} .".format(record[8])
        write_to_moibit(transaction_string)
# For DEBUGGING :
# check_connection_status()
# print("Testing the used storage function : ")
# get_used_storage()
# print("Testing the moibit write function : ")
# write_message_to_moibit('my name is <NAME> and I have written this message using an API')
| StarcoderdataPython |
# B2082 - digit statistics (count occurrences of the digit '2' in a range)
# Read a range "a b" from stdin and print how many times the digit '2'
# appears across the decimal representations of a..b (inclusive).
low, high = map(int, input().split())
total = sum(str(value).count('2') for value in range(low, high + 1))
print(total)
# --- Source: chatopera/compose4py -- setup.py ---
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from compose4py import __version__
# Read the package long description from the README.
# IDIOM FIX: fh.read() replaces the needlessly indirect "".join(fh.readlines()).
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata for distribution on PyPI
setup(
    name='compose4py',
    version=__version__,
    description='Onion Model in Python',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/chatopera/compose4py',
    license="Apache Software License",
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Natural Language :: Chinese (Simplified)',
        'Natural Language :: Chinese (Traditional)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Utilities'],
    keywords='onion,compose,chain',
    packages=find_packages(),
    # no runtime dependencies
    install_requires=[
    ])
| StarcoderdataPython |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import botocore
from botocore.exceptions import WaiterError
from botocore.waiter import WaiterModel, create_waiter_with_client
import logging
import os
import secrets
import time
import json
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey
import pem
from cryptography import x509
from cryptography.x509.oid import AttributeOID, NameOID
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, dsa, ec, ed448, ed25519
# Module-level logger, shared across invocations within a Lambda container
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Default backend for all "cryptography" primitives
cryptography_backend = backends.default_backend()

# Waiter tuning constants (polling of acm:DescribeCertificate)
DELAY = 1          # seconds between polls
MAX_ATTEMPTS = 6   # polls before the waiter gives up
ISSUE_NAME = "CertificateIssued"
RENEW_NAME = "CertificateRenewed"

# Custom botocore waiter definitions:
# - CertificateIssued: succeed once Certificate.Status == ISSUED
# - CertificateRenewed: succeed once the certificate is no longer
#   renewal-eligible (i.e. the pending renewal has completed)
waiter_config = {
    "version": 2,
    "waiters": {
        "CertificateIssued": {
            "operation": "DescribeCertificate",
            "delay": DELAY,
            "maxAttempts": MAX_ATTEMPTS,
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": "ISSUED",
                    "argument": "Certificate.Status",
                    "state": "success"
                },
                {
                    "matcher": "path",
                    "expected": "PENDING_VALIDATION",
                    "argument": "Certificate.Status",
                    "state": "retry"
                },
                {
                    "matcher": "path",
                    "expected": "FAILED",
                    "argument": "Certificate.Status",
                    "state": "failure"
                }
            ]
        },
        "CertificateRenewed": {
            "operation": "DescribeCertificate",
            "delay": DELAY,
            "maxAttempts": MAX_ATTEMPTS,
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": "INELIGIBLE",
                    "argument": "Certificate.RenewalEligibility",
                    "state": "success"
                },
                {
                    "matcher": "path",
                    "expected": "PENDING_AUTO_RENEWAL",
                    "argument": "Certificate.RenewalSummary.RenewalStatus",
                    "state": "retry"
                },
                {
                    "matcher": "path",
                    "expected": "ELIGIBLE",
                    "argument": "Certificate.RenewalEligibility",
                    "state": "retry"
                },
                {
                    "matcher": "path",
                    "expected": "PENDING_VALIDATION",
                    "argument": "Certificate.RenewalSummary.RenewalStatus",
                    "state": "retry"
                },
                {
                    "matcher": "path",
                    "expected": "FAILED",
                    "argument": "Certificate.RenewalSummary.RenewalStatus",
                    "state": "failure"
                }
            ]
        }
    }
}
# Main Function
def lambda_handler(event, context):
    """Secrets Manager Rotation Template

    This is a template for creating an AWS Secrets Manager rotation lambda

    Args:
        event (dict): Lambda dictionary of event parameters. These keys must include the following:
            - SecretId: The secret ARN or identifier
            - ClientRequestToken: The ClientRequestToken of the secret version
            - Step: The rotation step (one of createSecret, setSecret, testSecret, or finishSecret)

        context (LambdaContext): The Lambda runtime information

    Raises:
        ResourceNotFoundException: If the secret with the specified arn and stage does not exist

        ValueError: If the secret is not properly configured for rotation

        KeyError: If the event parameters do not contain the expected keys
    """
    arn = event['SecretId']
    token = event['ClientRequestToken']
    step = event['Step']

    # Setup the client
    service_client = boto3.client('secretsmanager')

    # Make sure the version is staged correctly
    metadata = service_client.describe_secret(SecretId=arn)
    if not metadata['RotationEnabled']:
        logger.error("Secret %s is not enabled for rotation" % arn)
        raise ValueError("Secret %s is not enabled for rotation" % arn)
    versions = metadata['VersionIdsToStages']
    if token not in versions:
        logger.error("Secret version %s has no stage for rotation of secret %s." % (token, arn))
        raise ValueError("Secret version %s has no stage for rotation of secret %s." % (token, arn))
    if "AWSCURRENT" in versions[token]:
        # This version has already been promoted; nothing left to do
        logger.info("Secret version %s already set as AWSCURRENT for secret %s." % (token, arn))
        return
    elif "AWSPENDING" not in versions[token]:
        logger.error("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))
        raise ValueError("Secret version %s not set as AWSPENDING for rotation of secret %s." % (token, arn))

    # Dispatch to the handler for the requested rotation step
    if step == "createSecret":
        create_secret(service_client, arn, token)
    elif step == "setSecret":   # no external service to update for this secret type
        set_secret(service_client, arn, token)
    elif step == "testSecret":  # no external service to test against
        test_secret(service_client, arn, token)
    elif step == "finishSecret":
        finish_secret(service_client, arn, token)
    else:
        raise ValueError("Invalid step parameter")
############################################################################################################
####################################### HELPER FUNCTIONS ###################################################
############################################################################################################
def create_secret(service_client, arn, token):
    """Create the secret

    Ensure an AWSPENDING secret version exists for this rotation. If none
    exists yet, issue (ACM-managed) or renew/reissue (PCA) the certificate
    and store the updated secret dictionary under the AWSPENDING stage.

    Args:
        service_client (client): The secrets manager service client
        arn (string): The secret ARN or other identifier
        token (string): The ClientRequestToken associated with the secret version

    Raises:
        ResourceNotFoundException: If the secret with the specified arn and stage does not exist
    """
    # Make sure the current secret exists
    current_dict = get_secret_dict(service_client, arn, 'AWSCURRENT')

    # Regional clients, derived from the region embedded in the CA ARN
    region = current_dict["CA_ARN"].split(":")[3]
    acm_client = boto3.client('acm', region_name=region)
    acm_pca_client = boto3.client('acm-pca', region_name=region)

    waiter_model = WaiterModel(waiter_config)
    issue_waiter = create_waiter_with_client(ISSUE_NAME, waiter_model, acm_client)
    renew_waiter = create_waiter_with_client(RENEW_NAME, waiter_model, acm_client)

    # Now try to get the secret version, if that fails, put a new secret
    try:
        get_secret_dict(service_client, arn, 'AWSPENDING', token)
        logger.info("createSecret: Successfully retrieved secret for %s." % arn)
    except service_client.exceptions.ResourceNotFoundException:
        if current_dict['CERTIFICATE_TYPE'] == 'ACM_ISSUED':
            current_dict = generate_acm_managed(current_dict, acm_client, renew_waiter, issue_waiter)
        else:
            if 'CERTIFICATE_ARN' in current_dict:
                # Renewal: reuse the private key stored in the secret.
                # BUG FIX: the original contained the unusable placeholder
                # 'password=<PASSWORD>'; the PEM key is stored unencrypted,
                # so password must be None.
                key = serialization.load_pem_private_key(
                    current_dict['PRIVATE_KEY_PEM'].encode(),
                    password=None,
                    backend=cryptography_backend)
            else:
                # First issuance: generate a fresh keypair per the secret's
                # algorithm configuration
                key = generate_private_key(
                    current_dict["KEY_ALGORITHM"],
                    "" if "KEY_SIZE" not in current_dict else current_dict["KEY_SIZE"],
                    "" if "EC_CURVE" not in current_dict else current_dict["EC_CURVE"])
            try:
                # issue PCA certificate
                current_dict = generate_customer_managed(current_dict, acm_pca_client, key)
            except Exception as e:
                # NOTE(review): the error is logged but not re-raised, so a
                # failed issuance still writes current_dict below -- confirm
                # this best-effort behaviour is intended
                logger.error("CreateSecret: Unable to create secret with error: %s" % (e))

        # Put the secret
        service_client.put_secret_value(SecretId=arn, ClientRequestToken=token, SecretString=json.dumps(current_dict), VersionStages=['AWSPENDING'])
        logger.info("createSecret: Successfully put secret for ARN %s and version %s." % (arn, token))
def set_secret(service_client, arn, token):
    """Set the secret.

    This step would normally push the AWSPENDING secret value out to the
    service that consumes it (for a database credential, for instance, this is
    where the user's password would be changed to the pending value).

    For this certificate rotation it is intentionally a no-op: implement it
    only if application interruption is a concern.

    Args:
        service_client (client): The secrets manager service client
        arn (string): The secret ARN or other identifier
        token (string): The ClientRequestToken associated with the secret version
    """
    # Deliberately nothing to do here.
    return
def test_secret(service_client, arn, token):
"""Test the secret
This method should validate that the AWSPENDING secret works in the service that the secret belongs to. For example, if the secret
is a database credential, this method should validate that the user can login with the password in AWSPENDING and that the user has
all of the expected permissions against the database.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
"""
# This is where the secret should be tested against the service
# raise NotImplementedError
# can implement if not concerned about application interruption
return
def finish_secret(service_client, arn, token):
    """Finish the secret.

    Promotes the version identified by ``token`` to AWSCURRENT, demoting the
    previous AWSCURRENT version in the same call. If ``token`` is already
    current this is a no-op.

    Args:
        service_client (client): The secrets manager service client
        arn (string): The secret ARN or other identifier
        token (string): The ClientRequestToken associated with the secret version
    Raises:
        ResourceNotFoundException: If the secret with the specified arn does not exist
    """
    # Find whichever version currently carries the AWSCURRENT stage.
    metadata = service_client.describe_secret(SecretId=arn)
    current_version = None
    for version_id, stages in metadata["VersionIdsToStages"].items():
        if "AWSCURRENT" not in stages:
            continue
        if version_id == token:
            # The correct version is already marked as current, return
            logger.info("finishSecret: Version %s already marked as AWSCURRENT for %s" % (version_id, arn))
            return
        current_version = version_id
        break
    # Finalize by staging the secret version current
    service_client.update_secret_version_stage(SecretId=arn, VersionStage="AWSCURRENT", MoveToVersionId=token, RemoveFromVersionId=current_version)
    logger.info("finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s." % (token, arn))
def get_secret_dict(service_client, arn, stage, token=None):
    """Fetch and validate the secret dictionary for the given arn and stage.

    Args:
        service_client (client): The secrets manager service client
        arn (string): The secret ARN or other identifier
        stage (string): The stage identifying the secret version
        token (string): The ClientRequestToken associated with the secret
            version, or None if no version validation is desired
    Returns:
        SecretDictionary: the parsed secret JSON
    Raises:
        ResourceNotFoundException: If the secret with the specified arn and stage does not exist
        ValueError: If the secret is not valid JSON
        KeyError: If a required configuration field is missing
    """
    # Only do VersionId validation against the stage if a token is passed in.
    if token:
        secret = service_client.get_secret_value(SecretId=arn, VersionId=token, VersionStage=stage)
    else:
        secret = service_client.get_secret_value(SecretId=arn, VersionStage=stage)
    secret_dict = json.loads(secret['SecretString'])
    # The certificate type is mandatory; it decides which other fields we need.
    if 'CERTIFICATE_TYPE' not in secret_dict:
        raise KeyError("Certificate Type (CERTIFICATE_TYPE) must be set to generate the proper certificate")
    if secret_dict['CERTIFICATE_TYPE'] == 'ACM_ISSUED':
        required_fields = ["CA_ARN", "COMMON_NAME", "ENVIRONMENT"]
    else:
        required_fields = ["CA_ARN", "COMMON_NAME", "TEMPLATE_ARN", "KEY_ALGORITHM", "KEY_SIZE", "SIGNING_ALGORITHM", "SIGNING_HASH"]
    missing = [field for field in required_fields if field not in secret_dict]
    if missing:
        raise KeyError("%s key is missing from secret JSON" % missing[0])
    return secret_dict
def generate_private_key(key_type, size, curve):
    """Generate a private key of the requested type.

    Supports RSA, DSA, Ed25519, Ed448 and EllipticCurve keys.

    Args:
        key_type: one of TYPE_RSA / TYPE_DSA / TYPE_ED25519 / TYPE_ED448 / TYPE_EC
        size: key size (used for RSA and DSA)
        curve: curve name from cryptography's ``ec`` module (used for TYPE_EC)
    Raises:
        ValueError: if the key type is not supported
    """
    # See https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate.html
    # for the algorithms ACM supports. Factories are lazy so only the selected
    # branch evaluates its arguments.
    factories = {
        "TYPE_RSA": lambda: rsa.generate_private_key(
            public_exponent=65537,
            key_size=int(size),
            backend=cryptography_backend),
        "TYPE_DSA": lambda: dsa.generate_private_key(
            key_size=int(size),
            backend=cryptography_backend),
        "TYPE_ED25519": ed25519.Ed25519PrivateKey.generate,
        "TYPE_ED448": ed448.Ed448PrivateKey.generate,
        "TYPE_EC": lambda: ec.generate_private_key(
            curve=getattr(ec, curve),
            backend=cryptography_backend),
    }
    if key_type not in factories:
        raise ValueError("Unsupported key type")
    return factories[key_type]()
def generate_csr(current_dict, key):
    """Build and sign a CSR for an ACM PCA certificate request.

    Args:
        current_dict: current secret values; supplies the subject common name
            (COMMON_NAME) and the signing hash (SIGNING_HASH)
        key: private key object used to sign the CSR
    Returns:
        The CSR as a PEM-encoded string.
    """
    subject = x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, current_dict['COMMON_NAME'])
    ])
    builder = x509.CertificateSigningRequestBuilder().subject_name(subject)
    # Ed25519/Ed448 keys embed their own digest, so no explicit hash is given.
    if isinstance(key, (ed25519.Ed25519PrivateKey, ed448.Ed448PrivateKey)):
        hash_algorithm = None
    else:
        hash_algorithm = getattr(hashes, current_dict["SIGNING_HASH"].upper())()
    # EC keys must wrap the hash in an ECDSA signature-algorithm object.
    if isinstance(key, ec.EllipticCurvePrivateKey):
        signature_algorithm = ec.ECDSA(hash_algorithm)
    else:
        signature_algorithm = hash_algorithm
    csr = builder.sign(key, signature_algorithm, cryptography_backend)
    return csr.public_bytes(serialization.Encoding.PEM).decode()
def get_signature_algorithm(alg_type, alg_hash):
    """Return the ACM PCA SigningAlgorithm name for a key family and hash.

    Args:
        alg_type: "RSA" or "ECDSA" -- the key algorithm family
        alg_hash: "sha256", "sha384" or "sha512" -- the digest to sign with
    Returns:
        The SigningAlgorithm string accepted by ACM PCA (e.g. "SHA256WITHRSA").
    Raises:
        ValueError: if the hash or algorithm family is not supported
    """
    signing_algorithms = {
        "RSA": {
            "sha256": "SHA256WITHRSA",
            "sha384": "SHA384WITHRSA",
            "sha512": "SHA512WITHRSA"
        },
        "ECDSA": {
            "sha256": "SHA256WITHECDSA",
            "sha384": "SHA384WITHECDSA",
            "sha512": "SHA512WITHECDSA"
        }
    }
    # BUGFIX: the original *returned* a ValueError instance instead of raising
    # it, so an unsupported hash was passed straight to the ACM PCA call.
    # Unsupported families previously surfaced as a bare KeyError.
    if alg_type not in signing_algorithms:
        raise ValueError('Signing Algorithm not supported')
    if alg_hash not in signing_algorithms[alg_type]:
        raise ValueError('Signing Algorithm not supported')
    return signing_algorithms[alg_type][alg_hash]
def generate_acm_managed(current_dict, client, renew, issue):
    """
    Generates a Private Certificate using AWS Certificate Manager (ACM).

    Renews the existing certificate when one is already recorded and the
    secret's ENVIRONMENT is 'TEST'; otherwise requests a brand new one. The
    certificate, chain and (decrypted) private key are written back into the
    secret dictionary.

    Args:
        current_dict: current secret values used to generate certificate
        client: boto3 client used to make requests to ACM
        renew: boto3 waiter designed to wait for certificate renewal to complete
        issue: boto3 waiter to wait for certificate to be issued
    Returns:
        current_dict, updated with CERTIFICATE_ARN, CERTIFICATE_PEM,
        CERTIFICATE_CHAIN_PEM and PRIVATE_KEY_PEM.
    Raises:
        CreateSecret error if unable to export certificate and set secret value
    """
    CERTIFICATE_ARN = ""
    # renew certificate to test everything works
    if 'CERTIFICATE_ARN' in current_dict and current_dict['ENVIRONMENT'] == 'TEST':
        CERTIFICATE_ARN = current_dict['CERTIFICATE_ARN']
        client.renew_certificate(CertificateArn=current_dict['CERTIFICATE_ARN'])
        # wait for certificate renewal to complete
        renew.wait(CertificateArn=CERTIFICATE_ARN)
    else: # first time creating secret
        response = client.request_certificate(
            DomainName = current_dict['COMMON_NAME'],
            CertificateAuthorityArn=current_dict['CA_ARN']
        )
        CERTIFICATE_ARN = response['CertificateArn']
        current_dict['CERTIFICATE_ARN'] = CERTIFICATE_ARN
        issue.wait(CertificateArn=CERTIFICATE_ARN)
    try: # export certificate
        # ACM requires a passphrase to export; a throwaway random one is used
        # and the key is re-stored unencrypted below.
        pw = secrets.token_hex(16).encode()
        response = client.export_certificate(
            CertificateArn = CERTIFICATE_ARN,
            Passphrase = pw
        )
        current_dict['CERTIFICATE_PEM'] = response["Certificate"]
        current_dict['CERTIFICATE_CHAIN_PEM'] = response["CertificateChain"]
        # decrypt and store private key
        pkey = serialization.load_pem_private_key(response['PrivateKey'].encode(), password=pw, backend=cryptography_backend)
        current_dict['PRIVATE_KEY_PEM'] = pkey.private_bytes(
            encoding = serialization.Encoding.PEM,
            format = serialization.PrivateFormat.PKCS8,
            encryption_algorithm = serialization.NoEncryption()
        ).decode()
    except WaiterError as e:
        # NOTE(review): only WaiterError is caught, yet both waiters run
        # *outside* this try block -- confirm the intended failure handling;
        # on this path current_dict is returned without the exported material.
        logger.error("CreateSecret: Unable to create secret with error: %s" % (e))
    return current_dict
def generate_customer_managed(current_dict, client, key):
    """Issue a certificate directly from ACM PCA using the supplied key.

    Args:
        current_dict: current secret values; updated in place with the new
            certificate ARN, certificate/chain PEMs and private key PEM
        client: boto3 acm-pca client
        key: private key object used to sign the CSR
    Returns:
        current_dict, updated with the issued certificate material.
    """
    validity_days = 365 if "VALIDITY" not in current_dict else current_dict["VALIDITY"]
    # Issue the PCA certificate from a freshly signed CSR.
    issue_response = client.issue_certificate(
        CertificateAuthorityArn=current_dict['CA_ARN'],
        Csr=generate_csr(current_dict, key).encode(),
        SigningAlgorithm=get_signature_algorithm(
            current_dict['SIGNING_ALGORITHM'],
            current_dict['SIGNING_HASH']),
        TemplateArn=current_dict['TEMPLATE_ARN'],
        Validity={'Value': validity_days, 'Type': 'DAYS'}
    )
    current_dict['CERTIFICATE_ARN'] = issue_response['CertificateArn']
    # Block until ACM PCA reports the certificate as issued.
    client.get_waiter("certificate_issued").wait(
        CertificateAuthorityArn=current_dict['CA_ARN'],
        CertificateArn=current_dict['CERTIFICATE_ARN'],
        WaiterConfig={
            'Delay': 1,
            'MaxAttempts': 10
        })
    # Fetch the issued certificate and its chain.
    cert_response = client.get_certificate(
        CertificateAuthorityArn=current_dict['CA_ARN'],
        CertificateArn=current_dict['CERTIFICATE_ARN']
    )
    current_dict['CERTIFICATE_PEM'] = cert_response['Certificate']
    current_dict['CERTIFICATE_CHAIN_PEM'] = cert_response['CertificateChain']
    # Store the private key as an unencrypted PKCS8 PEM alongside the cert.
    current_dict['PRIVATE_KEY_PEM'] = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()
    return current_dict
| StarcoderdataPython |
301191 | <reponame>IBM/UKI-Technology-Garage
import math
from collections import defaultdict
from prettytable import PrettyTable
MAX_LINE_LENGTH = 90
def split_sentence_to_lines(sentence, max_len):
    """Wrap a sentence into console lines no longer than max_len characters.

    The first line is prefixed with '- '; continuation lines start with a
    single space. A sentence that already fits is returned as one line.
    """
    if len(sentence) <= max_len:
        return ['- ' + sentence]
    lines = []
    current = None
    for word in sentence.split(' '):
        if current is None:
            current = '- ' + word
        elif len(current) + 1 + len(word) <= max_len:
            current += ' ' + word
        else:
            lines.append(current)
            current = ' ' + word
    if current is not None:
        lines.append(current)
    return lines
def print_results_in_a_table(result, n_sentences_per_kp, title, one_line_sentences_only=False):
    """Print key point analysis results as a PrettyTable, preceded by coverage.

    Key points with at most one match are counted toward coverage but omitted
    from the table; the first matching sentence (the key point itself) is
    never shown as an example.
    """
    table = PrettyTable()
    table.field_names = ['#', 'key point', 'size', 'example']
    total_sentences = 0
    matched_sentences = 0
    for i, keypoint_matching in enumerate(result['keypoint_matchings']):
        kp = keypoint_matching['keypoint']
        matches = keypoint_matching['matching']
        total_sentences += len(matches)
        if kp == 'none':  # cluster of all unmatched sentences
            continue
        matched_sentences += len(matches)
        if len(matches) <= 1:
            continue
        table.add_row([i, kp, len(matches), ''])
        examples = [match['sentence_text'] for match in matches]
        if one_line_sentences_only:
            examples = [s for s in examples if len(s) <= MAX_LINE_LENGTH]
        # First sentence is the key point itself, so skip it.
        for sentence in examples[1:(n_sentences_per_kp + 1)]:
            for line in split_sentence_to_lines(sentence, MAX_LINE_LENGTH):
                table.add_row(['', '', '', line])
    print(title + ' coverage: %.2f' % ((float(matched_sentences) / float(total_sentences)) * 100.0))
    print(title + ' key points:')
    print(table)
def print_results(result, n_sentences_per_kp, title):
    '''
    Prints the key point analysis result to console as a two-level hierarchy
    (parent key points with their kids indented one tab), preceded by coverage.
    :param result: the result, returned by method get_result in KpAnalysisTaskFuture.
    :param n_sentences_per_kp: number of example sentences printed under each key point
    :param title: label used in the printed headers
    '''
    def print_kp(kp, stance, n_matches, n_matches_subtree, depth, keypoint_matching, n_sentences_per_kp):
        # A parent with kids shows its subtree total first, then its own count.
        has_n_matches_subtree = n_matches_subtree is not None
        print('%s%d%s - %s%s' % (('\t' * depth), n_matches_subtree if has_n_matches_subtree else n_matches,
                                 (' - %d' % n_matches) if has_n_matches_subtree else '', kp,
                                 '' if stance is None else ' - ' + stance))
        sentences = [match['sentence_text'] for match in keypoint_matching['matching']]
        sentences = sentences[1:(n_sentences_per_kp + 1)]  # first sentence is the kp itself
        lines = split_sentences_to_lines(sentences, depth)
        for line in lines:
            print('\t%s' % line)
    # First pass: tally per-key-point match counts and build the
    # parent -> kids hierarchy ('root' or missing parent means top level).
    kp_to_n_matches_subtree = defaultdict(int)
    parents = list()
    parent_to_kids = defaultdict(list)
    for keypoint_matching in result['keypoint_matchings']:
        kp = keypoint_matching['keypoint']
        kp_to_n_matches_subtree[kp] += len(keypoint_matching['matching'])
        parent = keypoint_matching.get("parent", None)
        if parent is None or parent == 'root':
            parents.append(keypoint_matching)
        else:
            parent_to_kids[parent].append(keypoint_matching)
            kp_to_n_matches_subtree[parent] += len(keypoint_matching['matching'])
    # Largest subtrees first.
    parents.sort(key=lambda x: kp_to_n_matches_subtree[x['keypoint']], reverse=True)
    # Coverage = share of sentences matched to a real key point.
    total_sentences = 0
    matched_sentences = 0
    for i, keypoint_matching in enumerate(result['keypoint_matchings']):
        matches = keypoint_matching['matching']
        total_sentences += len(matches)
        if keypoint_matching['keypoint'] != 'none': # skip cluster of all unmatched sentences
            matched_sentences += len(matches)
    print(title + ' coverage: %.2f' % ((float(matched_sentences) / float(total_sentences)) * 100.0))
    print(title + ' key points:')
    # Print each parent key point followed by its kids, one level deep.
    for parent in parents:
        kp = parent['keypoint']
        stance = None if 'stance' not in parent else parent['stance']
        if kp == 'none':
            continue
        print_kp(kp, stance, len(parent['matching']), None if len(parent_to_kids[kp]) == 0 else kp_to_n_matches_subtree[kp], 0, parent, n_sentences_per_kp)
        for kid in parent_to_kids[kp]:
            kid_kp = kid['keypoint']
            kid_stance = None if 'stance' not in kid else kid['stance']
            print_kp(kid_kp, kid_stance, len(kid['matching']), None, 1, kid, n_sentences_per_kp)
def split_sentences_to_lines(sentences, n_tabs):
    """Wrap every sentence and prefix each resulting line with n_tabs tabs."""
    indent = '\t' * n_tabs
    return [indent + line
            for sentence in sentences
            for line in split_sentence_to_lines(sentence, MAX_LINE_LENGTH)]
def print_bottom_matches_for_kp(result, kp_to_print, bottom_k):
    """Print the last (weakest) bottom_k matches of a single key point."""
    for keypoint_matching in result['keypoint_matchings']:
        if keypoint_matching['keypoint'] != kp_to_print:
            continue
        bottom_matches = keypoint_matching['matching'][-bottom_k:]
        print('\nBottom %d matches:' % bottom_k)
        sentences = [match['sentence_text'] for match in bottom_matches]
        print('\n'.join(split_sentences_to_lines(sentences, 1)))
        break
def compare_results(result_1, title_1, result_2, title_2):
    """Print a side-by-side comparison of key point sizes in two results.

    Shared key points show a percent change column; key points unique to one
    result get '---' placeholders. Rows are ordered by descending size.
    """
    def kp_sizes(result):
        # Map each real key point (i.e. not the 'none' cluster) to its size.
        return {m['keypoint']: len(m['matching'])
                for m in result['keypoint_matchings'] if m['keypoint'] != 'none'}
    sizes_1 = kp_sizes(result_1)
    sizes_2 = kp_sizes(result_2)
    table = PrettyTable()
    table.field_names = ['key point', title_1, title_2, 'change']
    # Shared key points, largest first by result_1 size, with percent change.
    shared = sorted(set(sizes_1) & set(sizes_2), key=lambda kp: sizes_1[kp], reverse=True)
    for kp in shared:
        change = math.floor((sizes_2[kp] - sizes_1[kp]) / sizes_1[kp] * 100.0)
        table.add_row([kp, sizes_1[kp], sizes_2[kp], str(change) + '%'])
    # Key points present only in one of the two results.
    for kp in sorted(set(sizes_1) - set(sizes_2), key=lambda kp: sizes_1[kp], reverse=True):
        table.add_row([kp, sizes_1[kp], '---', '---'])
    for kp in sorted(set(sizes_2) - set(sizes_1), key=lambda kp: sizes_2[kp], reverse=True):
        table.add_row([kp, '---', sizes_2[kp], '---'])
    print('%s - %s comparison:' % (title_1, title_2))
    print(table)
def init_logger():
    """Configure the root logger: INFO level, single console handler.

    Any previously attached handlers are discarded, so repeated calls do not
    duplicate output.
    """
    from logging import getLogger, getLevelName, Formatter, StreamHandler
    root = getLogger()
    root.setLevel(getLevelName('INFO'))
    handler = StreamHandler()
    handler.setFormatter(Formatter("%(asctime)s [%(levelname)s] %(filename)s %(lineno)d: %(message)s"))
    root.handlers = []
    root.addHandler(handler)
def print_top_and_bottom_k_sentences(sentences, k):
    """Print the k highest- and k lowest-quality sentences from a ranked list."""
    top = [sentence['text'] for sentence in sentences[:k]]
    print('Top %d quality sentences: ' % k)
    print('\n'.join(split_sentences_to_lines(top, 1)))
    bottom = [sentence['text'] for sentence in sentences[-k:]]
    print('\n\nBottom %d quality sentences: ' % k)
    print('\n'.join(split_sentences_to_lines(bottom, 1)))
| StarcoderdataPython |
1743696 | """
Bokeh based functions for line, histogram and bar charts
"""
import os
import time
import numpy as np
from bokeh import __version__ as bokeh_release_ver
from bokeh.plotting import figure
from bokeh.models import (
HoverTool,
NumeralTickFormatter
)
# CSS for a spinning "loading" ring overlay (element id "loader"), intended to
# be injected into generated pages while the chart content is still rendering.
LOADER = """
#loader {
position: absolute;
left: 20%;
top: 30%;
z-index: 1000;
border: 16px solid #f3f3f3;
border-top: 16px solid #3498db;
border-radius: 50%;
width: 120px;
height: 120px;
animation: spin 2s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
"""
# JS snippet that, once the page's onload event fires, hides the #loading
# element and reveals #content.
PRELOADER = """
<script type="text/javascript">
function preloader(){
document.getElementById("loading").style.display = "none";
document.getElementById("content").style.display = "block";
}
window.onload = preloader;
</script>
"""
class ElapsedMilliseconds:
    """Millisecond wall-clock stopwatch for timing sections of code.

    Usage example 1:
        time = ElapsedMilliseconds()
        ...lengthy process...
        print(time.elapsed(), 'ms')

    Usage example 2:
        time = ElapsedMilliseconds(log_time=True)
        ...lengthy process...
        time.log('your custom log msg')
    """
    def __init__(self, log_time=False):
        # last_elapsed: duration of the most recent interval, in ms.
        # last_local: reference timestamp (ms) the next interval starts from.
        self.last_elapsed = 0
        self.last_local = int(round(time.time() * 1000))
        self.log_time = log_time

    def elapsed(self):
        """Return elapsed time in milliseconds since the last call.

        Returns:
            int -- elapsed time in milliseconds since the previous call to
            this method (or since construction/restart).
        """
        # BUGFIX: sample the clock once. The original read time.time() twice,
        # so the time between the two reads was silently dropped from the
        # start of the next interval.
        now = int(round(time.time() * 1000))
        self.last_elapsed = now - self.last_local
        self.last_local = now
        return self.last_elapsed

    def log(self, module='', function='', process=''):
        """Print elapsed time since the last call, if logging is enabled.

        Keyword Arguments:
            module {String} -- module name (default: {''})
            function {String} -- function name (default: {''})
            process {String} -- process name (default: {''})
        """
        if self.log_time:
            print(f"{module}:{function}:{process}:{self.elapsed()}ms")

    def restart(self):
        """Reset the time reference to now and clear the last interval."""
        self.last_elapsed = 0
        self.last_local = int(round(time.time() * 1000))
def cwd():
    """Return the current working directory, whether running from the bokeh
    server, Jupyter or plain Python.

    Returns:
        String -- path to current working directory
    """
    try:
        script_path = __file__
    except NameError:
        # No __file__ (e.g. interactive session): fall back to the process cwd.
        return os.getcwd()
    return os.path.dirname(script_path)
def histogram(x, xlabel='x', ylabel='y', **kwargs):
    """Plot an interactive Bokeh histogram of *x* with hover tooltips.

    Arguments:
        x {list, array, or series} -- data to plot histogram
    Keyword Arguments:
        xlabel {String} -- x axis label (default: {'x'})
        ylabel {String} -- y axis label (default: {'y'})
        plus any key from the figure/quad/misc setting dicts below
    Returns:
        Bokeh figure -- plot
    """
    # Default settings, selectively overridable through kwargs.
    figure_settings = dict(title=None, tools='')
    quad_settings = dict(fill_color='navy', hover_fill_color='grey',
                         line_color="white", alpha=0.5, hover_fill_alpha=1.0)
    misc_settings = dict(density=False, bins='auto')
    for settings in (figure_settings, quad_settings, misc_settings):
        for key, value in kwargs.items():
            if key in settings:
                settings[key] = value
    # Sturges' rule picks the bin count from the sample size.
    if misc_settings['bins'] == 'auto':
        misc_settings['bins'] = int(1 + 3.322 * np.log10(len(x)))
    hist, edges = np.histogram(x, density=misc_settings['density'],
                               bins=misc_settings['bins'])
    plot = figure(**figure_settings)
    quad = plot.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
                     **quad_settings)
    plot.add_tools(HoverTool(renderers=[quad],
                             tooltips=[(f"{xlabel.title()} Range", '@left{int} to @right{int}'),
                                       (ylabel.title(), '@top{0,0}')]))
    plot.y_range.start = 0
    plot.xaxis.axis_label = xlabel
    plot.yaxis.axis_label = ylabel
    return plot
def vbar(x, y, xlabel='x', ylabel='y', **kwargs):
    """Plot an interactive Bokeh vertical bar chart with hover tooltips.

    Arguments:
        x {list, array, or series} -- x data for vertical bars
        y {list, array, or series} -- y data for vertical bars
    Keyword Arguments:
        xlabel {String} -- x axis label (default: {'x'})
        ylabel {String} -- y axis label (default: {'y'})
        plus any key from the figure/vbar/misc setting dicts below
    Returns:
        Bokeh figure -- plot
    """
    # Default settings, selectively overridable through kwargs.
    figure_settings = dict(x_range=x, plot_height=600, plot_width=950,
                           title=None, toolbar_location=None, tools='')
    vbar_settings = dict(width=0.9, fill_color='navy', line_color='white',
                         alpha=0.5, hover_fill_color='grey', hover_fill_alpha=1.0)
    misc_settings = dict(yaxis_formatter='auto', user_tooltips='auto',
                         user_formatters='auto')
    for settings in (figure_settings, vbar_settings, misc_settings):
        for key, value in kwargs.items():
            if key in settings:
                settings[key] = value
    plot = figure(**figure_settings)
    vbar_glyph = plot.vbar(x=x, top=y, **vbar_settings)
    # Hover tooltips: defaults show category and value, both user-overridable.
    tooltips = [(xlabel.title(), '@x'), (ylabel.title(), '@top{0,0}')]
    if misc_settings['user_tooltips'] != 'auto':
        tooltips = misc_settings['user_tooltips']
    formatters = {ylabel.title(): 'numeral'}
    if misc_settings['user_formatters'] != 'auto':
        formatters = misc_settings['user_formatters']
    plot.add_tools(HoverTool(renderers=[vbar_glyph],
                             tooltips=tooltips,
                             formatters=formatters))
    if misc_settings['yaxis_formatter'] != 'auto':
        plot.yaxis.formatter = NumeralTickFormatter(format=misc_settings['yaxis_formatter'])
    plot.y_range.start = 0
    plot.xaxis.axis_label = xlabel
    plot.yaxis.axis_label = ylabel
    return plot
| StarcoderdataPython |
def fizzbuzz(n):
    """Return the FizzBuzz word for n: "Fizz" for multiples of 3, "Buzz" for
    multiples of 5, "FizzBuzz" for both, otherwise the number as a string."""
    if n % 3 == 0 and n % 5 == 0:
        return "FizzBuzz"
    if n % 3 == 0:
        return "Fizz"
    if n % 5 == 0:
        return "Buzz"
    return str(n)

# BUGFIX: the original started at i = 1 but incremented *before* printing, so
# it printed 2..100 and skipped 1. Classic FizzBuzz covers 1..100.
for i in range(1, 101):
    print(fizzbuzz(i))
3476107 | # Copyright (c) 2021, Usama and Contributors
# See license.txt
# import frappe
import unittest
class TestSaleInvoice(unittest.TestCase):
    """Placeholder test case for the Sale Invoice doctype.

    No tests are implemented yet; presumably the class exists so the app's
    test discovery finds this doctype's test module -- confirm before removal.
    """
    pass
| StarcoderdataPython |
6558169 | import os
import sys
import urllib2
from bs4 import BeautifulSoup, Comment
import collections
import math
import csv
import re
import numpy as np
import json
import urlparse
import argparse
import traceback
import datetime
import time
import codecs
from operator import itemgetter
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.options import Options
# Get the shared.py
# Climb from this script's folder up to the repository root, then put the
# common "scripts" folder on sys.path so the shared helper modules import.
script_file = os.path.abspath(__file__)
script_folder = os.path.dirname(script_file)
province_folder = os.path.dirname(script_folder)
home_folder = os.path.dirname(province_folder)
script_folder = home_folder + "\\scripts"
sys.path.append(script_folder)
import shared
import access_rest as rest
province = 'Saskatchewan'
# Local output folder for this province's scrape results (Windows path).
work_folder = 'H:\\GIS_Data\\Work\\NRCan\\FGP\\TA001\\_%s' % province
# Scrape targets: name -> (extractor call expression, catalogue landing URL).
site_list = collections.OrderedDict([
    ('regina', ('extract_regina()', 'http://open.regina.ca/')),
    ('saskatoon', ('extract_saskatoon()', 'http://opendata-saskatoon.cloudapp.net/DataBrowser/SaskatoonOpenDataCatalogueBeta'))])
def extract_regina():
    """Scrape Regina's open-data catalogue and ArcGIS REST services to a CSV.

    Walks every category tile on open.regina.ca, filters datasets by map
    format (XML/SHP/KML/JSON/REST), pages through each filtered result list,
    and writes one CSV row per dataset. Then appends every service found on
    the city's ArcGIS REST endpoint and de-duplicates the CSV by URL.

    NOTE(review): Python 2 code (print statements, urlparse/urllib2 style);
    relies on the sibling ``shared`` and ``access_rest`` helper modules.
    """
    ############################################################################
    # Extract from the Open Data site for Regina
    regina_url = site_list['regina'][1]
    # Get soup
    regina_soup = shared.soup_it_up(regina_url)
    categories = regina_soup.find_all('section', attrs={'class': 'tile-section'})
    # Create the CSV
    csv_fn = "Regina_results"
    field_names = ['Title', 'Description', 'Available Formats', 'Date', 'URL']
    my_csv = shared.MyCSV(csv_fn, regina_url, province, field_names)
    my_csv.open_csv()
    map_formats = ['xml', 'shp', 'kml', 'json', 'rest']
    records = []
    print "Number of categories: " + str(len(categories))
    for cat in categories:
        # Get the URL for the category
        link = shared.get_link(cat)
        sub_url = urlparse.urljoin(regina_url, link)
        h3 = cat.find('h3', attrs={'class': 'tile-label'})
        print "Category: " + str(h3.text)
        # Filter the formats
        for format in map_formats:
            # Build the query
            query_params = collections.OrderedDict()
            query_params['res_format'] = format.upper()
            final_url = shared.build_query_html(sub_url, query_params)
            # Load the sub result
            sub_soup = shared.soup_it_up(final_url)
            # Find out the page count
            page_count = shared.get_page_count(sub_soup, 'pagination', 'li')
            #print "Page Count: " + str(page_count)
            for page in range(0, page_count):
                # Load the current page (page 1 was already fetched above)
                if page > 0:
                    page_url = "%s?page=%s" % (final_url, page + 1)
                    sub_soup = shared.soup_it_up(page_url)
                # Open the link
                results = sub_soup.find_all('h3', attrs={'class': 'dataset-heading'})
                for res in results:
                    rec_dict = collections.OrderedDict((k, "") for k in field_names)
                    res_link = shared.get_link(res)
                    res_url = urlparse.urljoin(final_url, res_link)
                    res_soup = shared.soup_it_up(res_url)
                    # Get the title
                    div = res_soup.find('div', attrs={'class': 'module-content'})
                    h1 = div.find('h1')
                    title = shared.get_text(h1)
                    # Get the description
                    notes_div = div.find('div', attrs={'class': 'notes'})
                    desc = shared.get_text(notes_div)
                    # Get the available formats ('data' spans are REST endpoints)
                    spans = div.find_all('span', attrs={'class': 'format-label'})
                    #print spans
                    formats = []
                    for span in spans:
                        format = span['data-format']
                        if format == 'data':
                            format = "REST"
                        else:
                            format = format.upper()
                        formats.append(format)
                    #print formats
                    formats_str = '|'.join(formats)
                    # Get the date
                    date = shared.get_text_by_label(res_soup, 'th', "Last Updated")
                    # Get the data url
                    dataset_url = shared.get_text_by_label(res_soup, 'th', "Source")
                    # ['Title', 'Description', 'Available Formats', 'Date', 'URL']
                    rec_dict['Title'] = title
                    rec_dict['Available Formats'] = formats_str
                    rec_dict['Description'] = shared.edit_description(desc)
                    rec_dict['Date'] = date
                    rec_dict['URL'] = dataset_url
                    # Only write each dataset record once
                    if not rec_dict in records:
                        records.append(rec_dict)
                        my_csv.write_dataset(rec_dict)
    ############################################################################
    # Extract from Regina Mapservers
    rest_url = "https://opengis.regina.ca/arcgis/rest/services"
    # csv_fn = "Regina_REST_results"
    # field_names = ['Title', 'Type', 'Description', 'URL']
    # my_csv = shared.MyCSV(csv_fn, root_url, province, header=field_names)
    # my_csv.open_csv()
    my_rest = rest.MyREST(rest_url)
    services = my_rest.get_services()
    # Append one row per ArcGIS REST service to the same CSV.
    for service in services:
        rec_dict = collections.OrderedDict((k, "") for k in field_names)
        rec_dict['Title'] = service['name']
        #rec_dict['Type'] = service['type']
        if 'serviceDescription' in service:
            rec_dict['Description'] = shared.edit_description(service['serviceDescription'], 'span')
        rec_dict['URL'] = service['url']
        my_csv.write_dataset(rec_dict)
    my_csv.remove_duplicates('URL')
    #answer = raw_input("Press enter...")
    my_csv.close_csv()
def extract_saskatoon():
######################################################################################
# Extract from Saskatoon's Open Catalogue
saskatoon_url = site_list['saskatoon'][1]
query_url = "http://opendata-saskatoon.cloudapp.net:8080/v1/SaskatoonOpenDataCatalogueBeta"
csv_fn = "Saskatoon_2_results"
field_names = ['Title', 'Description', 'Available Formats', 'Date', 'Metadata URL', 'Reference URL', 'URL']
my_csv = shared.MyCSV(csv_fn, saskatoon_url, province, field_names)
my_csv.open_csv()
xml_soup = shared.get_xml_soup(query_url)
colls = xml_soup.find_all('collection')
# FOR DEBUGGING
get_catalogue = False
#print collections
if get_catalogue:
for coll in colls:
rec_dict = collections.OrderedDict((k, "") for k in field_names)
# Build query URL
base_url = "%s/%s" % (saskatoon_url, coll['href'])
#print base_url
# Get the Soup
coll_soup = shared.soup_it_up(base_url)
#print coll_soup
title = shared.get_text_by_label(coll_soup, 'td', "Dataset name")
description = shared.get_text_by_label(coll_soup, 'td', "Description")
date = shared.get_text_by_label(coll_soup, 'td', "Last Updated Date")
ref_url = shared.get_text_by_label(coll_soup, 'td', "Links and references")
mdata_url = shared.get_text_by_label(coll_soup, 'td', "Metadata Url")
# Get the available formats
form_soup = coll_soup.find('select', attrs={'id': 'eidDownloadType'})
opts = form_soup.find_all('option')
formats = []
for opt in opts:
formats.append(opt.string)
formats_str = '|'.join(formats)
# Wednesday, April 04, 2018
date_obj = time.strptime(date, "%A, %B %d, %Y")
form_date = time.strftime("%Y-%m-%d", date_obj)
#print title
#print form_date
# ['Title', 'Description', 'Available Formats' 'Date', 'Metadata URL', 'Reference URL', 'URL']
rec_dict['Title'] = title
rec_dict['Description'] = shared.edit_description(description)
rec_dict['Date'] = form_date
rec_dict['URL'] = base_url
rec_dict['Reference URL'] = ref_url
rec_dict['Metadata URL'] = mdata_url
rec_dict['Available Formats'] = formats_str
my_csv.write_dataset(rec_dict)
######################################################################################
# Extract from Saskatoon's Address Map
if get_catalogue:
map_url = "https://www.arcgis.com/apps/View/index.html?appid=2199c6ba701148d58c24fcb82d4e7d8e"
#service_url = "https://www.arcgis.com/sharing/rest/content/items/2199c6ba701148d58c24fcb82d4e7d8e"
my_csv.write_line("\nSaskatoon's Address Map")
my_csv.write_url(map_url, "Map URL")
#my_csv.write_url(service_url, "Service URL")
field_names = ['Title', 'Type', 'Date', 'Data URL']
my_csv.write_header(field_names)
net_traffic = shared.get_network_traffic(map_url, ('class', 'esriAttribution'))
#d_f = open("json_test.txt", "w")
#d_f.write(net_traffic)
#d_f.close()
#print net_traffic[0]
log = net_traffic['log']
entries = log['entries']
json_urls = []
for entry in entries:
add_item = True
request_url = entry['request']['url']
if request_url.find("json") > -1 and request_url.find("items") > -1:
# Check for a duplicate already in json_urls
if len(json_urls) == 0:
json_urls.append(request_url)
for url in json_urls:
parse_url = shared.parse_query_url(url)
parse_request = shared.parse_query_url(request_url)
if parse_url[:8] == parse_request[:8]:
#print parse_url[:8]
#print parse_request[:8]
add_item = False
if add_item:
json_urls.append(request_url)
print "Number of JSON requests: " + str(len(json_urls))
#for request in json_urls:
# print request['request']['url']
#soup = shared.soup_it_up(map_url)
for url in json_urls:
#json_url = url['request']['url']
json_text = shared.get_json(url)
rec_dict = collections.OrderedDict((k, "") for k in field_names)
if 'title' in json_text: rec_dict['Title'] = json_text['title']
if 'type' in json_text: rec_dict['Type'] = json_text['type']
#if 'description' in json_text: rec_dict['Description'] = json_text['description']
if 'modified' in json_text: rec_dict['Date'] = shared.translate_date(json_text['modified'])
rec_dict['Data URL'] = url
my_csv.write_dataset(rec_dict)
######################################################################################
# Extract from Saskatoon Zoning Address Map
if get_catalogue:
map_url = "https://www.arcgis.com/apps/View/index.html?appid=2ca06c6cfdef47bbbb9876a02fa1dffe"
#service_url = "https://www.arcgis.com/sharing/rest/content/items/2ca06c6cfdef47bbbb9876a02fa1dffe"
my_csv.write_line("\nSaskatoon Zoning Address Map")
my_csv.write_url(map_url, "Map URL")
#my_csv.write_url(service_url, "Service URL")
field_names = ['Title', 'Type', 'Date', 'Data URL']
my_csv.write_header(field_names)
net_traffic = shared.get_network_traffic(map_url, ('class', 'esriAttribution'))
#d_f = open("json_test.txt", "w")
#d_f.write(net_traffic)
#d_f.close()
#print net_traffic[0]
log = net_traffic['log']
entries = log['entries']
json_urls = []
for entry in entries:
add_item = True
request_url = entry['request']['url']
if request_url.find("json") > -1 and request_url.find("items") > -1:
# Check for a duplicate already in json_urls
if len(json_urls) == 0:
json_urls.append(request_url)
for url in json_urls:
parse_url = shared.parse_query_url(url)
parse_request = shared.parse_query_url(request_url)
if parse_url[:8] == parse_request[:8]:
#print parse_url[:8]
#print parse_request[:8]
add_item = False
if add_item:
json_urls.append(request_url)
print "Number of JSON requests: " + str(len(json_urls))
for url in json_urls:
#json_url = url['request']['url']
json_text = shared.get_json(url)
rec_dict = collections.OrderedDict((k, "") for k in field_names)
if 'title' in json_text: rec_dict['Title'] = json_text['title']
if 'type' in json_text: rec_dict['Type'] = json_text['type']
#if 'description' in json_text: rec_dict['Description'] = json_text['description']
if 'modified' in json_text: rec_dict['Date'] = shared.translate_date(json_text['modified'])
rec_dict['Data URL'] = url
my_csv.write_dataset(rec_dict)
######################################################################################
# Extract from Saskatoon Snow Grading Map
map_url = "http://apps2.saskatoon.ca/app/aSnowProgram/"
rest_url = "http://rpbackgis2.saskatoon.ca/ArcGIS/rest/services"
my_csv.write_line("\nSaskatoon Snow Grading Map")
my_csv.write_url(map_url, "Map URL")
my_csv.write_url(rest_url, "ArcGIS REST URL")
field_names = ['Title', 'Type', 'Description', 'URL']
my_csv.write_header(field_names)
my_rest = rest.MyREST(rest_url)
services = my_rest.get_services()
for service in services:
rec_dict = collections.OrderedDict((k, "") for k in field_names)
rec_dict['Title'] = service['name']
rec_dict['Type'] = service['type']
rec_dict['Description'] = shared.edit_description(service['serviceDescription'], 'span')
rec_dict['URL'] = service['url']
my_csv.write_dataset(rec_dict)
my_csv.close_csv()
def main():
    """Parse CLI arguments and dispatch the extractor for the requested city.

    Prompts interactively (raw_input) for any missing city/word/category
    unless --silent was given. Dispatch is done by eval'ing the function
    name stored in site_list (Python 2 script).
    """
    #city_list = ['Winnipeg', 'Brandon']
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--city", help="The city to extract: %s" % ', '.join(site_list.keys()))
    parser.add_argument("-w", "--word", help="The key word(s) to search for.")
    #parser.add_argument("-f", "--format", help="The format(s) to search for.")
    parser.add_argument("-a", "--category", help="The category to search for.")
    #parser.add_argument("-d", "--downloadable", help="Determines wheter to get only downloadable datasets.")
    #parser.add_argument("-l", "--html", help="The HTML file to scrape (only for OpenData website).")
    parser.add_argument("-s", "--silent", action='store_true', help="If used, no extra parameters will be queried.")
    args = parser.parse_args()
    #print args.echo
    #print "province: " + str(args.province)
    #print "format: " + str(args.format)
    city = args.city
    word = args.word
    #formats = args.format
    #html = args.html
    silent = args.silent
    cat = args.category
    #downloadable = args.downloadable
    # City is mandatory: prompt once, then abort if still empty.
    if city is None:
        answer = raw_input("Please enter the city you would like to extract (%s): " % ', '.join(site_list.keys()))
        if not answer == "":
            city = answer.lower()
        else:
            print "\nERROR: Please specify a city."
            print "Exiting process."
            sys.exit(1)
    # Optional filters: only prompted for in interactive (non-silent) mode.
    if word is None and not silent:
        answer = raw_input("Please enter the word you would like to search: ")
        if not answer == "":
            word = answer.lower()
    if cat is None and not silent:
        answer = raw_input("Please enter the category you would like to search: ")
        if not answer == "":
            cat = answer.lower()
    # "all" runs every registered extractor; otherwise run the one for the city.
    # NOTE(review): dispatch uses eval() on strings from site_list — assumed
    # trusted, module-local data; confirm site_list is never user-supplied.
    if city == "all":
        for key, site in site_list.items():
            eval(site_list[key][0])
    else:
        if city in site_list.keys():
            eval(site_list[city][0])
        else:
            print "\nERROR: Invalid city '%s'. Please enter one of the following: %s" % (city, ', '.join(site_list.keys()))
            print "Exiting process."
            sys.exit(1)
    #geoportal_list = extract_geoportal(province)
if __name__ == '__main__':
sys.exit(main()) | StarcoderdataPython |
3397201 | <reponame>lanl/NEXMD
#/usr/bin/python
'''
This function collects timings from all trajectories.
If this function is requested, the timings located at the end of the
standard output files (i.e. md.out) are outputted to a file called
'timing.out'. The first column is directory of the trajectory,
followed by its total CPU time, and timings for the ground state,
excited states, adiabatic forces, and non-adiabatic derivative coupling,
respectively. These timings, averaged over all trajectories, are also
printed to screen when this function is executed. An error file called
'timing.err' will be generated if any problems occur such as
non-existent or incomplete files.
'''
import numpy as np
import os
import sys
import subprocess
import shlex
import glob
cwd = os.getcwd()
def timing(pathtotime):
    """Collect per-trajectory timings from NEXMD standard-output files.

    Runs collectime.sh in every trajectory directory to produce a local
    timing.out, validates it, then aggregates all timings into a single
    'timing.out' (one row per trajectory) and prints the mean timings.
    Problems are logged to 'timing.err'. (Python 2 script.)

    pathtotime -- path containing the getexcited_package with collectime.sh
    """
    print 'Collecting timings from trajectories.'
    ## Directory names ##
    NEXMDir = raw_input('NEXMD directory: ')
    if not os.path.exists(NEXMDir):
        print 'Path %s does not exist.' % (NEXMDir)
        sys.exit()
    ## Collect and check timings ##
    print 'Collecting timings. please wait ...'
    if not os.path.exists('%s/getexcited_package/collectime.sh' % (pathtotime)):
        print 'The script, collectime.sh, must be in the getexcited_package.'
        sys.exit()
    NEXMDs = glob.glob('%s/NEXMD*/' % (NEXMDir))
    NEXMDs.sort()
    if len(NEXMDs) == 0:
        print 'There are no NEXMD folders in %s.' % (NEXMDir)
        sys.exit()
    error = open('%s/timing.err' % (cwd),'w')
    errflag = 0
    # First pass: run collectime.sh in each trajectory dir and sanity-check
    # the resulting timing.out (6 lines, starting with 'MD total CPU time').
    for NEXMD in NEXMDs:
        if not os.path.exists('%s/%s/dirlist1' % (cwd,NEXMD)):
            print 'Path %s/%sdirlist1 does not exist.' % (cwd,NEXMD)
            sys.exit()
        dirlist1 = np.int_(np.genfromtxt('%s/%s/dirlist1' % (cwd,NEXMD)))
        # genfromtxt returns a scalar for a single-entry dirlist; normalize.
        if isinstance(dirlist1,int) == True:
            dirlist1 = np.array([dirlist1])
        for dir in dirlist1:
            if not os.path.exists('%s/%s/%04d' % (cwd,NEXMD,dir)):
                print >> error, '%s%04d' % (NEXMD,dir), 'does not exist'
                errflag = 1
                continue
            os.chdir('%s/%s/%04d' % (cwd,NEXMD,dir))
            if not os.path.exists('%s/%s/%04d/md.out' % (cwd,NEXMD,dir)):
                print >> error, '%s%04d/md.out' % (NEXMD,dir), 'does not exist'
                errflag = 1
                continue
            # collectime.sh scrapes the timing table from md.out into timing.out
            subprocess.call(shlex.split('sh %s/getexcited_package/collectime.sh' % (pathtotime)))
            if not os.path.exists('%s/%s/%04d/timing.out' % (cwd,NEXMD,dir)):
                print >> error, '%s/%04d/timing.out' % (NEXMD,dir), 'does not exist'
                errflag = 1
                continue
            with open('%s/%s/%04d/timing.out' % (cwd,NEXMD,dir),'r') as data:
                data = data.readlines()
            if len(data) != 6 or 'MD total CPU time' not in data[0]:
                print >> error, '%s%04d/timing.out' % (NEXMD,dir), 'is incomplete'
                errflag = 1
            print '%s%04d' % (NEXMD,dir)
    if errflag == 1:
        print 'One or more trajectories did not finish, check timing.err.'
        contq = input('Continue? Answer yes [1] or no [0]: ')
        if contq not in [1,0]:
            print 'Answer must be 1 or 0.'
            sys.exit()
        if contq == 0:
            sys.exit()
    else:
        os.remove('%s/timing.err' % (cwd))
    ## Extract and combine timings ##
    print ' Collecting time....'
    timing = open('%s/timing.out' % (cwd),'w')
    times = np.zeros(5) ## change 5 to 1 for old code (NAESMD)
    traj = 0
    # Second pass: parse each trajectory's timing.out, accumulate totals,
    # and write one combined row per trajectory to the global timing.out.
    for NEXMD in NEXMDs:
        dirlist1 = np.int_(np.genfromtxt('%s/%s/dirlist1' % (cwd,NEXMD)))
        if isinstance(dirlist1,int) == True:
            dirlist1 = np.array([dirlist1])
        for dir in dirlist1:
            try:
                data = open('%s/%s/%04d/timing.out' % (cwd,NEXMD,dir),'r')
            except:
                print ('%s/%s/%04d/timing.out does not exist' % (cwd,NEXMD,dir))
                continue
            data = data.readlines()
            data = np.delete(data, (1), axis = 0) ## comment out for old code (NAESMD)
            tarray = np.array([])
            index = 0
            fail = 0
            for line in data:
                val = line.split()
                if index == 0:
                    try:
                        # First line: total CPU time is the 6th whitespace token.
                        tarray = np.append(tarray, np.float(val[5]))
                    except:
                        print (' %s/%s/%04d did not finish' %(cwd,NEXMD,dir))
                        fail = 1
                        # Cancels the unconditional traj += 1 below, so failed
                        # trajectories are not counted in the averages.
                        traj -= 1
                        break
                else: ## comment out for old code (NAESMD)
                    tarray = np.append(tarray, np.float(val[0]))
                index += 1
            if fail == 0:
                times += tarray
                print >> timing, '%s%04d' % (NEXMD,dir), ' '.join(str('%06d' % (x)) for x in tarray)
                os.remove('%s/%s/%04d/timing.out' % (cwd,NEXMD,dir))
                print '%s%04d' % (NEXMD,dir)
            traj += 1
    times = times/traj
    print'Total number of trajectories: %d' %(traj)
    print 'Mean total cpu [s]:', '%06d' % (times[0])
    ## comment all below for old code (NAESMD) ##
    print 'Mean ground state [s]:', '%06d' % (times[1])
    print 'Mean excited states [s]:', '%06d' % (times[2])
    print 'Mean adiabatic forces [s]:', '%06d' % (times[3])
    print 'Mean non-adiabatic derivatives [s]:', '%06d' % (times[4])
| StarcoderdataPython |
5108981 | #!/usr/bin/env python
# encoding: utf-8
"""
Create an AmbiverseNLU repository for the most recent Wikipedia dump versions for the specified languages
-----
# Requires:
* Java 8
* Maven 3
* Python 3
* Postgres 9 running
* (optionally Cassandra 3 running for building)
-----
# How to run it: Check out yago3 next to ambiverse-nlu
* git clone https://github.com/yago-naga/yago3.git
# Copy over the yago_aida.ini from the this repository to yago3
* cp $WORKSPACE/ambiverse-nlu/scripts/repository_creation/yago_aida.ini $WORKSPACE/yago3/configuration/yago_aida.ini
# Run the script
* python3 -u ambiverse-nlu/scripts/repository_creation/createAidaRepository.py -s 20181109 -d /workspace/entity_linking_repository_creation/tmp_dumps/ -b /workspace/entity_linking_repository_creation/basics3/ -y /workspace/entity_linking_repository_creation/yago3/ -i /workspace/entity_linking_repository_creation/tmp_yago/ -a /workspace/entity_linking_repository_creation/ambiverse-nlu/ -t /workspace/entity_linking_repository_creation/tmp -l en -l de --db-server postgres.local --db-username **** --db-password **** -c cassandra-1.local:9042 -c cassanda-2.local:9042 --yago-ini=yago_aida.ini -o /repository/dumps --reuse-yago --include-concepts --run-neo4j --neo4j-file-dir=/workspace/entity_linking_repository_creation/tmp_neo4j/
-----
Usage:
  createAidaRepository.py -d TARGET_DIR -b BASICS_DIST_DIR -y YAGO_DIST_DIR -i YAGO_INDEX_DIR -a AIDA_DIST_DIR -t AIDA_TEMP_DIR (-l LANGUAGE ...) [(--date=DATE ...)] [--wikidata-date=WIKIDATA_DATE] [-s START_DATE] [--reuse-yago] --db-server=DB_SERVER --db-username=DB_USERNAME --db-password=DB_PASSWORD -o DUMPS_OUTPUT_DIR (-c CASSANDRA_HOST ...) [--yago-ini=YAGO_INI] [--skip-aida] [--include-concepts] [--run-neo4j] [--neo4j-file-dir=NEO4J_FILE_DIR] [--neo4j-server=NEO4J_SERVER] [--neo4j-ssh-username=SSH_USERNAME] [--neo4j-ssh-password=SSH_PASSWORD] [--neo4j-destination-dir=NEO4J_DESTINATION_DIR] [--neo4j-import-server-dir=NEO4j_IMPORT_SERVER_DIR] [--stages=STAGES] [--subgraph-entities=SUBGRAPH_ENTITIES] [--subgraph-classes=SUBGRAPH_CLASSES]
Options:
-d TARGET_DIR --target-dir=TARGET_DIR directory to store the Wikipedia and Wikidata dumps
-b BASICS_DIST_DIR --basics-dist-dir=BASICS_DIST_DIR directory of the BASICS3 distribution
-y YAGO_DIST_DIR --yago-dist-dir=YAGO_DIST_DIR directory of the YAGO3 distribution
-i YAGO_INDEX_DIR --yago-index-dir=YAGO_INDEX_DIR directory where to store the generated YAGO3 index
-a AIDA_DIST_DIR --aida-dist-dir=AIDA_DIST_DIR directory of the AIDA distribution
-t AIDA_TEMP_DIR --aida-temp-dir=AIDA_TEMP_DIR directory that holds some temporary AIDA output files
-l LANGUAGE --language=LANGUAGE Wikipedia dump language
-o DUMPS_OUTPUT_DIR --dumps-output-dir=DUMPS_OUTPUT_DIR the output folder for persistently stored dumps
--date=DATE Date of the Wikipedia dump
--wikidata-date=WIKIDATA_DATE Date of the Wikidata dump
-s START_DATE --start-date=START_DATE Date from where the search for dumps starts backwards in time (default: today())
--db-server=DB_SERVER database server that holds the AIDA database
--db-username=DB_USERNAME username to use with the AIDA database
--db-password=DB_PASSWORD password to use with the AIDA database
-c CASSANDRA_HOST --cassandra-host=CASSANDRA_HOST Cassandra host that the data will be populated to
--reuse-yago Flag to set reuse yago output to true
--yago-ini=YAGO_INI the YAGO configuration file to load [default: yago_aida.ini]
--skip-aida does not run the AIDA process
--include-concepts include concepts in the YAGO
--run-neo4j run neo4j extractor and import
--neo4j-file-dir=NEO4J_FILE_DIR directory where to store the generated Neo4j import files
--neo4j-server=NEO4J_SERVER neo4j database server name (default=hard)
--neo4j-ssh-username=SSH_USERNAME username to use for ssh copy
  --neo4j-ssh-password=SSH_PASSWORD                   password to use for ssh copy
--neo4j-destination-dir=NEO4J_DESTINATION_DIR neo4j destination path on the server
--neo4j-import-server-dir=NEO4j_IMPORT_SERVER_DIR neo4j server path that is used for import script
--stages=STAGES stages to run (override preconfigured stages)
--subgraph-entities=SUBGRAPH_ENTITIES sets subgraphEntities in the yago.ini, restricting the entities in YAGO
--subgraph-classes=SUBGRAPH_CLASSES sets subgraphClasses in the yago.ini, restricting the entities in YAGO
"""
import fileinput
import hashlib
import os
import paramiko
import re
import shutil
import subprocess
import sys
from datetime import datetime
from git import Repo
from paramiko import SSHClient
from scp import SCPClient
from subprocess import PIPE, STDOUT
from docopt import docopt
# Constants
YAGO3_DOWNLOADDUMPS_SCRIPT = 'scripts/dumps/downloadDumps.py'
AIDA_RUN_DATA_PREPARATION_SCRIPT = 'scripts/preparation/run_data_preparation.sh'
AIDA_PERSISTENTLY_STORE_DUMPS_SCRIPT = 'scripts/repository_creation/persistentlyStoreDumps.py'
YAGO3_CONFIGURATION_SUBDIR = 'configuration'
YAGO3_CONFIGURATION_TEMPLATE = 'yago_aida.ini'
YAGO3_TMP_CONFIGURATION = 'yago_aida_createAidaRepositoryConfiguration.ini'
YAGO3_ADAPTED_CONFIGURATION_EXTENSION = '.adapted.ini'
YAGO3_LANGUAGES_PROPERTY = 'languages'
YAGO3_YAGOFOLDER_PROPERTY = 'yagoFolder'
YAGO3_NEO4JFOLDER_PROPERTY = 'neo4jFolder'
YAGO3_REUSE_YAGO_PROPERTY = 'reuse'
YAGO3_DUMPSFOLDER_PROPERTY = 'dumpsFolder'
YAGO3_WIKIPEDIAS_PROPERTY = 'wikipedias'
YAGO3_INCLUCE_CONCEPTS_PROPERTY = 'includeConcepts'
YAGO3_EXTRACTORS_PROPERTY = 'extractors'
YAGO3_NEO4J_EXTRACTOR = 'deduplicators.Neo4jThemeTransformer,'
YAGO3_SUBGRAPH_ENTITIES = 'subgraphEntities'
YAGO3_SUBGRAPH_CLASSES = 'subgraphClasses'
NEO4J_IMPORT_SCRIPT_FILE_NAME = 'import_script.txt'
NEO4J_SCRIPT_NEO4J_FOLDER_PLACEHOLDER = 'YAGOOUTPUTPATH'
NEO4J_SOURCE_SERVER_PATH = '/local_san2/tmp/neo4j-enterprise-3.4.0/' #This is hard coded and is on badr. Should it be inputed?
NEO4J_DESTINATION_DIR_PATH = '/var/tmp/neo4j/'
NEO4J_DESTINATION_SERVER_NAME = 'hard'
NEO4J_GRAPH_NAME_FILE = 'graphNameFile'
AIDA_CONFIGURATION_DIR = 'src/main/config'
AIDA_CONFIGURATION_TEMPLATE_DIR = 'default'
AIDA_CONFIGURATION_TMP_DIR = 'default_tmp'
AIDA_CONFIGURATION_STAGES = 'MINIMAL,CASSANDRA_CREATION'
AIDA_CONCEPT_CONFIGURATION_STAGES = 'MINIMAL,CONCEPT_CATEGORIES,CASSANDRA_CREATION'
AIDA_DATABASE_NAME_PREFIX = 'aida_'
AIDA_DATABASE_SCHEMA_VERSION = '_v18'
AIDA_PROPERTIES_FILE = 'aida.properties'
AIDA_DATABASE_AIDA_PROPERTIES_FILE = 'database_aida.properties'
AIDA_CASSANDRA_PROPERTIES_FILE = 'cassandra.properties'
AIDA_PREPARATION_PROPERTIES_FILE = 'preparation.properties'
AIDA_NER_PROPERTIES_FILE = 'ner.properties'
AIDA_SERVER_NAME_PROPERTY = 'dataSource.serverName'
AIDA_DATABASE_NAME_PROPERTY = 'dataSource.databaseName'
AIDA_USER_PROPERTY = 'dataSource.user'
AIDA_PASSWORD_PROPERTY = '<PASSWORD>'
AIDA_LANGUAGES_PROPERTY = 'languages'
AIDA_NER_LANGUAGES_PROPERTY = 'languages'
AIDA_TARGET_LANGUAGES_PROPERTY = 'target.languages'
AIDA_HOST_PROPERTY = 'host'
AIDA_KEYSPACE_PROPERTY = 'keyspace'
AIDA_REPLICATION_FACTOR_PROPERTY = 'replication.factor'
AIDA_LOAD_MODELS_FORM_CLASS_PATH = 'load_models_from_classpath'
AIDA_PREPARATION_CONFIG_YAGO3_FILE = 'yago3.file'
class Usage(Exception):
    """Raised for command-line usage errors; carries the explanatory message.

    FIX: previously Exception.__init__ was never called, so str(exc) and
    exc.args were empty; the .msg attribute is kept for existing callers.
    """

    def __init__(self, msg):
        super().__init__(msg)  # populate args so str(exc) shows the message
        self.msg = msg
def main(argv=None):
    """Drive the end-to-end repository build.

    Steps: adapt the YAGO3 ini, download Wikipedia/Wikidata dumps, archive
    them, run YAGO3, then (unless --skip-aida) build and persist the AIDA
    repository. Relies on module-level globals parsed from the docopt
    options. *argv* is accepted for interface compatibility but unused.

    FIX: dropped the original ``try: ... except: raise`` wrapper — a bare
    re-raise is a no-op and only obscured tracebacks in review.
    """
    print("Adapting the YAGO3 configuration...")
    adaptYagoConfiguration()  # Initial adaptation of YAGO configuration (languages, dumpsFolder)
    print("Downloading dump(s)...")
    downloadDumps()  # Adaptation of YAGO configuration (dumps locations)
    print("Loading YAGO output folder from configuration file...")
    yagoFolder, neo4jFolder = loadYagoFolderFromAdaptedConfiguration()  # Adaptation of YAGO configuration (yagoFolder, neo4jFolder)
    print("Persistently storing dumps...")
    persistentlyStoreDumps()
    print("Running YAGO3...")
    runYago()
    if skipAida is False:
        print("Adapting the AIDA configuration...")
        dbName = adaptAidaConfiguration(yagoFolder)
        print("Running AIDA...")
        runAida()
        print("Persisting the AIDA configuration...")
        aidaConfigTargetDir = persistAidaConfiguration(dbName)
        print("Repository creation finished. The AIDA config has been added to the Git repository as '{}'".format(os.path.basename(aidaConfigTargetDir)))
    #if run_neo4j:
    #    graphDBname = importYagoToNeo4j(neo4jFolder)
    #    copyGraphToDestination(graphDBname)
def execute(cmd, customEnv=None):
    """Run *cmd*, streaming its combined stdout/stderr to our stdout.

    cmd       -- argument list for subprocess.Popen (no shell).
    customEnv -- optional environment mapping for the child process.
    Raises subprocess.CalledProcessError on a non-zero exit code.
    """
    process = subprocess.Popen(cmd, stdout=PIPE, stderr=STDOUT,
                               universal_newlines=True, env=customEnv)
    for line in iter(process.stdout.readline, ""):
        # FIX: each line already ends with '\n'; the original `print(line,)`
        # (a 2to3 leftover) added a second newline and double-spaced the log.
        print(line, end="")
    process.stdout.close()
    return_code = process.wait()
    if return_code:
        raise subprocess.CalledProcessError(return_code, cmd)
"""
Invokes the external shell script for downloading and extracting the Wikipedia dumps
"""
def downloadDumps():
os.chdir(os.path.abspath(yagoDistDir))
processCall = ['python3', '-u', os.path.join(os.path.abspath(yagoDistDir), YAGO3_DOWNLOADDUMPS_SCRIPT),
'-y', os.path.join(os.path.abspath(yagoDistDir), YAGO3_CONFIGURATION_SUBDIR, YAGO3_TMP_CONFIGURATION),
'-s', startDate.strftime("%Y%m%d")]
if dates:
for date in dates:
processCall.append("--date=" + date)
if options['--wikidata-date']:
processCall.append("--wikidata-date=" + options['--wikidata-date'])
print(processCall)
execute(processCall)
"""
Duplicates the YAGO3 template ini file and adapts the properties as necessary
"""
def adaptYagoConfiguration():
    """Copy the YAGO3 template ini to a working copy and rewrite its properties.

    Sets languages, reuse flag, dumps folder and includeConcepts from the CLI
    globals; if Neo4j output is requested, prepends the Neo4j extractor to the
    extractor list. Properties missing from the template are appended at the end.
    """
    configDir = os.path.abspath(yagoDistDir)
    iniFile = os.path.join(configDir, YAGO3_CONFIGURATION_SUBDIR, YAGO3_TMP_CONFIGURATION)
    print ("Using YAGO3 configuration template:", yagoConfigurationTemplate)
    shutil.copy(
        os.path.join(configDir, YAGO3_CONFIGURATION_SUBDIR, yagoConfigurationTemplate),
        iniFile)
    # Track which properties were found in the template so missing ones can be appended.
    dumpsFolderDone = False
    includeConceptsDone = False
    # fileinput with inplace=1 redirects stdout into the file: every line must
    # be written back via sys.stdout.write or it is silently dropped.
    for line in fileinput.input(iniFile, inplace=1):
        if re.match('^' + YAGO3_LANGUAGES_PROPERTY + '\s*=', line):
            line = YAGO3_LANGUAGES_PROPERTY + ' = ' + ','.join(languages) + '\n'
        elif re.match('^' + YAGO3_REUSE_YAGO_PROPERTY + '\s*=', line):
            line = YAGO3_REUSE_YAGO_PROPERTY + ' = ' + str(reuse_yago) + '\n'
        elif re.match('^' + YAGO3_DUMPSFOLDER_PROPERTY + '\s*=', line):
            line = YAGO3_DUMPSFOLDER_PROPERTY + ' = ' + targetDir + '\n'
            dumpsFolderDone = True
        elif re.match('^' + YAGO3_INCLUCE_CONCEPTS_PROPERTY + '\s*=', line):
            line = YAGO3_INCLUCE_CONCEPTS_PROPERTY + ' = ' + str(include_concepts) + '\n'
            includeConceptsDone = True
        elif re.match('^' + YAGO3_EXTRACTORS_PROPERTY + '\s*=', line):
            if run_neo4j is True:
                # Prepend the Neo4j extractor: "extractors=<neo4j>,\n<rest>".
                temp = re.sub(r'\s', '', line).split("=")
                line = temp[0] + "=" + YAGO3_NEO4J_EXTRACTOR + '\n' + temp[1] + '\n'
        # Write the (possibly modified) line back to the configuration file
        sys.stdout.write(line)
    # If the values couldn't be replaced because the property wasn't in the configuration yet, add it.
    with open(iniFile, "a") as configFile:
        if dumpsFolderDone == False:
            configFile.write('\n' + YAGO3_DUMPSFOLDER_PROPERTY + ' = ' + targetDir + '\n')
        if includeConceptsDone == False and include_concepts == True:
            configFile.write('\n' + YAGO3_INCLUCE_CONCEPTS_PROPERTY + ' = ' + str(include_concepts) + '\n')
        # Append new subgraph config if needed
        if yagoSubgraphEntities:
            configFile.write(YAGO3_SUBGRAPH_ENTITIES + "=" + yagoSubgraphEntities)
        if yagoSubgraphClasses:
            configFile.write(YAGO3_SUBGRAPH_CLASSES + "=" + yagoSubgraphClasses)
def loadYagoFolderFromAdaptedConfiguration():
    """Derive yagoFolder (and neo4jFolder) from the adapted YAGO3 ini.

    Reads the 'wikipedias' property, builds folder names of the form
    yago_aida_<lang><date>_..., appends an md5 suffix when subgraph
    restrictions are active, creates the folders, strips any existing
    yagoFolder/neo4jFolder lines and appends the freshly computed ones.
    Returns (yagoFolder, neo4jFolder); neo4jFolder is '' when Neo4j is off.

    NOTE(review): yagoFolder/neo4jFolder are only bound inside the
    'wikipedias' branch — if the adapted ini lacks that property, the
    return statement raises NameError. Assumed guaranteed upstream; confirm.
    """
    yagoAdaptedConfigurationFile = os.path.join(os.path.abspath(yagoDistDir), YAGO3_CONFIGURATION_SUBDIR, YAGO3_TMP_CONFIGURATION + YAGO3_ADAPTED_CONFIGURATION_EXTENSION)
    for line in fileinput.input(yagoAdaptedConfigurationFile, inplace=1):
        if re.match('^' + YAGO3_WIKIPEDIAS_PROPERTY + '\s*=', line):
            wikipedias = re.sub(r'\s', '', line).split("=")[1].split(",")
            wikipediasFriendly = []
            for wikipedia in wikipedias:
                # Fixed negative offsets slice "<lang>" and "<date>" out of the
                # dump path; presumably paths end like .../enwiki-20181109-pages...
                # — TODO confirm the path layout this indexing relies on.
                wikipediaLanguage = wikipedia[-34:-32]
                wikipediaDate = wikipedia[-27:-19]
                wikipediasFriendly.append(wikipediaLanguage + str(wikipediaDate))
            yagoFolder = os.path.join(yagoIndexDir, "yago_aida_" + "_".join(wikipediasFriendly))
            # If YAGO is built as a subgraph, make sure that the dir is unique for that.
            yagoDirSuffix = ""
            if yagoSubgraphEntities:
                yagoDirSuffix += "_" + hashlib.md5(yagoSubgraphEntities.encode('utf-8')).hexdigest()
            if yagoSubgraphClasses:
                yagoDirSuffix += "_" + hashlib.md5(yagoSubgraphClasses.encode('utf-8')).hexdigest()
            if yagoDirSuffix:
                yagoFolder = yagoFolder + yagoDirSuffix
            neo4jFolder = ''
            if run_neo4j:
                neo4jFolder = os.path.join(neo4jFileDir, "yago_aida_" + "_".join(wikipediasFriendly))
                if yagoDirSuffix:
                    neo4jFolder = neo4jFolder + yagoDirSuffix
                if not os.path.exists(neo4jFolder):
                    os.makedirs(neo4jFolder)
            # Make sure the yagoFolder directory is there
            print("Creating YAGO in: ", yagoFolder)
            if not os.path.exists(yagoFolder):
                os.makedirs(yagoFolder)
            sys.stdout.write(line)
        elif not (re.match('^' + YAGO3_YAGOFOLDER_PROPERTY + '\s*=', line) or re.match('^' + YAGO3_NEO4JFOLDER_PROPERTY + '\s*=', line)):
            sys.stdout.write(line)  # that way, remove YAGO folder from configuration file for now
    with open(yagoAdaptedConfigurationFile, "a") as configFile:
        configFile.write('\n' + YAGO3_YAGOFOLDER_PROPERTY + ' = ' + yagoFolder + '\n')
        if run_neo4j:
            configFile.write('\n' + YAGO3_NEO4JFOLDER_PROPERTY + ' = ' + neo4jFolder + '\n')
    return yagoFolder, neo4jFolder
"""
Runs YAGO3 with the adapted configuration file
"""
def runYago():
# Install most recent version of BASICS
os.chdir(basicsDistDir)
execute(['mvn', '-U', 'clean', 'verify', 'install'])
# Build and run YAGO
# Switch to YAGO dir
os.chdir(yagoDistDir)
# Give enough memory
myEnv = os.environ.copy()
myEnv['MAVEN_OPTS'] = '-Xmx400G'
execute(
['mvn', '-U', 'clean', 'verify', 'exec:java',
'-Dexec.args=' + YAGO3_CONFIGURATION_SUBDIR + '/' + YAGO3_TMP_CONFIGURATION + '.adapted.ini'],
myEnv)
"""
Duplicates the AIDA template config folder and adapts the properties as necessary
"""
def adaptAidaConfiguration(yagoFolder):
    """Clone the AIDA template config dir and rewrite it for this build.

    Adapts database (Postgres), aida, preparation, ner and cassandra
    properties from the CLI globals. The Cassandra keyspace is set to the
    same name as the SQL database. Returns the derived database name.

    yagoFolder -- YAGO output folder; its name encodes languages and dates.
    """
    aidaConfigDir = os.path.join(aidaDistDir, AIDA_CONFIGURATION_DIR)
    aidaConfigTemplateDir = os.path.join(aidaConfigDir, AIDA_CONFIGURATION_TEMPLATE_DIR)
    aidaConfigTmpDir = os.path.join(aidaConfigDir, AIDA_CONFIGURATION_TMP_DIR)
    # Make sure the new configuration folder doesn't exist yet. If it does, delete it.
    if os.path.exists(aidaConfigTmpDir):
        shutil.rmtree(aidaConfigTmpDir)
    # Copy over the template
    shutil.copytree(aidaConfigTemplateDir, aidaConfigTmpDir)
    # Adapt the database properties
    dbName = AIDA_DATABASE_NAME_PREFIX + getDatabaseId(yagoFolder) + AIDA_DATABASE_SCHEMA_VERSION
    dbPropertiesFile = os.path.join(aidaConfigTmpDir, AIDA_DATABASE_AIDA_PROPERTIES_FILE)
    # NOTE: fileinput inplace=1 redirects stdout into the file; every kept line
    # must be re-emitted with sys.stdout.write (same pattern in the loops below).
    for line in fileinput.input(dbPropertiesFile, inplace=1):
        if re.match('^' + AIDA_SERVER_NAME_PROPERTY + '\s*=', line):
            line = AIDA_SERVER_NAME_PROPERTY + ' = ' + dbServer + '\n'
        elif re.match('^' + AIDA_DATABASE_NAME_PROPERTY + '\s*=', line):
            line = AIDA_DATABASE_NAME_PROPERTY + ' = ' + dbName + '\n'
        elif re.match('^' + AIDA_USER_PROPERTY + '\s*=', line):
            line = AIDA_USER_PROPERTY + ' = ' + dbUsername + '\n'
        elif re.match('^' + AIDA_PASSWORD_PROPERTY + '\s*=', line):
            line = AIDA_PASSWORD_PROPERTY + ' = ' + dbPassword + '\n'
        sys.stdout.write(line)
    # Create a list of both three- and two-letter language codes
    doubleLangMappings = []
    for language in languages:
        doubleLangMappings.extend([getThreeLetterLanguageCode(language), language])
    # Adapt the aida properties
    aidaPropertiesFile = os.path.join(aidaConfigTmpDir, AIDA_PROPERTIES_FILE)
    for line in fileinput.input(aidaPropertiesFile, inplace=1):
        if re.match('^' + AIDA_LANGUAGES_PROPERTY + '\s*=', line):
            line = AIDA_LANGUAGES_PROPERTY + ' = ' + ','.join(languages) + '\n'
        sys.stdout.write(line)
    # Adapt the preparation properties
    prepPropertiesFile = os.path.join(aidaConfigTmpDir, AIDA_PREPARATION_PROPERTIES_FILE)
    for line in fileinput.input(prepPropertiesFile, inplace=1):
        # The yago3.file line may be commented out in the template ("#? " prefix).
        if re.match('^#?\s?' + AIDA_PREPARATION_CONFIG_YAGO3_FILE + '\s*=', line):
            line = AIDA_PREPARATION_CONFIG_YAGO3_FILE + ' = ' + yagoFolder + '/aidaFacts.tsv\n'
        elif re.match('^' + AIDA_TARGET_LANGUAGES_PROPERTY + '\s*=', line):
            line = AIDA_TARGET_LANGUAGES_PROPERTY + ' = ' + ','.join(doubleLangMappings) + '\n'
        sys.stdout.write(line)
    # Adapt the ner properties - KnowNER does not support chinese, remove from languages
    nerLanguages = languages[:]
    if 'zh' in nerLanguages:
        nerLanguages.remove('zh')
    nerPropertiesFile = os.path.join(aidaConfigTmpDir, AIDA_NER_PROPERTIES_FILE)
    for line in fileinput.input(nerPropertiesFile, inplace=1):
        if re.match('^' + AIDA_NER_LANGUAGES_PROPERTY + '\s*=', line):
            line = AIDA_NER_LANGUAGES_PROPERTY + ' = ' + ','.join(nerLanguages) + '\n'
        # Adapt the load_models_from_classpath to read and write to the filesystem
        elif re.match('^' + AIDA_LOAD_MODELS_FORM_CLASS_PATH + '\s*=', line):
            line = AIDA_LOAD_MODELS_FORM_CLASS_PATH + '=false\n'
        sys.stdout.write(line)
    # Adapt the Cassandra properties
    cassandraPropertiesFile = os.path.join(aidaConfigTmpDir, AIDA_CASSANDRA_PROPERTIES_FILE)
    for line in fileinput.input(cassandraPropertiesFile, inplace=1):
        if re.match('^' + AIDA_HOST_PROPERTY + '\s*=', line):
            line = AIDA_HOST_PROPERTY + ' = ' + ','.join(cassandraHosts) + '\n'
        elif re.match('^' + AIDA_REPLICATION_FACTOR_PROPERTY + '\s*=', line):
            line = AIDA_REPLICATION_FACTOR_PROPERTY + ' = ' + str(len(cassandraHosts)) + '\n'
        # The keyspace should be exactly the same as the SQL database name
        elif re.match('^' + AIDA_KEYSPACE_PROPERTY + '\s*=', line):
            line = AIDA_KEYSPACE_PROPERTY + ' = ' + dbName + '\n'
        sys.stdout.write(line)
    return dbName
"""
Runs AIDA with the adapted configuration dir
"""
def runAida():
# Make sure the tmp directory is there
if not os.path.exists(aidaTempDir):
os.makedirs(aidaTempDir)
# Switch to AIDA dir
os.chdir(aidaDistDir)
# Give enough memory
myEnv = os.environ.copy()
myEnv['MAVEN_OPTS'] = '-Xmx212G'
if include_concepts:
stages = AIDA_CONCEPT_CONFIGURATION_STAGES
else :
stages = AIDA_CONFIGURATION_STAGES
if stagesOverride is not None:
stages = stagesOverride
execute(
[os.path.join(AIDA_RUN_DATA_PREPARATION_SCRIPT),
AIDA_CONFIGURATION_TMP_DIR, aidaTempDir, stages],
myEnv)
"""
Converts from ISO 639-1 into ISO 639-2 format. For creating the mapping, we referred to this website:
https://www.loc.gov/standards/iso639-2/php/code_list.php
"""
def getThreeLetterLanguageCode(twoLetterCode):
codeTable = {
'ar': "ara",
'de': "deu",
'en': "eng",
'fr': "fra",
'it': "ita",
'jp': "jpn",
'es': "spa",
'pt': "por",
'ru': "rus",
'cs': "ces",
'zh': "zho"
}
return codeTable.get(twoLetterCode, "xx")
"""
Constructs the database ID from yagoFolder name
"""
def getDatabaseId(yagoFolder):
languages = []
dates = []
languageDates = yagoFolder[yagoFolder.find("/yago_aida_") + len("/yago_aida_"):].split("_")
pattern = re.compile("^\d+$")
for languageDate in languageDates:
languages.append(languageDate[0:2])
date = languageDate[2:]
if pattern.match(date):
dates.append(date)
print("CHECK DB ID: " + max(dates) + '_' + '_'.join(sorted(languages)))
return max(dates) + '_' + '_'.join(sorted(languages))
def persistAidaConfiguration(dbName):
    """Archive the adapted AIDA config under '<dbName>_cass' and push it to Git.

    dbName -- the derived database name; used as the config folder prefix.
    Returns the absolute path of the persisted configuration directory.
    """
    aidaConfigDir = os.path.join(aidaDistDir, AIDA_CONFIGURATION_DIR)
    aidaConfigTmpDir = os.path.join(aidaConfigDir, AIDA_CONFIGURATION_TMP_DIR)
    aidaConfigTargetDir = os.path.join(aidaConfigDir, dbName + "_cass")
    # Make sure the new configuration folder doesn't exist yet. If it does, delete it.
    if os.path.exists(aidaConfigTargetDir):
        shutil.rmtree(aidaConfigTargetDir)
    # Copy the AIDA configuration to its final destination
    shutil.copytree(aidaConfigTmpDir, aidaConfigTargetDir)
    # Commit and push configuration to the Git repository
    repo = Repo(aidaDistDir)
    repo.index.add(AIDA_CONFIGURATION_DIR + "/" + os.path.basename(aidaConfigTargetDir))
    repo.index.commit("Added configuration " + os.path.basename(aidaConfigTargetDir))
    repo.remotes.origin.pull()  # Attempt a pull first, in case things were added meanwhile.
    repo.remotes.origin.push()
    return aidaConfigTargetDir
def persistentlyStoreDumps():
    """Archive the downloaded dumps into long-term storage via the helper script."""
    aida_root = os.path.abspath(aidaDistDir)
    os.chdir(aida_root)
    cmd = [
        'python3',
        os.path.join(aida_root, AIDA_PERSISTENTLY_STORE_DUMPS_SCRIPT),
        '-y', os.path.join(os.path.abspath(yagoDistDir), YAGO3_CONFIGURATION_SUBDIR, YAGO3_TMP_CONFIGURATION),
        '-d', targetDir,
        '-o', dumpsOutputDir,
    ]
    print(cmd)
    execute(cmd)
"""
Execute the import script to import yago to neo4j
"""
def importYagoToNeo4j(neo4jFolder):
graphDBname = os.path.basename(os.path.normpath(neo4jFolder))
if os.path.exists(NEO4J_SOURCE_SERVER_PATH + 'data/databases/' + graphDBname + '.db'):
print("Graph database already exists.")
os.chdir(neo4jFileDir);
f=open(NEO4J_GRAPH_NAME_FILE, 'w')
f.write('DatabaseName=' + graphDBname)
f.close()
return graphDBname
command = open(os.path.join(neo4jFolder, NEO4J_IMPORT_SCRIPT_FILE_NAME), "r").read()
command = command.replace(NEO4J_SCRIPT_NEO4J_FOLDER_PLACEHOLDER, neo4jFolder)
command = command.replace('"', '')
commands = command.split(' ')
processCall = [NEO4J_SOURCE_SERVER_PATH + 'bin/neo4j-admin', 'import',
'--mode', 'csv',
'--ignore-duplicate-nodes', 'true',
'--ignore-missing-nodes', 'true',
'--delimiter', 'TAB',
'--database', graphDBname + '.db']
for cmd in commands:
if cmd != '':
processCall.append(cmd)
myEnv = os.environ.copy()
myEnv['JAVA_OPTS'] = '-Xmx250G'
print("Neo4j import start...")
execute(processCall, myEnv)
print("Neo4j import finished.")
# Write the graph name to a file, to use it in jenkins
os.chdir(neo4jFileDir);
f=open(NEO4J_GRAPH_NAME_FILE, 'w')
f.write('DatabaseName=' + graphDBname)
f.close()
return graphDBname
def progress(filename, size, sent):
    """SCP progress callback: overwrite one console line with the percent of
    *filename* transferred (*sent* of *size* bytes)."""
    percent_done = float(sent) / float(size) * 100
    sys.stdout.write("%s's progress: %.2f%% \r" % (filename, percent_done))
def copyGraphToDestination(graphDBname):
    """Copy the imported neo4j database folder to the destination server via SCP.

    Behaviour:
      * if "<graphDBname>_successful_copy" already exists remotely, the copy
        is skipped and graphDBname is returned;
      * if the .db folder exists remotely WITHOUT the flag file, a previous
        copy may have been interrupted -> abort for manual inspection;
      * otherwise, recursively copy the .db folder and drop the flag file
        last, so an interrupted copy stays detectable.
    """
    # Probe the destination over SFTP first.
    transport = paramiko.Transport(NEO4J_DESTINATION_SERVER_NAME)
    transport.connect(username=ssh_username, password=ssh_password)
    sftp = paramiko.SFTPClient.from_transport(transport)
    try:
        sftp.stat(os.path.join(NEO4J_DESTINATION_DIR_PATH, 'data/databases/', graphDBname + '_successful_copy'))
        print("Database exists remotely.")
        sftp.close()  # was leaked on this early-return path
        return graphDBname
    except IOError:
        print("Database copy flag does not exist remotely.")
        try:
            # Test if the database folder itself exists remotely.
            sftp.stat(os.path.join(NEO4J_DESTINATION_DIR_PATH, 'data/databases/', graphDBname + '.db'))
            print("Database folder existed remotely, but the copy flag does not exist.")
            print(os.path.join(NEO4J_DESTINATION_DIR_PATH, 'data/databases/', graphDBname + '.db'))
            sys.exit("Error: Need manual check, database exists remotely.")
        except IOError:
            print("Database folder did not existed remotely.")
    sftp.close()

    print("Start copying graph Database...")
    ssh = SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # BUGFIX: the password argument had been mangled into a "<PASSWORD>"
    # placeholder (syntax error); use the same ssh_password as the SFTP
    # probe above.
    ssh.connect(NEO4J_DESTINATION_SERVER_NAME, username=ssh_username, password=ssh_password)
    # SCPClient takes a paramiko transport; recursive put copies the folder.
    scp = SCPClient(ssh.get_transport(), progress=progress)
    scp.put(os.path.join(NEO4J_SOURCE_SERVER_PATH, 'data/databases/', graphDBname + '.db'),
            recursive=True,
            remote_path=os.path.join(NEO4J_DESTINATION_DIR_PATH, 'data/databases/'))
    # Flag file is written last so its presence implies a complete copy.
    scp.put(os.path.join(NEO4J_SOURCE_SERVER_PATH, 'data/databases/', 'db_successful_copy'),
            os.path.join(NEO4J_DESTINATION_DIR_PATH, 'data/databases/', graphDBname + '_successful_copy'))
    scp.close()
    ssh.close()
    print("Finished copying graph Database.")
if __name__ == "__main__":
    # Parse command-line options (docopt reads the module docstring).
    options = docopt(__doc__)

    targetDir = options['--target-dir']
    languages = options['--language']
    dates = options['--date']
    wikidataDate = options['--wikidata-date']
    basicsDistDir = options['--basics-dist-dir']
    yagoDistDir = options['--yago-dist-dir']
    yagoIndexDir = options['--yago-index-dir']
    aidaDistDir = options['--aida-dist-dir']
    aidaTempDir = options['--aida-temp-dir']
    dumpsOutputDir = options['--dumps-output-dir']
    dbServer = options['--db-server']
    dbUsername = options['--db-username']
    dbPassword = options['--db-password']
    cassandraHosts = options['--cassandra-host']

    # Optional arguments with dynamic defaults.
    if options['--start-date']:
        startDate = datetime.strptime(options['--start-date'], '%Y%m%d')
    else:
        startDate = datetime.today()

    # Plain on/off flags: the original verbose if/else blocks collapse to
    # idiomatic boolean coercion (same truthiness semantics).
    skipAida = bool(options['--skip-aida'])
    reuse_yago = bool(options['--reuse-yago'])
    include_concepts = bool(options['--include-concepts'])

    # Value options falling back to module-level defaults.
    yagoConfigurationTemplate = options['--yago-ini'] or YAGO3_CONFIGURATION_TEMPLATE
    stagesOverride = options['--stages'] or None

    if options['--run-neo4j']:
        run_neo4j = True
        if not options['--neo4j-file-dir']:
            print("Error: Must provide neo4j-file-dir")
            sys.exit(1)
        neo4jFileDir = options['--neo4j-file-dir']
        if options['--neo4j-server']:
            NEO4J_DESTINATION_SERVER_NAME = options['--neo4j-server']
        if not options['--neo4j-ssh-username'] or not options['--neo4j-ssh-password']:
            print("Error: Must provide neo4j-ssh-username and neo4j-ssh-password")
            sys.exit(1)
        ssh_username = options['--neo4j-ssh-username']
        ssh_password = options['--neo4j-ssh-password']
        # Optional overrides of the module-level neo4j paths.
        if options['--neo4j-import-server-dir']:
            NEO4J_SOURCE_SERVER_PATH = options['--neo4j-import-server-dir']
        if options['--neo4j-destination-dir']:
            NEO4J_DESTINATION_DIR_PATH = options['--neo4j-destination-dir']
    else:
        run_neo4j = False

    # Subgraph restrictions default to "no restriction" (empty string).
    yagoSubgraphEntities = options['--subgraph-entities'] or ""
    yagoSubgraphClasses = options['--subgraph-classes'] or ""

    sys.exit(main())
| StarcoderdataPython |
3747 | <gh_stars>0
import numpy as np
# from sklearn.ensemble import BaggingClassifier
# from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.validation import (
check_X_y,
check_array,
NotFittedError,
)
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from .base import BaseVoter
from tensorflow import keras
from keras import layers
class TreeClassificationVoter(BaseVoter):
    def __init__(self, finite_sample_correction=False):
        """Voter that maps decision-tree leaf ids to class posteriors.

        Parameters
        ----------
        finite_sample_correction : bool
            If True, zero posteriors in sparsely populated leaves are nudged
            towards uniform (see _finite_sample_correction).
        """
        self.finite_sample_correction = finite_sample_correction
        self._is_fitted = False
        self.multilabel = False

    def fit(self, X, y):
        """Estimate one posterior vector per leaf id.

        X holds leaf ids (one per sample); y holds class labels, or a binary
        indicator matrix for multilabel targets (delegated to fit_multilabel).
        """
        check_classification_targets(y)

        if type_of_target(y) == 'multilabel-indicator':
            # Fit multilabel binary task.
            self.multilabel = True
            return self.fit_multilabel(X, y)

        num_classes = len(np.unique(y))
        # Fallback posterior for leaf ids never seen during fit.
        self.uniform_posterior = np.ones(num_classes) / num_classes

        self.leaf_to_posterior = {}

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            # Per-class counts of the samples falling into this leaf.
            class_counts = [
                len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)
            ]
            # nan_to_num guards against empty leaves (0/0 division).
            posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))

            if self.finite_sample_correction:
                posteriors = self._finite_sample_correction(
                    posteriors, len(idxs_in_leaf), len(np.unique(y))
                )

            self.leaf_to_posterior[leaf_id] = posteriors

        self._is_fitted = True
        return self

    def fit_multilabel(self, X, y):
        """Multilabel variant of fit: one binary probability per label."""
        num_labels = y.shape[1]
        # Fallback: per-label base rates over the whole training set.
        self.uniform_posterior = y.sum(axis=0) / len(y)

        # Each posterior is now a num_labels size vector of binary probabilities.
        self.leaf_to_posterior = {}
        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            label_counts = [
                len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)
            ]
            posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))
            # TODO: multilabel finite sample correction.
            self.leaf_to_posterior[leaf_id] = posteriors

        self._is_fitted = True
        return self

    def vote(self, X):
        """Return the stored posterior for each leaf id in X.

        Unseen leaf ids get the uniform (or base-rate) posterior.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this voter."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        votes_per_example = []
        for x in X:
            if x in list(self.leaf_to_posterior.keys()):
                votes_per_example.append(self.leaf_to_posterior[x])
            else:
                votes_per_example.append(self.uniform_posterior)
        return np.array(votes_per_example)

    def is_fitted(self):
        """True once fit/fit_multilabel has completed."""
        return self._is_fitted

    @staticmethod
    def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):
        """Encourage posteriors to approach uniform when there is low data.

        BUGFIX: this was defined as a plain function (no self) yet invoked as
        ``self._finite_sample_correction(a, b, c)``, which passed the instance
        as ``posteriors`` and raised a TypeError (4 arguments for 3
        parameters) whenever finite_sample_correction=True. Making it a
        staticmethod fixes the call without changing any call site.
        """
        correction_constant = 1 / (num_classes * num_points_in_partition)
        zero_posterior_idxs = np.where(posteriors == 0)[0]
        posteriors[zero_posterior_idxs] = correction_constant
        posteriors /= sum(posteriors)
        return posteriors
class KNNClassificationVoter(BaseVoter):
    def __init__(self, k, kwargs=None):
        """Voter backed by a sklearn KNeighborsClassifier.

        Parameters
        ----------
        k : int
            Number of neighbors.
        kwargs : dict, optional
            Extra keyword arguments forwarded to KNeighborsClassifier.
            BUGFIX: the default used to be a mutable ``{}`` shared across all
            instances; a None sentinel is backward compatible and safe.
        """
        self._is_fitted = False
        self.k = k
        self.kwargs = {} if kwargs is None else kwargs

    def fit(self, X, y):
        """Validate (X, y) and fit the underlying KNeighborsClassifier."""
        X, y = check_X_y(X, y)
        self.knn = KNeighborsClassifier(self.k, **self.kwargs)
        self.knn.fit(X, y)
        self._is_fitted = True
        return self

    def vote(self, X):
        """Return class membership probabilities for each row of X."""
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this transformer."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        X = check_array(X)
        return self.knn.predict_proba(X)

    def is_fitted(self):
        """True once fit has completed."""
        return self._is_fitted
class NeuralRegressionVoter(BaseVoter):
    def __init__(
        self, validation_split=0.25, loss="mse", epochs=100, lr=1e-4, verbose=False,
    ):
        """Voter that learns a single linear layer mapping features to a vote.

        validation_split/loss/epochs/lr/verbose are forwarded to Keras.
        """
        self.validation_split = validation_split
        self.loss = loss
        self.epochs = epochs
        self.lr = lr
        self.verbose = verbose
        self._is_fitted = False

    def fit(self, X, y):
        """Fit the one-layer linear model on validated (X, y)."""
        X, y = check_X_y(X, y)
        output_layer = layers.Dense(
            1,
            activation="linear",
            input_shape=(X.shape[1],),
            name="transform_to_vote",
        )
        self.voter = keras.Sequential()
        self.voter.add(output_layer)
        self.voter.compile(
            loss=self.loss,
            metrics=["mae"],
            optimizer=keras.optimizers.Adam(self.lr),
        )
        # Stop early once validation loss plateaus for 20 epochs.
        early_stop = keras.callbacks.EarlyStopping(patience=20, monitor="val_loss")
        self.voter.fit(
            X,
            y,
            epochs=self.epochs,
            callbacks=[early_stop],
            verbose=self.verbose,
            validation_split=self.validation_split,
            shuffle=True,
        )
        self._is_fitted = True
        return self

    def vote(self, X):
        """Return the model's prediction for each row of X."""
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this transformer."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        X = check_array(X)
        return self.voter.predict(X)

    def is_fitted(self):
        """True once fit has completed."""
        return self._is_fitted
class TreeRegressionVoter(BaseVoter):
    def __init__(self):
        """Voter that maps decision-tree leaf ids to mean regression targets."""
        self._is_fitted = False

    def fit(self, X, y):
        """Store the mean of y per leaf id; the global mean backs unseen leaves."""
        self.global_yhat = np.mean(y)
        # nan_to_num guards against empty leaves (mean over no samples).
        self.leaf_to_yhat = {
            leaf_id: np.nan_to_num(np.mean(y[np.where(X == leaf_id)[0]]))
            for leaf_id in np.unique(X)
        }
        self._is_fitted = True
        return self

    def vote(self, X):
        """Return the stored leaf mean per id in X, falling back to the global mean."""
        if not self.is_fitted():
            raise NotFittedError(
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this voter."
                % {"name": type(self).__name__}
            )

        table = self.leaf_to_yhat
        fallback = self.global_yhat
        return np.array([table[x] if x in table else fallback for x in X])

    def is_fitted(self):
        """True once fit has completed."""
        return self._is_fitted
3222254 | <gh_stars>1000+
import numpy as np
from .base import ScalarField
from ..geometry.coord_systems import (
cartesian_to_spherical,
cartesian_to_cylindrical)
from ..ransac import (
single_fit,
RANSAC_MODELS,
RANSAC_SAMPLERS)
class XYZScalarField(ScalarField):
    """Base class for scalar fields computed from the cloud's xyz coordinates."""

    def extract_info(self):
        # Cache the point-coordinate array from the parent cloud for compute().
        self.points = self.pyntcloud.xyz
class PlaneFit(XYZScalarField):
    """
    Get inliers of the best RansacPlane found.
    """
    def __init__(self, *, pyntcloud, max_dist=1e-4, max_iterations=100, n_inliers_to_stop=None):
        self.model = RANSAC_MODELS["plane"]          # plane model from the registry
        self.sampler = RANSAC_SAMPLERS["random"]     # random point sampling
        self.name = "is_plane"                       # name of the resulting scalar field
        self.model_kwargs = {"max_dist": max_dist}   # inlier distance threshold
        self.max_iterations = max_iterations
        self.n_inliers_to_stop = n_inliers_to_stop
        # NOTE(review): super().__init__ runs *after* the attributes here,
        # whereas SphereFit calls it first -- confirm the base __init__ does
        # not depend on these attributes.
        super().__init__(pyntcloud=pyntcloud)

    def compute(self):
        # Run RANSAC over the cached xyz points; store the boolean inlier
        # mask as a 0/1 uint8 column.
        inliers = single_fit(self.points, self.model, self.sampler,
                             model_kwargs=self.model_kwargs,
                             max_iterations=self.max_iterations,
                             n_inliers_to_stop=self.n_inliers_to_stop)
        self.to_be_added[self.name] = inliers.astype(np.uint8)
class SphereFit(XYZScalarField):
    """
    Get inliers of the best RansacSphere found.
    """
    def __init__(self, *, pyntcloud, max_dist=1e-4, max_iterations=100, n_inliers_to_stop=None):
        # NOTE(review): unlike PlaneFit, super().__init__ runs *before* the
        # RANSAC attributes are set -- confirm the base __init__ does not
        # rely on them.
        super().__init__(pyntcloud=pyntcloud)
        self.model = RANSAC_MODELS["sphere"]         # sphere model from the registry
        self.sampler = RANSAC_SAMPLERS["random"]     # random point sampling
        self.name = "is_sphere"                      # name of the resulting scalar field
        self.model_kwargs = {"max_dist": max_dist}   # inlier distance threshold
        self.max_iterations = max_iterations
        self.n_inliers_to_stop = n_inliers_to_stop

    def compute(self):
        # Run RANSAC over the cached xyz points; store the boolean inlier
        # mask as a 0/1 uint8 column.
        inliers = single_fit(self.points, self.model, self.sampler,
                             model_kwargs=self.model_kwargs,
                             max_iterations=self.max_iterations,
                             n_inliers_to_stop=self.n_inliers_to_stop)
        self.to_be_added[self.name] = inliers.astype(np.uint8)
class CustomFit(XYZScalarField):
    """
    Get inliers of the best custom model found.
    """

    def __init__(self, pyntcloud, model, sampler, name, model_kwargs=None,
                 sampler_kwargs=None, max_iterations=100, n_inliers_to_stop=None):
        """Run RANSAC with a user-supplied model and sampler.

        BUGFIX: model_kwargs/sampler_kwargs used mutable ``{}`` defaults,
        which are shared across all instances; None sentinels are backward
        compatible and safe.
        """
        super().__init__(pyntcloud=pyntcloud)
        self.model = model
        self.sampler = sampler
        self.name = name
        self.model_kwargs = {} if model_kwargs is None else model_kwargs
        # NOTE(review): sampler_kwargs is stored but never forwarded to
        # single_fit in compute() -- confirm whether that is intentional.
        self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs
        self.max_iterations = max_iterations
        self.n_inliers_to_stop = n_inliers_to_stop

    def compute(self):
        # Run RANSAC over the cached xyz points; store the boolean inlier
        # mask as a 0/1 uint8 column under self.name.
        inliers = single_fit(self.points, self.model, self.sampler,
                             model_kwargs=self.model_kwargs,
                             max_iterations=self.max_iterations,
                             n_inliers_to_stop=self.n_inliers_to_stop)
        self.to_be_added[self.name] = inliers.astype(np.uint8)
class SphericalCoordinates(XYZScalarField):
    """
    Get radial, azimuthal and polar values.
    """
    def __init__(self, *, pyntcloud, degrees=True):
        super().__init__(pyntcloud=pyntcloud)
        # When True, the angular coordinates are reported in degrees.
        self.degrees = degrees

    def compute(self):
        # Convert the cached cartesian points; one new column per coordinate.
        radial, polar, azimuthal = cartesian_to_spherical(
            self.points, degrees=self.degrees)

        self.to_be_added["radial"] = radial
        self.to_be_added["polar"] = polar
        self.to_be_added["azimuthal"] = azimuthal
class CylindricalCoordinates(XYZScalarField):
    """
    Get ro and phi values.
    The z value in cylindrical coordinates remain unchanged.
    """
    def __init__(self, *, pyntcloud, degrees=True):
        # When True, the angular coordinate is reported in degrees.
        self.degrees = degrees
        super().__init__(pyntcloud=pyntcloud)

    def compute(self):
        # Convert the cached cartesian points; z is returned unchanged and
        # deliberately not added as a new column.
        radial_cylindrical, angular_cylindrical, z = cartesian_to_cylindrical(
            self.points, degrees=self.degrees)

        self.to_be_added["radial_cylindrical"] = radial_cylindrical
        self.to_be_added["angular_cylindrical"] = angular_cylindrical
| StarcoderdataPython |
3546942 | # Generated by Django 2.0.8 on 2020-04-02 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add side-chain dihedral columns (chi1..chi5) and a missing-atom count
    to the ResidueAngle model."""

    dependencies = [
        ('angles', '0010_residueangle_tau_angle'),
    ]

    operations = [
        # chi1..chi5: nullable floats defaulting to 0 (not every residue
        # defines all five side-chain angles).
        migrations.AddField(
            model_name='residueangle',
            name='chi1',
            field=models.FloatField(default=0, null=True),
        ),
        migrations.AddField(
            model_name='residueangle',
            name='chi2',
            field=models.FloatField(default=0, null=True),
        ),
        migrations.AddField(
            model_name='residueangle',
            name='chi3',
            field=models.FloatField(default=0, null=True),
        ),
        migrations.AddField(
            model_name='residueangle',
            name='chi4',
            field=models.FloatField(default=0, null=True),
        ),
        migrations.AddField(
            model_name='residueangle',
            name='chi5',
            field=models.FloatField(default=0, null=True),
        ),
        # Count of atoms missing from the residue's structure record.
        migrations.AddField(
            model_name='residueangle',
            name='missing_atoms',
            field=models.IntegerField(default=0, null=True),
        ),
    ]
| StarcoderdataPython |
6649395 | # (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from monasca_tempest_tests.tests.api import base
from monasca_tempest_tests.tests.api import constants
from monasca_tempest_tests.tests.api import helpers
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
MIN_HISTORY = 2
class TestAlarmStateHistoryMultipleTransitions(base.BaseMonascaTest):
    # For testing list alarm state history with the same alarm ID, two alarm
    # transitions are needed. One transit from ALARM state to UNDETERMINED
    # state and the other one from UNDETERMINED state to ALARM state.

    @classmethod
    def resource_setup(cls):
        # Create one alarm definition, then drive its alarm through at least
        # MIN_HISTORY state transitions by posting metrics below and above
        # the "min(name-1) < 1.0" threshold.
        super(TestAlarmStateHistoryMultipleTransitions, cls).resource_setup()
        alarm_definition = helpers.create_alarm_definition(
            name=data_utils.rand_name('alarm_state_history'),
            expression="min(name-1) < 1.0")
        cls.monasca_client.create_alarm_definitions(alarm_definition)
        # Phase 1: metrics with value 0.0 trip the alarm (first transition).
        for timer in range(constants.MAX_RETRIES):
            # create some metrics to prime the system and create
            # MIN_HISTORY alarms
            metric = helpers.create_metric(
                name="name-1", dimensions={'key1': 'value1'}, value=0.0)
            cls.monasca_client.create_metrics(metric)
            # sleep 1 second between metrics to make sure timestamps
            # are different in the second field. Influxdb has a bug
            # where it does not sort properly by milliseconds. .014
            # is sorted as greater than .138
            time.sleep(1.0)
            resp, response_body = cls.monasca_client.\
                list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= 1:
                break
            time.sleep(constants.RETRY_WAIT_SECS)
        # NOTE(review): this sleeps MAX_RETRIES *seconds*, reusing the retry
        # count as a duration -- confirm this is intentional and not a typo
        # for RETRY_WAIT_SECS.
        time.sleep(constants.MAX_RETRIES)
        # Phase 2: metrics with value 2.0 push the alarm back out of ALARM
        # (second transition).
        for timer in range(constants.MAX_RETRIES * 2):
            metric = helpers.create_metric(
                name="name-1", dimensions={'key2': 'value2'}, value=2.0)
            cls.monasca_client.create_metrics(metric)
            # sleep 0.05 second between metrics to make sure timestamps
            # are different
            time.sleep(0.05)
            resp, response_body = \
                cls.monasca_client.list_alarms_state_history()
            elements = response_body['elements']
            if len(elements) >= 2:
                return
            else:
                num_transitions = len(elements)
            time.sleep(constants.RETRY_WAIT_SECS)
        assert False, "Required {} alarm state transitions, but found {}".\
            format(MIN_HISTORY, num_transitions)

    @classmethod
    def resource_cleanup(cls):
        # No extra resources beyond the base class; delegate cleanup.
        super(TestAlarmStateHistoryMultipleTransitions, cls).\
            resource_cleanup()

    @decorators.attr(type="gate")
    def test_list_alarm_state_history(self):
        # Get the alarm state history for a specific alarm by ID
        resp, response_body = self.monasca_client.list_alarms_state_history()
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if elements:
            element = elements[0]
            alarm_id = element['alarm_id']
            resp, response_body = self.monasca_client.list_alarm_state_history(
                alarm_id)
            self.assertEqual(200, resp.status)
            # Test Response Body: top-level keys, self-link, and the full set
            # of fields on one history record.
            self.assertTrue(set(['links', 'elements']) ==
                            set(response_body))
            elements = response_body['elements']
            links = response_body['links']
            self.assertIsInstance(links, list)
            link = links[0]
            self.assertTrue(set(['rel', 'href']) ==
                            set(link))
            self.assertEqual(link['rel'], u'self')
            definition = elements[0]
            self.assertTrue(set(['id', 'alarm_id', 'metrics', 'new_state',
                                 'old_state', 'reason', 'reason_data',
                                 'sub_alarms', 'timestamp']) ==
                            set(definition))
        else:
            error_msg = "Failed test_list_alarm_state_history: at least one " \
                        "alarm state history is needed."
            self.fail(error_msg)

    @decorators.attr(type="gate")
    def test_list_alarm_state_history_with_offset_limit(self):
        # Get the alarm state history for a specific alarm by ID
        resp, response_body = self.monasca_client.list_alarms_state_history()
        self.assertEqual(200, resp.status)
        elements = response_body['elements']
        if len(elements) >= MIN_HISTORY:
            element = elements[0]
            second_element = elements[1]
            alarm_id = element['alarm_id']
            # limit=1 must cap the page at exactly one record.
            query_parms = '?limit=1'
            resp, response_body = self.monasca_client.\
                list_alarm_state_history(alarm_id, query_parms)
            elements = response_body['elements']
            self.assertEqual(200, resp.status)
            self.assertEqual(1, len(elements))
            # Paging past the first record via its timestamp offset must
            # yield the second record.
            query_parms = '?offset=' + str(element['timestamp'])
            resp, response_body = self.monasca_client.\
                list_alarm_state_history(alarm_id, query_parms)
            elements_new = response_body['elements']
            self.assertEqual(200, resp.status)
            self.assertEqual(1, len(elements_new))
            self.assertEqual(second_element, elements_new[0])
        else:
            error_msg = "Failed test_list_alarm_state_history_with_offset" \
                        "_limit: two alarms state history are needed."
            self.fail(error_msg)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.